From c8e4c77277ca5db0c4ddbfb4bc628b8abad585b0 Mon Sep 17 00:00:00 2001
From: Marvin Raaijmakers
Date: Wed, 14 Mar 2007 22:50:42 -0400
Subject: Input: add getkeycode and setkeycode methods

Allow drivers to implement their own get and set keycode methods. This
will allow drivers to change their keymaps without allocating huge tables
covering entire range of possible scancodes.

Signed-off-by: Dmitry Torokhov
---
 include/linux/input.h | 31 +++----------------------------
 1 file changed, 3 insertions(+), 28 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/input.h b/include/linux/input.h
index bde65c8a3519..3a8b8c6f0ab5 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -913,33 +913,6 @@ struct ff_effect {
 #define BIT(x) (1UL<<((x)%BITS_PER_LONG))
 #define LONG(x) ((x)/BITS_PER_LONG)

-#define INPUT_KEYCODE(dev, scancode) ((dev->keycodesize == 1) ? ((u8*)dev->keycode)[scancode] : \
- ((dev->keycodesize == 2) ? ((u16*)dev->keycode)[scancode] : (((u32*)dev->keycode)[scancode])))
-
-#define SET_INPUT_KEYCODE(dev, scancode, val) \
- ({ unsigned __old; \
- switch (dev->keycodesize) { \
- case 1: { \
- u8 *k = (u8 *)dev->keycode; \
- __old = k[scancode]; \
- k[scancode] = val; \
- break; \
- } \
- case 2: { \
- u16 *k = (u16 *)dev->keycode; \
- __old = k[scancode]; \
- k[scancode] = val; \
- break; \
- } \
- default: { \
- u32 *k = (u32 *)dev->keycode; \
- __old = k[scancode]; \
- k[scancode] = val; \
- break; \
- } \
- } \
- __old; })
-
 struct input_dev {
 	void *private;
@@ -962,6 +935,8 @@ struct input_dev {
 	unsigned int keycodemax;
 	unsigned int keycodesize;
 	void *keycode;
+	int (*setkeycode)(struct input_dev *dev, int scancode, int keycode);
+	int (*getkeycode)(struct input_dev *dev, int scancode, int *keycode);

 	struct ff_device *ff;
@@ -1104,7 +1079,7 @@ struct input_handle {
 };

 #define to_dev(n) container_of(n,struct input_dev,node)
-#define to_handler(n) container_of(n,struct input_handler,node);
+#define to_handler(n) container_of(n,struct input_handler,node)
 #define to_handle(n) container_of(n,struct input_handle,d_node)
 #define to_handle_h(n) container_of(n,struct input_handle,h_node)
-- cgit v1.2.3

From 9575499dfebc0f0fbbf122223f02e9e92630661d Mon Sep 17 00:00:00 2001
From: Helge Deller
Date: Fri, 16 Mar 2007 00:59:29 -0400
Subject: Input: HIL - fix rwlock recursion bug

The following bug happens when insmoding hp_sdc_mlc.ko:

HP SDC MLC: Registering the System Domain Controller's HIL MLC.
BUG: rwlock recursion on CPU#0, hotplug/1814, 00854734 Backtrace: [<10267560>] _raw_write_lock+0x50/0x88 [<10104008>] _write_lock_irqsave+0x14/0x24 [<008537d4>] hp_sdc_mlc_out+0x38/0x25c [hp_sdc_mlc] [<0084ebd8>] hilse_donode+0x308/0x470 [hil_mlc] [<0084ed80>] hil_mlcs_process+0x40/0x6c [hil_mlc] [<10130f80>] tasklet_action+0x78/0xb8 [<10130cec>] __do_softirq+0x60/0xcc [<1010428c>] __lock_text_end+0x38/0x48 [<10108348>] do_cpu_irq_mask+0xf0/0x11c [<1010b068>] intr_return+0x0/0xc Signed-off-by: Helge Deller Signed-off-by: Dmitry Torokhov --- drivers/input/serio/hil_mlc.c | 2 ++ drivers/input/serio/hp_sdc.c | 22 ++++++++++++++-------- drivers/input/serio/hp_sdc_mlc.c | 19 ++----------------- include/linux/hp_sdc.h | 1 + 4 files changed, 19 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c index 485b0742842b..93a1a6ba216a 100644 --- a/drivers/input/serio/hil_mlc.c +++ b/drivers/input/serio/hil_mlc.c @@ -716,7 +716,9 @@ static int hilse_donode(hil_mlc *mlc) break; case HILSE_CTS: + write_lock_irqsave(&mlc->lock, flags); nextidx = mlc->cts(mlc) ? node->bad : node->good; + write_unlock_irqrestore(&mlc->lock, flags); break; default: diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c index 31826e601fba..6af199805ffc 100644 --- a/drivers/input/serio/hp_sdc.c +++ b/drivers/input/serio/hp_sdc.c @@ -100,6 +100,7 @@ EXPORT_SYMBOL(hp_sdc_release_timer_irq); EXPORT_SYMBOL(hp_sdc_release_hil_irq); EXPORT_SYMBOL(hp_sdc_release_cooked_irq); +EXPORT_SYMBOL(__hp_sdc_enqueue_transaction); EXPORT_SYMBOL(hp_sdc_enqueue_transaction); EXPORT_SYMBOL(hp_sdc_dequeue_transaction); @@ -593,18 +594,15 @@ unsigned long hp_sdc_put(void) } /******* Functions called in either user or kernel context ****/ -int hp_sdc_enqueue_transaction(hp_sdc_transaction *this) +int __hp_sdc_enqueue_transaction(hp_sdc_transaction *this) { - unsigned long flags; int i; if (this == NULL) { - tasklet_schedule(&hp_sdc.task); + BUG(); return -EINVAL; } - write_lock_irqsave(&hp_sdc.lock, flags); - /* Can't have same transaction on queue twice */ for (i = 0; i < HP_SDC_QUEUE_LEN; i++) if (hp_sdc.tq[i] == this) @@ -617,21 +615,29 @@ int hp_sdc_enqueue_transaction(hp_sdc_transaction *this) for (i = 0; i < HP_SDC_QUEUE_LEN; i++) if (hp_sdc.tq[i] == NULL) { hp_sdc.tq[i] = this; - write_unlock_irqrestore(&hp_sdc.lock, flags); tasklet_schedule(&hp_sdc.task); return 0; } - write_unlock_irqrestore(&hp_sdc.lock, flags); printk(KERN_WARNING PREFIX "No free slot to add transaction.\n"); return -EBUSY; fail: - write_unlock_irqrestore(&hp_sdc.lock,flags); printk(KERN_WARNING PREFIX "Transaction add failed: transaction already queued?\n"); return -EINVAL; } +int hp_sdc_enqueue_transaction(hp_sdc_transaction *this) { + unsigned long flags; + int ret; + + write_lock_irqsave(&hp_sdc.lock, flags); + ret = __hp_sdc_enqueue_transaction(this); + write_unlock_irqrestore(&hp_sdc.lock,flags); + + return ret; +} + int hp_sdc_dequeue_transaction(hp_sdc_transaction *this) { unsigned long flags; diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c index cb0b28877e05..c45ea74d53e4 100644 --- a/drivers/input/serio/hp_sdc_mlc.c +++ b/drivers/input/serio/hp_sdc_mlc.c @@ -142,14 +142,11 @@ static void hp_sdc_mlc_isr (int irq, void *dev_id, static int hp_sdc_mlc_in(hil_mlc *mlc, suseconds_t timeout) { - unsigned long flags; struct hp_sdc_mlc_priv_s *priv; int rc = 2; priv = mlc->priv; - write_lock_irqsave(&mlc->lock, flags); - /* Try to down the 
semaphore */ if (down_trylock(&mlc->isem)) { struct timeval tv; @@ -178,21 +175,16 @@ static int hp_sdc_mlc_in(hil_mlc *mlc, suseconds_t timeout) wasup: up(&mlc->isem); rc = 0; - goto done; done: - write_unlock_irqrestore(&mlc->lock, flags); return rc; } static int hp_sdc_mlc_cts(hil_mlc *mlc) { struct hp_sdc_mlc_priv_s *priv; - unsigned long flags; priv = mlc->priv; - write_lock_irqsave(&mlc->lock, flags); - /* Try to down the semaphores -- they should be up. */ BUG_ON(down_trylock(&mlc->isem)); BUG_ON(down_trylock(&mlc->osem)); @@ -221,26 +213,21 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc) priv->tseq[2] = 1; priv->tseq[3] = 0; priv->tseq[4] = 0; - hp_sdc_enqueue_transaction(&priv->trans); + __hp_sdc_enqueue_transaction(&priv->trans); busy: - write_unlock_irqrestore(&mlc->lock, flags); return 1; done: priv->trans.act.semaphore = &mlc->osem; up(&mlc->csem); - write_unlock_irqrestore(&mlc->lock, flags); return 0; } static void hp_sdc_mlc_out(hil_mlc *mlc) { struct hp_sdc_mlc_priv_s *priv; - unsigned long flags; priv = mlc->priv; - write_lock_irqsave(&mlc->lock, flags); - /* Try to down the semaphore -- it should be up. */ BUG_ON(down_trylock(&mlc->osem)); @@ -250,7 +237,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc) do_data: if (priv->emtestmode) { up(&mlc->osem); - goto done; + return; } /* Shouldn't be sending commands when loop may be busy */ BUG_ON(down_trylock(&mlc->csem)); @@ -313,8 +300,6 @@ static void hp_sdc_mlc_out(hil_mlc *mlc) } enqueue: hp_sdc_enqueue_transaction(&priv->trans); - done: - write_unlock_irqrestore(&mlc->lock, flags); } static int __init hp_sdc_mlc_init(void) diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h index debd71515312..9db3d454887f 100644 --- a/include/linux/hp_sdc.h +++ b/include/linux/hp_sdc.h @@ -71,6 +71,7 @@ typedef struct { struct semaphore *semaphore; /* Semaphore to sleep on. */ } act; } hp_sdc_transaction; +int __hp_sdc_enqueue_transaction(hp_sdc_transaction *this); int hp_sdc_enqueue_transaction(hp_sdc_transaction *this); int hp_sdc_dequeue_transaction(hp_sdc_transaction *this); -- cgit v1.2.3 From 5b2a08262a8c952fef008154933953f083ca5766 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Thu, 12 Apr 2007 01:29:46 -0400 Subject: Input: rework handle creation code - consolidate code for binding handlers to a device - return error codes from handlers connect() methods back to input core and log failures Signed-off-by: Dmitry Torokhov --- drivers/char/keyboard.c | 28 ++++++++++----- drivers/input/evbug.c | 32 ++++++++++++------ drivers/input/evdev.c | 47 ++++++++++++++++++++------ drivers/input/input.c | 88 ++++++++++++++++++++++++++++-------------------- drivers/input/joydev.c | 48 ++++++++++++++++++++------ drivers/input/mousedev.c | 61 ++++++++++++++++++++++++--------- drivers/input/power.c | 46 ++++++++++++++++--------- drivers/input/tsdev.c | 58 +++++++++++++++++++++---------- include/linux/input.h | 5 ++- 9 files changed, 288 insertions(+), 125 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c index 3d211e8553f7..59712546f911 100644 --- a/drivers/char/keyboard.c +++ b/drivers/char/keyboard.c @@ -41,7 +41,6 @@ #include #include -static void kbd_disconnect(struct input_handle *handle); extern void ctrl_alt_del(void); /* @@ -1260,11 +1259,11 @@ static void kbd_event(struct input_handle *handle, unsigned int event_type, * likes it, it can open it and get events from it. In this (kbd_connect) * function, we should decide which VT to bind that keyboard to initially. 
*/ -static struct input_handle *kbd_connect(struct input_handler *handler, - struct input_dev *dev, - const struct input_device_id *id) +static int kbd_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) { struct input_handle *handle; + int error; int i; for (i = KEY_RESERVED; i < BTN_MISC; i++) @@ -1272,24 +1271,37 @@ static struct input_handle *kbd_connect(struct input_handler *handler, break; if (i == BTN_MISC && !test_bit(EV_SND, dev->evbit)) - return NULL; + return -ENODEV; handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); if (!handle) - return NULL; + return -ENOMEM; handle->dev = dev; handle->handler = handler; handle->name = "kbd"; - input_open_device(handle); + error = input_register_handle(handle); + if (error) + goto err_free_handle; + + error = input_open_device(handle); + if (error) + goto err_unregister_handle; + + return 0; - return handle; + err_unregister_handle: + input_unregister_handle(handle); + err_free_handle: + kfree(handle); + return error; } static void kbd_disconnect(struct input_handle *handle) { input_close_device(handle); + input_unregister_handle(handle); kfree(handle); } diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c index 5a9653c3128a..c21f2f127234 100644 --- a/drivers/input/evbug.c +++ b/drivers/input/evbug.c @@ -38,31 +38,43 @@ MODULE_AUTHOR("Vojtech Pavlik "); MODULE_DESCRIPTION("Input driver event debug module"); MODULE_LICENSE("GPL"); -static char evbug_name[] = "evbug"; - static void evbug_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) { printk(KERN_DEBUG "evbug.c: Event. Dev: %s, Type: %d, Code: %d, Value: %d\n", handle->dev->phys, type, code, value); } -static struct input_handle *evbug_connect(struct input_handler *handler, struct input_dev *dev, - const struct input_device_id *id) +static int evbug_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) { struct input_handle *handle; + int error; - if (!(handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL))) - return NULL; + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; handle->dev = dev; handle->handler = handler; - handle->name = evbug_name; + handle->name = "evbug"; + + error = input_register_handle(handle); + if (error) + goto err_free_handle; - input_open_device(handle); + error = input_open_device(handle); + if (error) + goto err_unregister_handle; printk(KERN_DEBUG "evbug.c: Connected device: \"%s\", %s\n", dev->name, dev->phys); - return handle; + return 0; + + err_unregister_handle: + input_unregister_handle(handle); + err_free_handle: + kfree(handle); + return error; } static void evbug_disconnect(struct input_handle *handle) @@ -70,7 +82,7 @@ static void evbug_disconnect(struct input_handle *handle) printk(KERN_DEBUG "evbug.c: Disconnected device: %s\n", handle->dev->phys); input_close_device(handle); - + input_unregister_handle(handle); kfree(handle); } diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 64b47de052bb..840fa1986527 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -605,21 +605,24 @@ static const struct file_operations evdev_fops = { .flush = evdev_flush }; -static struct input_handle *evdev_connect(struct input_handler *handler, struct input_dev *dev, - const struct input_device_id *id) +static int evdev_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) { struct evdev *evdev; struct class_device 
*cdev; + dev_t devt; int minor; + int error; for (minor = 0; minor < EVDEV_MINORS && evdev_table[minor]; minor++); if (minor == EVDEV_MINORS) { printk(KERN_ERR "evdev: no more free evdev devices\n"); - return NULL; + return -ENFILE; } - if (!(evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL))) - return NULL; + evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL); + if (!evdev) + return -ENOMEM; INIT_LIST_HEAD(&evdev->list); init_waitqueue_head(&evdev->wait); @@ -634,15 +637,35 @@ static struct input_handle *evdev_connect(struct input_handler *handler, struct evdev_table[minor] = evdev; - cdev = class_device_create(&input_class, &dev->cdev, - MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + minor), - dev->cdev.dev, evdev->name); + devt = MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + minor), + + cdev = class_device_create(&input_class, &dev->cdev, devt, + dev->cdev.dev, evdev->name); + if (IS_ERR(cdev)) { + error = PTR_ERR(cdev); + goto err_free_evdev; + } /* temporary symlink to keep userspace happy */ - sysfs_create_link(&input_class.subsys.kset.kobj, &cdev->kobj, - evdev->name); + error = sysfs_create_link(&input_class.subsys.kset.kobj, + &cdev->kobj, evdev->name); + if (error) + goto err_cdev_destroy; + + error = input_register_handle(&evdev->handle); + if (error) + goto err_remove_link; - return &evdev->handle; + return 0; + + err_remove_link: + sysfs_remove_link(&input_class.subsys.kset.kobj, evdev->name); + err_cdev_destroy: + class_device_destroy(&input_class, devt); + err_free_evdev: + kfree(evdev); + evdev_table[minor] = NULL; + return error; } static void evdev_disconnect(struct input_handle *handle) @@ -650,6 +673,8 @@ static void evdev_disconnect(struct input_handle *handle) struct evdev *evdev = handle->private; struct evdev_list *list; + input_unregister_handle(handle); + sysfs_remove_link(&input_class.subsys.kset.kobj, evdev->name); class_device_destroy(&input_class, MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + evdev->minor)); diff --git a/drivers/input/input.c b/drivers/input/input.c index 5629e397520d..86b27079004a 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -380,12 +380,6 @@ static int input_default_setkeycode(struct input_dev *dev, } -static void input_link_handle(struct input_handle *handle) -{ - list_add_tail(&handle->d_node, &handle->dev->h_list); - list_add_tail(&handle->h_node, &handle->handler->h_list); -} - #define MATCH_BIT(bit, max) \ for (i = 0; i < NBITS(max); i++) \ if ((id->bit[i] & dev->bit[i]) != id->bit[i]) \ @@ -432,6 +426,29 @@ static const struct input_device_id *input_match_device(const struct input_devic return NULL; } +static int input_attach_handler(struct input_dev *dev, struct input_handler *handler) +{ + const struct input_device_id *id; + int error; + + if (handler->blacklist && input_match_device(handler->blacklist, dev)) + return -ENODEV; + + id = input_match_device(handler->id_table, dev); + if (!id) + return -ENODEV; + + error = handler->connect(handler, dev, id); + if (error && error != -ENODEV) + printk(KERN_ERR + "input: failed to attach handler %s to device %s, " + "error: %d\n", + handler->name, kobject_name(&dev->cdev.kobj), error); + + return error; +} + + #ifdef CONFIG_PROC_FS static struct proc_dir_entry *proc_bus_input_dir; @@ -1032,9 +1049,7 @@ EXPORT_SYMBOL(input_free_device); int input_register_device(struct input_dev *dev) { static atomic_t input_no = ATOMIC_INIT(0); - struct input_handle *handle; struct input_handler *handler; - const struct input_device_id *id; const char *path; int error; @@ -1074,13 +1089,7 @@ int 
input_register_device(struct input_dev *dev) kfree(path); list_for_each_entry(handler, &input_handler_list, node) - if (!handler->blacklist || !input_match_device(handler->blacklist, dev)) - if ((id = input_match_device(handler->id_table, dev))) - if ((handle = handler->connect(handler, dev, id))) { - input_link_handle(handle); - if (handler->start) - handler->start(handle); - } + input_attach_handler(dev, handler); input_wakeup_procfs_readers(); @@ -1090,7 +1099,7 @@ EXPORT_SYMBOL(input_register_device); void input_unregister_device(struct input_dev *dev) { - struct list_head *node, *next; + struct input_handle *handle, *next; int code; for (code = 0; code <= KEY_MAX; code++) @@ -1100,12 +1109,9 @@ void input_unregister_device(struct input_dev *dev) del_timer_sync(&dev->timer); - list_for_each_safe(node, next, &dev->h_list) { - struct input_handle * handle = to_handle(node); - list_del_init(&handle->d_node); - list_del_init(&handle->h_node); + list_for_each_entry_safe(handle, next, &dev->h_list, d_node) handle->handler->disconnect(handle); - } + WARN_ON(!list_empty(&dev->h_list)); list_del_init(&dev->node); @@ -1118,8 +1124,6 @@ EXPORT_SYMBOL(input_unregister_device); int input_register_handler(struct input_handler *handler) { struct input_dev *dev; - struct input_handle *handle; - const struct input_device_id *id; INIT_LIST_HEAD(&handler->h_list); @@ -1133,13 +1137,7 @@ int input_register_handler(struct input_handler *handler) list_add_tail(&handler->node, &input_handler_list); list_for_each_entry(dev, &input_dev_list, node) - if (!handler->blacklist || !input_match_device(handler->blacklist, dev)) - if ((id = input_match_device(handler->id_table, dev))) - if ((handle = handler->connect(handler, dev, id))) { - input_link_handle(handle); - if (handler->start) - handler->start(handle); - } + input_attach_handler(dev, handler); input_wakeup_procfs_readers(); return 0; @@ -1148,14 +1146,11 @@ EXPORT_SYMBOL(input_register_handler); void input_unregister_handler(struct input_handler *handler) { - struct list_head *node, *next; + struct input_handle *handle, *next; - list_for_each_safe(node, next, &handler->h_list) { - struct input_handle * handle = to_handle_h(node); - list_del_init(&handle->h_node); - list_del_init(&handle->d_node); + list_for_each_entry_safe(handle, next, &handler->h_list, h_node) handler->disconnect(handle); - } + WARN_ON(!list_empty(&handler->h_list)); list_del_init(&handler->node); @@ -1166,6 +1161,27 @@ void input_unregister_handler(struct input_handler *handler) } EXPORT_SYMBOL(input_unregister_handler); +int input_register_handle(struct input_handle *handle) +{ + struct input_handler *handler = handle->handler; + + list_add_tail(&handle->d_node, &handle->dev->h_list); + list_add_tail(&handle->h_node, &handler->h_list); + + if (handler->start) + handler->start(handle); + + return 0; +} +EXPORT_SYMBOL(input_register_handle); + +void input_unregister_handle(struct input_handle *handle) +{ + list_del_init(&handle->h_node); + list_del_init(&handle->d_node); +} +EXPORT_SYMBOL(input_unregister_handle); + static int input_open_file(struct inode *inode, struct file *file) { struct input_handler *handler = input_table[iminor(inode) >> 5]; diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 9f3529ad3fda..cf24a5bde539 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -465,21 +465,24 @@ static const struct file_operations joydev_fops = { .fasync = joydev_fasync, }; -static struct input_handle *joydev_connect(struct input_handler *handler, struct 
input_dev *dev, - const struct input_device_id *id) +static int joydev_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) { struct joydev *joydev; struct class_device *cdev; + dev_t devt; int i, j, t, minor; + int error; for (minor = 0; minor < JOYDEV_MINORS && joydev_table[minor]; minor++); if (minor == JOYDEV_MINORS) { printk(KERN_ERR "joydev: no more free joydev devices\n"); - return NULL; + return -ENFILE; } - if (!(joydev = kzalloc(sizeof(struct joydev), GFP_KERNEL))) - return NULL; + joydev = kzalloc(sizeof(struct joydev), GFP_KERNEL); + if (!joydev) + return -ENOMEM; INIT_LIST_HEAD(&joydev->list); init_waitqueue_head(&joydev->wait); @@ -534,22 +537,45 @@ static struct input_handle *joydev_connect(struct input_handler *handler, struct joydev_table[minor] = joydev; - cdev = class_device_create(&input_class, &dev->cdev, - MKDEV(INPUT_MAJOR, JOYDEV_MINOR_BASE + minor), - dev->cdev.dev, joydev->name); + devt = MKDEV(INPUT_MAJOR, JOYDEV_MINOR_BASE + minor), + + cdev = class_device_create(&input_class, &dev->cdev, devt, + dev->cdev.dev, joydev->name); + if (IS_ERR(cdev)) { + error = PTR_ERR(cdev); + goto err_free_joydev; + } /* temporary symlink to keep userspace happy */ - sysfs_create_link(&input_class.subsys.kset.kobj, &cdev->kobj, - joydev->name); + error = sysfs_create_link(&input_class.subsys.kset.kobj, + &cdev->kobj, joydev->name); + if (error) + goto err_cdev_destroy; + + error = input_register_handle(&joydev->handle); + if (error) + goto err_remove_link; + + return 0; - return &joydev->handle; + err_remove_link: + sysfs_remove_link(&input_class.subsys.kset.kobj, joydev->name); + err_cdev_destroy: + class_device_destroy(&input_class, devt); + err_free_joydev: + joydev_table[minor] = NULL; + kfree(joydev); + return error; } + static void joydev_disconnect(struct input_handle *handle) { struct joydev *joydev = handle->private; struct joydev_list *list; + input_unregister_handle(handle); + sysfs_remove_link(&input_class.subsys.kset.kobj, joydev->name); class_device_destroy(&input_class, MKDEV(INPUT_MAJOR, JOYDEV_MINOR_BASE + joydev->minor)); joydev->exist = 0; diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 664bcc8116fc..007e72f80251 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -624,23 +624,27 @@ static const struct file_operations mousedev_fops = { .fasync = mousedev_fasync, }; -static struct input_handle *mousedev_connect(struct input_handler *handler, struct input_dev *dev, - const struct input_device_id *id) +static int mousedev_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) { struct mousedev *mousedev; struct class_device *cdev; - int minor = 0; + dev_t devt; + int minor; + int error; for (minor = 0; minor < MOUSEDEV_MINORS && mousedev_table[minor]; minor++); if (minor == MOUSEDEV_MINORS) { printk(KERN_ERR "mousedev: no more free mousedev devices\n"); - return NULL; + return -ENFILE; } - if (!(mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL))) - return NULL; + mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL); + if (!mousedev) + return -ENOMEM; INIT_LIST_HEAD(&mousedev->list); + INIT_LIST_HEAD(&mousedev->mixdev_node); init_waitqueue_head(&mousedev->wait); mousedev->minor = minor; @@ -651,20 +655,45 @@ static struct input_handle *mousedev_connect(struct input_handler *handler, stru mousedev->handle.private = mousedev; sprintf(mousedev->name, "mouse%d", minor); - if (mousedev_mix.open) - 
input_open_device(&mousedev->handle); - mousedev_table[minor] = mousedev; - cdev = class_device_create(&input_class, &dev->cdev, - MKDEV(INPUT_MAJOR, MOUSEDEV_MINOR_BASE + minor), - dev->cdev.dev, mousedev->name); + devt = MKDEV(INPUT_MAJOR, MOUSEDEV_MINOR_BASE + minor), + + cdev = class_device_create(&input_class, &dev->cdev, devt, + dev->cdev.dev, mousedev->name); + if (IS_ERR(cdev)) { + error = PTR_ERR(cdev); + goto err_free_mousedev; + } /* temporary symlink to keep userspace happy */ - sysfs_create_link(&input_class.subsys.kset.kobj, &cdev->kobj, - mousedev->name); + error = sysfs_create_link(&input_class.subsys.kset.kobj, + &cdev->kobj, mousedev->name); + if (error) + goto err_cdev_destroy; - return &mousedev->handle; + error = input_register_handle(&mousedev->handle); + if (error) + goto err_remove_link; + + if (mousedev_mix.open) { + error = input_open_device(&mousedev->handle); + if (error) + goto err_unregister_handle; + } + + return 0; + + err_unregister_handle: + input_unregister_handle(&mousedev->handle); + err_remove_link: + sysfs_remove_link(&input_class.subsys.kset.kobj, mousedev->name); + err_cdev_destroy: + class_device_destroy(&input_class, devt); + err_free_mousedev: + mousedev_table[minor] = NULL; + kfree(mousedev); + return error; } static void mousedev_disconnect(struct input_handle *handle) @@ -672,6 +701,8 @@ static void mousedev_disconnect(struct input_handle *handle) struct mousedev *mousedev = handle->private; struct mousedev_list *list; + input_unregister_handle(handle); + sysfs_remove_link(&input_class.subsys.kset.kobj, mousedev->name); class_device_destroy(&input_class, MKDEV(INPUT_MAJOR, MOUSEDEV_MINOR_BASE + mousedev->minor)); diff --git a/drivers/input/power.c b/drivers/input/power.c index ee82464a2fa7..e28d264b9e06 100644 --- a/drivers/input/power.c +++ b/drivers/input/power.c @@ -41,14 +41,14 @@ static struct input_handler power_handler; * Power management can't be done in a interrupt context. So we have to * use keventd. */ -static int suspend_button_pushed = 0; -static void suspend_button_task_handler(void *data) +static int suspend_button_pushed; +static void suspend_button_task_handler(struct work_struct *work) { udelay(200); /* debounce */ suspend_button_pushed = 0; } -static DECLARE_WORK(suspend_button_task, suspend_button_task_handler, NULL); +static DECLARE_WORK(suspend_button_task, suspend_button_task_handler); static void power_event(struct input_handle *handle, unsigned int type, unsigned int code, int down) @@ -63,9 +63,9 @@ static void power_event(struct input_handle *handle, unsigned int type, printk("Powering down entire device\n"); if (!suspend_button_pushed) { - suspend_button_pushed = 1; - schedule_work(&suspend_button_task); - } + suspend_button_pushed = 1; + schedule_work(&suspend_button_task); + } break; case KEY_POWER: /* Hum power down the machine. */ @@ -84,7 +84,7 @@ static void power_event(struct input_handle *handle, unsigned int type, dev->state = PM_RESUME; else dev->state = PM_SUSPEND; - pm_send(dev->pm_dev, dev->state, dev); + /* pm_send(dev->pm_dev, dev->state, dev); */ break; case KEY_POWER: /* Turn the input device off completely ? 
*/ @@ -96,27 +96,41 @@ static void power_event(struct input_handle *handle, unsigned int type, return; } -static struct input_handle *power_connect(struct input_handler *handler, - struct input_dev *dev, - const struct input_device_id *id) +static int power_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) { struct input_handle *handle; + int error; - if (!(handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL))) - return NULL; + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; handle->dev = dev; handle->handler = handler; + handle->name = "power"; - input_open_device(handle); + error = input_register_handle(handle); + if (error) + goto err_free_handle; - printk(KERN_INFO "power.c: Adding power management to input layer\n"); - return handle; + error = input_open_device(handle); + if (error) + goto err_unregister_handle; + + return 0; + + err_unregister_handle: + input_unregister_handle(handle); + err_free_handle: + kfree(handle); + return error; } static void power_disconnect(struct input_handle *handle) { input_close_device(handle); + input_unregister_handle(handle); kfree(handle); } @@ -135,7 +149,7 @@ static const struct input_device_id power_ids[] = { .flags = INPUT_DEVICE_ID_MATCH_EVBIT, .evbit = { BIT(EV_PWR) }, }, - { }, /* Terminating entry */ + { }, /* Terminating entry */ }; MODULE_DEVICE_TABLE(input, power_ids); diff --git a/drivers/input/tsdev.c b/drivers/input/tsdev.c index 0300dca8591d..a23aedc64ab1 100644 --- a/drivers/input/tsdev.c +++ b/drivers/input/tsdev.c @@ -155,7 +155,7 @@ static int tsdev_open(struct inode *inode, struct file *file) "for removal.\nSee Documentation/feature-removal-schedule.txt " "for details.\n"); - if (i >= TSDEV_MINORS || !tsdev_table[i & TSDEV_MINOR_MASK]) + if (i >= TSDEV_MINORS) return -ENODEV; if (!(list = kzalloc(sizeof(struct tsdev_list), GFP_KERNEL))) @@ -246,14 +246,14 @@ static int tsdev_ioctl(struct inode *inode, struct file *file, switch (cmd) { case TS_GET_CAL: - if (copy_to_user ((void __user *)arg, &tsdev->cal, - sizeof (struct ts_calibration))) + if (copy_to_user((void __user *)arg, &tsdev->cal, + sizeof (struct ts_calibration))) retval = -EFAULT; break; case TS_SET_CAL: - if (copy_from_user (&tsdev->cal, (void __user *)arg, - sizeof (struct ts_calibration))) + if (copy_from_user(&tsdev->cal, (void __user *)arg, + sizeof (struct ts_calibration))) retval = -EFAULT; break; @@ -370,23 +370,25 @@ static void tsdev_event(struct input_handle *handle, unsigned int type, wake_up_interruptible(&tsdev->wait); } -static struct input_handle *tsdev_connect(struct input_handler *handler, - struct input_dev *dev, - const struct input_device_id *id) +static int tsdev_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) { struct tsdev *tsdev; struct class_device *cdev; + dev_t devt; int minor, delta; + int error; for (minor = 0; minor < TSDEV_MINORS / 2 && tsdev_table[minor]; minor++); if (minor >= TSDEV_MINORS / 2) { printk(KERN_ERR "tsdev: You have way too many touchscreens\n"); - return NULL; + return -ENFILE; } - if (!(tsdev = kzalloc(sizeof(struct tsdev), GFP_KERNEL))) - return NULL; + tsdev = kzalloc(sizeof(struct tsdev), GFP_KERNEL); + if (!tsdev) + return -ENOMEM; INIT_LIST_HEAD(&tsdev->list); init_waitqueue_head(&tsdev->wait); @@ -415,15 +417,35 @@ static struct input_handle *tsdev_connect(struct input_handler *handler, tsdev_table[minor] = tsdev; - cdev = class_device_create(&input_class, &dev->cdev, - 
MKDEV(INPUT_MAJOR, TSDEV_MINOR_BASE + minor), - dev->cdev.dev, tsdev->name); + devt = MKDEV(INPUT_MAJOR, TSDEV_MINOR_BASE + minor), + + cdev = class_device_create(&input_class, &dev->cdev, devt, + dev->cdev.dev, tsdev->name); + if (IS_ERR(cdev)) { + error = PTR_ERR(cdev); + goto err_free_tsdev; + } /* temporary symlink to keep userspace happy */ - sysfs_create_link(&input_class.subsys.kset.kobj, &cdev->kobj, - tsdev->name); + error = sysfs_create_link(&input_class.subsys.kset.kobj, + &cdev->kobj, tsdev->name); + if (error) + goto err_cdev_destroy; - return &tsdev->handle; + error = input_register_handle(&tsdev->handle); + if (error) + goto err_remove_link; + + return 0; + + err_remove_link: + sysfs_remove_link(&input_class.subsys.kset.kobj, tsdev->name); + err_cdev_destroy: + class_device_destroy(&input_class, devt); + err_free_tsdev: + tsdev_table[minor] = NULL; + kfree(tsdev); + return error; } static void tsdev_disconnect(struct input_handle *handle) @@ -431,6 +453,8 @@ static void tsdev_disconnect(struct input_handle *handle) struct tsdev *tsdev = handle->private; struct tsdev_list *list; + input_unregister_handle(handle); + sysfs_remove_link(&input_class.subsys.kset.kobj, tsdev->name); class_device_destroy(&input_class, MKDEV(INPUT_MAJOR, TSDEV_MINOR_BASE + tsdev->minor)); diff --git a/include/linux/input.h b/include/linux/input.h index 3a8b8c6f0ab5..a51d6cf6824c 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1049,7 +1049,7 @@ struct input_handler { void *private; void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value); - struct input_handle* (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id); + int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id); void (*disconnect)(struct input_handle *handle); void (*start)(struct input_handle *handle); @@ -1102,6 +1102,9 @@ void input_unregister_device(struct input_dev *); int input_register_handler(struct input_handler *); void input_unregister_handler(struct input_handler *); +int input_register_handle(struct input_handle *); +void input_unregister_handle(struct input_handle *); + int input_grab_device(struct input_handle *); void input_release_device(struct input_handle *); -- cgit v1.2.3 From 6480e2a275ff8ff48ae23a011616fcf819ed7a4e Mon Sep 17 00:00:00 2001 From: Eric Piel Date: Thu, 12 Apr 2007 01:32:34 -0400 Subject: Input: wistron - add acerhk laptop database Acerhk supports already a lot of laptops. Lets import its database so that everyone can benefit of the work of Olaf Tauber. Only the "tm_new" laptops were imported. "tm_old" laptops could be possible but requires more testing and probably only few laptops are still alive. "dritek" laptops should probably be imported into a different driver. Also compress the keymaps by fitting each entry on an int. Most of the dmi matching was written based on google searches, so it's rather prone to errors. That's why I'm asking people to confirm it works. Support to generate switch input events was added as some laptops indicate lid open/close through this interface. 
This adds the following hardware: Acer TravelMate 370 Acer TravelMate 380 Acer TravelMate C300 Acer TravelMate C100 Acer TravelMate C110 Acer TravelMate 250 Acer TravelMate 350 Acer TravelMate 620 Acer TravelMate 630 Acer TravelMate 220 Acer TravelMate 230 Acer TravelMate 260 Acer TravelMate 280 Acer TravelMate 360 Acer TravelMate 2100 Acer TravelMate 2410 Acer Aspire 1500 Acer Aspire 1600 Acer Aspire 3020 Acer Aspire 5020 Medion MD 2900 Medion MD 40100 Medion MD 95400 Medion MD 96500 Fujitsu Siemens Amilo 7820 Signed-off-by: Eric Piel Signed-off-by: Dmitry Torokhov --- drivers/input/misc/wistron_btns.c | 595 +++++++++++++++++++++++++++++++++----- include/linux/input.h | 1 + 2 files changed, 524 insertions(+), 72 deletions(-) (limited to 'include/linux') diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c index 39e437599d35..47de377330b8 100644 --- a/drivers/input/misc/wistron_btns.c +++ b/drivers/input/misc/wistron_btns.c @@ -233,10 +233,20 @@ static void bios_set_state(u8 subsys, int enable) struct key_entry { char type; /* See KE_* below */ u8 code; - unsigned keycode; /* For KE_KEY */ + union { + u16 keycode; /* For KE_KEY */ + struct { /* For KE_SW */ + u8 code; + u8 value; + } sw; + }; }; -enum { KE_END, KE_KEY, KE_WIFI, KE_BLUETOOTH }; +enum { KE_END, KE_KEY, KE_SW, KE_WIFI, KE_BLUETOOTH }; + +#define FE_MAIL_LED 0x01 +#define FE_WIFI_LED 0x02 +#define FE_UNTESTED 0x80 static const struct key_entry *keymap; /* = NULL; Current key map */ static int have_wifi; @@ -261,104 +271,301 @@ static struct key_entry keymap_empty[] = { }; static struct key_entry keymap_fs_amilo_pro_v2000[] = { - { KE_KEY, 0x01, KEY_HELP }, - { KE_KEY, 0x11, KEY_PROG1 }, - { KE_KEY, 0x12, KEY_PROG2 }, - { KE_WIFI, 0x30, 0 }, - { KE_KEY, 0x31, KEY_MAIL }, - { KE_KEY, 0x36, KEY_WWW }, + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_WIFI, 0x30 }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, { KE_END, 0 } }; static struct key_entry keymap_fujitsu_n3510[] = { - { KE_KEY, 0x11, KEY_PROG1 }, - { KE_KEY, 0x12, KEY_PROG2 }, - { KE_KEY, 0x36, KEY_WWW }, - { KE_KEY, 0x31, KEY_MAIL }, - { KE_KEY, 0x71, KEY_STOPCD }, - { KE_KEY, 0x72, KEY_PLAYPAUSE }, - { KE_KEY, 0x74, KEY_REWIND }, - { KE_KEY, 0x78, KEY_FORWARD }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x71, {KEY_STOPCD} }, + { KE_KEY, 0x72, {KEY_PLAYPAUSE} }, + { KE_KEY, 0x74, {KEY_REWIND} }, + { KE_KEY, 0x78, {KEY_FORWARD} }, { KE_END, 0 } }; static struct key_entry keymap_wistron_ms2111[] = { - { KE_KEY, 0x11, KEY_PROG1 }, - { KE_KEY, 0x12, KEY_PROG2 }, - { KE_KEY, 0x13, KEY_PROG3 }, - { KE_KEY, 0x31, KEY_MAIL }, - { KE_KEY, 0x36, KEY_WWW }, - { KE_END, 0 } + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x13, {KEY_PROG3} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_END, FE_MAIL_LED } +}; + +static struct key_entry keymap_wistron_md40100[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_KEY, 0x37, {KEY_DISPLAYTOGGLE} }, /* Display on/off */ + { KE_END, FE_MAIL_LED | FE_WIFI_LED | FE_UNTESTED } }; static struct key_entry keymap_wistron_ms2141[] = { - { KE_KEY, 0x11, KEY_PROG1 }, - { KE_KEY, 0x12, KEY_PROG2 }, - { KE_WIFI, 0x30, 0 }, - { KE_KEY, 0x22, KEY_REWIND }, - { KE_KEY, 0x23, KEY_FORWARD }, - { KE_KEY, 0x24, 
KEY_PLAYPAUSE }, - { KE_KEY, 0x25, KEY_STOPCD }, - { KE_KEY, 0x31, KEY_MAIL }, - { KE_KEY, 0x36, KEY_WWW }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_WIFI, 0x30 }, + { KE_KEY, 0x22, {KEY_REWIND} }, + { KE_KEY, 0x23, {KEY_FORWARD} }, + { KE_KEY, 0x24, {KEY_PLAYPAUSE} }, + { KE_KEY, 0x25, {KEY_STOPCD} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, { KE_END, 0 } }; static struct key_entry keymap_acer_aspire_1500[] = { - { KE_KEY, 0x11, KEY_PROG1 }, - { KE_KEY, 0x12, KEY_PROG2 }, - { KE_WIFI, 0x30, 0 }, - { KE_KEY, 0x31, KEY_MAIL }, - { KE_KEY, 0x36, KEY_WWW }, - { KE_BLUETOOTH, 0x44, 0 }, - { KE_END, 0 } + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x03, {KEY_POWER} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_WIFI, 0x30 }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_KEY, 0x49, {KEY_CONFIG} }, + { KE_BLUETOOTH, 0x44 }, + { KE_END, FE_UNTESTED } +}; + +static struct key_entry keymap_acer_aspire_1600[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x03, {KEY_POWER} }, + { KE_KEY, 0x08, {KEY_MUTE} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x13, {KEY_PROG3} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_KEY, 0x49, {KEY_CONFIG} }, + { KE_WIFI, 0x30 }, + { KE_BLUETOOTH, 0x44 }, + { KE_END, FE_MAIL_LED | FE_UNTESTED } +}; + +/* 3020 has been tested */ +static struct key_entry keymap_acer_aspire_5020[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x03, {KEY_POWER} }, + { KE_KEY, 0x05, {KEY_SWITCHVIDEOMODE} }, /* Display selection */ + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_KEY, 0x6a, {KEY_CONFIG} }, + { KE_WIFI, 0x30 }, + { KE_BLUETOOTH, 0x44 }, + { KE_END, FE_MAIL_LED | FE_UNTESTED } +}; + +static struct key_entry keymap_acer_travelmate_2410[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x6d, {KEY_POWER} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_KEY, 0x6a, {KEY_CONFIG} }, + { KE_WIFI, 0x30 }, + { KE_BLUETOOTH, 0x44 }, + { KE_END, FE_MAIL_LED | FE_UNTESTED } +}; + +static struct key_entry keymap_acer_travelmate_110[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x03, {KEY_POWER} }, + { KE_KEY, 0x08, {KEY_MUTE} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x20, {KEY_VOLUMEUP} }, + { KE_KEY, 0x21, {KEY_VOLUMEDOWN} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_SW, 0x4a, {.sw = {SW_LID, 1}} }, /* lid close */ + { KE_SW, 0x4b, {.sw = {SW_LID, 0}} }, /* lid open */ + { KE_WIFI, 0x30 }, + { KE_END, FE_MAIL_LED | FE_UNTESTED } +}; + +static struct key_entry keymap_acer_travelmate_300[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x03, {KEY_POWER} }, + { KE_KEY, 0x08, {KEY_MUTE} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x20, {KEY_VOLUMEUP} }, + { KE_KEY, 0x21, {KEY_VOLUMEDOWN} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_WIFI, 0x30 }, + { KE_BLUETOOTH, 0x44 }, + { KE_END, FE_MAIL_LED | FE_UNTESTED } +}; + +static struct key_entry keymap_acer_travelmate_380[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x03, {KEY_POWER} }, /* not 370 */ + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { 
KE_KEY, 0x13, {KEY_PROG3} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_WIFI, 0x30 }, + { KE_END, FE_MAIL_LED | FE_UNTESTED } +}; + +/* unusual map */ +static struct key_entry keymap_acer_travelmate_220[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x11, {KEY_MAIL} }, + { KE_KEY, 0x12, {KEY_WWW} }, + { KE_KEY, 0x13, {KEY_PROG2} }, + { KE_KEY, 0x31, {KEY_PROG1} }, + { KE_END, FE_WIFI_LED | FE_UNTESTED } +}; + +static struct key_entry keymap_acer_travelmate_230[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_END, FE_WIFI_LED | FE_UNTESTED } }; static struct key_entry keymap_acer_travelmate_240[] = { - { KE_KEY, 0x31, KEY_MAIL }, - { KE_KEY, 0x36, KEY_WWW }, - { KE_KEY, 0x11, KEY_PROG1 }, - { KE_KEY, 0x12, KEY_PROG2 }, - { KE_BLUETOOTH, 0x44, 0 }, - { KE_WIFI, 0x30, 0 }, - { KE_END, 0 } + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x03, {KEY_POWER} }, + { KE_KEY, 0x08, {KEY_MUTE} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_BLUETOOTH, 0x44 }, + { KE_WIFI, 0x30 }, + { KE_END, FE_UNTESTED } +}; + +static struct key_entry keymap_acer_travelmate_350[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x13, {KEY_MAIL} }, + { KE_KEY, 0x14, {KEY_PROG3} }, + { KE_KEY, 0x15, {KEY_WWW} }, + { KE_END, FE_MAIL_LED | FE_WIFI_LED | FE_UNTESTED } +}; + +static struct key_entry keymap_acer_travelmate_360[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x13, {KEY_MAIL} }, + { KE_KEY, 0x14, {KEY_PROG3} }, + { KE_KEY, 0x15, {KEY_WWW} }, + { KE_KEY, 0x40, {KEY_WLAN} }, + { KE_END, FE_WIFI_LED | FE_UNTESTED } /* no mail led */ }; /* Wifi subsystem only activates the led. Therefore we need to pass * wifi event as a normal key, then userspace can really change the wifi state. 
* TODO we need to export led state to userspace (wifi and mail) */ static struct key_entry keymap_acer_travelmate_610[] = { - { KE_KEY, 0x01, KEY_HELP }, - { KE_KEY, 0x02, KEY_CONFIG }, - { KE_KEY, 0x11, KEY_PROG1 }, - { KE_KEY, 0x12, KEY_PROG2 }, - { KE_KEY, 0x13, KEY_PROG3 }, - { KE_KEY, 0x14, KEY_MAIL }, - { KE_KEY, 0x15, KEY_WWW }, - { KE_KEY, 0x40, KEY_WLAN }, /* Wifi */ - { KE_END, 0 } + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG3} }, + { KE_KEY, 0x13, {KEY_PROG3} }, + { KE_KEY, 0x14, {KEY_MAIL} }, + { KE_KEY, 0x15, {KEY_WWW} }, + { KE_KEY, 0x40, {KEY_WLAN} }, + { KE_END, FE_MAIL_LED | FE_WIFI_LED } +}; + +static struct key_entry keymap_acer_travelmate_630[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x03, {KEY_POWER} }, + { KE_KEY, 0x08, {KEY_MUTE} }, /* not 620 */ + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x13, {KEY_PROG3} }, + { KE_KEY, 0x20, {KEY_VOLUMEUP} }, + { KE_KEY, 0x21, {KEY_VOLUMEDOWN} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_WIFI, 0x30 }, + { KE_END, FE_MAIL_LED | FE_UNTESTED } }; static struct key_entry keymap_aopen_1559as[] = { - { KE_KEY, 0x01, KEY_HELP }, - { KE_KEY, 0x06, KEY_PROG3 }, - { KE_KEY, 0x11, KEY_PROG1 }, - { KE_KEY, 0x12, KEY_PROG2 }, - { KE_WIFI, 0x30, 0 }, - { KE_KEY, 0x31, KEY_MAIL }, - { KE_KEY, 0x36, KEY_WWW }, + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x06, {KEY_PROG3} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_WIFI, 0x30 }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, { KE_END, 0 }, }; static struct key_entry keymap_fs_amilo_d88x0[] = { - { KE_KEY, 0x01, KEY_HELP }, - { KE_KEY, 0x08, KEY_MUTE }, - { KE_KEY, 0x31, KEY_MAIL }, - { KE_KEY, 0x36, KEY_WWW }, - { KE_KEY, 0x11, KEY_PROG1 }, - { KE_KEY, 0x12, KEY_PROG2 }, - { KE_KEY, 0x13, KEY_PROG3 }, - { KE_END, 0 } + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x08, {KEY_MUTE} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x13, {KEY_PROG3} }, + { KE_END, FE_MAIL_LED | FE_WIFI_LED | FE_UNTESTED } +}; + +static struct key_entry keymap_wistron_md2900[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_WIFI, 0x30 }, + { KE_END, FE_MAIL_LED | FE_UNTESTED } +}; + +static struct key_entry keymap_wistron_md96500[] = { + { KE_KEY, 0x01, {KEY_HELP} }, + { KE_KEY, 0x02, {KEY_CONFIG} }, + { KE_KEY, 0x05, {KEY_SWITCHVIDEOMODE} }, /* Display selection */ + { KE_KEY, 0x06, {KEY_DISPLAYTOGGLE} }, /* Display on/off */ + { KE_KEY, 0x08, {KEY_MUTE} }, + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_KEY, 0x20, {KEY_VOLUMEUP} }, + { KE_KEY, 0x21, {KEY_VOLUMEDOWN} }, + { KE_KEY, 0x22, {KEY_REWIND} }, + { KE_KEY, 0x23, {KEY_FORWARD} }, + { KE_KEY, 0x24, {KEY_PLAYPAUSE} }, + { KE_KEY, 0x25, {KEY_STOPCD} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_WIFI, 0x30 }, + { KE_BLUETOOTH, 0x44 }, + { KE_END, FE_UNTESTED } }; /* @@ -403,6 +610,133 @@ static struct dmi_system_id dmi_ids[] __initdata = { }, .driver_data = keymap_acer_aspire_1500 }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 1600", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 
1600"), + }, + .driver_data = keymap_acer_aspire_1600 + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 3020", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3020"), + }, + .driver_data = keymap_acer_aspire_5020 + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 5020", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5020"), + }, + .driver_data = keymap_acer_aspire_5020 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 2100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2100"), + }, + .driver_data = keymap_acer_aspire_5020 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 2410", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2410"), + }, + .driver_data = keymap_acer_travelmate_2410 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate C300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate C300"), + }, + .driver_data = keymap_acer_travelmate_300 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate C100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate C100"), + }, + .driver_data = keymap_acer_travelmate_300 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate C110", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate C110"), + }, + .driver_data = keymap_acer_travelmate_110 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 380", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 380"), + }, + .driver_data = keymap_acer_travelmate_380 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 370", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 370"), + }, + .driver_data = keymap_acer_travelmate_380 /* keyboard minus 1 key */ + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 220", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 220"), + }, + .driver_data = keymap_acer_travelmate_220 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 260", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 260"), + }, + .driver_data = keymap_acer_travelmate_220 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 230", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 230"), + /* acerhk looks for "TravelMate F4..." ?! 
*/ + }, + .driver_data = keymap_acer_travelmate_230 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 280", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 280"), + }, + .driver_data = keymap_acer_travelmate_230 + }, { .callback = dmi_matched, .ident = "Acer TravelMate 240", @@ -412,6 +746,15 @@ static struct dmi_system_id dmi_ids[] __initdata = { }, .driver_data = keymap_acer_travelmate_240 }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 250", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 250"), + }, + .driver_data = keymap_acer_travelmate_240 + }, { .callback = dmi_matched, .ident = "Acer TravelMate 2424NWXCi", @@ -421,6 +764,24 @@ static struct dmi_system_id dmi_ids[] __initdata = { }, .driver_data = keymap_acer_travelmate_240 }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 350", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 350"), + }, + .driver_data = keymap_acer_travelmate_350 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 360", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), + }, + .driver_data = keymap_acer_travelmate_360 + }, { .callback = dmi_matched, .ident = "Acer TravelMate 610", @@ -430,6 +791,24 @@ static struct dmi_system_id dmi_ids[] __initdata = { }, .driver_data = keymap_acer_travelmate_610 }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 620", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 620"), + }, + .driver_data = keymap_acer_travelmate_630 + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 630", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 630"), + }, + .driver_data = keymap_acer_travelmate_630 + }, { .callback = dmi_matched, .ident = "AOpen 1559AS", @@ -448,6 +827,51 @@ static struct dmi_system_id dmi_ids[] __initdata = { }, .driver_data = keymap_wistron_ms2111 }, + { + .callback = dmi_matched, + .ident = "Medion MD 40100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"), + DMI_MATCH(DMI_PRODUCT_NAME, "WID2000"), + }, + .driver_data = keymap_wistron_md40100 + }, + { + .callback = dmi_matched, + .ident = "Medion MD 2900", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"), + DMI_MATCH(DMI_PRODUCT_NAME, "WIM 2000"), + }, + .driver_data = keymap_wistron_md2900 + }, + { + .callback = dmi_matched, + .ident = "Medion MD 96500", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDIONPC"), + DMI_MATCH(DMI_PRODUCT_NAME, "WIM 2040"), + }, + .driver_data = keymap_wistron_md96500 + }, + { + .callback = dmi_matched, + .ident = "Medion MD 95400", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDIONPC"), + DMI_MATCH(DMI_PRODUCT_NAME, "WIM 2050"), + }, + .driver_data = keymap_wistron_md96500 + }, + { + .callback = dmi_matched, + .ident = "Fujitsu Siemens Amilo D7820", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), /* not sure */ + DMI_MATCH(DMI_PRODUCT_NAME, "Amilo D"), + }, + .driver_data = keymap_fs_amilo_d88x0 + }, { .callback = dmi_matched, .ident = "Fujitsu Siemens Amilo D88x0", @@ -500,12 +924,28 @@ static int __devinit setup_input_dev(void) input_dev->cdev.dev = &wistron_device->dev; for (key = keymap; key->type != KE_END; key++) { - if (key->type == KE_KEY) { - input_dev->evbit[LONG(EV_KEY)] = BIT(EV_KEY); - set_bit(key->keycode, input_dev->keybit); + switch (key->type) { + case 
KE_KEY: + set_bit(EV_KEY, input_dev->evbit); + set_bit(key->keycode, input_dev->keybit); + break; + + case KE_SW: + set_bit(EV_SW, input_dev->evbit); + set_bit(key->sw.code, input_dev->swbit); + break; + + default: + ; } } + /* reads information flags on KE_END */ + if (key->code & FE_UNTESTED) + printk(KERN_WARNING "Untested laptop multimedia keys, " + "please report success or failure to eric.piel" + "@tremplin-utc.net\n"); + error = input_register_device(input_dev); if (error) { input_free_device(input_dev); @@ -523,6 +963,12 @@ static void report_key(unsigned keycode) input_sync(input_dev); } +static void report_switch(unsigned code, int value) +{ + input_report_switch(input_dev, code, value); + input_sync(input_dev); +} + /* Driver core */ static int wifi_enabled; @@ -543,6 +989,10 @@ static void handle_key(u8 code) report_key(key->keycode); break; + case KE_SW: + report_switch(key->sw.code, key->sw.value); + break; + case KE_WIFI: if (have_wifi) { wifi_enabled = !wifi_enabled; @@ -558,6 +1008,7 @@ static void handle_key(u8 code) break; case KE_END: + break; default: BUG(); } diff --git a/include/linux/input.h b/include/linux/input.h index a51d6cf6824c..9bd984b2d122 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -506,6 +506,7 @@ struct input_absinfo { #define KEY_VOICEMAIL 0x1ac #define KEY_ADDRESSBOOK 0x1ad #define KEY_MESSENGER 0x1ae +#define KEY_DISPLAYTOGGLE 0x1af /* Turn display (LCD) on and off */ #define KEY_DEL_EOL 0x1c0 #define KEY_DEL_EOS 0x1c1 -- cgit v1.2.3 From 3abccf36081ac827cf5d14db6837117f088937eb Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Thu, 12 Apr 2007 01:33:51 -0400 Subject: Input: add input_{get|set}_drvdata() helpers Add helpers to set up and access driver-specific data in input device structure. Once conversion to struct driver is complete we will drop input_dev->private and will use dev_get_drvdata() and dev_set_drvdata(). Signed-off-by: Dmitry Torokhov --- include/linux/input.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/input.h b/include/linux/input.h index 9bd984b2d122..d0bea9755186 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1097,6 +1097,16 @@ static inline void input_put_device(struct input_dev *dev) class_device_put(&dev->cdev); } +static inline void *input_get_drvdata(struct input_dev *dev) +{ + return dev->private; +} + +static inline void input_set_drvdata(struct input_dev *dev, void *data) +{ + dev->private = data; +} + int input_register_device(struct input_dev *); void input_unregister_device(struct input_dev *); -- cgit v1.2.3 From 88a447a030bfec9f1e8666daf27d9d73c8c92448 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Thu, 12 Apr 2007 01:34:47 -0400 Subject: Input: prepare to switching to struct device In preparation to switching to struct device and class device going away provide an alias to allow drivers that create devices to use either input_dev->cdev.dev or input_dev->dev.parent to put them into sysfs tree. The former will go away once conversion to struct device is complete. 
Signed-off-by: Dmitry Torokhov --- drivers/input/input.c | 3 +++ include/linux/input.h | 3 +++ 2 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/drivers/input/input.c b/drivers/input/input.c index 86b27079004a..173c2861ec58 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1079,6 +1079,9 @@ int input_register_device(struct input_dev *dev) snprintf(dev->cdev.class_id, sizeof(dev->cdev.class_id), "input%ld", (unsigned long) atomic_inc_return(&input_no) - 1); + if (!dev->cdev.dev) + dev->cdev.dev = dev->dev.parent; + error = class_device_add(&dev->cdev); if (error) return error; diff --git a/include/linux/input.h b/include/linux/input.h index d0bea9755186..7b6d7c408b07 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -972,6 +972,9 @@ struct input_dev { unsigned int users; struct class_device cdev; + union { /* temporarily so while we switching to struct device */ + struct device *parent; + } dev; struct list_head h_list; struct list_head node; -- cgit v1.2.3 From 534565f254490227e3bec20d50f387800960acd9 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 25 Apr 2007 00:53:18 -0400 Subject: Input: add input_set_capability() helper Add input_set_capability() helper used to indicate that an input device supports a certain event without need to manipulate bitmaps directly. Signed-off-by: Dmitry Torokhov --- drivers/input/input.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/input.h | 2 ++ 2 files changed, 58 insertions(+) (limited to 'include/linux') diff --git a/drivers/input/input.c b/drivers/input/input.c index 173c2861ec58..915e9ab7cab0 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1046,6 +1046,62 @@ void input_free_device(struct input_dev *dev) } EXPORT_SYMBOL(input_free_device); +/** + * input_set_capability - mark device as capable of a certain event + * @dev: device that is capable of emitting or accepting event + * @type: type of the event (EV_KEY, EV_REL, etc...) + * @code: event code + * + * In addition to setting up corresponding bit in appropriate capability + * bitmap the function also adjusts dev->evbit. 
+ */ +void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code) +{ + switch (type) { + case EV_KEY: + __set_bit(code, dev->keybit); + break; + + case EV_REL: + __set_bit(code, dev->relbit); + break; + + case EV_ABS: + __set_bit(code, dev->absbit); + break; + + case EV_MSC: + __set_bit(code, dev->mscbit); + break; + + case EV_SW: + __set_bit(code, dev->swbit); + break; + + case EV_LED: + __set_bit(code, dev->ledbit); + break; + + case EV_SND: + __set_bit(code, dev->sndbit); + break; + + case EV_FF: + __set_bit(code, dev->ffbit); + break; + + default: + printk(KERN_ERR + "input_set_capability: unknown type %u (code %u)\n", + type, code); + dump_stack(); + return; + } + + __set_bit(type, dev->evbit); +} +EXPORT_SYMBOL(input_set_capability); + int input_register_device(struct input_dev *dev) { static atomic_t input_no = ATOMIC_INIT(0); diff --git a/include/linux/input.h b/include/linux/input.h index 7b6d7c408b07..1789ee9df4dd 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1160,6 +1160,8 @@ static inline void input_sync(struct input_dev *dev) input_event(dev, EV_SYN, SYN_REPORT, 0); } +void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code); + static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat) { dev->absmin[axis] = min; -- cgit v1.2.3 From 632786ce9ff6206951ee4c84fe5c0d5c1d12f4cc Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Thu, 19 Apr 2007 15:49:09 +0200 Subject: [CPUFREQ] Remove deprecated /proc/acpi/processor/performance write support Remove deprecated /proc/acpi/processor/performance write support Writing to /proc/acpi/processor/xy/performance interferes with sysfs cpufreq interface. Also removes buggy cpufreq_set_policy exported symbol. 
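With the /proc write path gone, user space is expected to adjust frequencies through the sysfs cpufreq interface instead. A minimal hedged sketch (assumes root privileges and the "userspace" governor; the path layout and frequency value are only examples):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Request 1 GHz on cpu0 (cpufreq sysfs files take kHz).
	 * Illustrative only; scaling_setspeed accepts writes only while
	 * the "userspace" governor is active. */
	const char *path = "/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	fprintf(f, "%u\n", 1000000U);
	fclose(f);
	return EXIT_SUCCESS;
}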
Signed-off-by: Thomas Renninger Signed-off-by: Dave Jones --- drivers/acpi/processor_perflib.c | 46 +--------------------------------------- drivers/cpufreq/cpufreq.c | 37 -------------------------------- include/linux/cpufreq.h | 1 - 3 files changed, 1 insertion(+), 83 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 2f2e7964226d..c4efc0c17f8f 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -433,49 +433,6 @@ static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file) PDE(inode)->data); } -static ssize_t -acpi_processor_write_performance(struct file *file, - const char __user * buffer, - size_t count, loff_t * data) -{ - int result = 0; - struct seq_file *m = file->private_data; - struct acpi_processor *pr = m->private; - struct acpi_processor_performance *perf; - char state_string[12] = { '\0' }; - unsigned int new_state = 0; - struct cpufreq_policy policy; - - - if (!pr || (count > sizeof(state_string) - 1)) - return -EINVAL; - - perf = pr->performance; - if (!perf) - return -EINVAL; - - if (copy_from_user(state_string, buffer, count)) - return -EFAULT; - - state_string[count] = '\0'; - new_state = simple_strtoul(state_string, NULL, 0); - - if (new_state >= perf->state_count) - return -EINVAL; - - cpufreq_get_policy(&policy, pr->id); - - policy.cpu = pr->id; - policy.min = perf->states[new_state].core_frequency * 1000; - policy.max = perf->states[new_state].core_frequency * 1000; - - result = cpufreq_set_policy(&policy); - if (result) - return result; - - return count; -} - static void acpi_cpufreq_add_file(struct acpi_processor *pr) { struct proc_dir_entry *entry = NULL; @@ -487,10 +444,9 @@ static void acpi_cpufreq_add_file(struct acpi_processor *pr) /* add file 'performance' [R/W] */ entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE, - S_IFREG | S_IRUGO | S_IWUSR, + S_IFREG | S_IRUGO, acpi_device_dir(device)); if (entry){ - acpi_processor_perf_fops.write = acpi_processor_write_performance; entry->proc_fops = &acpi_processor_perf_fops; entry->data = acpi_driver_data(device); entry->owner = THIS_MODULE; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index fcb86dd8903d..893dbaf386fb 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1625,43 +1625,6 @@ error_out: return ret; } -/** - * cpufreq_set_policy - set a new CPUFreq policy - * @policy: policy to be set. - * - * Sets a new CPU frequency and voltage scaling policy. 
- */ -int cpufreq_set_policy(struct cpufreq_policy *policy) -{ - int ret = 0; - struct cpufreq_policy *data; - - if (!policy) - return -EINVAL; - - data = cpufreq_cpu_get(policy->cpu); - if (!data) - return -EINVAL; - - if (unlikely(lock_policy_rwsem_write(policy->cpu))) - return -EINVAL; - - - ret = __cpufreq_set_policy(data, policy); - data->user_policy.min = data->min; - data->user_policy.max = data->max; - data->user_policy.policy = data->policy; - data->user_policy.governor = data->governor; - - unlock_policy_rwsem_write(policy->cpu); - - cpufreq_cpu_put(data); - - return ret; -} -EXPORT_SYMBOL(cpufreq_set_policy); - - /** * cpufreq_update_policy - re-evaluate an existing cpufreq policy * @cpu: CPU which shall be re-evaluated diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 0899e2cdcdd1..3ec6e7ff5fbd 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -257,7 +257,6 @@ struct freq_attr { /********************************************************************* * CPUFREQ 2.6. INTERFACE * *********************************************************************/ -int cpufreq_set_policy(struct cpufreq_policy *policy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); -- cgit v1.2.3 From 0dcd8073673115eeb67343787f244905f62532f2 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sun, 29 Apr 2007 23:42:45 -0400 Subject: Input: add skeleton for simple polled devices input-polldev provides a skeleton for supporting simple input devices that need to be periodically scanned or polled to detect changes in their state. Signed-off-by: Dmitry Torokhov --- drivers/input/misc/Kconfig | 11 +++ drivers/input/misc/Makefile | 1 + drivers/input/misc/input-polldev.c | 171 +++++++++++++++++++++++++++++++++++++ include/linux/input-polldev.h | 46 ++++++++++ 4 files changed, 229 insertions(+) create mode 100644 drivers/input/misc/input-polldev.c create mode 100644 include/linux/input-polldev.h (limited to 'include/linux') diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 56941155ec87..696c9609d714 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -90,6 +90,17 @@ config INPUT_UINPUT To compile this driver as a module, choose M here: the module will be called uinput. +config INPUT_POLLDEV + tristate "Polled input device skeleton" + help + Say Y here if you are using a driver for an input + device that periodically polls hardware state. This + option is only useful for out-of-tree drivers since + in-tree drivers select it automatically. + + To compile this driver as a module, choose M here: the + module will be called input-polldev. + config HP_SDC_RTC tristate "HP SDC Real Time Clock" depends on GSC || HP300 diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 9f08f27b06de..21e3cca0d33e 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -4,6 +4,7 @@ # Each configuration option enables a list of files. 
+obj-$(CONFIG_INPUT_POLLDEV) += input-polldev.o obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o diff --git a/drivers/input/misc/input-polldev.c b/drivers/input/misc/input-polldev.c new file mode 100644 index 000000000000..1b2b9c9c5d88 --- /dev/null +++ b/drivers/input/misc/input-polldev.c @@ -0,0 +1,171 @@ +/* + * Generic implementation of a polled input device + + * Copyright (c) 2007 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include +#include +#include + +static DEFINE_MUTEX(polldev_mutex); +static int polldev_users; +static struct workqueue_struct *polldev_wq; + +static int input_polldev_start_workqueue(void) +{ + int retval; + + retval = mutex_lock_interruptible(&polldev_mutex); + if (retval) + return retval; + + if (!polldev_users) { + polldev_wq = create_singlethread_workqueue("ipolldevd"); + if (!polldev_wq) { + printk(KERN_ERR "input-polldev: failed to create " + "ipolldevd workqueue\n"); + retval = -ENOMEM; + goto out; + } + } + + polldev_users++; + + out: + mutex_unlock(&polldev_mutex); + return retval; +} + +static void input_polldev_stop_workqueue(void) +{ + mutex_lock(&polldev_mutex); + + if (!--polldev_users) + destroy_workqueue(polldev_wq); + + mutex_unlock(&polldev_mutex); +} + +static void input_polled_device_work(struct work_struct *work) +{ + struct input_polled_dev *dev = + container_of(work, struct input_polled_dev, work.work); + + dev->poll(dev); + queue_delayed_work(polldev_wq, &dev->work, + msecs_to_jiffies(dev->poll_interval)); +} + +static int input_open_polled_device(struct input_dev *input) +{ + struct input_polled_dev *dev = input->private; + int error; + + error = input_polldev_start_workqueue(); + if (error) + return error; + + if (dev->flush) + dev->flush(dev); + + queue_delayed_work(polldev_wq, &dev->work, + msecs_to_jiffies(dev->poll_interval)); + + return 0; +} + +static void input_close_polled_device(struct input_dev *input) +{ + struct input_polled_dev *dev = input->private; + + cancel_rearming_delayed_workqueue(polldev_wq, &dev->work); + input_polldev_stop_workqueue(); +} + +/** + * input_allocate_polled_device - allocated memory polled device + * + * The function allocates memory for a polled device and also + * for an input device associated with this polled device. + */ +struct input_polled_dev *input_allocate_polled_device(void) +{ + struct input_polled_dev *dev; + + dev = kzalloc(sizeof(struct input_polled_dev), GFP_KERNEL); + if (!dev) + return NULL; + + dev->input = input_allocate_device(); + if (!dev->input) { + kfree(dev); + return NULL; + } + + return dev; +} +EXPORT_SYMBOL(input_allocate_polled_device); + +/** + * input_free_polled_device - free memory allocated for polled device + * @dev: device to free + * + * The function frees memory allocated for polling device and drops + * reference to the associated input device (if present). + */ +void input_free_polled_device(struct input_polled_dev *dev) +{ + if (dev) { + input_free_device(dev->input); + kfree(dev); + } +} +EXPORT_SYMBOL(input_free_polled_device); + +/** + * input_register_polled_device - register polled device + * @dev: device to register + * + * The function registers previously initialized polled input device + * with input layer. The device should be allocated with call to + * input_allocate_polled_device(). 
Callers should also set up poll() + * method and set up capabilities (id, name, phys, bits) of the + * corresponing input_dev structure. + */ +int input_register_polled_device(struct input_polled_dev *dev) +{ + struct input_dev *input = dev->input; + + INIT_DELAYED_WORK(&dev->work, input_polled_device_work); + if (!dev->poll_interval) + dev->poll_interval = 500; + input->private = dev; + input->open = input_open_polled_device; + input->close = input_close_polled_device; + + return input_register_device(input); +} +EXPORT_SYMBOL(input_register_polled_device); + +/** + * input_unregister_polled_device - unregister polled device + * @dev: device to unregister + * + * The function unregisters previously registered polled input + * device from input layer. Polling is stopped and device is + * ready to be freed with call to input_free_polled_device(). + * Callers should not attempt to access dev->input pointer + * after calling this function. + */ +void input_unregister_polled_device(struct input_polled_dev *dev) +{ + input_unregister_device(dev->input); + dev->input = NULL; +} +EXPORT_SYMBOL(input_unregister_polled_device); + diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h new file mode 100644 index 000000000000..597a0077b3c5 --- /dev/null +++ b/include/linux/input-polldev.h @@ -0,0 +1,46 @@ +#ifndef _INPUT_POLLDEV_H +#define _INPUT_POLLDEV_H + +/* + * Copyright (c) 2007 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include +#include + +/** + * struct input_polled_dev - simple polled input device + * @private: private driver data + * @flush: driver-supplied method that flushes device's state upon + * opening (optional) + * @poll: driver-supplied method that polls the device and posts + * input events (mandatory). + * @poll_interval: specifies how often the poll() method shoudl be called. + * @input: input device structire associated with the polled device. + * Must be properly initialized by the driver (id, name, phys, bits). + * + * Polled input device provides a skeleton for supporting simple input + * devices that do not raise interrupts but have to be periodically + * scanned or polled to detect changes in their state. + */ +struct input_polled_dev { + void *private; + + void (*flush)(struct input_polled_dev *dev); + void (*poll)(struct input_polled_dev *dev); + unsigned int poll_interval; /* msec */ + + struct input_dev *input; + struct delayed_work work; +}; + +struct input_polled_dev *input_allocate_polled_device(void); +void input_free_polled_device(struct input_polled_dev *dev); +int input_register_polled_device(struct input_polled_dev *dev); +void input_unregister_polled_device(struct input_polled_dev *dev); + +#endif -- cgit v1.2.3 From 9640d3d775aa325650c8fcdf49127542f77b2156 Mon Sep 17 00:00:00 2001 From: Kristian Høgsberg Date: Mon, 30 Apr 2007 15:03:15 -0400 Subject: firewire: Rename fw-device-cdev.c to fw-cdev.c and move header to include/linux. 
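For the polled-device API introduced above, a minimal hedged module sketch (device name, event code and poll logic are hypothetical):

#include <linux/module.h>
#include <linux/input.h>
#include <linux/input-polldev.h>

static struct input_polled_dev *poll_dev;

/* Hypothetical poll routine: read hardware state and report it as a key. */
static void example_poll(struct input_polled_dev *dev)
{
	int pressed = 0;	/* replace with a real hardware read */

	input_report_key(dev->input, KEY_POWER, pressed);
	input_sync(dev->input);
}

static int __init example_init(void)
{
	int error;

	poll_dev = input_allocate_polled_device();
	if (!poll_dev)
		return -ENOMEM;

	poll_dev->poll = example_poll;
	poll_dev->poll_interval = 100;	/* msec; 500 is the default */

	poll_dev->input->name = "Example Polled Button";	/* hypothetical */
	set_bit(EV_KEY, poll_dev->input->evbit);
	set_bit(KEY_POWER, poll_dev->input->keybit);

	error = input_register_polled_device(poll_dev);
	if (error)
		input_free_polled_device(poll_dev);

	return error;
}

static void __exit example_exit(void)
{
	input_unregister_polled_device(poll_dev);
	input_free_polled_device(poll_dev);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");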
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Kristian Høgsberg Signed-off-by: Stefan Richter --- drivers/firewire/Makefile | 2 +- drivers/firewire/fw-cdev.c | 954 ++++++++++++++++++++++++++++++++++++++ drivers/firewire/fw-device-cdev.c | 954 -------------------------------------- drivers/firewire/fw-device-cdev.h | 268 ----------- include/linux/firewire-cdev.h | 268 +++++++++++ 5 files changed, 1223 insertions(+), 1223 deletions(-) create mode 100644 drivers/firewire/fw-cdev.c delete mode 100644 drivers/firewire/fw-device-cdev.c delete mode 100644 drivers/firewire/fw-device-cdev.h create mode 100644 include/linux/firewire-cdev.h (limited to 'include/linux') diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile index eddc16c701b5..7f02d6fe7a44 100644 --- a/drivers/firewire/Makefile +++ b/drivers/firewire/Makefile @@ -3,7 +3,7 @@ # fw-core-objs := fw-card.o fw-topology.o fw-transaction.o fw-iso.o \ - fw-device.o fw-device-cdev.o + fw-device.o fw-cdev.o obj-$(CONFIG_FIREWIRE) += fw-core.o obj-$(CONFIG_FIREWIRE_OHCI) += fw-ohci.o diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c new file mode 100644 index 000000000000..88b8fd917f54 --- /dev/null +++ b/drivers/firewire/fw-cdev.c @@ -0,0 +1,954 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-device-cdev.c - Char device for device raw access + * + * Copyright (C) 2005-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fw-transaction.h" +#include "fw-topology.h" +#include "fw-device.h" + +/* dequeue_event() just kfree()'s the event, so the event has to be + * the first field in the struct. 
*/ + +struct client; +struct client_resource { + struct list_head link; + void (*release)(struct client *client, struct client_resource *r); + u32 handle; +}; + +struct event { + struct { void *data; size_t size; } v[2]; + struct list_head link; +}; + +struct bus_reset { + struct event event; + struct fw_cdev_event_bus_reset reset; +}; + +struct response { + struct event event; + struct fw_transaction transaction; + struct client *client; + struct client_resource resource; + struct fw_cdev_event_response response; +}; + +struct iso_interrupt { + struct event event; + struct fw_cdev_event_iso_interrupt interrupt; +}; + +struct client { + u32 version; + struct fw_device *device; + spinlock_t lock; + u32 resource_handle; + struct list_head resource_list; + struct list_head event_list; + wait_queue_head_t wait; + u64 bus_reset_closure; + + struct fw_iso_context *iso_context; + u64 iso_closure; + struct fw_iso_buffer buffer; + unsigned long vm_start; + + struct list_head link; +}; + +static inline void __user * +u64_to_uptr(__u64 value) +{ + return (void __user *)(unsigned long)value; +} + +static inline __u64 +uptr_to_u64(void __user *ptr) +{ + return (__u64)(unsigned long)ptr; +} + +static int fw_device_op_open(struct inode *inode, struct file *file) +{ + struct fw_device *device; + struct client *client; + unsigned long flags; + + device = fw_device_from_devt(inode->i_rdev); + if (device == NULL) + return -ENODEV; + + client = kzalloc(sizeof *client, GFP_KERNEL); + if (client == NULL) + return -ENOMEM; + + client->device = fw_device_get(device); + INIT_LIST_HEAD(&client->event_list); + INIT_LIST_HEAD(&client->resource_list); + spin_lock_init(&client->lock); + init_waitqueue_head(&client->wait); + + file->private_data = client; + + spin_lock_irqsave(&device->card->lock, flags); + list_add_tail(&client->link, &device->client_list); + spin_unlock_irqrestore(&device->card->lock, flags); + + return 0; +} + +static void queue_event(struct client *client, struct event *event, + void *data0, size_t size0, void *data1, size_t size1) +{ + unsigned long flags; + + event->v[0].data = data0; + event->v[0].size = size0; + event->v[1].data = data1; + event->v[1].size = size1; + + spin_lock_irqsave(&client->lock, flags); + + list_add_tail(&event->link, &client->event_list); + wake_up_interruptible(&client->wait); + + spin_unlock_irqrestore(&client->lock, flags); +} + +static int +dequeue_event(struct client *client, char __user *buffer, size_t count) +{ + unsigned long flags; + struct event *event; + size_t size, total; + int i, retval; + + retval = wait_event_interruptible(client->wait, + !list_empty(&client->event_list) || + fw_device_is_shutdown(client->device)); + if (retval < 0) + return retval; + + if (list_empty(&client->event_list) && + fw_device_is_shutdown(client->device)) + return -ENODEV; + + spin_lock_irqsave(&client->lock, flags); + event = container_of(client->event_list.next, struct event, link); + list_del(&event->link); + spin_unlock_irqrestore(&client->lock, flags); + + total = 0; + for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { + size = min(event->v[i].size, count - total); + if (copy_to_user(buffer + total, event->v[i].data, size)) { + retval = -EFAULT; + goto out; + } + total += size; + } + retval = total; + + out: + kfree(event); + + return retval; +} + +static ssize_t +fw_device_op_read(struct file *file, + char __user *buffer, size_t count, loff_t *offset) +{ + struct client *client = file->private_data; + + return dequeue_event(client, buffer, count); +} + +static 
void +fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, + struct client *client) +{ + struct fw_card *card = client->device->card; + + event->closure = client->bus_reset_closure; + event->type = FW_CDEV_EVENT_BUS_RESET; + event->node_id = client->device->node_id; + event->local_node_id = card->local_node->node_id; + event->bm_node_id = 0; /* FIXME: We don't track the BM. */ + event->irm_node_id = card->irm_node->node_id; + event->root_node_id = card->root_node->node_id; + event->generation = card->generation; +} + +static void +for_each_client(struct fw_device *device, + void (*callback)(struct client *client)) +{ + struct fw_card *card = device->card; + struct client *c; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + + list_for_each_entry(c, &device->client_list, link) + callback(c); + + spin_unlock_irqrestore(&card->lock, flags); +} + +static void +queue_bus_reset_event(struct client *client) +{ + struct bus_reset *bus_reset; + + bus_reset = kzalloc(sizeof *bus_reset, GFP_ATOMIC); + if (bus_reset == NULL) { + fw_notify("Out of memory when allocating bus reset event\n"); + return; + } + + fill_bus_reset_event(&bus_reset->reset, client); + + queue_event(client, &bus_reset->event, + &bus_reset->reset, sizeof bus_reset->reset, NULL, 0); +} + +void fw_device_cdev_update(struct fw_device *device) +{ + for_each_client(device, queue_bus_reset_event); +} + +static void wake_up_client(struct client *client) +{ + wake_up_interruptible(&client->wait); +} + +void fw_device_cdev_remove(struct fw_device *device) +{ + for_each_client(device, wake_up_client); +} + +static int ioctl_get_info(struct client *client, void *buffer) +{ + struct fw_cdev_get_info *get_info = buffer; + struct fw_cdev_event_bus_reset bus_reset; + + client->version = get_info->version; + get_info->version = FW_CDEV_VERSION; + + if (get_info->rom != 0) { + void __user *uptr = u64_to_uptr(get_info->rom); + size_t want = get_info->rom_length; + size_t have = client->device->config_rom_length * 4; + + if (copy_to_user(uptr, client->device->config_rom, + min(want, have))) + return -EFAULT; + } + get_info->rom_length = client->device->config_rom_length * 4; + + client->bus_reset_closure = get_info->bus_reset_closure; + if (get_info->bus_reset != 0) { + void __user *uptr = u64_to_uptr(get_info->bus_reset); + + fill_bus_reset_event(&bus_reset, client); + if (copy_to_user(uptr, &bus_reset, sizeof bus_reset)) + return -EFAULT; + } + + get_info->card = client->device->card->index; + + return 0; +} + +static void +add_client_resource(struct client *client, struct client_resource *resource) +{ + unsigned long flags; + + spin_lock_irqsave(&client->lock, flags); + list_add_tail(&resource->link, &client->resource_list); + resource->handle = client->resource_handle++; + spin_unlock_irqrestore(&client->lock, flags); +} + +static int +release_client_resource(struct client *client, u32 handle, + struct client_resource **resource) +{ + struct client_resource *r; + unsigned long flags; + + spin_lock_irqsave(&client->lock, flags); + list_for_each_entry(r, &client->resource_list, link) { + if (r->handle == handle) { + list_del(&r->link); + break; + } + } + spin_unlock_irqrestore(&client->lock, flags); + + if (&r->link == &client->resource_list) + return -EINVAL; + + if (resource) + *resource = r; + else + r->release(client, r); + + return 0; +} + +static void +release_transaction(struct client *client, struct client_resource *resource) +{ + struct response *response = + container_of(resource, struct response, resource); + + 
fw_cancel_transaction(client->device->card, &response->transaction); +} + +static void +complete_transaction(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) +{ + struct response *response = data; + struct client *client = response->client; + unsigned long flags; + + if (length < response->response.length) + response->response.length = length; + if (rcode == RCODE_COMPLETE) + memcpy(response->response.data, payload, + response->response.length); + + spin_lock_irqsave(&client->lock, flags); + list_del(&response->resource.link); + spin_unlock_irqrestore(&client->lock, flags); + + response->response.type = FW_CDEV_EVENT_RESPONSE; + response->response.rcode = rcode; + queue_event(client, &response->event, + &response->response, sizeof response->response, + response->response.data, response->response.length); +} + +static ssize_t ioctl_send_request(struct client *client, void *buffer) +{ + struct fw_device *device = client->device; + struct fw_cdev_send_request *request = buffer; + struct response *response; + + /* What is the biggest size we'll accept, really? */ + if (request->length > 4096) + return -EINVAL; + + response = kmalloc(sizeof *response + request->length, GFP_KERNEL); + if (response == NULL) + return -ENOMEM; + + response->client = client; + response->response.length = request->length; + response->response.closure = request->closure; + + if (request->data && + copy_from_user(response->response.data, + u64_to_uptr(request->data), request->length)) { + kfree(response); + return -EFAULT; + } + + response->resource.release = release_transaction; + add_client_resource(client, &response->resource); + + fw_send_request(device->card, &response->transaction, + request->tcode & 0x1f, + device->node->node_id, + request->generation, + device->node->max_speed, + request->offset, + response->response.data, request->length, + complete_transaction, response); + + if (request->data) + return sizeof request + request->length; + else + return sizeof request; +} + +struct address_handler { + struct fw_address_handler handler; + __u64 closure; + struct client *client; + struct client_resource resource; +}; + +struct request { + struct fw_request *request; + void *data; + size_t length; + struct client_resource resource; +}; + +struct request_event { + struct event event; + struct fw_cdev_event_request request; +}; + +static void +release_request(struct client *client, struct client_resource *resource) +{ + struct request *request = + container_of(resource, struct request, resource); + + fw_send_response(client->device->card, request->request, + RCODE_CONFLICT_ERROR); + kfree(request); +} + +static void +handle_request(struct fw_card *card, struct fw_request *r, + int tcode, int destination, int source, + int generation, int speed, + unsigned long long offset, + void *payload, size_t length, void *callback_data) +{ + struct address_handler *handler = callback_data; + struct request *request; + struct request_event *e; + struct client *client = handler->client; + + request = kmalloc(sizeof *request, GFP_ATOMIC); + e = kmalloc(sizeof *e, GFP_ATOMIC); + if (request == NULL || e == NULL) { + kfree(request); + kfree(e); + fw_send_response(card, r, RCODE_CONFLICT_ERROR); + return; + } + + request->request = r; + request->data = payload; + request->length = length; + + request->resource.release = release_request; + add_client_resource(client, &request->resource); + + e->request.type = FW_CDEV_EVENT_REQUEST; + e->request.tcode = tcode; + e->request.offset = offset; + 
e->request.length = length; + e->request.handle = request->resource.handle; + e->request.closure = handler->closure; + + queue_event(client, &e->event, + &e->request, sizeof e->request, payload, length); +} + +static void +release_address_handler(struct client *client, + struct client_resource *resource) +{ + struct address_handler *handler = + container_of(resource, struct address_handler, resource); + + fw_core_remove_address_handler(&handler->handler); + kfree(handler); +} + +static int ioctl_allocate(struct client *client, void *buffer) +{ + struct fw_cdev_allocate *request = buffer; + struct address_handler *handler; + struct fw_address_region region; + + handler = kmalloc(sizeof *handler, GFP_KERNEL); + if (handler == NULL) + return -ENOMEM; + + region.start = request->offset; + region.end = request->offset + request->length; + handler->handler.length = request->length; + handler->handler.address_callback = handle_request; + handler->handler.callback_data = handler; + handler->closure = request->closure; + handler->client = client; + + if (fw_core_add_address_handler(&handler->handler, ®ion) < 0) { + kfree(handler); + return -EBUSY; + } + + handler->resource.release = release_address_handler; + add_client_resource(client, &handler->resource); + request->handle = handler->resource.handle; + + return 0; +} + +static int ioctl_deallocate(struct client *client, void *buffer) +{ + struct fw_cdev_deallocate *request = buffer; + + return release_client_resource(client, request->handle, NULL); +} + +static int ioctl_send_response(struct client *client, void *buffer) +{ + struct fw_cdev_send_response *request = buffer; + struct client_resource *resource; + struct request *r; + + if (release_client_resource(client, request->handle, &resource) < 0) + return -EINVAL; + r = container_of(resource, struct request, resource); + if (request->length < r->length) + r->length = request->length; + if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) + return -EFAULT; + + fw_send_response(client->device->card, r->request, request->rcode); + kfree(r); + + return 0; +} + +static int ioctl_initiate_bus_reset(struct client *client, void *buffer) +{ + struct fw_cdev_initiate_bus_reset *request = buffer; + int short_reset; + + short_reset = (request->type == FW_CDEV_SHORT_RESET); + + return fw_core_initiate_bus_reset(client->device->card, short_reset); +} + +struct descriptor { + struct fw_descriptor d; + struct client_resource resource; + u32 data[0]; +}; + +static void release_descriptor(struct client *client, + struct client_resource *resource) +{ + struct descriptor *descriptor = + container_of(resource, struct descriptor, resource); + + fw_core_remove_descriptor(&descriptor->d); + kfree(descriptor); +} + +static int ioctl_add_descriptor(struct client *client, void *buffer) +{ + struct fw_cdev_add_descriptor *request = buffer; + struct descriptor *descriptor; + int retval; + + if (request->length > 256) + return -EINVAL; + + descriptor = + kmalloc(sizeof *descriptor + request->length * 4, GFP_KERNEL); + if (descriptor == NULL) + return -ENOMEM; + + if (copy_from_user(descriptor->data, + u64_to_uptr(request->data), request->length * 4)) { + kfree(descriptor); + return -EFAULT; + } + + descriptor->d.length = request->length; + descriptor->d.immediate = request->immediate; + descriptor->d.key = request->key; + descriptor->d.data = descriptor->data; + + retval = fw_core_add_descriptor(&descriptor->d); + if (retval < 0) { + kfree(descriptor); + return retval; + } + + descriptor->resource.release = 
release_descriptor; + add_client_resource(client, &descriptor->resource); + request->handle = descriptor->resource.handle; + + return 0; +} + +static int ioctl_remove_descriptor(struct client *client, void *buffer) +{ + struct fw_cdev_remove_descriptor *request = buffer; + + return release_client_resource(client, request->handle, NULL); +} + +static void +iso_callback(struct fw_iso_context *context, u32 cycle, + size_t header_length, void *header, void *data) +{ + struct client *client = data; + struct iso_interrupt *interrupt; + + interrupt = kzalloc(sizeof *interrupt + header_length, GFP_ATOMIC); + if (interrupt == NULL) + return; + + interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; + interrupt->interrupt.closure = client->iso_closure; + interrupt->interrupt.cycle = cycle; + interrupt->interrupt.header_length = header_length; + memcpy(interrupt->interrupt.header, header, header_length); + queue_event(client, &interrupt->event, + &interrupt->interrupt, + sizeof interrupt->interrupt + header_length, NULL, 0); +} + +static int ioctl_create_iso_context(struct client *client, void *buffer) +{ + struct fw_cdev_create_iso_context *request = buffer; + + if (request->channel > 63) + return -EINVAL; + + switch (request->type) { + case FW_ISO_CONTEXT_RECEIVE: + if (request->header_size < 4 || (request->header_size & 3)) + return -EINVAL; + + break; + + case FW_ISO_CONTEXT_TRANSMIT: + if (request->speed > SCODE_3200) + return -EINVAL; + + break; + + default: + return -EINVAL; + } + + client->iso_closure = request->closure; + client->iso_context = fw_iso_context_create(client->device->card, + request->type, + request->channel, + request->speed, + request->header_size, + iso_callback, client); + if (IS_ERR(client->iso_context)) + return PTR_ERR(client->iso_context); + + /* We only support one context at this time. */ + request->handle = 0; + + return 0; +} + +static int ioctl_queue_iso(struct client *client, void *buffer) +{ + struct fw_cdev_queue_iso *request = buffer; + struct fw_cdev_iso_packet __user *p, *end, *next; + struct fw_iso_context *ctx = client->iso_context; + unsigned long payload, buffer_end, header_length; + int count; + struct { + struct fw_iso_packet packet; + u8 header[256]; + } u; + + if (ctx == NULL || request->handle != 0) + return -EINVAL; + + /* If the user passes a non-NULL data pointer, has mmap()'ed + * the iso buffer, and the pointer points inside the buffer, + * we setup the payload pointers accordingly. Otherwise we + * set them both to 0, which will still let packets with + * payload_length == 0 through. 
In other words, if no packets + * use the indirect payload, the iso buffer need not be mapped + * and the request->data pointer is ignored.*/ + + payload = (unsigned long)request->data - client->vm_start; + buffer_end = client->buffer.page_count << PAGE_SHIFT; + if (request->data == 0 || client->buffer.pages == NULL || + payload >= buffer_end) { + payload = 0; + buffer_end = 0; + } + + if (!access_ok(VERIFY_READ, request->packets, request->size)) + return -EFAULT; + + p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets); + end = (void __user *)p + request->size; + count = 0; + while (p < end) { + if (__copy_from_user(&u.packet, p, sizeof *p)) + return -EFAULT; + + if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { + header_length = u.packet.header_length; + } else { + /* We require that header_length is a multiple of + * the fixed header size, ctx->header_size */ + if (ctx->header_size == 0) { + if (u.packet.header_length > 0) + return -EINVAL; + } else if (u.packet.header_length % ctx->header_size != 0) { + return -EINVAL; + } + header_length = 0; + } + + next = (struct fw_cdev_iso_packet __user *) + &p->header[header_length / 4]; + if (next > end) + return -EINVAL; + if (__copy_from_user + (u.packet.header, p->header, header_length)) + return -EFAULT; + if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && + u.packet.header_length + u.packet.payload_length > 0) + return -EINVAL; + if (payload + u.packet.payload_length > buffer_end) + return -EINVAL; + + if (fw_iso_context_queue(ctx, &u.packet, + &client->buffer, payload)) + break; + + p = next; + payload += u.packet.payload_length; + count++; + } + + request->size -= uptr_to_u64(p) - request->packets; + request->packets = uptr_to_u64(p); + request->data = client->vm_start + payload; + + return count; +} + +static int ioctl_start_iso(struct client *client, void *buffer) +{ + struct fw_cdev_start_iso *request = buffer; + + if (request->handle != 0) + return -EINVAL; + if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { + if (request->tags == 0 || request->tags > 15) + return -EINVAL; + + if (request->sync > 15) + return -EINVAL; + } + + return fw_iso_context_start(client->iso_context, request->cycle, + request->sync, request->tags); +} + +static int ioctl_stop_iso(struct client *client, void *buffer) +{ + struct fw_cdev_stop_iso *request = buffer; + + if (request->handle != 0) + return -EINVAL; + + return fw_iso_context_stop(client->iso_context); +} + +static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { + ioctl_get_info, + ioctl_send_request, + ioctl_allocate, + ioctl_deallocate, + ioctl_send_response, + ioctl_initiate_bus_reset, + ioctl_add_descriptor, + ioctl_remove_descriptor, + ioctl_create_iso_context, + ioctl_queue_iso, + ioctl_start_iso, + ioctl_stop_iso, +}; + +static int +dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) +{ + char buffer[256]; + int retval; + + if (_IOC_TYPE(cmd) != '#' || + _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) + return -EINVAL; + + if (_IOC_DIR(cmd) & _IOC_WRITE) { + if (_IOC_SIZE(cmd) > sizeof buffer || + copy_from_user(buffer, arg, _IOC_SIZE(cmd))) + return -EFAULT; + } + + retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer); + if (retval < 0) + return retval; + + if (_IOC_DIR(cmd) & _IOC_READ) { + if (_IOC_SIZE(cmd) > sizeof buffer || + copy_to_user(arg, buffer, _IOC_SIZE(cmd))) + return -EFAULT; + } + + return 0; +} + +static long +fw_device_op_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct client 
*client = file->private_data; + + return dispatch_ioctl(client, cmd, (void __user *) arg); +} + +#ifdef CONFIG_COMPAT +static long +fw_device_op_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct client *client = file->private_data; + + return dispatch_ioctl(client, cmd, compat_ptr(arg)); +} +#endif + +static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct client *client = file->private_data; + enum dma_data_direction direction; + unsigned long size; + int page_count, retval; + + /* FIXME: We could support multiple buffers, but we don't. */ + if (client->buffer.pages != NULL) + return -EBUSY; + + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + if (vma->vm_start & ~PAGE_MASK) + return -EINVAL; + + client->vm_start = vma->vm_start; + size = vma->vm_end - vma->vm_start; + page_count = size >> PAGE_SHIFT; + if (size & ~PAGE_MASK) + return -EINVAL; + + if (vma->vm_flags & VM_WRITE) + direction = DMA_TO_DEVICE; + else + direction = DMA_FROM_DEVICE; + + retval = fw_iso_buffer_init(&client->buffer, client->device->card, + page_count, direction); + if (retval < 0) + return retval; + + retval = fw_iso_buffer_map(&client->buffer, vma); + if (retval < 0) + fw_iso_buffer_destroy(&client->buffer, client->device->card); + + return retval; +} + +static int fw_device_op_release(struct inode *inode, struct file *file) +{ + struct client *client = file->private_data; + struct event *e, *next_e; + struct client_resource *r, *next_r; + unsigned long flags; + + if (client->buffer.pages) + fw_iso_buffer_destroy(&client->buffer, client->device->card); + + if (client->iso_context) + fw_iso_context_destroy(client->iso_context); + + list_for_each_entry_safe(r, next_r, &client->resource_list, link) + r->release(client, r); + + /* FIXME: We should wait for the async tasklets to stop + * running before freeing the memory. */ + + list_for_each_entry_safe(e, next_e, &client->event_list, link) + kfree(e); + + spin_lock_irqsave(&client->device->card->lock, flags); + list_del(&client->link); + spin_unlock_irqrestore(&client->device->card->lock, flags); + + fw_device_put(client->device); + kfree(client); + + return 0; +} + +static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) +{ + struct client *client = file->private_data; + unsigned int mask = 0; + + poll_wait(file, &client->wait, pt); + + if (fw_device_is_shutdown(client->device)) + mask |= POLLHUP | POLLERR; + if (!list_empty(&client->event_list)) + mask |= POLLIN | POLLRDNORM; + + return mask; +} + +const struct file_operations fw_device_ops = { + .owner = THIS_MODULE, + .open = fw_device_op_open, + .read = fw_device_op_read, + .unlocked_ioctl = fw_device_op_ioctl, + .poll = fw_device_op_poll, + .release = fw_device_op_release, + .mmap = fw_device_op_mmap, + +#ifdef CONFIG_COMPAT + .compat_ioctl = fw_device_op_compat_ioctl, +#endif +}; diff --git a/drivers/firewire/fw-device-cdev.c b/drivers/firewire/fw-device-cdev.c deleted file mode 100644 index 2910db69ec2c..000000000000 --- a/drivers/firewire/fw-device-cdev.c +++ /dev/null @@ -1,954 +0,0 @@ -/* -*- c-basic-offset: 8 -*- - * - * fw-device-cdev.c - Char device for device raw access - * - * Copyright (C) 2005-2006 Kristian Hoegsberg - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "fw-transaction.h" -#include "fw-topology.h" -#include "fw-device.h" -#include "fw-device-cdev.h" - -/* dequeue_event() just kfree()'s the event, so the event has to be - * the first field in the struct. */ - -struct client; -struct client_resource { - struct list_head link; - void (*release)(struct client *client, struct client_resource *r); - u32 handle; -}; - -struct event { - struct { void *data; size_t size; } v[2]; - struct list_head link; -}; - -struct bus_reset { - struct event event; - struct fw_cdev_event_bus_reset reset; -}; - -struct response { - struct event event; - struct fw_transaction transaction; - struct client *client; - struct client_resource resource; - struct fw_cdev_event_response response; -}; - -struct iso_interrupt { - struct event event; - struct fw_cdev_event_iso_interrupt interrupt; -}; - -struct client { - u32 version; - struct fw_device *device; - spinlock_t lock; - u32 resource_handle; - struct list_head resource_list; - struct list_head event_list; - wait_queue_head_t wait; - u64 bus_reset_closure; - - struct fw_iso_context *iso_context; - u64 iso_closure; - struct fw_iso_buffer buffer; - unsigned long vm_start; - - struct list_head link; -}; - -static inline void __user * -u64_to_uptr(__u64 value) -{ - return (void __user *)(unsigned long)value; -} - -static inline __u64 -uptr_to_u64(void __user *ptr) -{ - return (__u64)(unsigned long)ptr; -} - -static int fw_device_op_open(struct inode *inode, struct file *file) -{ - struct fw_device *device; - struct client *client; - unsigned long flags; - - device = fw_device_from_devt(inode->i_rdev); - if (device == NULL) - return -ENODEV; - - client = kzalloc(sizeof *client, GFP_KERNEL); - if (client == NULL) - return -ENOMEM; - - client->device = fw_device_get(device); - INIT_LIST_HEAD(&client->event_list); - INIT_LIST_HEAD(&client->resource_list); - spin_lock_init(&client->lock); - init_waitqueue_head(&client->wait); - - file->private_data = client; - - spin_lock_irqsave(&device->card->lock, flags); - list_add_tail(&client->link, &device->client_list); - spin_unlock_irqrestore(&device->card->lock, flags); - - return 0; -} - -static void queue_event(struct client *client, struct event *event, - void *data0, size_t size0, void *data1, size_t size1) -{ - unsigned long flags; - - event->v[0].data = data0; - event->v[0].size = size0; - event->v[1].data = data1; - event->v[1].size = size1; - - spin_lock_irqsave(&client->lock, flags); - - list_add_tail(&event->link, &client->event_list); - wake_up_interruptible(&client->wait); - - spin_unlock_irqrestore(&client->lock, flags); -} - -static int -dequeue_event(struct client *client, char __user *buffer, size_t count) -{ - unsigned long flags; - struct event *event; - size_t size, total; - int i, retval; - - retval = wait_event_interruptible(client->wait, - !list_empty(&client->event_list) || - fw_device_is_shutdown(client->device)); - if (retval < 0) - return retval; - - if 
(list_empty(&client->event_list) && - fw_device_is_shutdown(client->device)) - return -ENODEV; - - spin_lock_irqsave(&client->lock, flags); - event = container_of(client->event_list.next, struct event, link); - list_del(&event->link); - spin_unlock_irqrestore(&client->lock, flags); - - total = 0; - for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { - size = min(event->v[i].size, count - total); - if (copy_to_user(buffer + total, event->v[i].data, size)) { - retval = -EFAULT; - goto out; - } - total += size; - } - retval = total; - - out: - kfree(event); - - return retval; -} - -static ssize_t -fw_device_op_read(struct file *file, - char __user *buffer, size_t count, loff_t *offset) -{ - struct client *client = file->private_data; - - return dequeue_event(client, buffer, count); -} - -static void -fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, - struct client *client) -{ - struct fw_card *card = client->device->card; - - event->closure = client->bus_reset_closure; - event->type = FW_CDEV_EVENT_BUS_RESET; - event->node_id = client->device->node_id; - event->local_node_id = card->local_node->node_id; - event->bm_node_id = 0; /* FIXME: We don't track the BM. */ - event->irm_node_id = card->irm_node->node_id; - event->root_node_id = card->root_node->node_id; - event->generation = card->generation; -} - -static void -for_each_client(struct fw_device *device, - void (*callback)(struct client *client)) -{ - struct fw_card *card = device->card; - struct client *c; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); - - list_for_each_entry(c, &device->client_list, link) - callback(c); - - spin_unlock_irqrestore(&card->lock, flags); -} - -static void -queue_bus_reset_event(struct client *client) -{ - struct bus_reset *bus_reset; - - bus_reset = kzalloc(sizeof *bus_reset, GFP_ATOMIC); - if (bus_reset == NULL) { - fw_notify("Out of memory when allocating bus reset event\n"); - return; - } - - fill_bus_reset_event(&bus_reset->reset, client); - - queue_event(client, &bus_reset->event, - &bus_reset->reset, sizeof bus_reset->reset, NULL, 0); -} - -void fw_device_cdev_update(struct fw_device *device) -{ - for_each_client(device, queue_bus_reset_event); -} - -static void wake_up_client(struct client *client) -{ - wake_up_interruptible(&client->wait); -} - -void fw_device_cdev_remove(struct fw_device *device) -{ - for_each_client(device, wake_up_client); -} - -static int ioctl_get_info(struct client *client, void *buffer) -{ - struct fw_cdev_get_info *get_info = buffer; - struct fw_cdev_event_bus_reset bus_reset; - - client->version = get_info->version; - get_info->version = FW_CDEV_VERSION; - - if (get_info->rom != 0) { - void __user *uptr = u64_to_uptr(get_info->rom); - size_t want = get_info->rom_length; - size_t have = client->device->config_rom_length * 4; - - if (copy_to_user(uptr, client->device->config_rom, - min(want, have))) - return -EFAULT; - } - get_info->rom_length = client->device->config_rom_length * 4; - - client->bus_reset_closure = get_info->bus_reset_closure; - if (get_info->bus_reset != 0) { - void __user *uptr = u64_to_uptr(get_info->bus_reset); - - fill_bus_reset_event(&bus_reset, client); - if (copy_to_user(uptr, &bus_reset, sizeof bus_reset)) - return -EFAULT; - } - - get_info->card = client->device->card->index; - - return 0; -} - -static void -add_client_resource(struct client *client, struct client_resource *resource) -{ - unsigned long flags; - - spin_lock_irqsave(&client->lock, flags); - list_add_tail(&resource->link, &client->resource_list); 
- resource->handle = client->resource_handle++; - spin_unlock_irqrestore(&client->lock, flags); -} - -static int -release_client_resource(struct client *client, u32 handle, - struct client_resource **resource) -{ - struct client_resource *r; - unsigned long flags; - - spin_lock_irqsave(&client->lock, flags); - list_for_each_entry(r, &client->resource_list, link) { - if (r->handle == handle) { - list_del(&r->link); - break; - } - } - spin_unlock_irqrestore(&client->lock, flags); - - if (&r->link == &client->resource_list) - return -EINVAL; - - if (resource) - *resource = r; - else - r->release(client, r); - - return 0; -} - -static void -release_transaction(struct client *client, struct client_resource *resource) -{ - struct response *response = - container_of(resource, struct response, resource); - - fw_cancel_transaction(client->device->card, &response->transaction); -} - -static void -complete_transaction(struct fw_card *card, int rcode, - void *payload, size_t length, void *data) -{ - struct response *response = data; - struct client *client = response->client; - unsigned long flags; - - if (length < response->response.length) - response->response.length = length; - if (rcode == RCODE_COMPLETE) - memcpy(response->response.data, payload, - response->response.length); - - spin_lock_irqsave(&client->lock, flags); - list_del(&response->resource.link); - spin_unlock_irqrestore(&client->lock, flags); - - response->response.type = FW_CDEV_EVENT_RESPONSE; - response->response.rcode = rcode; - queue_event(client, &response->event, - &response->response, sizeof response->response, - response->response.data, response->response.length); -} - -static ssize_t ioctl_send_request(struct client *client, void *buffer) -{ - struct fw_device *device = client->device; - struct fw_cdev_send_request *request = buffer; - struct response *response; - - /* What is the biggest size we'll accept, really? 
*/ - if (request->length > 4096) - return -EINVAL; - - response = kmalloc(sizeof *response + request->length, GFP_KERNEL); - if (response == NULL) - return -ENOMEM; - - response->client = client; - response->response.length = request->length; - response->response.closure = request->closure; - - if (request->data && - copy_from_user(response->response.data, - u64_to_uptr(request->data), request->length)) { - kfree(response); - return -EFAULT; - } - - response->resource.release = release_transaction; - add_client_resource(client, &response->resource); - - fw_send_request(device->card, &response->transaction, - request->tcode & 0x1f, - device->node->node_id, - request->generation, - device->node->max_speed, - request->offset, - response->response.data, request->length, - complete_transaction, response); - - if (request->data) - return sizeof request + request->length; - else - return sizeof request; -} - -struct address_handler { - struct fw_address_handler handler; - __u64 closure; - struct client *client; - struct client_resource resource; -}; - -struct request { - struct fw_request *request; - void *data; - size_t length; - struct client_resource resource; -}; - -struct request_event { - struct event event; - struct fw_cdev_event_request request; -}; - -static void -release_request(struct client *client, struct client_resource *resource) -{ - struct request *request = - container_of(resource, struct request, resource); - - fw_send_response(client->device->card, request->request, - RCODE_CONFLICT_ERROR); - kfree(request); -} - -static void -handle_request(struct fw_card *card, struct fw_request *r, - int tcode, int destination, int source, - int generation, int speed, - unsigned long long offset, - void *payload, size_t length, void *callback_data) -{ - struct address_handler *handler = callback_data; - struct request *request; - struct request_event *e; - struct client *client = handler->client; - - request = kmalloc(sizeof *request, GFP_ATOMIC); - e = kmalloc(sizeof *e, GFP_ATOMIC); - if (request == NULL || e == NULL) { - kfree(request); - kfree(e); - fw_send_response(card, r, RCODE_CONFLICT_ERROR); - return; - } - - request->request = r; - request->data = payload; - request->length = length; - - request->resource.release = release_request; - add_client_resource(client, &request->resource); - - e->request.type = FW_CDEV_EVENT_REQUEST; - e->request.tcode = tcode; - e->request.offset = offset; - e->request.length = length; - e->request.handle = request->resource.handle; - e->request.closure = handler->closure; - - queue_event(client, &e->event, - &e->request, sizeof e->request, payload, length); -} - -static void -release_address_handler(struct client *client, - struct client_resource *resource) -{ - struct address_handler *handler = - container_of(resource, struct address_handler, resource); - - fw_core_remove_address_handler(&handler->handler); - kfree(handler); -} - -static int ioctl_allocate(struct client *client, void *buffer) -{ - struct fw_cdev_allocate *request = buffer; - struct address_handler *handler; - struct fw_address_region region; - - handler = kmalloc(sizeof *handler, GFP_KERNEL); - if (handler == NULL) - return -ENOMEM; - - region.start = request->offset; - region.end = request->offset + request->length; - handler->handler.length = request->length; - handler->handler.address_callback = handle_request; - handler->handler.callback_data = handler; - handler->closure = request->closure; - handler->client = client; - - if (fw_core_add_address_handler(&handler->handler, ®ion) < 
0) { - kfree(handler); - return -EBUSY; - } - - handler->resource.release = release_address_handler; - add_client_resource(client, &handler->resource); - request->handle = handler->resource.handle; - - return 0; -} - -static int ioctl_deallocate(struct client *client, void *buffer) -{ - struct fw_cdev_deallocate *request = buffer; - - return release_client_resource(client, request->handle, NULL); -} - -static int ioctl_send_response(struct client *client, void *buffer) -{ - struct fw_cdev_send_response *request = buffer; - struct client_resource *resource; - struct request *r; - - if (release_client_resource(client, request->handle, &resource) < 0) - return -EINVAL; - r = container_of(resource, struct request, resource); - if (request->length < r->length) - r->length = request->length; - if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) - return -EFAULT; - - fw_send_response(client->device->card, r->request, request->rcode); - kfree(r); - - return 0; -} - -static int ioctl_initiate_bus_reset(struct client *client, void *buffer) -{ - struct fw_cdev_initiate_bus_reset *request = buffer; - int short_reset; - - short_reset = (request->type == FW_CDEV_SHORT_RESET); - - return fw_core_initiate_bus_reset(client->device->card, short_reset); -} - -struct descriptor { - struct fw_descriptor d; - struct client_resource resource; - u32 data[0]; -}; - -static void release_descriptor(struct client *client, - struct client_resource *resource) -{ - struct descriptor *descriptor = - container_of(resource, struct descriptor, resource); - - fw_core_remove_descriptor(&descriptor->d); - kfree(descriptor); -} - -static int ioctl_add_descriptor(struct client *client, void *buffer) -{ - struct fw_cdev_add_descriptor *request = buffer; - struct descriptor *descriptor; - int retval; - - if (request->length > 256) - return -EINVAL; - - descriptor = - kmalloc(sizeof *descriptor + request->length * 4, GFP_KERNEL); - if (descriptor == NULL) - return -ENOMEM; - - if (copy_from_user(descriptor->data, - u64_to_uptr(request->data), request->length * 4)) { - kfree(descriptor); - return -EFAULT; - } - - descriptor->d.length = request->length; - descriptor->d.immediate = request->immediate; - descriptor->d.key = request->key; - descriptor->d.data = descriptor->data; - - retval = fw_core_add_descriptor(&descriptor->d); - if (retval < 0) { - kfree(descriptor); - return retval; - } - - descriptor->resource.release = release_descriptor; - add_client_resource(client, &descriptor->resource); - request->handle = descriptor->resource.handle; - - return 0; -} - -static int ioctl_remove_descriptor(struct client *client, void *buffer) -{ - struct fw_cdev_remove_descriptor *request = buffer; - - return release_client_resource(client, request->handle, NULL); -} - -static void -iso_callback(struct fw_iso_context *context, u32 cycle, - size_t header_length, void *header, void *data) -{ - struct client *client = data; - struct iso_interrupt *interrupt; - - interrupt = kzalloc(sizeof *interrupt + header_length, GFP_ATOMIC); - if (interrupt == NULL) - return; - - interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; - interrupt->interrupt.closure = client->iso_closure; - interrupt->interrupt.cycle = cycle; - interrupt->interrupt.header_length = header_length; - memcpy(interrupt->interrupt.header, header, header_length); - queue_event(client, &interrupt->event, - &interrupt->interrupt, - sizeof interrupt->interrupt + header_length, NULL, 0); -} - -static int ioctl_create_iso_context(struct client *client, void *buffer) -{ - 
struct fw_cdev_create_iso_context *request = buffer; - - if (request->channel > 63) - return -EINVAL; - - switch (request->type) { - case FW_ISO_CONTEXT_RECEIVE: - if (request->header_size < 4 || (request->header_size & 3)) - return -EINVAL; - - break; - - case FW_ISO_CONTEXT_TRANSMIT: - if (request->speed > SCODE_3200) - return -EINVAL; - - break; - - default: - return -EINVAL; - } - - client->iso_closure = request->closure; - client->iso_context = fw_iso_context_create(client->device->card, - request->type, - request->channel, - request->speed, - request->header_size, - iso_callback, client); - if (IS_ERR(client->iso_context)) - return PTR_ERR(client->iso_context); - - /* We only support one context at this time. */ - request->handle = 0; - - return 0; -} - -static int ioctl_queue_iso(struct client *client, void *buffer) -{ - struct fw_cdev_queue_iso *request = buffer; - struct fw_cdev_iso_packet __user *p, *end, *next; - struct fw_iso_context *ctx = client->iso_context; - unsigned long payload, buffer_end, header_length; - int count; - struct { - struct fw_iso_packet packet; - u8 header[256]; - } u; - - if (ctx == NULL || request->handle != 0) - return -EINVAL; - - /* If the user passes a non-NULL data pointer, has mmap()'ed - * the iso buffer, and the pointer points inside the buffer, - * we setup the payload pointers accordingly. Otherwise we - * set them both to 0, which will still let packets with - * payload_length == 0 through. In other words, if no packets - * use the indirect payload, the iso buffer need not be mapped - * and the request->data pointer is ignored.*/ - - payload = (unsigned long)request->data - client->vm_start; - buffer_end = client->buffer.page_count << PAGE_SHIFT; - if (request->data == 0 || client->buffer.pages == NULL || - payload >= buffer_end) { - payload = 0; - buffer_end = 0; - } - - if (!access_ok(VERIFY_READ, request->packets, request->size)) - return -EFAULT; - - p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets); - end = (void __user *)p + request->size; - count = 0; - while (p < end) { - if (__copy_from_user(&u.packet, p, sizeof *p)) - return -EFAULT; - - if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { - header_length = u.packet.header_length; - } else { - /* We require that header_length is a multiple of - * the fixed header size, ctx->header_size */ - if (ctx->header_size == 0) { - if (u.packet.header_length > 0) - return -EINVAL; - } else if (u.packet.header_length % ctx->header_size != 0) { - return -EINVAL; - } - header_length = 0; - } - - next = (struct fw_cdev_iso_packet __user *) - &p->header[header_length / 4]; - if (next > end) - return -EINVAL; - if (__copy_from_user - (u.packet.header, p->header, header_length)) - return -EFAULT; - if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && - u.packet.header_length + u.packet.payload_length > 0) - return -EINVAL; - if (payload + u.packet.payload_length > buffer_end) - return -EINVAL; - - if (fw_iso_context_queue(ctx, &u.packet, - &client->buffer, payload)) - break; - - p = next; - payload += u.packet.payload_length; - count++; - } - - request->size -= uptr_to_u64(p) - request->packets; - request->packets = uptr_to_u64(p); - request->data = client->vm_start + payload; - - return count; -} - -static int ioctl_start_iso(struct client *client, void *buffer) -{ - struct fw_cdev_start_iso *request = buffer; - - if (request->handle != 0) - return -EINVAL; - if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { - if (request->tags == 0 || request->tags > 15) - return -EINVAL; 
- - if (request->sync > 15) - return -EINVAL; - } - - return fw_iso_context_start(client->iso_context, request->cycle, - request->sync, request->tags); -} - -static int ioctl_stop_iso(struct client *client, void *buffer) -{ - struct fw_cdev_stop_iso *request = buffer; - - if (request->handle != 0) - return -EINVAL; - - return fw_iso_context_stop(client->iso_context); -} - -static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { - ioctl_get_info, - ioctl_send_request, - ioctl_allocate, - ioctl_deallocate, - ioctl_send_response, - ioctl_initiate_bus_reset, - ioctl_add_descriptor, - ioctl_remove_descriptor, - ioctl_create_iso_context, - ioctl_queue_iso, - ioctl_start_iso, - ioctl_stop_iso, -}; - -static int -dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) -{ - char buffer[256]; - int retval; - - if (_IOC_TYPE(cmd) != '#' || - _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) - return -EINVAL; - - if (_IOC_DIR(cmd) & _IOC_WRITE) { - if (_IOC_SIZE(cmd) > sizeof buffer || - copy_from_user(buffer, arg, _IOC_SIZE(cmd))) - return -EFAULT; - } - - retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer); - if (retval < 0) - return retval; - - if (_IOC_DIR(cmd) & _IOC_READ) { - if (_IOC_SIZE(cmd) > sizeof buffer || - copy_to_user(arg, buffer, _IOC_SIZE(cmd))) - return -EFAULT; - } - - return 0; -} - -static long -fw_device_op_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct client *client = file->private_data; - - return dispatch_ioctl(client, cmd, (void __user *) arg); -} - -#ifdef CONFIG_COMPAT -static long -fw_device_op_compat_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct client *client = file->private_data; - - return dispatch_ioctl(client, cmd, compat_ptr(arg)); -} -#endif - -static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct client *client = file->private_data; - enum dma_data_direction direction; - unsigned long size; - int page_count, retval; - - /* FIXME: We could support multiple buffers, but we don't. */ - if (client->buffer.pages != NULL) - return -EBUSY; - - if (!(vma->vm_flags & VM_SHARED)) - return -EINVAL; - - if (vma->vm_start & ~PAGE_MASK) - return -EINVAL; - - client->vm_start = vma->vm_start; - size = vma->vm_end - vma->vm_start; - page_count = size >> PAGE_SHIFT; - if (size & ~PAGE_MASK) - return -EINVAL; - - if (vma->vm_flags & VM_WRITE) - direction = DMA_TO_DEVICE; - else - direction = DMA_FROM_DEVICE; - - retval = fw_iso_buffer_init(&client->buffer, client->device->card, - page_count, direction); - if (retval < 0) - return retval; - - retval = fw_iso_buffer_map(&client->buffer, vma); - if (retval < 0) - fw_iso_buffer_destroy(&client->buffer, client->device->card); - - return retval; -} - -static int fw_device_op_release(struct inode *inode, struct file *file) -{ - struct client *client = file->private_data; - struct event *e, *next_e; - struct client_resource *r, *next_r; - unsigned long flags; - - if (client->buffer.pages) - fw_iso_buffer_destroy(&client->buffer, client->device->card); - - if (client->iso_context) - fw_iso_context_destroy(client->iso_context); - - list_for_each_entry_safe(r, next_r, &client->resource_list, link) - r->release(client, r); - - /* FIXME: We should wait for the async tasklets to stop - * running before freeing the memory. 
*/ - - list_for_each_entry_safe(e, next_e, &client->event_list, link) - kfree(e); - - spin_lock_irqsave(&client->device->card->lock, flags); - list_del(&client->link); - spin_unlock_irqrestore(&client->device->card->lock, flags); - - fw_device_put(client->device); - kfree(client); - - return 0; -} - -static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) -{ - struct client *client = file->private_data; - unsigned int mask = 0; - - poll_wait(file, &client->wait, pt); - - if (fw_device_is_shutdown(client->device)) - mask |= POLLHUP | POLLERR; - if (!list_empty(&client->event_list)) - mask |= POLLIN | POLLRDNORM; - - return mask; -} - -const struct file_operations fw_device_ops = { - .owner = THIS_MODULE, - .open = fw_device_op_open, - .read = fw_device_op_read, - .unlocked_ioctl = fw_device_op_ioctl, - .poll = fw_device_op_poll, - .release = fw_device_op_release, - .mmap = fw_device_op_mmap, - -#ifdef CONFIG_COMPAT - .compat_ioctl = fw_device_op_compat_ioctl, -#endif -}; diff --git a/drivers/firewire/fw-device-cdev.h b/drivers/firewire/fw-device-cdev.h deleted file mode 100644 index 026c768e3bf1..000000000000 --- a/drivers/firewire/fw-device-cdev.h +++ /dev/null @@ -1,268 +0,0 @@ -/* -*- c-basic-offset: 8 -*- - * - * fw-device-cdev.h -- Char device interface. - * - * Copyright (C) 2005-2006 Kristian Hoegsberg - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#ifndef __fw_cdev_h -#define __fw_cdev_h - -#include -#include - -#define TCODE_WRITE_QUADLET_REQUEST 0 -#define TCODE_WRITE_BLOCK_REQUEST 1 -#define TCODE_WRITE_RESPONSE 2 -#define TCODE_READ_QUADLET_REQUEST 4 -#define TCODE_READ_BLOCK_REQUEST 5 -#define TCODE_READ_QUADLET_RESPONSE 6 -#define TCODE_READ_BLOCK_RESPONSE 7 -#define TCODE_CYCLE_START 8 -#define TCODE_LOCK_REQUEST 9 -#define TCODE_STREAM_DATA 10 -#define TCODE_LOCK_RESPONSE 11 - -#define TCODE_LOCK_MASK_SWAP 0x11 -#define TCODE_LOCK_COMPARE_SWAP 0x12 -#define TCODE_LOCK_FETCH_ADD 0x13 -#define TCODE_LOCK_LITTLE_ADD 0x14 -#define TCODE_LOCK_BOUNDED_ADD 0x15 -#define TCODE_LOCK_WRAP_ADD 0x16 -#define TCODE_LOCK_VENDOR_DEPENDENT 0x17 - -#define RCODE_COMPLETE 0x0 -#define RCODE_CONFLICT_ERROR 0x4 -#define RCODE_DATA_ERROR 0x5 -#define RCODE_TYPE_ERROR 0x6 -#define RCODE_ADDRESS_ERROR 0x7 - -#define RCODE_SEND_ERROR 0x10 -#define RCODE_CANCELLED 0x11 -#define RCODE_BUSY 0x12 -#define RCODE_GENERATION 0x13 -#define RCODE_NO_ACK 0x14 - -#define SCODE_100 0x0 -#define SCODE_200 0x1 -#define SCODE_400 0x2 -#define SCODE_800 0x3 -#define SCODE_1600 0x4 -#define SCODE_3200 0x5 - -#define FW_CDEV_EVENT_BUS_RESET 0x00 -#define FW_CDEV_EVENT_RESPONSE 0x01 -#define FW_CDEV_EVENT_REQUEST 0x02 -#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 - -/* The 'closure' fields are for user space to use. Data passed in the - * 'closure' field for a request will be returned in the corresponding - * event. 
It's a 64-bit type so that it's a fixed size type big - * enough to hold a pointer on all platforms. */ - -struct fw_cdev_event_common { - __u64 closure; - __u32 type; -}; - -struct fw_cdev_event_bus_reset { - __u64 closure; - __u32 type; - __u32 node_id; - __u32 local_node_id; - __u32 bm_node_id; - __u32 irm_node_id; - __u32 root_node_id; - __u32 generation; -}; - -struct fw_cdev_event_response { - __u64 closure; - __u32 type; - __u32 rcode; - __u32 length; - __u32 data[0]; -}; - -struct fw_cdev_event_request { - __u64 closure; - __u32 type; - __u32 tcode; - __u64 offset; - __u32 handle; - __u32 length; - __u32 data[0]; -}; - -struct fw_cdev_event_iso_interrupt { - __u64 closure; - __u32 type; - __u32 cycle; - __u32 header_length; /* Length in bytes of following headers. */ - __u32 header[0]; -}; - -union fw_cdev_event { - struct fw_cdev_event_common common; - struct fw_cdev_event_bus_reset bus_reset; - struct fw_cdev_event_response response; - struct fw_cdev_event_request request; - struct fw_cdev_event_iso_interrupt iso_interrupt; -}; - -#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) -#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) -#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) -#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) -#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) -#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) -#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) -#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) - -#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) -#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) -#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) -#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) - -/* FW_CDEV_VERSION History - * - * 1 Feb 18, 2007: Initial version. - */ -#define FW_CDEV_VERSION 1 - -struct fw_cdev_get_info { - /* The version field is just a running serial number. We - * never break backwards compatibility. Userspace passes in - * the version it expects and the kernel passes back the - * highest version it can provide. Even if the structs in - * this interface are extended in a later version, the kernel - * will not copy back more data than what was present in the - * interface version userspace expects. */ - __u32 version; - - /* If non-zero, at most rom_length bytes of config rom will be - * copied into that user space address. In either case, - * rom_length is updated with the actual length of the config - * rom. */ - __u32 rom_length; - __u64 rom; - - /* If non-zero, a fw_cdev_event_bus_reset struct will be - * copied here with the current state of the bus. This does - * not cause a bus reset to happen. The value of closure in - * this and sub-sequent bus reset events is set to - * bus_reset_closure. */ - __u64 bus_reset; - __u64 bus_reset_closure; - - /* The index of the card this devices belongs to. 
*/ - __u32 card; -}; - -struct fw_cdev_send_request { - __u32 tcode; - __u32 length; - __u64 offset; - __u64 closure; - __u64 data; - __u32 generation; -}; - -struct fw_cdev_send_response { - __u32 rcode; - __u32 length; - __u64 data; - __u32 handle; -}; - -struct fw_cdev_allocate { - __u64 offset; - __u64 closure; - __u32 length; - __u32 handle; -}; - -struct fw_cdev_deallocate { - __u32 handle; -}; - -#define FW_CDEV_LONG_RESET 0 -#define FW_CDEV_SHORT_RESET 1 - -struct fw_cdev_initiate_bus_reset { - __u32 type; -}; - -struct fw_cdev_add_descriptor { - __u32 immediate; - __u32 key; - __u64 data; - __u32 length; - __u32 handle; -}; - -struct fw_cdev_remove_descriptor { - __u32 handle; -}; - -#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0 -#define FW_CDEV_ISO_CONTEXT_RECEIVE 1 - -#define FW_CDEV_ISO_CONTEXT_MATCH_TAG0 1 -#define FW_CDEV_ISO_CONTEXT_MATCH_TAG1 2 -#define FW_CDEV_ISO_CONTEXT_MATCH_TAG2 4 -#define FW_CDEV_ISO_CONTEXT_MATCH_TAG3 8 -#define FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS 15 - -struct fw_cdev_create_iso_context { - __u32 type; - __u32 header_size; - __u32 channel; - __u32 speed; - __u64 closure; - __u32 handle; -}; - -struct fw_cdev_iso_packet { - __u16 payload_length; /* Length of indirect payload. */ - __u32 interrupt : 1; /* Generate interrupt on this packet */ - __u32 skip : 1; /* Set to not send packet at all. */ - __u32 tag : 2; - __u32 sy : 4; - __u32 header_length : 8; /* Length of immediate header. */ - __u32 header[0]; -}; - -struct fw_cdev_queue_iso { - __u64 packets; - __u64 data; - __u32 size; - __u32 handle; -}; - -struct fw_cdev_start_iso { - __s32 cycle; - __u32 sync; - __u32 tags; - __u32 handle; -}; - -struct fw_cdev_stop_iso { - __u32 handle; -}; - -#endif /* __fw_cdev_h */ diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h new file mode 100644 index 000000000000..026c768e3bf1 --- /dev/null +++ b/include/linux/firewire-cdev.h @@ -0,0 +1,268 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-device-cdev.h -- Char device interface. + * + * Copyright (C) 2005-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __fw_cdev_h +#define __fw_cdev_h + +#include +#include + +#define TCODE_WRITE_QUADLET_REQUEST 0 +#define TCODE_WRITE_BLOCK_REQUEST 1 +#define TCODE_WRITE_RESPONSE 2 +#define TCODE_READ_QUADLET_REQUEST 4 +#define TCODE_READ_BLOCK_REQUEST 5 +#define TCODE_READ_QUADLET_RESPONSE 6 +#define TCODE_READ_BLOCK_RESPONSE 7 +#define TCODE_CYCLE_START 8 +#define TCODE_LOCK_REQUEST 9 +#define TCODE_STREAM_DATA 10 +#define TCODE_LOCK_RESPONSE 11 + +#define TCODE_LOCK_MASK_SWAP 0x11 +#define TCODE_LOCK_COMPARE_SWAP 0x12 +#define TCODE_LOCK_FETCH_ADD 0x13 +#define TCODE_LOCK_LITTLE_ADD 0x14 +#define TCODE_LOCK_BOUNDED_ADD 0x15 +#define TCODE_LOCK_WRAP_ADD 0x16 +#define TCODE_LOCK_VENDOR_DEPENDENT 0x17 + +#define RCODE_COMPLETE 0x0 +#define RCODE_CONFLICT_ERROR 0x4 +#define RCODE_DATA_ERROR 0x5 +#define RCODE_TYPE_ERROR 0x6 +#define RCODE_ADDRESS_ERROR 0x7 + +#define RCODE_SEND_ERROR 0x10 +#define RCODE_CANCELLED 0x11 +#define RCODE_BUSY 0x12 +#define RCODE_GENERATION 0x13 +#define RCODE_NO_ACK 0x14 + +#define SCODE_100 0x0 +#define SCODE_200 0x1 +#define SCODE_400 0x2 +#define SCODE_800 0x3 +#define SCODE_1600 0x4 +#define SCODE_3200 0x5 + +#define FW_CDEV_EVENT_BUS_RESET 0x00 +#define FW_CDEV_EVENT_RESPONSE 0x01 +#define FW_CDEV_EVENT_REQUEST 0x02 +#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 + +/* The 'closure' fields are for user space to use. Data passed in the + * 'closure' field for a request will be returned in the corresponding + * event. It's a 64-bit type so that it's a fixed size type big + * enough to hold a pointer on all platforms. */ + +struct fw_cdev_event_common { + __u64 closure; + __u32 type; +}; + +struct fw_cdev_event_bus_reset { + __u64 closure; + __u32 type; + __u32 node_id; + __u32 local_node_id; + __u32 bm_node_id; + __u32 irm_node_id; + __u32 root_node_id; + __u32 generation; +}; + +struct fw_cdev_event_response { + __u64 closure; + __u32 type; + __u32 rcode; + __u32 length; + __u32 data[0]; +}; + +struct fw_cdev_event_request { + __u64 closure; + __u32 type; + __u32 tcode; + __u64 offset; + __u32 handle; + __u32 length; + __u32 data[0]; +}; + +struct fw_cdev_event_iso_interrupt { + __u64 closure; + __u32 type; + __u32 cycle; + __u32 header_length; /* Length in bytes of following headers. */ + __u32 header[0]; +}; + +union fw_cdev_event { + struct fw_cdev_event_common common; + struct fw_cdev_event_bus_reset bus_reset; + struct fw_cdev_event_response response; + struct fw_cdev_event_request request; + struct fw_cdev_event_iso_interrupt iso_interrupt; +}; + +#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) +#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) +#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) +#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) +#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) +#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) +#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) +#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) + +#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) +#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) +#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) +#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) + +/* FW_CDEV_VERSION History + * + * 1 Feb 18, 2007: Initial version. 
+ */ +#define FW_CDEV_VERSION 1 + +struct fw_cdev_get_info { + /* The version field is just a running serial number. We + * never break backwards compatibility. Userspace passes in + * the version it expects and the kernel passes back the + * highest version it can provide. Even if the structs in + * this interface are extended in a later version, the kernel + * will not copy back more data than what was present in the + * interface version userspace expects. */ + __u32 version; + + /* If non-zero, at most rom_length bytes of config rom will be + * copied into that user space address. In either case, + * rom_length is updated with the actual length of the config + * rom. */ + __u32 rom_length; + __u64 rom; + + /* If non-zero, a fw_cdev_event_bus_reset struct will be + * copied here with the current state of the bus. This does + * not cause a bus reset to happen. The value of closure in + * this and sub-sequent bus reset events is set to + * bus_reset_closure. */ + __u64 bus_reset; + __u64 bus_reset_closure; + + /* The index of the card this devices belongs to. */ + __u32 card; +}; + +struct fw_cdev_send_request { + __u32 tcode; + __u32 length; + __u64 offset; + __u64 closure; + __u64 data; + __u32 generation; +}; + +struct fw_cdev_send_response { + __u32 rcode; + __u32 length; + __u64 data; + __u32 handle; +}; + +struct fw_cdev_allocate { + __u64 offset; + __u64 closure; + __u32 length; + __u32 handle; +}; + +struct fw_cdev_deallocate { + __u32 handle; +}; + +#define FW_CDEV_LONG_RESET 0 +#define FW_CDEV_SHORT_RESET 1 + +struct fw_cdev_initiate_bus_reset { + __u32 type; +}; + +struct fw_cdev_add_descriptor { + __u32 immediate; + __u32 key; + __u64 data; + __u32 length; + __u32 handle; +}; + +struct fw_cdev_remove_descriptor { + __u32 handle; +}; + +#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0 +#define FW_CDEV_ISO_CONTEXT_RECEIVE 1 + +#define FW_CDEV_ISO_CONTEXT_MATCH_TAG0 1 +#define FW_CDEV_ISO_CONTEXT_MATCH_TAG1 2 +#define FW_CDEV_ISO_CONTEXT_MATCH_TAG2 4 +#define FW_CDEV_ISO_CONTEXT_MATCH_TAG3 8 +#define FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS 15 + +struct fw_cdev_create_iso_context { + __u32 type; + __u32 header_size; + __u32 channel; + __u32 speed; + __u64 closure; + __u32 handle; +}; + +struct fw_cdev_iso_packet { + __u16 payload_length; /* Length of indirect payload. */ + __u32 interrupt : 1; /* Generate interrupt on this packet */ + __u32 skip : 1; /* Set to not send packet at all. */ + __u32 tag : 2; + __u32 sy : 4; + __u32 header_length : 8; /* Length of immediate header. 
*/ + __u32 header[0]; +}; + +struct fw_cdev_queue_iso { + __u64 packets; + __u64 data; + __u32 size; + __u32 handle; +}; + +struct fw_cdev_start_iso { + __s32 cycle; + __u32 sync; + __u32 tags; + __u32 handle; +}; + +struct fw_cdev_stop_iso { + __u32 handle; +}; + +#endif /* __fw_cdev_h */ -- cgit v1.2.3 From 84767d00a8fd54dd97866561f6e2ee246c8e1cdc Mon Sep 17 00:00:00 2001 From: Roman Moravcik Date: Tue, 1 May 2007 00:39:13 -0400 Subject: Input: gpio_keys - add support for switches (EV_SW) Signed-off-by: Roman Moravcik Signed-off-by: Paul Sokolovsky Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/gpio_keys.c | 19 ++++++++++++------- include/linux/gpio_keys.h | 3 ++- 2 files changed, 14 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 0d2796cdf738..739212252b09 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -35,11 +35,14 @@ static irqreturn_t gpio_keys_isr(int irq, void *dev_id) struct input_dev *input = platform_get_drvdata(pdev); for (i = 0; i < pdata->nbuttons; i++) { - int gpio = pdata->buttons[i].gpio; + struct gpio_keys_button *button = &pdata->buttons[i]; + int gpio = button->gpio; + if (irq == gpio_to_irq(gpio)) { - int state = (gpio_get_value(gpio) ? 1 : 0) ^ (pdata->buttons[i].active_low); + unsigned int type = button->type ?: EV_KEY; + int state = (gpio_get_value(gpio) ? 1 : 0) ^ button->active_low; - input_report_key(input, pdata->buttons[i].keycode, state); + input_event(input, type, button->code, !!state); input_sync(input); } } @@ -71,19 +74,21 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev) input->id.version = 0x0100; for (i = 0; i < pdata->nbuttons; i++) { - int code = pdata->buttons[i].keycode; - int irq = gpio_to_irq(pdata->buttons[i].gpio); + struct gpio_keys_button *button = &pdata->buttons[i]; + int irq = gpio_to_irq(button->gpio); + unsigned int type = button->type ?: EV_KEY; set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); error = request_irq(irq, gpio_keys_isr, IRQF_SAMPLE_RANDOM, - pdata->buttons[i].desc ? pdata->buttons[i].desc : "gpio_keys", + button->desc ? 
button->desc : "gpio_keys", pdev); if (error) { printk(KERN_ERR "gpio-keys: unable to claim irq %d; error %d\n", irq, error); goto fail; } - set_bit(code, input->keybit); + + input_set_capability(input, type, button->code); } error = input_register_device(input); diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index 2b217c7b9312..265d17830a0f 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -3,10 +3,11 @@ struct gpio_keys_button { /* Configuration parameters */ - int keycode; + int code; /* input event code (KEY_*, SW_*) */ int gpio; int active_low; char *desc; + int type; /* input event type (EV_KEY, EV_SW) */ }; struct gpio_keys_platform_data { -- cgit v1.2.3 From d8a5ad75cc4d577987964e37a4c43b1c648c201e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 2 Apr 2007 18:48:28 -0400 Subject: NFS: Cleanup the coalescing code Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 117 ++++++++++++++++++++++++++++++++++------------- fs/nfs/read.c | 24 +++++----- fs/nfs/write.c | 11 ++--- include/linux/nfs_page.h | 13 +++++- 4 files changed, 114 insertions(+), 51 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index ca4b1d4ff42b..7017eb0e0bc8 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -223,48 +223,101 @@ out: } /** - * nfs_coalesce_requests - Split coalesced requests out from a list. + * nfs_pageio_init - initialise a page io descriptor + * @desc: pointer to descriptor + * @iosize: io block size + */ +void nfs_pageio_init(struct nfs_pageio_descriptor *desc, unsigned int bsize) +{ + INIT_LIST_HEAD(&desc->pg_list); + desc->pg_count = 0; + desc->pg_bsize = bsize; + desc->pg_base = 0; +} + +/** + * nfs_can_coalesce_requests - test two requests for compatibility + * @prev: pointer to nfs_page + * @req: pointer to nfs_page + * + * The nfs_page structures 'prev' and 'req' are compared to ensure that the + * page data area they describe is contiguous, and that their RPC + * credentials, NFSv4 open state, and lockowners are the same. + * + * Return 'true' if this is the case, else return 'false'. + */ +static int nfs_can_coalesce_requests(struct nfs_page *prev, + struct nfs_page *req) +{ + if (req->wb_context->cred != prev->wb_context->cred) + return 0; + if (req->wb_context->lockowner != prev->wb_context->lockowner) + return 0; + if (req->wb_context->state != prev->wb_context->state) + return 0; + if (req->wb_index != (prev->wb_index + 1)) + return 0; + if (req->wb_pgbase != 0) + return 0; + if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE) + return 0; + return 1; +} + +/** + * nfs_pageio_add_request - Attempt to coalesce a request into a page list. + * @desc: destination io descriptor + * @req: request + * + * Returns true if the request 'req' was successfully coalesced into the + * existing list of pages 'desc'. + */ +static int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, + struct nfs_page *req) +{ + size_t newlen = req->wb_bytes; + + if (desc->pg_count != 0) { + struct nfs_page *prev; + + /* + * FIXME: ideally we should be able to coalesce all requests + * that are not block boundary aligned, but currently this + * is problematic for the case of bsize < PAGE_CACHE_SIZE, + * since nfs_flush_multi and nfs_pagein_multi assume you + * can have only one struct nfs_page. 
+ */ + newlen += desc->pg_count; + if (desc->pg_base + newlen > desc->pg_bsize) + return 0; + prev = nfs_list_entry(desc->pg_list.prev); + if (!nfs_can_coalesce_requests(prev, req)) + return 0; + } else + desc->pg_base = req->wb_pgbase; + nfs_list_remove_request(req); + nfs_list_add_request(req, &desc->pg_list); + desc->pg_count = newlen; + return 1; +} + +/** + * nfs_pageio_add_list - Split coalesced requests out from a list. + * @desc: destination io descriptor * @head: source list - * @dst: destination list - * @nmax: maximum number of requests to coalesce * * Moves a maximum of 'nmax' elements from one list to another. * The elements are checked to ensure that they form a contiguous set * of pages, and that the RPC credentials are the same. */ -int -nfs_coalesce_requests(struct list_head *head, struct list_head *dst, - unsigned int nmax) +void nfs_pageio_add_list(struct nfs_pageio_descriptor *desc, + struct list_head *head) { - struct nfs_page *req = NULL; - unsigned int npages = 0; - while (!list_empty(head)) { - struct nfs_page *prev = req; - - req = nfs_list_entry(head->next); - if (prev) { - if (req->wb_context->cred != prev->wb_context->cred) - break; - if (req->wb_context->lockowner != prev->wb_context->lockowner) - break; - if (req->wb_context->state != prev->wb_context->state) - break; - if (req->wb_index != (prev->wb_index + 1)) - break; - - if (req->wb_pgbase != 0) - break; - } - nfs_list_remove_request(req); - nfs_list_add_request(req, dst); - npages++; - if (req->wb_pgbase + req->wb_bytes != PAGE_CACHE_SIZE) - break; - if (npages >= nmax) + struct nfs_page *req = nfs_list_entry(head->next); + if (!nfs_pageio_add_request(desc, req)) break; } - return npages; } #define NFS_SCAN_MAXENTRIES 16 diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 6ab4d5a9edf2..97f0f42e136d 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -328,24 +328,26 @@ out_bad: } static int -nfs_pagein_list(struct list_head *head, int rpages) +nfs_pagein_list(struct list_head *head, unsigned int rsize) { - LIST_HEAD(one_request); - struct nfs_page *req; - int error = 0; - unsigned int pages = 0; + struct nfs_pageio_descriptor desc; + struct nfs_page *req; + unsigned int pages = 0; + int error = 0; while (!list_empty(head)) { - pages += nfs_coalesce_requests(head, &one_request, rpages); - req = nfs_list_entry(one_request.next); - error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode); + nfs_pageio_init(&desc, rsize); + nfs_pageio_add_list(&desc, head); + req = nfs_list_entry(desc.pg_list.next); + error = nfs_pagein_one(&desc.pg_list, req->wb_context->dentry->d_inode); if (error < 0) break; + pages += (desc.pg_count + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; } - if (error >= 0) - return pages; nfs_async_read_error(head); + if (error >= 0) + return pages; return error; } @@ -595,7 +597,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping, filp->private_data); ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); if (!list_empty(&head)) { - int err = nfs_pagein_list(&head, server->rpages); + int err = nfs_pagein_list(&head, server->rsize); if (!ret) nfs_add_stats(inode, NFSIOS_READPAGES, err); ret = err; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index dbad89c8e427..b03ec1ba4d75 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -945,9 +945,8 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how) { - LIST_HEAD(one_request); + struct 
nfs_pageio_descriptor desc; int (*flush_one)(struct inode *, struct list_head *, int); - struct nfs_page *req; int wpages = NFS_SERVER(inode)->wpages; int wsize = NFS_SERVER(inode)->wsize; int error; @@ -961,16 +960,16 @@ static int nfs_flush_list(struct inode *inode, struct list_head *head, int npage how |= FLUSH_STABLE; do { - nfs_coalesce_requests(head, &one_request, wpages); - req = nfs_list_entry(one_request.next); - error = flush_one(inode, &one_request, how); + nfs_pageio_init(&desc, wsize); + nfs_pageio_add_list(&desc, head); + error = flush_one(inode, &desc.pg_list, how); if (error < 0) goto out_err; } while (!list_empty(head)); return 0; out_err: while (!list_empty(head)) { - req = nfs_list_entry(head->next); + struct nfs_page *req = nfs_list_entry(head->next); nfs_list_remove_request(req); nfs_redirty_request(req); nfs_end_page_writeback(req->wb_page); diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 16b0266b14fd..3ef8e0441473 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -48,6 +48,13 @@ struct nfs_page { struct nfs_writeverf wb_verf; /* Commit cookie */ }; +struct nfs_pageio_descriptor { + struct list_head pg_list; + size_t pg_count; + size_t pg_bsize; + unsigned int pg_base; +}; + #define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, @@ -64,8 +71,10 @@ extern long nfs_scan_dirty(struct address_space *mapping, struct list_head *dst); extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct list_head *dst, unsigned long idx_start, unsigned int npages); -extern int nfs_coalesce_requests(struct list_head *, struct list_head *, - unsigned int); +extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, + size_t iosize); +extern void nfs_pageio_add_list(struct nfs_pageio_descriptor *, + struct list_head *); extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); extern int nfs_set_page_writeback_locked(struct nfs_page *req); -- cgit v1.2.3 From bcb71bba7e64f0442d0ca339d7d3117a7060589f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 2 Apr 2007 18:48:28 -0400 Subject: NFS: Another cleanup of the read/write request coalescing code Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 71 ++++++++++++++++++++++++++++++++++++++++++++---- fs/nfs/read.c | 62 ++++++++++++++++++------------------------ fs/nfs/write.c | 53 ++++++++++++++---------------------- include/linux/nfs_page.h | 14 ++++++++-- 4 files changed, 125 insertions(+), 75 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 7017eb0e0bc8..528128545d66 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -225,14 +225,26 @@ out: /** * nfs_pageio_init - initialise a page io descriptor * @desc: pointer to descriptor - * @iosize: io block size + * @inode: pointer to inode + * @doio: pointer to io function + * @bsize: io block size + * @io_flags: extra parameters for the io function */ -void nfs_pageio_init(struct nfs_pageio_descriptor *desc, unsigned int bsize) +void nfs_pageio_init(struct nfs_pageio_descriptor *desc, + struct inode *inode, + int (*doio)(struct inode *, struct list_head *, size_t, int), + unsigned int bsize, + int io_flags) { INIT_LIST_HEAD(&desc->pg_list); + desc->pg_bytes_written = 0; desc->pg_count = 0; desc->pg_bsize = bsize; desc->pg_base = 0; + desc->pg_inode = inode; + desc->pg_doio = doio; + desc->pg_ioflags = io_flags; + desc->pg_error = 0; 
} /** @@ -265,15 +277,15 @@ static int nfs_can_coalesce_requests(struct nfs_page *prev, } /** - * nfs_pageio_add_request - Attempt to coalesce a request into a page list. + * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list. * @desc: destination io descriptor * @req: request * * Returns true if the request 'req' was successfully coalesced into the * existing list of pages 'desc'. */ -static int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, - struct nfs_page *req) +static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, + struct nfs_page *req) { size_t newlen = req->wb_bytes; @@ -301,6 +313,46 @@ static int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, return 1; } +/* + * Helper for nfs_pageio_add_request and nfs_pageio_complete + */ +static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) +{ + if (!list_empty(&desc->pg_list)) { + int error = desc->pg_doio(desc->pg_inode, + &desc->pg_list, + desc->pg_count, + desc->pg_ioflags); + if (error < 0) + desc->pg_error = error; + else + desc->pg_bytes_written += desc->pg_count; + } + if (list_empty(&desc->pg_list)) { + desc->pg_count = 0; + desc->pg_base = 0; + } +} + +/** + * nfs_pageio_add_request - Attempt to coalesce a request into a page list. + * @desc: destination io descriptor + * @req: request + * + * Returns true if the request 'req' was successfully coalesced into the + * existing list of pages 'desc'. + */ +static int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, + struct nfs_page *req) +{ + while (!nfs_pageio_do_add_request(desc, req)) { + nfs_pageio_doio(desc); + if (desc->pg_error < 0) + return 0; + } + return 1; +} + /** * nfs_pageio_add_list - Split coalesced requests out from a list. * @desc: destination io descriptor @@ -320,6 +372,15 @@ void nfs_pageio_add_list(struct nfs_pageio_descriptor *desc, } } +/** + * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor + * @desc: pointer to io descriptor + */ +void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) +{ + nfs_pageio_doio(desc); +} + #define NFS_SCAN_MAXENTRIES 16 /** * nfs_scan_dirty - Scan the radix tree for dirty requests diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 97f0f42e136d..0effa74992df 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -27,7 +27,8 @@ #define NFSDBG_FACILITY NFSDBG_PAGECACHE -static int nfs_pagein_one(struct list_head *, struct inode *); +static int nfs_pagein_multi(struct inode *, struct list_head *, size_t, int); +static int nfs_pagein_one(struct inode *, struct list_head *, size_t, int); static const struct rpc_call_ops nfs_read_partial_ops; static const struct rpc_call_ops nfs_read_full_ops; @@ -133,7 +134,10 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); nfs_list_add_request(new, &one_request); - nfs_pagein_one(&one_request, inode); + if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) + nfs_pagein_multi(inode, &one_request, len, 0); + else + nfs_pagein_one(inode, &one_request, len, 0); return 0; } @@ -230,7 +234,7 @@ static void nfs_execute_read(struct nfs_read_data *data) * won't see the new data until our attribute cache is updated. This is more * or less conventional NFS client behavior. 
*/ -static int nfs_pagein_multi(struct list_head *head, struct inode *inode) +static int nfs_pagein_multi(struct inode *inode, struct list_head *head, size_t count, int flags) { struct nfs_page *req = nfs_list_entry(head->next); struct page *page = req->wb_page; @@ -242,7 +246,7 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode) nfs_list_remove_request(req); - nbytes = req->wb_bytes; + nbytes = count; do { size_t len = min(nbytes,rsize); @@ -258,23 +262,19 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode) ClearPageError(page); offset = 0; - nbytes = req->wb_bytes; + nbytes = count; do { data = list_entry(list.next, struct nfs_read_data, pages); list_del_init(&data->pages); data->pagevec[0] = page; - if (nbytes > rsize) { - nfs_read_rpcsetup(req, data, &nfs_read_partial_ops, - rsize, offset); - offset += rsize; - nbytes -= rsize; - } else { - nfs_read_rpcsetup(req, data, &nfs_read_partial_ops, - nbytes, offset); - nbytes = 0; - } + if (nbytes < rsize) + rsize = nbytes; + nfs_read_rpcsetup(req, data, &nfs_read_partial_ops, + rsize, offset); + offset += rsize; + nbytes -= rsize; nfs_execute_read(data); } while (nbytes != 0); @@ -291,30 +291,24 @@ out_bad: return -ENOMEM; } -static int nfs_pagein_one(struct list_head *head, struct inode *inode) +static int nfs_pagein_one(struct inode *inode, struct list_head *head, size_t count, int flags) { struct nfs_page *req; struct page **pages; struct nfs_read_data *data; - unsigned int count; - - if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) - return nfs_pagein_multi(head, inode); - data = nfs_readdata_alloc(NFS_SERVER(inode)->rsize); + data = nfs_readdata_alloc(count); if (!data) goto out_bad; INIT_LIST_HEAD(&data->pages); pages = data->pagevec; - count = 0; while (!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); nfs_list_add_request(req, &data->pages); ClearPageError(req->wb_page); *pages++ = req->wb_page; - count += req->wb_bytes; } req = nfs_list_entry(data->pages.next); @@ -328,22 +322,20 @@ out_bad: } static int -nfs_pagein_list(struct list_head *head, unsigned int rsize) +nfs_pagein_list(struct inode *inode, struct list_head *head, unsigned int rsize) { struct nfs_pageio_descriptor desc; - struct nfs_page *req; unsigned int pages = 0; int error = 0; - while (!list_empty(head)) { - nfs_pageio_init(&desc, rsize); - nfs_pageio_add_list(&desc, head); - req = nfs_list_entry(desc.pg_list.next); - error = nfs_pagein_one(&desc.pg_list, req->wb_context->dentry->d_inode); - if (error < 0) - break; - pages += (desc.pg_count + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - } + if (rsize < PAGE_CACHE_SIZE) + nfs_pageio_init(&desc, inode, nfs_pagein_multi, rsize, 0); + else + nfs_pageio_init(&desc, inode, nfs_pagein_one, rsize, 0); + + nfs_pageio_add_list(&desc, head); + nfs_pageio_complete(&desc); + pages += (desc.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; nfs_async_read_error(head); if (error >= 0) @@ -597,7 +589,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping, filp->private_data); ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); if (!list_empty(&head)) { - int err = nfs_pagein_list(&head, server->rsize); + int err = nfs_pagein_list(inode, &head, server->rsize); if (!ret) nfs_add_stats(inode, NFSIOS_READPAGES, err); ret = err; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index b03ec1ba4d75..b6749967eead 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -835,7 +835,7 @@ static void nfs_execute_write(struct 
nfs_write_data *data) * Generate multiple small requests to write out a single * contiguous dirty area on one page. */ -static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) +static int nfs_flush_multi(struct inode *inode, struct list_head *head, size_t count, int how) { struct nfs_page *req = nfs_list_entry(head->next); struct page *page = req->wb_page; @@ -847,7 +847,7 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) nfs_list_remove_request(req); - nbytes = req->wb_bytes; + nbytes = count; do { size_t len = min(nbytes, wsize); @@ -862,23 +862,19 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) ClearPageError(page); offset = 0; - nbytes = req->wb_bytes; + nbytes = count; do { data = list_entry(list.next, struct nfs_write_data, pages); list_del_init(&data->pages); data->pagevec[0] = page; - if (nbytes > wsize) { - nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, - wsize, offset, how); - offset += wsize; - nbytes -= wsize; - } else { - nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, - nbytes, offset, how); - nbytes = 0; - } + if (nbytes < wsize) + wsize = nbytes; + nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, + wsize, offset, how); + offset += wsize; + nbytes -= wsize; nfs_execute_write(data); } while (nbytes != 0); @@ -904,26 +900,23 @@ out_bad: * This is the case if nfs_updatepage detects a conflicting request * that has been written but not committed. */ -static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) +static int nfs_flush_one(struct inode *inode, struct list_head *head, size_t count, int how) { struct nfs_page *req; struct page **pages; struct nfs_write_data *data; - unsigned int count; - data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize); + data = nfs_writedata_alloc(count); if (!data) goto out_bad; pages = data->pagevec; - count = 0; while (!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); nfs_list_add_request(req, &data->pages); ClearPageError(req->wb_page); *pages++ = req->wb_page; - count += req->wb_bytes; } req = nfs_list_entry(data->pages.next); @@ -946,28 +939,22 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how) { struct nfs_pageio_descriptor desc; - int (*flush_one)(struct inode *, struct list_head *, int); int wpages = NFS_SERVER(inode)->wpages; int wsize = NFS_SERVER(inode)->wsize; - int error; - flush_one = nfs_flush_one; - if (wsize < PAGE_CACHE_SIZE) - flush_one = nfs_flush_multi; /* For single writes, FLUSH_STABLE is more efficient */ if (npages <= wpages && npages == NFS_I(inode)->npages && nfs_list_entry(head->next)->wb_bytes <= wsize) how |= FLUSH_STABLE; - do { - nfs_pageio_init(&desc, wsize); - nfs_pageio_add_list(&desc, head); - error = flush_one(inode, &desc.pg_list, how); - if (error < 0) - goto out_err; - } while (!list_empty(head)); - return 0; -out_err: + if (wsize < PAGE_CACHE_SIZE) + nfs_pageio_init(&desc, inode, nfs_flush_multi, wsize, how); + else + nfs_pageio_init(&desc, inode, nfs_flush_one, wsize, how); + nfs_pageio_add_list(&desc, head); + nfs_pageio_complete(&desc); + if (desc.pg_error == 0) + return 0; while (!list_empty(head)) { struct nfs_page *req = nfs_list_entry(head->next); nfs_list_remove_request(req); @@ -975,7 +962,7 @@ out_err: nfs_end_page_writeback(req->wb_page); nfs_clear_page_writeback(req); } - return error; + return desc.pg_error; 
} /* diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 3ef8e0441473..91c7b18c47d8 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -50,9 +50,15 @@ struct nfs_page { struct nfs_pageio_descriptor { struct list_head pg_list; + unsigned long pg_bytes_written; size_t pg_count; size_t pg_bsize; unsigned int pg_base; + + struct inode *pg_inode; + int (*pg_doio)(struct inode *, struct list_head *, size_t, int); + int pg_ioflags; + int pg_error; }; #define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) @@ -71,10 +77,14 @@ extern long nfs_scan_dirty(struct address_space *mapping, struct list_head *dst); extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct list_head *dst, unsigned long idx_start, unsigned int npages); -extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, - size_t iosize); +extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, + struct inode *inode, + int (*doio)(struct inode *, struct list_head *, size_t, int), + size_t bsize, + int how); extern void nfs_pageio_add_list(struct nfs_pageio_descriptor *, struct list_head *); +extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); extern int nfs_set_page_writeback_locked(struct nfs_page *req); -- cgit v1.2.3 From 8b09bee3083897e375bd0bf9d60f48daedfab3e0 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 2 Apr 2007 18:48:28 -0400 Subject: NFS: Cleanup for nfs_readpages() Do the coalescing of read requests into block sized requests at start of I/O as we scan through the pages instead of going through a second pass. Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 4 ++-- fs/nfs/read.c | 47 +++++++++++++++-------------------------------- include/linux/nfs_page.h | 2 ++ 3 files changed, 19 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 528128545d66..094537ddd344 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -342,8 +342,8 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) * Returns true if the request 'req' was successfully coalesced into the * existing list of pages 'desc'. */ -static int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, - struct nfs_page *req) +int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, + struct nfs_page *req) { while (!nfs_pageio_do_add_request(desc, req)) { nfs_pageio_doio(desc); diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 0effa74992df..f0016062340d 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -321,28 +321,6 @@ out_bad: return -ENOMEM; } -static int -nfs_pagein_list(struct inode *inode, struct list_head *head, unsigned int rsize) -{ - struct nfs_pageio_descriptor desc; - unsigned int pages = 0; - int error = 0; - - if (rsize < PAGE_CACHE_SIZE) - nfs_pageio_init(&desc, inode, nfs_pagein_multi, rsize, 0); - else - nfs_pageio_init(&desc, inode, nfs_pagein_one, rsize, 0); - - nfs_pageio_add_list(&desc, head); - nfs_pageio_complete(&desc); - pages += (desc.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - - nfs_async_read_error(head); - if (error >= 0) - return pages; - return error; -} - /* * This is the callback from RPC telling us whether a reply was * received or some error occurred (timeout or socket shutdown). 
@@ -532,7 +510,7 @@ out_error: } struct nfs_readdesc { - struct list_head *head; + struct nfs_pageio_descriptor *pgio; struct nfs_open_context *ctx; }; @@ -556,19 +534,21 @@ readpage_async_filler(void *data, struct page *page) } if (len < PAGE_CACHE_SIZE) memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); - nfs_list_add_request(new, desc->head); + nfs_pageio_add_request(desc->pgio, new); return 0; } int nfs_readpages(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { - LIST_HEAD(head); + struct nfs_pageio_descriptor pgio; struct nfs_readdesc desc = { - .head = &head, + .pgio = &pgio, }; struct inode *inode = mapping->host; struct nfs_server *server = NFS_SERVER(inode); + size_t rsize = server->rsize; + unsigned long npages; int ret = -ESTALE; dprintk("NFS: nfs_readpages (%s/%Ld %d)\n", @@ -587,13 +567,16 @@ int nfs_readpages(struct file *filp, struct address_space *mapping, } else desc.ctx = get_nfs_open_context((struct nfs_open_context *) filp->private_data); + if (rsize < PAGE_CACHE_SIZE) + nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0); + else + nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0); + ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); - if (!list_empty(&head)) { - int err = nfs_pagein_list(inode, &head, server->rsize); - if (!ret) - nfs_add_stats(inode, NFSIOS_READPAGES, err); - ret = err; - } + + nfs_pageio_complete(&pgio); + npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + nfs_add_stats(inode, NFSIOS_READPAGES, npages); put_nfs_open_context(desc.ctx); out: return ret; diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 91c7b18c47d8..b8b7bca3bac8 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -82,6 +82,8 @@ extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, int (*doio)(struct inode *, struct list_head *, size_t, int), size_t bsize, int how); +extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, + struct nfs_page *); extern void nfs_pageio_add_list(struct nfs_pageio_descriptor *, struct list_head *); extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); -- cgit v1.2.3 From c63c7b051395368573779c8309aa5c990dcf2f96 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 2 Apr 2007 19:29:52 -0400 Subject: NFS: Fix a race when doing NFS write coalescing Currently we do write coalescing in a very inefficient manner: one pass in generic_writepages() in order to lock the pages for writing, then one pass in nfs_flush_mapping() and/or nfs_sync_mapping_wait() in order to gather the locked pages for coalescing into RPC requests of size "wsize". In fact, it turns out there is actually a deadlock possible here since we only start I/O on the second pass. If the user signals the process while we're in nfs_sync_mapping_wait(), for instance, then we may exit before starting I/O on all the requests that have been queued up. 
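To illustrate the idea behind the fix outside the kernel: the sketch below is a minimal user-space model of the single-pass scheme, where a small descriptor coalesces contiguous requests as they are scanned and issues the accumulated I/O as soon as a request cannot be merged, so nothing is ever left queued but unstarted. The names (pgio_desc, pgio_add, pgio_flush) and the fixed block size are illustrative only and are not the nfs_pageio_* implementation added by this patch.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE  4096u
#define BLOCK_SIZE (4 * PAGE_SIZE)	/* stand-in for the server wsize */

struct request {
	unsigned long index;	/* page index */
	size_t bytes;		/* bytes dirty in this page */
};

struct pgio_desc {
	unsigned long first_index;	/* index of the first coalesced page */
	unsigned long next_index;	/* index the next request must have */
	size_t count;			/* bytes coalesced so far */
};

/* Issue whatever has been coalesced so far; here we just report it. */
static void pgio_flush(struct pgio_desc *d)
{
	if (d->count == 0)
		return;
	printf("I/O: %zu bytes starting at page %lu\n",
	       d->count, d->first_index);
	d->count = 0;
}

/*
 * Try to merge a request into the descriptor.  If it is not contiguous
 * with what is already queued, or would exceed the block size, start
 * the pending I/O first and begin a new run -- I/O is issued during
 * the scan itself, never deferred to a second pass.
 */
static void pgio_add(struct pgio_desc *d, const struct request *req)
{
	if (d->count != 0 &&
	    (req->index != d->next_index ||
	     d->count + req->bytes > BLOCK_SIZE))
		pgio_flush(d);

	if (d->count == 0)
		d->first_index = req->index;
	d->next_index = req->index + 1;
	d->count += req->bytes;
}

int main(void)
{
	/* Pages 0-2 are contiguous, page 7 is not. */
	const struct request reqs[] = {
		{ 0, PAGE_SIZE }, { 1, PAGE_SIZE }, { 2, PAGE_SIZE },
		{ 7, PAGE_SIZE },
	};
	struct pgio_desc d = { 0, 0, 0 };

	for (size_t i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
		pgio_add(&d, &reqs[i]);
	pgio_flush(&d);		/* complete the final run */
	return 0;
}

In the patch itself this role is played by struct nfs_pageio_descriptor, which generic_writepages() reaches through wbc->fs_private, so the page-locking pass and the RPC coalescing pass become a single pass.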
Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 92 ---------------------------- fs/nfs/write.c | 149 +++++++++++++++------------------------------- include/linux/nfs_page.h | 8 +-- include/linux/writeback.h | 2 + 4 files changed, 50 insertions(+), 201 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 094537ddd344..ea1a85df9ab1 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -17,7 +17,6 @@ #include #include #include -#include #define NFS_PARANOIA 1 @@ -353,25 +352,6 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, return 1; } -/** - * nfs_pageio_add_list - Split coalesced requests out from a list. - * @desc: destination io descriptor - * @head: source list - * - * Moves a maximum of 'nmax' elements from one list to another. - * The elements are checked to ensure that they form a contiguous set - * of pages, and that the RPC credentials are the same. - */ -void nfs_pageio_add_list(struct nfs_pageio_descriptor *desc, - struct list_head *head) -{ - while (!list_empty(head)) { - struct nfs_page *req = nfs_list_entry(head->next); - if (!nfs_pageio_add_request(desc, req)) - break; - } -} - /** * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor * @desc: pointer to io descriptor @@ -382,78 +362,6 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) } #define NFS_SCAN_MAXENTRIES 16 -/** - * nfs_scan_dirty - Scan the radix tree for dirty requests - * @mapping: pointer to address space - * @wbc: writeback_control structure - * @dst: Destination list - * - * Moves elements from one of the inode request lists. - * If the number of requests is set to 0, the entire address_space - * starting at index idx_start, is scanned. - * The requests are *not* checked to ensure that they form a contiguous set. - * You must be holding the inode's req_lock when calling this function - */ -long nfs_scan_dirty(struct address_space *mapping, - struct writeback_control *wbc, - struct list_head *dst) -{ - struct nfs_inode *nfsi = NFS_I(mapping->host); - struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; - struct nfs_page *req; - pgoff_t idx_start, idx_end; - long res = 0; - int found, i; - - if (nfsi->ndirty == 0) - return 0; - if (wbc->range_cyclic) { - idx_start = 0; - idx_end = ULONG_MAX; - } else if (wbc->range_end == 0) { - idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; - idx_end = ULONG_MAX; - } else { - idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; - idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; - } - - for (;;) { - unsigned int toscan = NFS_SCAN_MAXENTRIES; - - found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, - (void **)&pgvec[0], idx_start, toscan, - NFS_PAGE_TAG_DIRTY); - - /* Did we make progress? 
*/ - if (found <= 0) - break; - - for (i = 0; i < found; i++) { - req = pgvec[i]; - if (!wbc->range_cyclic && req->wb_index > idx_end) - goto out; - - /* Try to lock request and mark it for writeback */ - if (!nfs_set_page_writeback_locked(req)) - goto next; - radix_tree_tag_clear(&nfsi->nfs_page_tree, - req->wb_index, NFS_PAGE_TAG_DIRTY); - nfsi->ndirty--; - nfs_list_remove_request(req); - nfs_list_add_request(req, dst); - res++; - if (res == LONG_MAX) - goto out; -next: - idx_start = req->wb_index + 1; - } - } -out: - WARN_ON ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty)); - return res; -} - /** * nfs_scan_list - Scan a list for matching requests * @nfsi: NFS inode diff --git a/fs/nfs/write.c b/fs/nfs/write.c index b6749967eead..6ce2d94e7b3f 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -38,7 +38,8 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context*, struct page *, unsigned int, unsigned int); -static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how); +static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc, + struct inode *inode, int ioflags); static const struct rpc_call_ops nfs_write_partial_ops; static const struct rpc_call_ops nfs_write_full_ops; static const struct rpc_call_ops nfs_commit_ops; @@ -201,7 +202,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, static int wb_priority(struct writeback_control *wbc) { if (wbc->for_reclaim) - return FLUSH_HIGHPRI; + return FLUSH_HIGHPRI | FLUSH_STABLE; if (wbc->for_kupdate) return FLUSH_LOWPRI; return 0; @@ -251,7 +252,8 @@ static void nfs_end_page_writeback(struct page *page) * was not tagged. * May also return an error if the user signalled nfs_wait_on_request(). */ -static int nfs_page_mark_flush(struct page *page) +static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, + struct page *page) { struct nfs_page *req; struct nfs_inode *nfsi = NFS_I(page->mapping->host); @@ -273,6 +275,8 @@ static int nfs_page_mark_flush(struct page *page) * request as dirty (in which case we don't care). */ spin_unlock(req_lock); + /* Prevent deadlock! */ + nfs_pageio_complete(pgio); ret = nfs_wait_on_request(req); nfs_release_request(req); if (ret != 0) @@ -283,21 +287,18 @@ static int nfs_page_mark_flush(struct page *page) /* This request is marked for commit */ spin_unlock(req_lock); nfs_unlock_request(req); + nfs_pageio_complete(pgio); return 1; } - if (nfs_set_page_writeback(page) == 0) { - nfs_list_remove_request(req); - /* add the request to the inode's dirty list. 
*/ - radix_tree_tag_set(&nfsi->nfs_page_tree, - req->wb_index, NFS_PAGE_TAG_DIRTY); - nfs_list_add_request(req, &nfsi->dirty); - nfsi->ndirty++; - spin_unlock(req_lock); - __mark_inode_dirty(page->mapping->host, I_DIRTY_PAGES); - } else + if (nfs_set_page_writeback(page) != 0) { spin_unlock(req_lock); + BUG(); + } + radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, + NFS_PAGE_TAG_WRITEBACK); ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); - nfs_unlock_request(req); + spin_unlock(req_lock); + nfs_pageio_add_request(pgio, req); return ret; } @@ -306,6 +307,7 @@ static int nfs_page_mark_flush(struct page *page) */ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) { + struct nfs_pageio_descriptor mypgio, *pgio; struct nfs_open_context *ctx; struct inode *inode = page->mapping->host; unsigned offset; @@ -314,7 +316,14 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); - err = nfs_page_mark_flush(page); + if (wbc->for_writepages) + pgio = wbc->fs_private; + else { + nfs_pageio_init_write(&mypgio, inode, wb_priority(wbc)); + pgio = &mypgio; + } + + err = nfs_page_async_flush(pgio, page); if (err <= 0) goto out; err = 0; @@ -331,12 +340,12 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc put_nfs_open_context(ctx); if (err != 0) goto out; - err = nfs_page_mark_flush(page); + err = nfs_page_async_flush(pgio, page); if (err > 0) err = 0; out: if (!wbc->for_writepages) - nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc)); + nfs_pageio_complete(pgio); return err; } @@ -352,20 +361,20 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc) int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; + struct nfs_pageio_descriptor pgio; int err; nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); + nfs_pageio_init_write(&pgio, inode, wb_priority(wbc)); + wbc->fs_private = &pgio; err = generic_writepages(mapping, wbc); + nfs_pageio_complete(&pgio); if (err) return err; - err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc)); - if (err < 0) - goto out; - nfs_add_stats(inode, NFSIOS_WRITEPAGES, err); - err = 0; -out: - return err; + if (pgio.pg_error) + return pgio.pg_error; + return 0; } /* @@ -536,18 +545,6 @@ static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_st return res; } -static void nfs_cancel_dirty_list(struct list_head *head) -{ - struct nfs_page *req; - while(!list_empty(head)) { - req = nfs_list_entry(head->next); - nfs_list_remove_request(req); - nfs_end_page_writeback(req->wb_page); - nfs_inode_remove_request(req); - nfs_clear_page_writeback(req); - } -} - static void nfs_cancel_commit_list(struct list_head *head) { struct nfs_page *req; @@ -936,33 +933,15 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, size_t cou return -ENOMEM; } -static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how) +static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, + struct inode *inode, int ioflags) { - struct nfs_pageio_descriptor desc; - int wpages = NFS_SERVER(inode)->wpages; int wsize = NFS_SERVER(inode)->wsize; - /* For single writes, FLUSH_STABLE is more efficient */ - if (npages <= wpages && npages == NFS_I(inode)->npages - && nfs_list_entry(head->next)->wb_bytes <= wsize) - how |= FLUSH_STABLE; - if (wsize < PAGE_CACHE_SIZE) - 
nfs_pageio_init(&desc, inode, nfs_flush_multi, wsize, how); + nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags); else - nfs_pageio_init(&desc, inode, nfs_flush_one, wsize, how); - nfs_pageio_add_list(&desc, head); - nfs_pageio_complete(&desc); - if (desc.pg_error == 0) - return 0; - while (!list_empty(head)) { - struct nfs_page *req = nfs_list_entry(head->next); - nfs_list_remove_request(req); - nfs_redirty_request(req); - nfs_end_page_writeback(req->wb_page); - nfs_clear_page_writeback(req); - } - return desc.pg_error; + nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags); } /* @@ -1286,31 +1265,7 @@ static const struct rpc_call_ops nfs_commit_ops = { .rpc_call_done = nfs_commit_done, .rpc_release = nfs_commit_release, }; -#else -static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) -{ - return 0; -} -#endif - -static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) -{ - struct nfs_inode *nfsi = NFS_I(mapping->host); - LIST_HEAD(head); - long res; - - spin_lock(&nfsi->req_lock); - res = nfs_scan_dirty(mapping, wbc, &head); - spin_unlock(&nfsi->req_lock); - if (res) { - int error = nfs_flush_list(mapping->host, &head, res, how); - if (error < 0) - return error; - } - return res; -} -#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) int nfs_commit_inode(struct inode *inode, int how) { struct nfs_inode *nfsi = NFS_I(inode); @@ -1327,6 +1282,11 @@ int nfs_commit_inode(struct inode *inode, int how) } return res; } +#else +static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) +{ + return 0; +} #endif long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) @@ -1360,19 +1320,6 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr ret = nfs_wait_on_requests_locked(inode, idx_start, npages); if (ret != 0) continue; - pages = nfs_scan_dirty(mapping, wbc, &head); - if (pages != 0) { - spin_unlock(&nfsi->req_lock); - if (how & FLUSH_INVALIDATE) { - nfs_cancel_dirty_list(&head); - ret = pages; - } else - ret = nfs_flush_list(inode, &head, pages, how); - spin_lock(&nfsi->req_lock); - continue; - } - if (wbc->pages_skipped != 0) - continue; if (nocommit) break; pages = nfs_scan_commit(inode, &head, idx_start, npages); @@ -1412,7 +1359,7 @@ int nfs_wb_all(struct inode *inode) }; int ret; - ret = generic_writepages(mapping, &wbc); + ret = nfs_writepages(mapping, &wbc); if (ret < 0) goto out; ret = nfs_sync_mapping_wait(mapping, &wbc, 0); @@ -1435,11 +1382,9 @@ int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, lo }; int ret; - if (!(how & FLUSH_NOWRITEPAGE)) { - ret = generic_writepages(mapping, &wbc); - if (ret < 0) - goto out; - } + ret = nfs_writepages(mapping, &wbc); + if (ret < 0) + goto out; ret = nfs_sync_mapping_wait(mapping, &wbc, how); if (ret >= 0) return 0; @@ -1462,7 +1407,7 @@ int nfs_wb_page_priority(struct inode *inode, struct page *page, int how) int ret; BUG_ON(!PageLocked(page)); - if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) { + if (clear_page_dirty_for_io(page)) { ret = nfs_writepage_locked(page, &wbc); if (ret < 0) goto out; diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index b8b7bca3bac8..e556e57ef7ad 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -21,8 +21,7 @@ /* * Valid flags for the radix tree */ -#define NFS_PAGE_TAG_DIRTY 0 -#define NFS_PAGE_TAG_WRITEBACK 1 +#define 
NFS_PAGE_TAG_WRITEBACK 0 /* * Valid flags for a dirty buffer @@ -72,9 +71,6 @@ extern void nfs_clear_request(struct nfs_page *req); extern void nfs_release_request(struct nfs_page *req); -extern long nfs_scan_dirty(struct address_space *mapping, - struct writeback_control *wbc, - struct list_head *dst); extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct list_head *dst, unsigned long idx_start, unsigned int npages); extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, @@ -84,8 +80,6 @@ extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, int how); extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, struct nfs_page *); -extern void nfs_pageio_add_list(struct nfs_pageio_descriptor *, - struct list_head *); extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 0c78f7f4a976..daa6c125f66e 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -59,6 +59,8 @@ struct writeback_control { unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned for_writepages:1; /* This is a writepages() call */ unsigned range_cyclic:1; /* range_start is cyclic */ + + void *fs_private; /* For use by ->writepages() */ }; /* -- cgit v1.2.3 From 8d5658c949e6d89edc579a1f112aeee3bc232a8e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 10 Apr 2007 09:26:35 -0400 Subject: NFS: Fix a buffer overflow in the allocation of struct nfs_read/writedata Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 5 +++-- fs/nfs/internal.h | 12 ++++++++++++ fs/nfs/pagelist.c | 10 ++++++++-- fs/nfs/read.c | 19 +++++++++---------- fs/nfs/write.c | 11 +++++------ include/linux/nfs_fs.h | 4 ++-- include/linux/nfs_page.h | 4 ++-- 7 files changed, 41 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 2877744cb606..889de60f8a84 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -54,6 +54,7 @@ #include #include +#include "internal.h" #include "iostat.h" #define NFSDBG_FACILITY NFSDBG_VFS @@ -271,7 +272,7 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo bytes = min(rsize,count); result = -ENOMEM; - data = nfs_readdata_alloc(pgbase + bytes); + data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes)); if (unlikely(!data)) break; @@ -602,7 +603,7 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l bytes = min(wsize,count); result = -ENOMEM; - data = nfs_writedata_alloc(pgbase + bytes); + data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes)); if (unlikely(!data)) break; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 6610f2b02077..ad2b40db1e65 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -231,3 +231,15 @@ unsigned int nfs_page_length(struct page *page) } return 0; } + +/* + * Determine the number of pages in an array of length 'len' and + * with a base offset of 'base' + */ +static inline +unsigned int nfs_page_array_len(unsigned int base, size_t len) +{ + return ((unsigned long)len + (unsigned long)base + + PAGE_SIZE - 1) >> PAGE_SHIFT; +} + diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index ea1a85df9ab1..096efd73eb4c 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -18,6 +18,8 @@ #include #include +#include "internal.h" + #define NFS_PARANOIA 1 static struct kmem_cache 
*nfs_page_cachep; @@ -231,7 +233,7 @@ out: */ void nfs_pageio_init(struct nfs_pageio_descriptor *desc, struct inode *inode, - int (*doio)(struct inode *, struct list_head *, size_t, int), + int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int), unsigned int bsize, int io_flags) { @@ -298,8 +300,10 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, * since nfs_flush_multi and nfs_pagein_multi assume you * can have only one struct nfs_page. */ + if (desc->pg_bsize < PAGE_SIZE) + return 0; newlen += desc->pg_count; - if (desc->pg_base + newlen > desc->pg_bsize) + if (newlen > desc->pg_bsize) return 0; prev = nfs_list_entry(desc->pg_list.prev); if (!nfs_can_coalesce_requests(prev, req)) @@ -320,6 +324,8 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) if (!list_empty(&desc->pg_list)) { int error = desc->pg_doio(desc->pg_inode, &desc->pg_list, + nfs_page_array_len(desc->pg_base, + desc->pg_count), desc->pg_count, desc->pg_ioflags); if (error < 0) diff --git a/fs/nfs/read.c b/fs/nfs/read.c index f0016062340d..9a55807b2a70 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -27,8 +27,8 @@ #define NFSDBG_FACILITY NFSDBG_PAGECACHE -static int nfs_pagein_multi(struct inode *, struct list_head *, size_t, int); -static int nfs_pagein_one(struct inode *, struct list_head *, size_t, int); +static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int); +static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int); static const struct rpc_call_ops nfs_read_partial_ops; static const struct rpc_call_ops nfs_read_full_ops; @@ -37,9 +37,8 @@ static mempool_t *nfs_rdata_mempool; #define MIN_POOL_READ (32) -struct nfs_read_data *nfs_readdata_alloc(size_t len) +struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount) { - unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS); if (p) { @@ -135,9 +134,9 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, nfs_list_add_request(new, &one_request); if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) - nfs_pagein_multi(inode, &one_request, len, 0); + nfs_pagein_multi(inode, &one_request, 1, len, 0); else - nfs_pagein_one(inode, &one_request, len, 0); + nfs_pagein_one(inode, &one_request, 1, len, 0); return 0; } @@ -234,7 +233,7 @@ static void nfs_execute_read(struct nfs_read_data *data) * won't see the new data until our attribute cache is updated. This is more * or less conventional NFS client behavior. 
*/ -static int nfs_pagein_multi(struct inode *inode, struct list_head *head, size_t count, int flags) +static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags) { struct nfs_page *req = nfs_list_entry(head->next); struct page *page = req->wb_page; @@ -250,7 +249,7 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, size_t do { size_t len = min(nbytes,rsize); - data = nfs_readdata_alloc(len); + data = nfs_readdata_alloc(1); if (!data) goto out_bad; INIT_LIST_HEAD(&data->pages); @@ -291,13 +290,13 @@ out_bad: return -ENOMEM; } -static int nfs_pagein_one(struct inode *inode, struct list_head *head, size_t count, int flags) +static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags) { struct nfs_page *req; struct page **pages; struct nfs_read_data *data; - data = nfs_readdata_alloc(count); + data = nfs_readdata_alloc(npages); if (!data) goto out_bad; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 6ce2d94e7b3f..0a8bbc399689 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -72,9 +72,8 @@ void nfs_commit_free(struct nfs_write_data *wdata) call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free); } -struct nfs_write_data *nfs_writedata_alloc(size_t len) +struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) { - unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); if (p) { @@ -832,7 +831,7 @@ static void nfs_execute_write(struct nfs_write_data *data) * Generate multiple small requests to write out a single * contiguous dirty area on one page. */ -static int nfs_flush_multi(struct inode *inode, struct list_head *head, size_t count, int how) +static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how) { struct nfs_page *req = nfs_list_entry(head->next); struct page *page = req->wb_page; @@ -848,7 +847,7 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, size_t c do { size_t len = min(nbytes, wsize); - data = nfs_writedata_alloc(len); + data = nfs_writedata_alloc(1); if (!data) goto out_bad; list_add(&data->pages, &list); @@ -897,13 +896,13 @@ out_bad: * This is the case if nfs_updatepage detects a conflicting request * that has been written but not committed. 
*/ -static int nfs_flush_one(struct inode *inode, struct list_head *head, size_t count, int how) +static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how) { struct nfs_page *req; struct page **pages; struct nfs_write_data *data; - data = nfs_writedata_alloc(count); + data = nfs_writedata_alloc(npages); if (!data) goto out_bad; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index e9ae0c6e2c62..0543439a97af 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -455,7 +455,7 @@ nfs_have_writebacks(struct inode *inode) /* * Allocate nfs_write_data structures */ -extern struct nfs_write_data *nfs_writedata_alloc(size_t len); +extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages); /* * linux/fs/nfs/read.c @@ -469,7 +469,7 @@ extern void nfs_readdata_release(void *data); /* * Allocate nfs_read_data structures */ -extern struct nfs_read_data *nfs_readdata_alloc(size_t len); +extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages); /* * linux/fs/nfs3proc.c diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index e556e57ef7ad..8e9e7bceda48 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -55,7 +55,7 @@ struct nfs_pageio_descriptor { unsigned int pg_base; struct inode *pg_inode; - int (*pg_doio)(struct inode *, struct list_head *, size_t, int); + int (*pg_doio)(struct inode *, struct list_head *, unsigned int, size_t, int); int pg_ioflags; int pg_error; }; @@ -75,7 +75,7 @@ extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct unsigned long idx_start, unsigned int npages); extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, struct inode *inode, - int (*doio)(struct inode *, struct list_head *, size_t, int), + int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int), size_t bsize, int how); extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, -- cgit v1.2.3 From ca52fec152282ef73e5e882b847b36b1febbb1c6 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Apr 2007 17:22:13 -0400 Subject: NFS: Use pgoff_t in structures and functions that pass page cache offsets Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 4 ++-- fs/nfs/write.c | 18 +++++++++--------- include/linux/nfs_page.h | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 096efd73eb4c..d846d39a31ef 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -383,12 +383,12 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) * You must be holding the inode's req_lock when calling this function */ int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, - struct list_head *dst, unsigned long idx_start, + struct list_head *dst, pgoff_t idx_start, unsigned int npages) { struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; struct nfs_page *req; - unsigned long idx_end; + pgoff_t idx_end; int found, i; int res; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 424c4cea1208..5d44b8bd1070 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -139,7 +139,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c { struct inode *inode = page->mapping->host; loff_t end, i_size = i_size_read(inode); - unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; + pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; if (i_size > 0 && page->index < end_index) return; @@ -511,11 +511,11 @@ int 
nfs_reschedule_unstable_write(struct nfs_page *req) * * Interruptible by signals only if mounted with intr flag. */ -static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages) +static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_page *req; - unsigned long idx_end, next; + pgoff_t idx_end, next; unsigned int res = 0; int error; @@ -570,7 +570,7 @@ static void nfs_cancel_commit_list(struct list_head *head) * The requests are *not* checked to ensure that they form a contiguous set. */ static int -nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) +nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) { struct nfs_inode *nfsi = NFS_I(inode); int res = 0; @@ -584,7 +584,7 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_st return res; } #else -static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) +static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) { return 0; } @@ -604,7 +604,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, struct inode *inode = mapping->host; struct nfs_inode *nfsi = NFS_I(inode); struct nfs_page *req, *new = NULL; - unsigned long rqend, end; + pgoff_t rqend, end; end = offset + bytes; @@ -1292,7 +1292,7 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr { struct inode *inode = mapping->host; struct nfs_inode *nfsi = NFS_I(inode); - unsigned long idx_start, idx_end; + pgoff_t idx_start, idx_end; unsigned int npages = 0; LIST_HEAD(head); int nocommit = how & FLUSH_NOCOMMIT; @@ -1305,10 +1305,10 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; if (idx_end > idx_start) { - unsigned long l_npages = 1 + idx_end - idx_start; + pgoff_t l_npages = 1 + idx_end - idx_start; npages = l_npages; if (sizeof(npages) != sizeof(l_npages) && - (unsigned long)npages != l_npages) + (pgoff_t)npages != l_npages) npages = 0; } } diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 8e9e7bceda48..41afab6b5f09 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -38,7 +38,7 @@ struct nfs_page { struct page *wb_page; /* page to read in/write out */ struct nfs_open_context *wb_context; /* File state context info */ atomic_t wb_complete; /* i/os we're waiting for */ - unsigned long wb_index; /* Offset >> PAGE_CACHE_SHIFT */ + pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ wb_pgbase, /* Start of page data */ wb_bytes; /* Length of request */ @@ -72,7 +72,7 @@ extern void nfs_release_request(struct nfs_page *req); extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct list_head *dst, - unsigned long idx_start, unsigned int npages); + pgoff_t idx_start, unsigned int npages); extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, struct inode *inode, int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int), -- cgit v1.2.3 From 511d2e8855a065c8251d0c140ebc353854f1929e Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 29 Mar 2007 16:47:47 -0400 Subject: NLM: 
Shrink the maximum request size of NLM4 requests NLM version 4 requests estimate the call and reply header sizes rather conservatively, using the very maximum size allowed in the protocol even though Linux always uses only a small fraction of the allowable space. Reduce the size of caller and lock arguments to conserve RPC buffer space while XDR encoding NLM4 arguments. Add compile-time checks to ensure the hostname string won't overflow NLM protocol maximums. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/xdr.c | 13 ++++++++----- fs/lockd/xdr4.c | 17 ++++++++++++----- include/linux/lockd/lockd.h | 2 +- 3 files changed, 21 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 34dae5d70738..6aac4b2c9ff0 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c @@ -510,17 +510,20 @@ nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) return 0; } +#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) +# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" +#endif + /* * Buffer requirements for NLM */ #define NLM_void_sz 0 #define NLM_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN) -#define NLM_caller_sz 1+XDR_QUADLEN(sizeof(utsname()->nodename)) -#define NLM_netobj_sz 1+XDR_QUADLEN(XDR_MAX_NETOBJ) -/* #define NLM_owner_sz 1+XDR_QUADLEN(NLM_MAXOWNER) */ +#define NLM_caller_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) +#define NLM_owner_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) #define NLM_fhandle_sz 1+XDR_QUADLEN(NFS2_FHSIZE) -#define NLM_lock_sz 3+NLM_caller_sz+NLM_netobj_sz+NLM_fhandle_sz -#define NLM_holder_sz 4+NLM_netobj_sz +#define NLM_lock_sz 3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz +#define NLM_holder_sz 4+NLM_owner_sz #define NLM_testargs_sz NLM_cookie_sz+1+NLM_lock_sz #define NLM_lockargs_sz NLM_cookie_sz+4+NLM_lock_sz diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c index a78240551219..7c8b679c394c 100644 --- a/fs/lockd/xdr4.c +++ b/fs/lockd/xdr4.c @@ -516,17 +516,24 @@ nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) return 0; } +#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) +# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" +#endif + +#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN) +# error "NLM host name cannot be larger than NLM's maximum string length!" +#endif + /* * Buffer requirements for NLM */ #define NLM4_void_sz 0 #define NLM4_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN) -#define NLM4_caller_sz 1+XDR_QUADLEN(NLM_MAXSTRLEN) -#define NLM4_netobj_sz 1+XDR_QUADLEN(XDR_MAX_NETOBJ) -/* #define NLM4_owner_sz 1+XDR_QUADLEN(NLM4_MAXOWNER) */ +#define NLM4_caller_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) +#define NLM4_owner_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) #define NLM4_fhandle_sz 1+XDR_QUADLEN(NFS3_FHSIZE) -#define NLM4_lock_sz 5+NLM4_caller_sz+NLM4_netobj_sz+NLM4_fhandle_sz -#define NLM4_holder_sz 6+NLM4_netobj_sz +#define NLM4_lock_sz 5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz +#define NLM4_holder_sz 6+NLM4_owner_sz #define NLM4_testargs_sz NLM4_cookie_sz+1+NLM4_lock_sz #define NLM4_lockargs_sz NLM4_cookie_sz+4+NLM4_lock_sz diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index ac25b5649c59..f6a81e0b1b93 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -88,7 +88,7 @@ struct nlm_wait; /* * Memory chunk for NLM client RPC request. 
*/ -#define NLMCLNT_OHSIZE (sizeof(utsname()->nodename)+10) +#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u) struct nlm_rqst { unsigned int a_flags; /* initial RPC task flags */ struct nlm_host * a_host; /* host handle */ -- cgit v1.2.3 From 2bea90d43a050bbc4021d44e59beb34f384438db Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 29 Mar 2007 16:47:53 -0400 Subject: SUNRPC: RPC buffer size estimates are too large The RPC buffer size estimation logic in net/sunrpc/clnt.c always significantly overestimates the requirements for the buffer size. A little instrumentation demonstrated that in fact rpc_malloc was never allocating the buffer from the mempool, but almost always called kmalloc. To compute the size of the RPC buffer more precisely, split p_bufsiz into two fields; one for the argument size, and one for the result size. Then, compute the sum of the exact call and reply header sizes, and split the RPC buffer precisely between the two. That should keep almost all RPC buffers within the 2KiB buffer mempool limit. And, we can finally be rid of RPC_SLACK_SPACE! Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/mon.c | 10 +++----- fs/lockd/xdr.c | 7 ++--- fs/lockd/xdr4.c | 7 ++--- fs/nfs/mount_clnt.c | 7 +++-- fs/nfs/nfs2xdr.c | 7 ++--- fs/nfs/nfs3xdr.c | 13 +++++----- fs/nfs/nfs4xdr.c | 7 ++--- fs/nfsd/nfs4callback.c | 7 ++--- include/linux/sunrpc/clnt.h | 3 ++- include/linux/sunrpc/xprt.h | 4 ++- net/sunrpc/clnt.c | 62 +++++++++++++++++++++++++++------------------ net/sunrpc/pmap_clnt.c | 9 ++++--- net/sunrpc/xprt.c | 1 - 13 files changed, 74 insertions(+), 70 deletions(-) (limited to 'include/linux') diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index eb243edf8932..2102e2d0134d 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -225,16 +225,13 @@ xdr_decode_stat(struct rpc_rqst *rqstp, __be32 *p, struct nsm_res *resp) #define SM_monres_sz 2 #define SM_unmonres_sz 1 -#ifndef MAX -# define MAX(a, b) (((a) > (b))? (a) : (b)) -#endif - static struct rpc_procinfo nsm_procedures[] = { [SM_MON] = { .p_proc = SM_MON, .p_encode = (kxdrproc_t) xdr_encode_mon, .p_decode = (kxdrproc_t) xdr_decode_stat_res, - .p_bufsiz = MAX(SM_mon_sz, SM_monres_sz) << 2, + .p_arglen = SM_mon_sz, + .p_replen = SM_monres_sz, .p_statidx = SM_MON, .p_name = "MONITOR", }, @@ -242,7 +239,8 @@ static struct rpc_procinfo nsm_procedures[] = { .p_proc = SM_UNMON, .p_encode = (kxdrproc_t) xdr_encode_unmon, .p_decode = (kxdrproc_t) xdr_decode_stat, - .p_bufsiz = MAX(SM_mon_id_sz, SM_unmonres_sz) << 2, + .p_arglen = SM_mon_id_sz, + .p_replen = SM_unmonres_sz, .p_statidx = SM_UNMON, .p_name = "UNMONITOR", }, diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 6aac4b2c9ff0..9702956d206c 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c @@ -534,10 +534,6 @@ nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) #define NLM_res_sz NLM_cookie_sz+1 #define NLM_norep_sz 0 -#ifndef MAX -# define MAX(a, b) (((a) > (b))? 
(a) : (b)) -#endif - /* * For NLM, a void procedure really returns nothing */ @@ -548,7 +544,8 @@ nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) .p_proc = NLMPROC_##proc, \ .p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \ .p_decode = (kxdrproc_t) nlmclt_decode_##restype, \ - .p_bufsiz = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2, \ + .p_arglen = NLM_##argtype##_sz, \ + .p_replen = NLM_##restype##_sz, \ .p_statidx = NLMPROC_##proc, \ .p_name = #proc, \ } diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c index 7c8b679c394c..ce1efdbe1b3a 100644 --- a/fs/lockd/xdr4.c +++ b/fs/lockd/xdr4.c @@ -544,10 +544,6 @@ nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) #define NLM4_res_sz NLM4_cookie_sz+1 #define NLM4_norep_sz 0 -#ifndef MAX -# define MAX(a,b) (((a) > (b))? (a) : (b)) -#endif - /* * For NLM, a void procedure really returns nothing */ @@ -558,7 +554,8 @@ nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) .p_proc = NLMPROC_##proc, \ .p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \ .p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \ - .p_bufsiz = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2, \ + .p_arglen = NLM4_##argtype##_sz, \ + .p_replen = NLM4_##restype##_sz, \ .p_statidx = NLMPROC_##proc, \ .p_name = #proc, \ } diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index f75fe72b4160..ca5a266a3140 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -133,13 +133,15 @@ xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p, struct mnt_fhstatus *res) #define MNT_dirpath_sz (1 + 256) #define MNT_fhstatus_sz (1 + 8) +#define MNT_fhstatus3_sz (1 + 16) static struct rpc_procinfo mnt_procedures[] = { [MNTPROC_MNT] = { .p_proc = MNTPROC_MNT, .p_encode = (kxdrproc_t) xdr_encode_dirpath, .p_decode = (kxdrproc_t) xdr_decode_fhstatus, - .p_bufsiz = MNT_dirpath_sz << 2, + .p_arglen = MNT_dirpath_sz, + .p_replen = MNT_fhstatus_sz, .p_statidx = MNTPROC_MNT, .p_name = "MOUNT", }, @@ -150,7 +152,8 @@ static struct rpc_procinfo mnt3_procedures[] = { .p_proc = MOUNTPROC3_MNT, .p_encode = (kxdrproc_t) xdr_encode_dirpath, .p_decode = (kxdrproc_t) xdr_decode_fhstatus3, - .p_bufsiz = MNT_dirpath_sz << 2, + .p_arglen = MNT_dirpath_sz, + .p_replen = MNT_fhstatus3_sz, .p_statidx = MOUNTPROC3_MNT, .p_name = "MOUNT", }, diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 3be4e72a0227..abd9f8b48943 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -687,16 +687,13 @@ nfs_stat_to_errno(int stat) return nfs_errtbl[i].errno; } -#ifndef MAX -# define MAX(a, b) (((a) > (b))? (a) : (b)) -#endif - #define PROC(proc, argtype, restype, timer) \ [NFSPROC_##proc] = { \ .p_proc = NFSPROC_##proc, \ .p_encode = (kxdrproc_t) nfs_xdr_##argtype, \ .p_decode = (kxdrproc_t) nfs_xdr_##restype, \ - .p_bufsiz = MAX(NFS_##argtype##_sz,NFS_##restype##_sz) << 2, \ + .p_arglen = NFS_##argtype##_sz, \ + .p_replen = NFS_##restype##_sz, \ .p_timer = timer, \ .p_statidx = NFSPROC_##proc, \ .p_name = #proc, \ diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 0ace092d126f..b51df8eb9f01 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1102,16 +1102,13 @@ nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) } #endif /* CONFIG_NFS_V3_ACL */ -#ifndef MAX -# define MAX(a, b) (((a) > (b))? 
(a) : (b)) -#endif - #define PROC(proc, argtype, restype, timer) \ [NFS3PROC_##proc] = { \ .p_proc = NFS3PROC_##proc, \ .p_encode = (kxdrproc_t) nfs3_xdr_##argtype, \ .p_decode = (kxdrproc_t) nfs3_xdr_##restype, \ - .p_bufsiz = MAX(NFS3_##argtype##_sz,NFS3_##restype##_sz) << 2, \ + .p_arglen = NFS3_##argtype##_sz, \ + .p_replen = NFS3_##restype##_sz, \ .p_timer = timer, \ .p_statidx = NFS3PROC_##proc, \ .p_name = #proc, \ @@ -1153,7 +1150,8 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { .p_proc = ACLPROC3_GETACL, .p_encode = (kxdrproc_t) nfs3_xdr_getaclargs, .p_decode = (kxdrproc_t) nfs3_xdr_getaclres, - .p_bufsiz = MAX(ACL3_getaclargs_sz, ACL3_getaclres_sz) << 2, + .p_arglen = ACL3_getaclargs_sz, + .p_replen = ACL3_getaclres_sz, .p_timer = 1, .p_name = "GETACL", }, @@ -1161,7 +1159,8 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { .p_proc = ACLPROC3_SETACL, .p_encode = (kxdrproc_t) nfs3_xdr_setaclargs, .p_decode = (kxdrproc_t) nfs3_xdr_setaclres, - .p_bufsiz = MAX(ACL3_setaclargs_sz, ACL3_setaclres_sz) << 2, + .p_arglen = ACL3_setaclargs_sz, + .p_replen = ACL3_setaclres_sz, .p_timer = 0, .p_name = "SETACL", }, diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index f02d522fd788..b8c28f2380a5 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4546,16 +4546,13 @@ nfs4_stat_to_errno(int stat) return stat; } -#ifndef MAX -# define MAX(a, b) (((a) > (b))? (a) : (b)) -#endif - #define PROC(proc, argtype, restype) \ [NFSPROC4_CLNT_##proc] = { \ .p_proc = NFSPROC4_COMPOUND, \ .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \ .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \ - .p_bufsiz = MAX(NFS4_##argtype##_sz,NFS4_##restype##_sz) << 2, \ + .p_arglen = NFS4_##argtype##_sz, \ + .p_replen = NFS4_##restype##_sz, \ .p_statidx = NFSPROC4_CLNT_##proc, \ .p_name = #proc, \ } diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index fb14d68eacab..32ffea033c7a 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -315,16 +315,13 @@ out: /* * RPC procedure tables */ -#ifndef MAX -# define MAX(a, b) (((a) > (b))? (a) : (b)) -#endif - #define PROC(proc, call, argtype, restype) \ [NFSPROC4_CLNT_##proc] = { \ .p_proc = NFSPROC4_CB_##call, \ .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \ .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \ - .p_bufsiz = MAX(NFS4_##argtype##_sz,NFS4_##restype##_sz) << 2, \ + .p_arglen = NFS4_##argtype##_sz, \ + .p_replen = NFS4_##restype##_sz, \ .p_statidx = NFSPROC4_CB_##call, \ .p_name = #proc, \ } diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index c7a78eef2b4f..32c48a0b0d71 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -84,7 +84,8 @@ struct rpc_procinfo { u32 p_proc; /* RPC procedure number */ kxdrproc_t p_encode; /* XDR encode function */ kxdrproc_t p_decode; /* XDR decode function */ - unsigned int p_bufsiz; /* req. 
buffer size */ + unsigned int p_arglen; /* argument hdr length (u32) */ + unsigned int p_replen; /* reply hdr length (u32) */ unsigned int p_count; /* call count */ unsigned int p_timer; /* Which RTT timer to use */ u32 p_statidx; /* Which procedure to account */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index f780e72fc417..7aa29502b187 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -84,7 +84,9 @@ struct rpc_rqst { struct list_head rq_list; __u32 * rq_buffer; /* XDR encode buffer */ - size_t rq_bufsize; + size_t rq_bufsize, + rq_callsize, + rq_rcvsize; struct xdr_buf rq_private_buf; /* The receive buffer * used in the softirq. diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 396cdbe249d1..12487aafaab5 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -36,8 +36,6 @@ #include -#define RPC_SLACK_SPACE (1024) /* total overkill */ - #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_CALL #endif @@ -747,21 +745,37 @@ call_reserveresult(struct rpc_task *task) static void call_allocate(struct rpc_task *task) { + unsigned int slack = task->tk_auth->au_cslack; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = task->tk_xprt; - unsigned int bufsiz; + struct rpc_procinfo *proc = task->tk_msg.rpc_proc; dprint_status(task); + task->tk_status = 0; task->tk_action = call_bind; + if (req->rq_buffer) return; - /* FIXME: compute buffer requirements more exactly using - * auth->au_wslack */ - bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE; + if (proc->p_proc != 0) { + BUG_ON(proc->p_arglen == 0); + if (proc->p_decode != NULL) + BUG_ON(proc->p_replen == 0); + } - if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL) + /* + * Calculate the size (in quads) of the RPC call + * and reply headers, and convert both values + * to byte sizes. + */ + req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen; + req->rq_callsize <<= 2; + req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen; + req->rq_rcvsize <<= 2; + + xprt->ops->buf_alloc(task, req->rq_callsize + req->rq_rcvsize); + if (req->rq_buffer != NULL) return; dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); @@ -788,6 +802,17 @@ rpc_task_force_reencode(struct rpc_task *task) task->tk_rqstp->rq_snd_buf.len = 0; } +static inline void +rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) +{ + buf->head[0].iov_base = start; + buf->head[0].iov_len = len; + buf->tail[0].iov_len = 0; + buf->page_len = 0; + buf->len = 0; + buf->buflen = len; +} + /* * 3. 
Encode arguments of an RPC call */ @@ -795,28 +820,17 @@ static void call_encode(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; - struct xdr_buf *sndbuf = &req->rq_snd_buf; - struct xdr_buf *rcvbuf = &req->rq_rcv_buf; - unsigned int bufsiz; kxdrproc_t encode; __be32 *p; dprint_status(task); - /* Default buffer setup */ - bufsiz = req->rq_bufsize >> 1; - sndbuf->head[0].iov_base = (void *)req->rq_buffer; - sndbuf->head[0].iov_len = bufsiz; - sndbuf->tail[0].iov_len = 0; - sndbuf->page_len = 0; - sndbuf->len = 0; - sndbuf->buflen = bufsiz; - rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz); - rcvbuf->head[0].iov_len = bufsiz; - rcvbuf->tail[0].iov_len = 0; - rcvbuf->page_len = 0; - rcvbuf->len = 0; - rcvbuf->buflen = bufsiz; + rpc_xdr_buf_init(&req->rq_snd_buf, + req->rq_buffer, + req->rq_callsize); + rpc_xdr_buf_init(&req->rq_rcv_buf, + (char *)req->rq_buffer + req->rq_callsize, + req->rq_rcvsize); /* Encode header and provided arguments */ encode = task->tk_msg.rpc_proc->p_encode; diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c index d9f765344589..c45fc4c99513 100644 --- a/net/sunrpc/pmap_clnt.c +++ b/net/sunrpc/pmap_clnt.c @@ -335,7 +335,8 @@ static struct rpc_procinfo pmap_procedures[] = { .p_proc = PMAP_SET, .p_encode = (kxdrproc_t) xdr_encode_mapping, .p_decode = (kxdrproc_t) xdr_decode_bool, - .p_bufsiz = 4, + .p_arglen = 4, + .p_replen = 1, .p_count = 1, .p_statidx = PMAP_SET, .p_name = "SET", @@ -344,7 +345,8 @@ static struct rpc_procinfo pmap_procedures[] = { .p_proc = PMAP_UNSET, .p_encode = (kxdrproc_t) xdr_encode_mapping, .p_decode = (kxdrproc_t) xdr_decode_bool, - .p_bufsiz = 4, + .p_arglen = 4, + .p_replen = 1, .p_count = 1, .p_statidx = PMAP_UNSET, .p_name = "UNSET", @@ -353,7 +355,8 @@ static struct rpc_procinfo pmap_procedures[] = { .p_proc = PMAP_GETPORT, .p_encode = (kxdrproc_t) xdr_encode_mapping, .p_decode = (kxdrproc_t) xdr_decode_port, - .p_bufsiz = 4, + .p_arglen = 4, + .p_replen = 1, .p_count = 1, .p_statidx = PMAP_GETPORT, .p_name = "GETPORT", diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 456a14510308..432ee92cf262 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -823,7 +823,6 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) req->rq_task = task; req->rq_xprt = xprt; req->rq_buffer = NULL; - req->rq_bufsize = 0; req->rq_xid = xprt_alloc_xid(xprt); req->rq_release_snd_buf = NULL; xprt_reset_majortimeo(req); -- cgit v1.2.3 From c5a4dd8b7c15927a8fbff83171b57cad675a79b9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 29 Mar 2007 16:47:58 -0400 Subject: SUNRPC: Eliminate side effects from rpc_malloc Currently rpc_malloc sets req->rq_buffer internally. Make this a more generic interface: return a pointer to the new buffer (or NULL) and make the caller set req->rq_buffer and req->rq_bufsize. This looks much more like kmalloc and eliminates the side effects. To fix a potential deadlock, this patch also replaces GFP_NOFS with GFP_NOWAIT in rpc_malloc. This prevents async RPCs from sleeping outside the RPC's task scheduler while allocating their buffer. 
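For illustration, the buf_alloc/buf_free pair below is a minimal userspace sketch of the length-prefix idea this patch applies to rpc_malloc and rpc_free: the allocator hides the total size just in front of the pointer it hands back, so the matching free routine can recover it without any task or request context. It is only a sketch of the pattern, not the kernel code itself (the real version draws small buffers from a mempool and larger ones from kmalloc).

#include <stdlib.h>

void *buf_alloc(size_t size)
{
	size_t *buf;

	size += sizeof(size_t);		/* reserve room for a hidden header */
	buf = malloc(size);
	if (buf == NULL)
		return NULL;
	*buf = size;			/* record the full allocation size */
	return buf + 1;			/* hand back only the payload */
}

void buf_free(void *buffer)
{
	size_t *buf = buffer;

	if (buffer == NULL)
		return;
	buf--;				/* step back to the hidden header */
	/* *buf holds the size; the kernel version uses it to choose
	 * between mempool_free() and kfree(). */
	free(buf);
}
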
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- include/linux/sunrpc/sched.h | 2 +- include/linux/sunrpc/xprt.h | 2 +- net/sunrpc/clnt.c | 3 +- net/sunrpc/sched.c | 65 +++++++++++++++++++++++--------------------- net/sunrpc/xprt.c | 2 +- 5 files changed, 39 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 3069ecca0129..2047fb202a13 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -264,7 +264,7 @@ struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); void rpc_wake_up_status(struct rpc_wait_queue *, int); void rpc_delay(struct rpc_task *, unsigned long); void * rpc_malloc(struct rpc_task *, size_t); -void rpc_free(struct rpc_task *); +void rpc_free(void *); int rpciod_up(void); void rpciod_down(void); int __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *)); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 7aa29502b187..745afc1d3068 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -114,7 +114,7 @@ struct rpc_xprt_ops { void (*set_port)(struct rpc_xprt *xprt, unsigned short port); void (*connect)(struct rpc_task *task); void * (*buf_alloc)(struct rpc_task *task, size_t size); - void (*buf_free)(struct rpc_task *task); + void (*buf_free)(void *buffer); int (*send_request)(struct rpc_task *task); void (*set_retrans_timeout)(struct rpc_task *task); void (*timer)(struct rpc_task *task); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 12487aafaab5..e7dc09ecc470 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -774,7 +774,8 @@ call_allocate(struct rpc_task *task) req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen; req->rq_rcvsize <<= 2; - xprt->ops->buf_alloc(task, req->rq_callsize + req->rq_rcvsize); + req->rq_buffer = xprt->ops->buf_alloc(task, + req->rq_callsize + req->rq_rcvsize); if (req->rq_buffer != NULL) return; diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 6d87320074b1..4a53e94f8134 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -741,50 +741,53 @@ static void rpc_async_schedule(struct work_struct *work) * @task: RPC task that will use this buffer * @size: requested byte size * - * We try to ensure that some NFS reads and writes can always proceed - * by using a mempool when allocating 'small' buffers. + * To prevent rpciod from hanging, this allocator never sleeps, + * returning NULL if the request cannot be serviced immediately. + * The caller can arrange to sleep in a way that is safe for rpciod. + * + * Most requests are 'small' (under 2KiB) and can be serviced from a + * mempool, ensuring that NFS reads and writes can always proceed, + * and that there is good locality of reference for these buffers. + * * In order to avoid memory starvation triggering more writebacks of - * NFS requests, we use GFP_NOFS rather than GFP_KERNEL. + * NFS requests, we avoid using GFP_KERNEL. */ -void * rpc_malloc(struct rpc_task *task, size_t size) +void *rpc_malloc(struct rpc_task *task, size_t size) { - struct rpc_rqst *req = task->tk_rqstp; - gfp_t gfp; + size_t *buf; + gfp_t gfp = RPC_IS_SWAPPER(task) ? 
GFP_ATOMIC : GFP_NOWAIT; - if (task->tk_flags & RPC_TASK_SWAPPER) - gfp = GFP_ATOMIC; + size += sizeof(size_t); + if (size <= RPC_BUFFER_MAXSIZE) + buf = mempool_alloc(rpc_buffer_mempool, gfp); else - gfp = GFP_NOFS; - - if (size > RPC_BUFFER_MAXSIZE) { - req->rq_buffer = kmalloc(size, gfp); - if (req->rq_buffer) - req->rq_bufsize = size; - } else { - req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp); - if (req->rq_buffer) - req->rq_bufsize = RPC_BUFFER_MAXSIZE; - } - return req->rq_buffer; + buf = kmalloc(size, gfp); + *buf = size; + dprintk("RPC: %5u allocated buffer of size %u at %p\n", + task->tk_pid, size, buf); + return (void *) ++buf; } /** * rpc_free - free buffer allocated via rpc_malloc - * @task: RPC task with a buffer to be freed + * @buffer: buffer to free * */ -void rpc_free(struct rpc_task *task) +void rpc_free(void *buffer) { - struct rpc_rqst *req = task->tk_rqstp; + size_t size, *buf = (size_t *) buffer; - if (req->rq_buffer) { - if (req->rq_bufsize == RPC_BUFFER_MAXSIZE) - mempool_free(req->rq_buffer, rpc_buffer_mempool); - else - kfree(req->rq_buffer); - req->rq_buffer = NULL; - req->rq_bufsize = 0; - } + if (!buffer) + return; + size = *buf; + buf--; + + dprintk("RPC: freeing buffer of size %u at %p\n", + size, buf); + if (size <= RPC_BUFFER_MAXSIZE) + mempool_free(buf, rpc_buffer_mempool); + else + kfree(buf); } /* diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 432ee92cf262..81fe830da8aa 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -854,7 +854,7 @@ void xprt_release(struct rpc_task *task) mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); spin_unlock_bh(&xprt->transport_lock); - xprt->ops->buf_free(task); + xprt->ops->buf_free(req->rq_buffer); task->tk_rqstp = NULL; if (req->rq_release_snd_buf) req->rq_release_snd_buf(req); -- cgit v1.2.3 From a509050bd3b8e0aa269c2241aa10d74ca7701e2f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 29 Mar 2007 16:48:04 -0400 Subject: SUNRPC: introduce rpcbind: replacement for in-kernel portmapper Introduce a replacement for the in-kernel portmapper client that supports all 3 versions of the rpcbind protocol. This code is not used yet. Original code by Groupe Bull updated for the latest kernel, with multiple bug fixes. Note that rpcb_clnt.c does not yet support registering via versions 3 and 4 of the rpcbind protocol. That is planned for a later patch. 
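As background for the new GETADDR/GETVERSADDR procedures, rpcbind versions 3 and 4 identify a service by a "universal address" rather than a bare port number. The snippet below is a small userspace illustration (not taken from the patch) of the IPv4 form described in RFC 1833 and RFC 3530 and quoted in the r_addr comments further down: the dotted-quad host address followed by the port split into its high and low octets.

#include <stdio.h>

/* Build the universal address "h1.h2.h3.h4.p1.p2" for an IPv4 service. */
static void uaddr_from_ipv4(char *buf, size_t len,
			    const unsigned int addr[4], unsigned int port)
{
	snprintf(buf, len, "%u.%u.%u.%u.%u.%u",
		 addr[0], addr[1], addr[2], addr[3],
		 port >> 8, port & 0xffu);
}

int main(void)
{
	const unsigned int host[4] = { 10, 1, 3, 7 };
	char uaddr[32];

	uaddr_from_ipv4(uaddr, sizeof(uaddr), host, 527);
	printf("%s\n", uaddr);		/* prints "10.1.3.7.2.15" */
	return 0;
}
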
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- include/linux/sunrpc/clnt.h | 3 + include/linux/sunrpc/debug.h | 1 + include/linux/sunrpc/xprt.h | 1 + net/sunrpc/Makefile | 2 +- net/sunrpc/rpcb_clnt.c | 625 +++++++++++++++++++++++++++++++++++++++++++ net/sunrpc/xprt.c | 1 + 6 files changed, 632 insertions(+), 1 deletion(-) create mode 100644 net/sunrpc/rpcb_clnt.c (limited to 'include/linux') diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 32c48a0b0d71..ca7553785932 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -124,6 +124,8 @@ int rpc_destroy_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); void rpc_getport(struct rpc_task *); int rpc_register(u32, u32, int, unsigned short, int *); +int rpcb_register(u32, u32, int, unsigned short, int *); +void rpcb_getport(struct rpc_task *); void rpc_call_setup(struct rpc_task *, struct rpc_message *, int); @@ -146,6 +148,7 @@ char * rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); * Helper function for NFSroot support */ int rpc_getport_external(struct sockaddr_in *, __u32, __u32, int); +int rpcb_getport_external(struct sockaddr_in *, __u32, __u32, int); #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index b7c7307ceec6..707f96fe47d3 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -18,6 +18,7 @@ #define RPCDBG_NFS 0x0008 #define RPCDBG_AUTH 0x0010 #define RPCDBG_PMAP 0x0020 +#define RPCDBG_BIND 0x0020 #define RPCDBG_SCHED 0x0040 #define RPCDBG_TRANS 0x0080 #define RPCDBG_SVCSOCK 0x0100 diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 745afc1d3068..fa89ce6ce076 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -152,6 +152,7 @@ struct rpc_xprt { unsigned long state; /* transport state */ unsigned char shutdown : 1, /* being shut down */ resvport : 1; /* use a reserved port */ + unsigned int bind_index; /* bind function index */ /* * Connection of transports diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile index cdcab9ca4c60..3417a1ef1f95 100644 --- a/net/sunrpc/Makefile +++ b/net/sunrpc/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_SUNRPC_GSS) += auth_gss/ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ auth.o auth_null.o auth_unix.o \ svc.o svcsock.o svcauth.o svcauth_unix.o \ - pmap_clnt.o timer.o xdr.o \ + pmap_clnt.o rpcb_clnt.o timer.o xdr.o \ sunrpc_syms.o cache.o rpc_pipe.o sunrpc-$(CONFIG_PROC_FS) += stats.o sunrpc-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c new file mode 100644 index 000000000000..6c7aa8a1f0c6 --- /dev/null +++ b/net/sunrpc/rpcb_clnt.c @@ -0,0 +1,625 @@ +/* + * In-kernel rpcbind client supporting versions 2, 3, and 4 of the rpcbind + * protocol + * + * Based on RFC 1833: "Binding Protocols for ONC RPC Version 2" and + * RFC 3530: "Network File System (NFS) version 4 Protocol" + * + * Original: Gilles Quillard, Bull Open Source, 2005 + * Updated: Chuck Lever, Oracle Corporation, 2007 + * + * Descended from net/sunrpc/pmap_clnt.c, + * Copyright (C) 1996, Olaf Kirch + */ + +#include +#include +#include +#include + +#include +#include + +#ifdef RPC_DEBUG +# define RPCDBG_FACILITY RPCDBG_BIND +#endif + +#define RPCBIND_PROGRAM (100000u) +#define RPCBIND_PORT (111u) + +enum { + RPCBPROC_NULL, + RPCBPROC_SET, + RPCBPROC_UNSET, + RPCBPROC_GETPORT, + RPCBPROC_GETADDR = 3, /* alias for GETPORT */ + 
RPCBPROC_DUMP, + RPCBPROC_CALLIT, + RPCBPROC_BCAST = 5, /* alias for CALLIT */ + RPCBPROC_GETTIME, + RPCBPROC_UADDR2TADDR, + RPCBPROC_TADDR2UADDR, + RPCBPROC_GETVERSADDR, + RPCBPROC_INDIRECT, + RPCBPROC_GETADDRLIST, + RPCBPROC_GETSTAT, +}; + +#define RPCB_HIGHPROC_2 RPCBPROC_CALLIT +#define RPCB_HIGHPROC_3 RPCBPROC_TADDR2UADDR +#define RPCB_HIGHPROC_4 RPCBPROC_GETSTAT + +/* + * r_addr + * + * Quoting RFC 3530, section 2.2: + * + * For TCP over IPv4 and for UDP over IPv4, the format of r_addr is the + * US-ASCII string: + * + * h1.h2.h3.h4.p1.p2 + * + * The prefix, "h1.h2.h3.h4", is the standard textual form for + * representing an IPv4 address, which is always four octets long. + * Assuming big-endian ordering, h1, h2, h3, and h4, are respectively, + * the first through fourth octets each converted to ASCII-decimal. + * Assuming big-endian ordering, p1 and p2 are, respectively, the first + * and second octets each converted to ASCII-decimal. For example, if a + * host, in big-endian order, has an address of 0x0A010307 and there is + * a service listening on, in big endian order, port 0x020F (decimal + * 527), then the complete universal address is "10.1.3.7.2.15". + * + * ... + * + * For TCP over IPv6 and for UDP over IPv6, the format of r_addr is the + * US-ASCII string: + * + * x1:x2:x3:x4:x5:x6:x7:x8.p1.p2 + * + * The suffix "p1.p2" is the service port, and is computed the same way + * as with universal addresses for TCP and UDP over IPv4. The prefix, + * "x1:x2:x3:x4:x5:x6:x7:x8", is the standard textual form for + * representing an IPv6 address as defined in Section 2.2 of [RFC2373]. + * Additionally, the two alternative forms specified in Section 2.2 of + * [RFC2373] are also acceptable. + * + * XXX: Currently this implementation does not explicitly convert the + * stored address to US-ASCII on non-ASCII systems. + */ +#define RPCB_MAXADDRLEN (128u) + +/* + * r_netid + * + * Quoting RFC 3530, section 2.2: + * + * For TCP over IPv4 the value of r_netid is the string "tcp". For UDP + * over IPv4 the value of r_netid is the string "udp". + * + * ... + * + * For TCP over IPv6 the value of r_netid is the string "tcp6". For UDP + * over IPv6 the value of r_netid is the string "udp6". + */ +#define RPCB_NETID_UDP "\165\144\160" /* "udp" */ +#define RPCB_NETID_TCP "\164\143\160" /* "tcp" */ +#define RPCB_NETID_UDP6 "\165\144\160\066" /* "udp6" */ +#define RPCB_NETID_TCP6 "\164\143\160\066" /* "tcp6" */ + +#define RPCB_MAXNETIDLEN (4u) + +/* + * r_owner + * + * The "owner" is allowed to unset a service in the rpcbind database. + * We always use the following (arbitrary) fixed string. 
+ */ +#define RPCB_OWNER_STRING "rpcb" +#define RPCB_MAXOWNERLEN sizeof(RPCB_OWNER_STRING) + +static void rpcb_getport_done(struct rpc_task *, void *); +extern struct rpc_program rpcb_program; + +struct rpcbind_args { + struct rpc_xprt * r_xprt; + + u32 r_prog; + u32 r_vers; + u32 r_prot; + unsigned short r_port; + char * r_netid; + char r_addr[RPCB_MAXADDRLEN]; + char * r_owner; +}; + +static struct rpc_procinfo rpcb_procedures2[]; +static struct rpc_procinfo rpcb_procedures3[]; + +static struct rpcb_info { + int rpc_vers; + struct rpc_procinfo * rpc_proc; +} rpcb_next_version[]; + +static void rpcb_getport_prepare(struct rpc_task *task, void *calldata) +{ + struct rpcbind_args *map = calldata; + struct rpc_xprt *xprt = map->r_xprt; + struct rpc_message msg = { + .rpc_proc = rpcb_next_version[xprt->bind_index].rpc_proc, + .rpc_argp = map, + .rpc_resp = &map->r_port, + }; + + rpc_call_setup(task, &msg, 0); +} + +static void rpcb_map_release(void *data) +{ + struct rpcbind_args *map = data; + + xprt_put(map->r_xprt); + kfree(map); +} + +static const struct rpc_call_ops rpcb_getport_ops = { + .rpc_call_prepare = rpcb_getport_prepare, + .rpc_call_done = rpcb_getport_done, + .rpc_release = rpcb_map_release, +}; + +static void rpcb_wake_rpcbind_waiters(struct rpc_xprt *xprt, int status) +{ + xprt_clear_binding(xprt); + rpc_wake_up_status(&xprt->binding, status); +} + +static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, + int proto, int version, int privileged) +{ + struct rpc_create_args args = { + .protocol = proto, + .address = srvaddr, + .addrsize = sizeof(struct sockaddr_in), + .servername = hostname, + .program = &rpcb_program, + .version = version, + .authflavor = RPC_AUTH_UNIX, + .flags = (RPC_CLNT_CREATE_ONESHOT | + RPC_CLNT_CREATE_NOPING), + }; + + ((struct sockaddr_in *)srvaddr)->sin_port = htons(RPCBIND_PORT); + if (!privileged) + args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; + return rpc_create(&args); +} + +/** + * rpcb_register - set or unset a port registration with the local rpcbind svc + * @prog: RPC program number to bind + * @vers: RPC version number to bind + * @prot: transport protocol to use to make this request + * @port: port value to register + * @okay: result code + * + * port == 0 means unregister, port != 0 means register. + * + * This routine supports only rpcbind version 2. + */ +int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) +{ + struct sockaddr_in sin = { + .sin_family = AF_INET, + .sin_addr.s_addr = htonl(INADDR_LOOPBACK), + }; + struct rpcbind_args map = { + .r_prog = prog, + .r_vers = vers, + .r_prot = prot, + .r_port = port, + }; + struct rpc_message msg = { + .rpc_proc = &rpcb_procedures2[port ? + RPCBPROC_SET : RPCBPROC_UNSET], + .rpc_argp = &map, + .rpc_resp = okay, + }; + struct rpc_clnt *rpcb_clnt; + int error = 0; + + dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " + "rpcbind\n", (port ? 
"" : "un"), + prog, vers, prot, port); + + rpcb_clnt = rpcb_create("localhost", (struct sockaddr *) &sin, + IPPROTO_UDP, 2, 1); + if (IS_ERR(rpcb_clnt)) + return PTR_ERR(rpcb_clnt); + + error = rpc_call_sync(rpcb_clnt, &msg, 0); + + if (error < 0) + printk(KERN_WARNING "RPC: failed to contact local rpcbind " + "server (errno %d).\n", -error); + dprintk("RPC: registration status %d/%d\n", error, *okay); + + return error; +} + +#ifdef CONFIG_ROOT_NFS +/** + * rpcb_getport_external - obtain the port for an RPC service on a given host + * @sin: address of remote peer + * @prog: RPC program number to bind + * @vers: RPC version number to bind + * @prot: transport protocol to use to make this request + * + * Called from outside the RPC client in a synchronous task context. + * + * For now, this supports only version 2 queries, but is used only by + * mount_clnt for NFS_ROOT. + */ +int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, + __u32 vers, int prot) +{ + struct rpcbind_args map = { + .r_prog = prog, + .r_vers = vers, + .r_prot = prot, + .r_port = 0, + }; + struct rpc_message msg = { + .rpc_proc = &rpcb_procedures2[RPCBPROC_GETPORT], + .rpc_argp = &map, + .rpc_resp = &map.r_port, + }; + struct rpc_clnt *rpcb_clnt; + char hostname[40]; + int status; + + dprintk("RPC: rpcb_getport_external(%u.%u.%u.%u, %u, %u, %d)\n", + NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); + + sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); + rpcb_clnt = rpcb_create(hostname, (struct sockaddr *)sin, prot, 2, 0); + if (IS_ERR(rpcb_clnt)) + return PTR_ERR(rpcb_clnt); + + status = rpc_call_sync(rpcb_clnt, &msg, 0); + + if (status >= 0) { + if (map.r_port != 0) + return map.r_port; + status = -EACCES; + } + return status; +} +#endif + +/** + * rpcb_getport - obtain the port for a given RPC service on a given host + * @task: task that is waiting for portmapper request + * + * This one can be called for an ongoing RPC request, and can be used in + * an async (rpciod) context. 
+ */ +void rpcb_getport(struct rpc_task *task) +{ + struct rpc_clnt *clnt = task->tk_client; + int bind_version; + struct rpc_xprt *xprt = task->tk_xprt; + struct rpc_clnt *rpcb_clnt; + static struct rpcbind_args *map; + struct rpc_task *child; + struct sockaddr addr; + int status; + + dprintk("RPC: %5u rpcb_getport(%s, %u, %u, %d)\n", + task->tk_pid, clnt->cl_server, + clnt->cl_prog, clnt->cl_vers, xprt->prot); + + /* Autobind on cloned rpc clients is discouraged */ + BUG_ON(clnt->cl_parent != clnt); + + if (xprt_test_and_set_binding(xprt)) { + status = -EACCES; /* tell caller to check again */ + dprintk("RPC: %5u rpcb_getport waiting for another binder\n", + task->tk_pid); + goto bailout_nowake; + } + + /* Put self on queue before sending rpcbind request, in case + * rpcb_getport_done completes before we return from rpc_run_task */ + rpc_sleep_on(&xprt->binding, task, NULL, NULL); + + /* Someone else may have bound if we slept */ + if (xprt_bound(xprt)) { + status = 0; + dprintk("RPC: %5u rpcb_getport already bound\n", task->tk_pid); + goto bailout_nofree; + } + + if (rpcb_next_version[xprt->bind_index].rpc_proc == NULL) { + xprt->bind_index = 0; + status = -EACCES; /* tell caller to try again later */ + dprintk("RPC: %5u rpcb_getport no more getport versions " + "available\n", task->tk_pid); + goto bailout_nofree; + } + bind_version = rpcb_next_version[xprt->bind_index].rpc_vers; + + dprintk("RPC: %5u rpcb_getport trying rpcbind version %u\n", + task->tk_pid, bind_version); + + map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC); + if (!map) { + status = -ENOMEM; + dprintk("RPC: %5u rpcb_getport no memory available\n", + task->tk_pid); + goto bailout_nofree; + } + map->r_prog = clnt->cl_prog; + map->r_vers = clnt->cl_vers; + map->r_prot = xprt->prot; + map->r_port = 0; + map->r_xprt = xprt_get(xprt); + map->r_netid = (xprt->prot == IPPROTO_TCP) ? RPCB_NETID_TCP : + RPCB_NETID_UDP; + memcpy(&map->r_addr, rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR), + sizeof(map->r_addr)); + map->r_owner = RPCB_OWNER_STRING; /* ignored for GETADDR */ + + rpc_peeraddr(clnt, (void *)&addr, sizeof(addr)); + rpcb_clnt = rpcb_create(clnt->cl_server, &addr, xprt->prot, bind_version, 0); + if (IS_ERR(rpcb_clnt)) { + status = PTR_ERR(rpcb_clnt); + dprintk("RPC: %5u rpcb_getport rpcb_create failed, error %ld\n", + task->tk_pid, PTR_ERR(rpcb_clnt)); + goto bailout; + } + + child = rpc_run_task(rpcb_clnt, RPC_TASK_ASYNC, &rpcb_getport_ops, map); + if (IS_ERR(child)) { + status = -EIO; + dprintk("RPC: %5u rpcb_getport rpc_run_task failed\n", + task->tk_pid); + goto bailout_nofree; + } + rpc_put_task(child); + + task->tk_xprt->stat.bind_count++; + return; + +bailout: + kfree(map); + xprt_put(xprt); +bailout_nofree: + rpcb_wake_rpcbind_waiters(xprt, status); +bailout_nowake: + task->tk_status = status; +} + +/* + * Rpcbind child task calls this callback via tk_exit. + */ +static void rpcb_getport_done(struct rpc_task *child, void *data) +{ + struct rpcbind_args *map = data; + struct rpc_xprt *xprt = map->r_xprt; + int status = child->tk_status; + + /* rpcbind server doesn't support this rpcbind protocol version */ + if (status == -EPROTONOSUPPORT) + xprt->bind_index++; + + if (status < 0) { + /* rpcbind server not available on remote host? 
*/ + xprt->ops->set_port(xprt, 0); + } else if (map->r_port == 0) { + /* Requested RPC service wasn't registered on remote host */ + xprt->ops->set_port(xprt, 0); + status = -EACCES; + } else { + /* Succeeded */ + xprt->ops->set_port(xprt, map->r_port); + xprt_set_bound(xprt); + status = 0; + } + + dprintk("RPC: %5u rpcb_getport_done(status %d, port %u)\n", + child->tk_pid, status, map->r_port); + + rpcb_wake_rpcbind_waiters(xprt, status); +} + +static int rpcb_encode_mapping(struct rpc_rqst *req, __be32 *p, + struct rpcbind_args *rpcb) +{ + dprintk("RPC: rpcb_encode_mapping(%u, %u, %d, %u)\n", + rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port); + *p++ = htonl(rpcb->r_prog); + *p++ = htonl(rpcb->r_vers); + *p++ = htonl(rpcb->r_prot); + *p++ = htonl(rpcb->r_port); + + req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); + return 0; +} + +static int rpcb_decode_getport(struct rpc_rqst *req, __be32 *p, + unsigned short *portp) +{ + *portp = (unsigned short) ntohl(*p++); + dprintk("RPC: rpcb_decode_getport result %u\n", + *portp); + return 0; +} + +static int rpcb_decode_set(struct rpc_rqst *req, __be32 *p, + unsigned int *boolp) +{ + *boolp = (unsigned int) ntohl(*p++); + dprintk("RPC: rpcb_decode_set result %u\n", + *boolp); + return 0; +} + +static int rpcb_encode_getaddr(struct rpc_rqst *req, __be32 *p, + struct rpcbind_args *rpcb) +{ + dprintk("RPC: rpcb_encode_getaddr(%u, %u, %s)\n", + rpcb->r_prog, rpcb->r_vers, rpcb->r_addr); + *p++ = htonl(rpcb->r_prog); + *p++ = htonl(rpcb->r_vers); + + p = xdr_encode_string(p, rpcb->r_netid); + p = xdr_encode_string(p, rpcb->r_addr); + p = xdr_encode_string(p, rpcb->r_owner); + + req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); + + return 0; +} + +static int rpcb_decode_getaddr(struct rpc_rqst *req, __be32 *p, + unsigned short *portp) +{ + char *addr; + int addr_len, c, i, f, first, val; + + *portp = 0; + addr_len = (unsigned int) ntohl(*p++); + if (addr_len > RPCB_MAXADDRLEN) /* sanity */ + return -EINVAL; + + dprintk("RPC: rpcb_decode_getaddr returned string: '%s'\n", + (char *) p); + + addr = (char *)p; + val = 0; + first = 1; + f = 1; + for (i = addr_len - 1; i > 0; i--) { + c = addr[i]; + if (c >= '0' && c <= '9') { + val += (c - '0') * f; + f *= 10; + } else if (c == '.') { + if (first) { + *portp = val; + val = first = 0; + f = 1; + } else { + *portp |= (val << 8); + break; + } + } + } + + dprintk("RPC: rpcb_decode_getaddr port=%u\n", *portp); + return 0; +} + +#define RPCB_program_sz (1u) +#define RPCB_version_sz (1u) +#define RPCB_protocol_sz (1u) +#define RPCB_port_sz (1u) +#define RPCB_boolean_sz (1u) + +#define RPCB_netid_sz (1+XDR_QUADLEN(RPCB_MAXNETIDLEN)) +#define RPCB_addr_sz (1+XDR_QUADLEN(RPCB_MAXADDRLEN)) +#define RPCB_ownerstring_sz (1+XDR_QUADLEN(RPCB_MAXOWNERLEN)) + +#define RPCB_mappingargs_sz RPCB_program_sz+RPCB_version_sz+ \ + RPCB_protocol_sz+RPCB_port_sz +#define RPCB_getaddrargs_sz RPCB_program_sz+RPCB_version_sz+ \ + RPCB_netid_sz+RPCB_addr_sz+ \ + RPCB_ownerstring_sz + +#define RPCB_setres_sz RPCB_boolean_sz +#define RPCB_getportres_sz RPCB_port_sz + +/* + * Note that RFC 1833 does not put any size restrictions on the + * address string returned by the remote rpcbind database. 
+ */ +#define RPCB_getaddrres_sz RPCB_addr_sz + +#define PROC(proc, argtype, restype) \ + [RPCBPROC_##proc] = { \ + .p_proc = RPCBPROC_##proc, \ + .p_encode = (kxdrproc_t) rpcb_encode_##argtype, \ + .p_decode = (kxdrproc_t) rpcb_decode_##restype, \ + .p_arglen = RPCB_##argtype##args_sz, \ + .p_replen = RPCB_##restype##res_sz, \ + .p_statidx = RPCBPROC_##proc, \ + .p_timer = 0, \ + .p_name = #proc, \ + } + +/* + * Not all rpcbind procedures described in RFC 1833 are implemented + * since the Linux kernel RPC code requires only these. + */ +static struct rpc_procinfo rpcb_procedures2[] = { + PROC(SET, mapping, set), + PROC(UNSET, mapping, set), + PROC(GETADDR, mapping, getport), +}; + +static struct rpc_procinfo rpcb_procedures3[] = { + PROC(SET, mapping, set), + PROC(UNSET, mapping, set), + PROC(GETADDR, getaddr, getaddr), +}; + +static struct rpc_procinfo rpcb_procedures4[] = { + PROC(SET, mapping, set), + PROC(UNSET, mapping, set), + PROC(GETVERSADDR, getaddr, getaddr), +}; + +static struct rpcb_info rpcb_next_version[] = { +#ifdef CONFIG_SUNRPC_BIND34 + { 4, &rpcb_procedures4[RPCBPROC_GETVERSADDR] }, + { 3, &rpcb_procedures3[RPCBPROC_GETADDR] }, +#endif + { 2, &rpcb_procedures2[RPCBPROC_GETPORT] }, + { 0, NULL }, +}; + +static struct rpc_version rpcb_version2 = { + .number = 2, + .nrprocs = RPCB_HIGHPROC_2, + .procs = rpcb_procedures2 +}; + +static struct rpc_version rpcb_version3 = { + .number = 3, + .nrprocs = RPCB_HIGHPROC_3, + .procs = rpcb_procedures3 +}; + +static struct rpc_version rpcb_version4 = { + .number = 4, + .nrprocs = RPCB_HIGHPROC_4, + .procs = rpcb_procedures4 +}; + +static struct rpc_version *rpcb_version[] = { + NULL, + NULL, + &rpcb_version2, + &rpcb_version3, + &rpcb_version4 +}; + +static struct rpc_stat rpcb_stats; + +struct rpc_program rpcb_program = { + .name = "rpcbind", + .number = RPCBIND_PROGRAM, + .nrvers = ARRAY_SIZE(rpcb_version), + .version = rpcb_version, + .stats = &rpcb_stats, +}; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 81fe830da8aa..5b05b73e4c1d 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -927,6 +927,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si xprt->timer.data = (unsigned long) xprt; xprt->last_used = jiffies; xprt->cwnd = RPC_INITCWND; + xprt->bind_index = 0; rpc_init_wait_queue(&xprt->binding, "xprt_binding"); rpc_init_wait_queue(&xprt->pending, "xprt_pending"); -- cgit v1.2.3 From 4c2eaf073f0cc2b5bf593b8133c078b9d9406e95 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 29 Mar 2007 16:48:27 -0400 Subject: SUNRPC: remove old portmapper net/sunrpc/pmap_clnt.c has been replaced by net/sunrpc/rpcb_clnt.c. 
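For in-kernel callers the conversion is mechanical: rpc_register() and rpc_getport() go away, and rpcb_register()/rpcb_getport() take their place with the same calling conventions. A minimal sketch of the replacement call, using made-up program, version and port numbers purely for illustration:

#include <linux/in.h>
#include <linux/sunrpc/clnt.h>

static int example_register(void)
{
	int okay = 0;
	int error;

	/* was: error = rpc_register(100099, 1, IPPROTO_UDP, 2049, &okay); */
	error = rpcb_register(100099, 1, IPPROTO_UDP, 2049, &okay);
	if (error < 0)
		return error;		/* local rpcbind could not be reached */

	return okay ? 0 : -EACCES;	/* rpcbind rejected the mapping */
}
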
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- include/linux/sunrpc/clnt.h | 3 - include/linux/sunrpc/debug.h | 1 - include/linux/sunrpc/msg_prot.h | 4 - net/sunrpc/Makefile | 2 +- net/sunrpc/pmap_clnt.c | 386 ---------------------------------------- 5 files changed, 1 insertion(+), 395 deletions(-) delete mode 100644 net/sunrpc/pmap_clnt.c (limited to 'include/linux') diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index ca7553785932..66611423c8ee 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -122,8 +122,6 @@ struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); int rpc_shutdown_client(struct rpc_clnt *); int rpc_destroy_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); -void rpc_getport(struct rpc_task *); -int rpc_register(u32, u32, int, unsigned short, int *); int rpcb_register(u32, u32, int, unsigned short, int *); void rpcb_getport(struct rpc_task *); @@ -147,7 +145,6 @@ char * rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); /* * Helper function for NFSroot support */ -int rpc_getport_external(struct sockaddr_in *, __u32, __u32, int); int rpcb_getport_external(struct sockaddr_in *, __u32, __u32, int); #endif /* __KERNEL__ */ diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 707f96fe47d3..3912cf16361e 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -17,7 +17,6 @@ #define RPCDBG_DEBUG 0x0004 #define RPCDBG_NFS 0x0008 #define RPCDBG_AUTH 0x0010 -#define RPCDBG_PMAP 0x0020 #define RPCDBG_BIND 0x0020 #define RPCDBG_SCHED 0x0040 #define RPCDBG_TRANS 0x0080 diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 606cb2165232..784d4c3ef651 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -78,10 +78,6 @@ enum rpc_auth_stat { RPCSEC_GSS_CTXPROBLEM = 14 }; -#define RPC_PMAP_PROGRAM 100000 -#define RPC_PMAP_VERSION 2 -#define RPC_PMAP_PORT 111 - #define RPC_MAXNETNAMELEN 256 /* diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile index 3417a1ef1f95..8ebfc4db7f51 100644 --- a/net/sunrpc/Makefile +++ b/net/sunrpc/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_SUNRPC_GSS) += auth_gss/ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ auth.o auth_null.o auth_unix.o \ svc.o svcsock.o svcauth.o svcauth_unix.o \ - pmap_clnt.o rpcb_clnt.o timer.o xdr.o \ + rpcb_clnt.o timer.o xdr.o \ sunrpc_syms.o cache.o rpc_pipe.o sunrpc-$(CONFIG_PROC_FS) += stats.o sunrpc-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c deleted file mode 100644 index c45fc4c99513..000000000000 --- a/net/sunrpc/pmap_clnt.c +++ /dev/null @@ -1,386 +0,0 @@ -/* - * linux/net/sunrpc/pmap_clnt.c - * - * In-kernel RPC portmapper client. - * - * Portmapper supports version 2 of the rpcbind protocol (RFC 1833). 
- * - * Copyright (C) 1996, Olaf Kirch - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef RPC_DEBUG -# define RPCDBG_FACILITY RPCDBG_PMAP -#endif - -#define PMAP_SET 1 -#define PMAP_UNSET 2 -#define PMAP_GETPORT 3 - -struct portmap_args { - u32 pm_prog; - u32 pm_vers; - u32 pm_prot; - unsigned short pm_port; - struct rpc_xprt * pm_xprt; -}; - -static struct rpc_procinfo pmap_procedures[]; -static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int, int); -static void pmap_getport_done(struct rpc_task *, void *); -static struct rpc_program pmap_program; - -static void pmap_getport_prepare(struct rpc_task *task, void *calldata) -{ - struct portmap_args *map = calldata; - struct rpc_message msg = { - .rpc_proc = &pmap_procedures[PMAP_GETPORT], - .rpc_argp = map, - .rpc_resp = &map->pm_port, - }; - - rpc_call_setup(task, &msg, 0); -} - -static inline struct portmap_args *pmap_map_alloc(void) -{ - return kmalloc(sizeof(struct portmap_args), GFP_NOFS); -} - -static inline void pmap_map_free(struct portmap_args *map) -{ - kfree(map); -} - -static void pmap_map_release(void *data) -{ - struct portmap_args *map = data; - - xprt_put(map->pm_xprt); - pmap_map_free(map); -} - -static const struct rpc_call_ops pmap_getport_ops = { - .rpc_call_prepare = pmap_getport_prepare, - .rpc_call_done = pmap_getport_done, - .rpc_release = pmap_map_release, -}; - -static inline void pmap_wake_portmap_waiters(struct rpc_xprt *xprt, int status) -{ - xprt_clear_binding(xprt); - rpc_wake_up_status(&xprt->binding, status); -} - -/** - * rpc_getport - obtain the port for a given RPC service on a given host - * @task: task that is waiting for portmapper request - * - * This one can be called for an ongoing RPC request, and can be used in - * an async (rpciod) context. 
- */ -void rpc_getport(struct rpc_task *task) -{ - struct rpc_clnt *clnt = task->tk_client; - struct rpc_xprt *xprt = task->tk_xprt; - struct sockaddr_in addr; - struct portmap_args *map; - struct rpc_clnt *pmap_clnt; - struct rpc_task *child; - int status; - - dprintk("RPC: %5u rpc_getport(%s, %u, %u, %d)\n", - task->tk_pid, clnt->cl_server, - clnt->cl_prog, clnt->cl_vers, xprt->prot); - - /* Autobind on cloned rpc clients is discouraged */ - BUG_ON(clnt->cl_parent != clnt); - - status = -EACCES; /* tell caller to check again */ - if (xprt_test_and_set_binding(xprt)) - goto bailout_nowake; - - /* Put self on queue before sending rpcbind request, in case - * pmap_getport_done completes before we return from rpc_run_task */ - rpc_sleep_on(&xprt->binding, task, NULL, NULL); - - /* Someone else may have bound if we slept */ - status = 0; - if (xprt_bound(xprt)) - goto bailout_nofree; - - status = -ENOMEM; - map = pmap_map_alloc(); - if (!map) - goto bailout_nofree; - map->pm_prog = clnt->cl_prog; - map->pm_vers = clnt->cl_vers; - map->pm_prot = xprt->prot; - map->pm_port = 0; - map->pm_xprt = xprt_get(xprt); - - rpc_peeraddr(clnt, (struct sockaddr *) &addr, sizeof(addr)); - pmap_clnt = pmap_create(clnt->cl_server, &addr, map->pm_prot, 0); - status = PTR_ERR(pmap_clnt); - if (IS_ERR(pmap_clnt)) - goto bailout; - - status = -EIO; - child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map); - if (IS_ERR(child)) - goto bailout_nofree; - rpc_put_task(child); - - task->tk_xprt->stat.bind_count++; - return; - -bailout: - pmap_map_free(map); - xprt_put(xprt); -bailout_nofree: - pmap_wake_portmap_waiters(xprt, status); -bailout_nowake: - task->tk_status = status; -} - -#ifdef CONFIG_ROOT_NFS -/** - * rpc_getport_external - obtain the port for a given RPC service on a given host - * @sin: address of remote peer - * @prog: RPC program number to bind - * @vers: RPC version number to bind - * @prot: transport protocol to use to make this request - * - * This one is called from outside the RPC client in a synchronous task context. - */ -int rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot) -{ - struct portmap_args map = { - .pm_prog = prog, - .pm_vers = vers, - .pm_prot = prot, - .pm_port = 0 - }; - struct rpc_message msg = { - .rpc_proc = &pmap_procedures[PMAP_GETPORT], - .rpc_argp = &map, - .rpc_resp = &map.pm_port, - }; - struct rpc_clnt *pmap_clnt; - char hostname[32]; - int status; - - dprintk("RPC: rpc_getport_external(%u.%u.%u.%u, %u, %u, %d)\n", - NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); - - sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); - pmap_clnt = pmap_create(hostname, sin, prot, 0); - if (IS_ERR(pmap_clnt)) - return PTR_ERR(pmap_clnt); - - /* Setup the call info struct */ - status = rpc_call_sync(pmap_clnt, &msg, 0); - - if (status >= 0) { - if (map.pm_port != 0) - return map.pm_port; - status = -EACCES; - } - return status; -} -#endif - -/* - * Portmapper child task invokes this callback via tk_exit. 
- */ -static void pmap_getport_done(struct rpc_task *child, void *data) -{ - struct portmap_args *map = data; - struct rpc_xprt *xprt = map->pm_xprt; - int status = child->tk_status; - - if (status < 0) { - /* Portmapper not available */ - xprt->ops->set_port(xprt, 0); - } else if (map->pm_port == 0) { - /* Requested RPC service wasn't registered */ - xprt->ops->set_port(xprt, 0); - status = -EACCES; - } else { - /* Succeeded */ - xprt->ops->set_port(xprt, map->pm_port); - xprt_set_bound(xprt); - status = 0; - } - - dprintk("RPC: %5u pmap_getport_done(status %d, port %u)\n", - child->tk_pid, status, map->pm_port); - - pmap_wake_portmap_waiters(xprt, status); -} - -/** - * rpc_register - set or unset a port registration with the local portmapper - * @prog: RPC program number to bind - * @vers: RPC version number to bind - * @prot: transport protocol to use to make this request - * @port: port value to register - * @okay: result code - * - * port == 0 means unregister, port != 0 means register. - */ -int rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) -{ - struct sockaddr_in sin = { - .sin_family = AF_INET, - .sin_addr.s_addr = htonl(INADDR_LOOPBACK), - }; - struct portmap_args map = { - .pm_prog = prog, - .pm_vers = vers, - .pm_prot = prot, - .pm_port = port, - }; - struct rpc_message msg = { - .rpc_proc = &pmap_procedures[port ? PMAP_SET : PMAP_UNSET], - .rpc_argp = &map, - .rpc_resp = okay, - }; - struct rpc_clnt *pmap_clnt; - int error = 0; - - dprintk("RPC: registering (%u, %u, %d, %u) with portmapper.\n", - prog, vers, prot, port); - - pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); - if (IS_ERR(pmap_clnt)) { - error = PTR_ERR(pmap_clnt); - dprintk("RPC: couldn't create pmap client. Error = %d\n", - error); - return error; - } - - error = rpc_call_sync(pmap_clnt, &msg, 0); - - if (error < 0) { - printk(KERN_WARNING - "RPC: failed to contact portmap (errno %d).\n", - error); - } - dprintk("RPC: registration status %d/%d\n", error, *okay); - - /* Client deleted automatically because cl_oneshot == 1 */ - return error; -} - -static struct rpc_clnt *pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged) -{ - struct rpc_create_args args = { - .protocol = proto, - .address = (struct sockaddr *)srvaddr, - .addrsize = sizeof(*srvaddr), - .servername = hostname, - .program = &pmap_program, - .version = RPC_PMAP_VERSION, - .authflavor = RPC_AUTH_UNIX, - .flags = (RPC_CLNT_CREATE_ONESHOT | - RPC_CLNT_CREATE_NOPING), - }; - - srvaddr->sin_port = htons(RPC_PMAP_PORT); - if (!privileged) - args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; - return rpc_create(&args); -} - -/* - * XDR encode/decode functions for PMAP - */ -static int xdr_encode_mapping(struct rpc_rqst *req, __be32 *p, struct portmap_args *map) -{ - dprintk("RPC: xdr_encode_mapping(%u, %u, %u, %u)\n", - map->pm_prog, map->pm_vers, - map->pm_prot, map->pm_port); - *p++ = htonl(map->pm_prog); - *p++ = htonl(map->pm_vers); - *p++ = htonl(map->pm_prot); - *p++ = htonl(map->pm_port); - - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int xdr_decode_port(struct rpc_rqst *req, __be32 *p, unsigned short *portp) -{ - *portp = (unsigned short) ntohl(*p++); - return 0; -} - -static int xdr_decode_bool(struct rpc_rqst *req, __be32 *p, unsigned int *boolp) -{ - *boolp = (unsigned int) ntohl(*p++); - return 0; -} - -static struct rpc_procinfo pmap_procedures[] = { -[PMAP_SET] = { - .p_proc = PMAP_SET, - .p_encode = (kxdrproc_t) xdr_encode_mapping, - .p_decode = 
(kxdrproc_t) xdr_decode_bool, - .p_arglen = 4, - .p_replen = 1, - .p_count = 1, - .p_statidx = PMAP_SET, - .p_name = "SET", - }, -[PMAP_UNSET] = { - .p_proc = PMAP_UNSET, - .p_encode = (kxdrproc_t) xdr_encode_mapping, - .p_decode = (kxdrproc_t) xdr_decode_bool, - .p_arglen = 4, - .p_replen = 1, - .p_count = 1, - .p_statidx = PMAP_UNSET, - .p_name = "UNSET", - }, -[PMAP_GETPORT] = { - .p_proc = PMAP_GETPORT, - .p_encode = (kxdrproc_t) xdr_encode_mapping, - .p_decode = (kxdrproc_t) xdr_decode_port, - .p_arglen = 4, - .p_replen = 1, - .p_count = 1, - .p_statidx = PMAP_GETPORT, - .p_name = "GETPORT", - }, -}; - -static struct rpc_version pmap_version2 = { - .number = 2, - .nrprocs = 4, - .procs = pmap_procedures -}; - -static struct rpc_version * pmap_version[] = { - NULL, - NULL, - &pmap_version2 -}; - -static struct rpc_stat pmap_stats; - -static struct rpc_program pmap_program = { - .name = "portmap", - .number = RPC_PMAP_PROGRAM, - .nrvers = ARRAY_SIZE(pmap_version), - .version = pmap_version, - .stats = &pmap_stats, -}; -- cgit v1.2.3 From 74dd34e6e8bb127ff4c182423154b294729b663b Mon Sep 17 00:00:00 2001 From: Steve Dickson Date: Sat, 14 Apr 2007 17:01:15 -0400 Subject: NFS: Added support to turn off the NFSv3 READDIRPLUS RPC. READDIRPLUS can be a performance hindrance when the client is working with large directories. In addition, some servers still have bugs in their implementations (e.g. Tru64 returns wrong values for the fsid). Add a mount flag to enable users to turn it off at mount time following the implementation in Apple's NFS client. Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 3 ++- fs/nfs/super.c | 1 + include/linux/nfs_mount.h | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 2190e6c2792e..5bd03b97002e 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -618,7 +618,8 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat if (clp->cl_nfsversion == 3) { if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) server->namelen = NFS3_MAXNAMLEN; - server->caps |= NFS_CAP_READDIRPLUS; + if (!(data->flags & NFS_MOUNT_NORDIRPLUS)) + server->caps |= NFS_CAP_READDIRPLUS; } else { if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) server->namelen = NFS2_MAXNAMLEN; diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 719464a04dda..ca20d3cc2609 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -290,6 +290,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, { NFS_MOUNT_NOAC, ",noac", "" }, { NFS_MOUNT_NONLM, ",nolock", "" }, { NFS_MOUNT_NOACL, ",noacl", "" }, + { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, { 0, NULL, NULL } }; const struct proc_nfs_info *nfs_infop; diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h index 659c75438454..cc8b9c59acb8 100644 --- a/include/linux/nfs_mount.h +++ b/include/linux/nfs_mount.h @@ -61,6 +61,7 @@ struct nfs_mount_data { #define NFS_MOUNT_NOACL 0x0800 /* 4 */ #define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */ #define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */ +#define NFS_MOUNT_NORDIRPLUS 0x4000 /* 5 */ #define NFS_MOUNT_FLAGMASK 0xFFFF #endif -- cgit v1.2.3 From 72c2be776bd6eec5186e316e6d9dd4aab78d314d Mon Sep 17 00:00:00 2001 From: David Teigland Date: Fri, 30 Mar 2007 15:06:16 -0500 Subject: [DLM] interface for purge (2/2) Add code to accept purge commands from userland. 
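From user space a purge is issued like any other command written to the lockspace device: fill in a struct dlm_write_request with the new DLM_USER_PURGE command and the target nodeid/pid in the new purge member of the union. A rough sketch (the open lockspace file descriptor is assumed to come from libdlm, and error handling is trimmed):

#include <string.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/dlm_device.h>

static int purge_orphan_locks(int ls_fd, __u32 nodeid, __u32 pid)
{
	struct dlm_write_request req;

	memset(&req, 0, sizeof(req));
	req.version[0] = DLM_DEVICE_VERSION_MAJOR;
	req.version[1] = DLM_DEVICE_VERSION_MINOR;
	req.version[2] = DLM_DEVICE_VERSION_PATCH;
	req.cmd = DLM_USER_PURGE;
	req.i.purge.nodeid = nodeid;	/* passed straight through to dlm_user_purge() */
	req.i.purge.pid = pid;

	return write(ls_fd, &req, sizeof(req)) < 0 ? -1 : 0;
}
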
Signed-off-by: David Teigland Signed-off-by: Steven Whitehouse --- fs/dlm/lock.h | 2 ++ fs/dlm/user.c | 28 ++++++++++++++++++++++++++++ include/linux/dlm_device.h | 9 ++++++++- 3 files changed, 38 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index 0843a3073ec3..64fc4ec40668 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h @@ -41,6 +41,8 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, uint32_t flags, uint32_t lkid, char *lvb_in); int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, uint32_t flags, uint32_t lkid); +int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc, + int nodeid, int pid); void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc); static inline int is_master(struct dlm_rsb *r) diff --git a/fs/dlm/user.c b/fs/dlm/user.c index c978c67b1eff..3e746a6ebac3 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -56,6 +56,7 @@ struct dlm_write_request32 { union { struct dlm_lock_params32 lock; struct dlm_lspace_params lspace; + struct dlm_purge_params purge; } i; }; @@ -92,6 +93,9 @@ static void compat_input(struct dlm_write_request *kb, kb->i.lspace.flags = kb32->i.lspace.flags; kb->i.lspace.minor = kb32->i.lspace.minor; strcpy(kb->i.lspace.name, kb32->i.lspace.name); + } else if (kb->cmd == DLM_USER_PURGE) { + kb->i.purge.nodeid = kb32->i.purge.nodeid; + kb->i.purge.pid = kb32->i.purge.pid; } else { kb->i.lock.mode = kb32->i.lock.mode; kb->i.lock.namelen = kb32->i.lock.namelen; @@ -320,6 +324,22 @@ fail: return error; } +static int device_user_purge(struct dlm_user_proc *proc, + struct dlm_purge_params *params) +{ + struct dlm_ls *ls; + int error; + + ls = dlm_find_lockspace_local(proc->lockspace); + if (!ls) + return -ENOENT; + + error = dlm_user_purge(ls, proc, params->nodeid, params->pid); + + dlm_put_lockspace(ls); + return error; +} + static int device_create_lockspace(struct dlm_lspace_params *params) { dlm_lockspace_t *lockspace; @@ -522,6 +542,14 @@ static ssize_t device_write(struct file *file, const char __user *buf, error = device_remove_lockspace(&kbuf->i.lspace); break; + case DLM_USER_PURGE: + if (!proc) { + log_print("no locking on control device"); + goto out_sig; + } + error = device_user_purge(proc, &kbuf->i.purge); + break; + default: log_print("Unknown command passed to DLM device : %d\n", kbuf->cmd); diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h index 2a2dd189b9fd..c2735cab2ebf 100644 --- a/include/linux/dlm_device.h +++ b/include/linux/dlm_device.h @@ -19,7 +19,7 @@ /* Version of the device interface */ #define DLM_DEVICE_VERSION_MAJOR 5 -#define DLM_DEVICE_VERSION_MINOR 0 +#define DLM_DEVICE_VERSION_MINOR 1 #define DLM_DEVICE_VERSION_PATCH 0 /* struct passed to the lock write */ @@ -44,6 +44,11 @@ struct dlm_lspace_params { char name[0]; }; +struct dlm_purge_params { + __u32 nodeid; + __u32 pid; +}; + struct dlm_write_request { __u32 version[3]; __u8 cmd; @@ -53,6 +58,7 @@ struct dlm_write_request { union { struct dlm_lock_params lock; struct dlm_lspace_params lspace; + struct dlm_purge_params purge; } i; }; @@ -76,6 +82,7 @@ struct dlm_lock_result { #define DLM_USER_QUERY 3 #define DLM_USER_CREATE_LOCKSPACE 4 #define DLM_USER_REMOVE_LOCKSPACE 5 +#define DLM_USER_PURGE 6 /* Arbitrary length restriction */ #define MAX_LS_NAME_LEN 64 -- cgit v1.2.3 From 4552f0cbd45225f2c1cbadc224505f14f8749569 Mon Sep 17 00:00:00 2001 From: Alex Dubov Date: Thu, 12 Apr 2007 16:59:12 +1000 Subject: tifm: hide details of interrupt 
processing from socket drivers Instead of passing transformed value of adapter interrupt status to socket drivers, implement two separate callbacks - one for card events and another for dma events. Signed-off-by: Alex Dubov Signed-off-by: Pierre Ossman --- drivers/misc/tifm_7xx1.c | 22 ++++++++++++++-------- drivers/misc/tifm_core.c | 11 ++++++----- drivers/mmc/tifm_sd.c | 43 ++++++++++++++++++++++++++++--------------- include/linux/tifm.h | 11 ++--------- 4 files changed, 50 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index bc60e2fc3c2c..d6652b3301dc 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -14,7 +14,13 @@ #include #define DRIVER_NAME "tifm_7xx1" -#define DRIVER_VERSION "0.7" +#define DRIVER_VERSION "0.8" + +#define TIFM_IRQ_ENABLE 0x80000000 +#define TIFM_IRQ_SOCKMASK(x) (x) +#define TIFM_IRQ_CARDMASK(x) ((x) << 8) +#define TIFM_IRQ_FIFOMASK(x) ((x) << 16) +#define TIFM_IRQ_SETALL 0xffffffff static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) { @@ -31,7 +37,7 @@ static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) struct tifm_adapter *fm = dev_id; struct tifm_dev *sock; unsigned int irq_status; - unsigned int sock_irq_status, cnt; + unsigned int cnt; spin_lock(&fm->lock); irq_status = readl(fm->addr + FM_INTERRUPT_STATUS); @@ -45,12 +51,12 @@ static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) for (cnt = 0; cnt < fm->num_sockets; cnt++) { sock = fm->sockets[cnt]; - sock_irq_status = (irq_status >> cnt) - & (TIFM_IRQ_FIFOMASK(1) - | TIFM_IRQ_CARDMASK(1)); - - if (sock && sock_irq_status) - sock->signal_irq(sock, sock_irq_status); + if (sock) { + if ((irq_status >> cnt) & TIFM_IRQ_FIFOMASK(1)) + sock->data_event(sock); + if ((irq_status >> cnt) & TIFM_IRQ_CARDMASK(1)) + sock->card_event(sock); + } } fm->socket_change_set |= irq_status diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 6b10ebe9d936..6799b9cca055 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -14,7 +14,7 @@ #include #define DRIVER_NAME "tifm_core" -#define DRIVER_VERSION "0.7" +#define DRIVER_VERSION "0.8" static DEFINE_IDR(tifm_adapter_idr); static DEFINE_SPINLOCK(tifm_adapter_lock); @@ -175,8 +175,7 @@ void tifm_free_device(struct device *dev) } EXPORT_SYMBOL(tifm_free_device); -static void tifm_dummy_signal_irq(struct tifm_dev *sock, - unsigned int sock_irq_status) +static void tifm_dummy_event(struct tifm_dev *sock) { return; } @@ -191,7 +190,8 @@ struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm) dev->dev.parent = fm->dev; dev->dev.bus = &tifm_bus_type; dev->dev.release = tifm_free_device; - dev->signal_irq = tifm_dummy_signal_irq; + dev->card_event = tifm_dummy_event; + dev->data_event = tifm_dummy_event; } return dev; } @@ -249,7 +249,8 @@ static int tifm_device_remove(struct device *dev) struct tifm_driver *drv = fm_dev->drv; if (drv) { - fm_dev->signal_irq = tifm_dummy_signal_irq; + fm_dev->card_event = tifm_dummy_event; + fm_dev->data_event = tifm_dummy_event; if (drv->remove) drv->remove(fm_dev); fm_dev->drv = NULL; diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c index 0581d09c58fc..8905b129e4e1 100644 --- a/drivers/mmc/tifm_sd.c +++ b/drivers/mmc/tifm_sd.c @@ -17,7 +17,7 @@ #include #define DRIVER_NAME "tifm_sd" -#define DRIVER_VERSION "0.7" +#define DRIVER_VERSION "0.8" static int no_dma = 0; static int fixed_timeout = 0; @@ -316,24 +316,38 @@ change_state: } /* Called from interrupt handler */ -static void 
tifm_sd_signal_irq(struct tifm_dev *sock, - unsigned int sock_irq_status) +static void tifm_sd_data_event(struct tifm_dev *sock) { struct tifm_sd *host; - unsigned int host_status = 0, fifo_status = 0; - int error_code = 0; + unsigned int fifo_status = 0; spin_lock(&sock->lock); host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); - if (sock_irq_status & FIFO_EVENT) { - fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); - writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS); + fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); + writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS); + + host->flags |= fifo_status & FIFO_RDY; + + if (host->req) + tifm_sd_process_cmd(sock, host, 0); + + dev_dbg(&sock->dev, "fifo_status %x\n", fifo_status); + spin_unlock(&sock->lock); + +} + +/* Called from interrupt handler */ +static void tifm_sd_card_event(struct tifm_dev *sock) +{ + struct tifm_sd *host; + unsigned int host_status = 0; + int error_code = 0; + + spin_lock(&sock->lock); + host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); - host->flags |= fifo_status & FIFO_RDY; - } - if (sock_irq_status & CARD_EVENT) { host_status = readl(sock->addr + SOCK_MMCSD_STATUS); writel(host_status, sock->addr + SOCK_MMCSD_STATUS); @@ -377,13 +391,11 @@ static void tifm_sd_signal_irq(struct tifm_dev *sock, host->written_blocks++; host->flags &= ~CARD_BUSY; } - } if (host->req) tifm_sd_process_cmd(sock, host, host_status); done: - dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n", - host_status, fifo_status); + dev_dbg(&sock->dev, "host_status %x\n", host_status); spin_unlock(&sock->lock); } @@ -882,7 +894,8 @@ static int tifm_sd_probe(struct tifm_dev *sock) mmc->max_blk_size = 2048; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; - sock->signal_irq = tifm_sd_signal_irq; + sock->card_event = tifm_sd_card_event; + sock->data_event = tifm_sd_data_event; rc = tifm_sd_initialize_host(host); if (!rc) diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 3deb0a6c1370..4470961655c5 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -60,13 +60,6 @@ enum { SOCK_MS_SYSTEM = 0x190, SOCK_FIFO_ACCESS = 0x200 }; - -#define TIFM_IRQ_ENABLE 0x80000000 -#define TIFM_IRQ_SOCKMASK(x) (x) -#define TIFM_IRQ_CARDMASK(x) ((x) << 8) -#define TIFM_IRQ_FIFOMASK(x) ((x) << 16) -#define TIFM_IRQ_SETALL 0xffffffff - #define TIFM_CTRL_LED 0x00000040 #define TIFM_CTRL_FAST_CLK 0x00000100 @@ -90,8 +83,8 @@ struct tifm_dev { tifm_media_id media_id; unsigned int socket_id; - void (*signal_irq)(struct tifm_dev *sock, - unsigned int sock_irq_status); + void (*card_event)(struct tifm_dev *sock); + void (*data_event)(struct tifm_dev *sock); struct tifm_driver *drv; struct device dev; -- cgit v1.2.3 From 8dc4a61eca31dd45a9d45f9bc9c67d959f0f6cbd Mon Sep 17 00:00:00 2001 From: Alex Dubov Date: Thu, 12 Apr 2007 16:59:13 +1000 Subject: tifm: use bus methods to handle probe/remove instead of driver ones. 
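With probe and remove wired up on the bus_type itself, a socket driver no longer has to poke at struct device_driver callbacks; it only fills in its own struct tifm_driver and registers it. An illustrative skeleton (names are hypothetical, and the id table still uses the tifm_media_id array that later patches in this series rework):

#include <linux/tifm.h>

static tifm_media_id example_id_tbl[] = { FM_SD, 0 };

static int example_probe(struct tifm_dev *sock)
{
	/* claim the socket; typically sets sock->card_event and sock->data_event */
	return 0;
}

static void example_remove(struct tifm_dev *sock)
{
	/* release whatever probe() claimed */
}

static struct tifm_driver example_driver = {
	.driver   = { .name = "tifm_example" },
	.id_table = example_id_tbl,
	.probe    = example_probe,
	.remove   = example_remove,
};

/* module init/exit reduce to tifm_register_driver()/tifm_unregister_driver() */
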
Signed-off-by: Alex Dubov Signed-off-by: Pierre Ossman --- drivers/misc/tifm_core.c | 102 ++++++++++++++++++++++------------------------- include/linux/tifm.h | 1 - 2 files changed, 47 insertions(+), 56 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 6799b9cca055..dcff45a19bcb 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -60,14 +60,54 @@ static int tifm_uevent(struct device *dev, char **envp, int num_envp, return 0; } +static int tifm_device_probe(struct device *dev) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); + int rc = -ENODEV; + + get_device(dev); + if (dev->driver && drv->probe) { + rc = drv->probe(sock); + if (!rc) + return 0; + } + put_device(dev); + return rc; +} + +static void tifm_dummy_event(struct tifm_dev *sock) +{ + return; +} + +static int tifm_device_remove(struct device *dev) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); + + if (dev->driver && drv->remove) { + sock->card_event = tifm_dummy_event; + sock->data_event = tifm_dummy_event; + drv->remove(sock); + sock->dev.driver = NULL; + } + + put_device(dev); + return 0; +} + #ifdef CONFIG_PM static int tifm_device_suspend(struct device *dev, pm_message_t state) { struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); - struct tifm_driver *drv = fm_dev->drv; + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); - if (drv && drv->suspend) + if (dev->driver && drv->suspend) return drv->suspend(fm_dev, state); return 0; } @@ -75,9 +115,10 @@ static int tifm_device_suspend(struct device *dev, pm_message_t state) static int tifm_device_resume(struct device *dev) { struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); - struct tifm_driver *drv = fm_dev->drv; + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); - if (drv && drv->resume) + if (dev->driver && drv->resume) return drv->resume(fm_dev); return 0; } @@ -93,6 +134,8 @@ static struct bus_type tifm_bus_type = { .name = "tifm", .match = tifm_match, .uevent = tifm_uevent, + .probe = tifm_device_probe, + .remove = tifm_device_remove, .suspend = tifm_device_suspend, .resume = tifm_device_resume }; @@ -175,11 +218,6 @@ void tifm_free_device(struct device *dev) } EXPORT_SYMBOL(tifm_free_device); -static void tifm_dummy_event(struct tifm_dev *sock) -{ - return; -} - struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm) { struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL); @@ -218,55 +256,9 @@ void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents, } EXPORT_SYMBOL(tifm_unmap_sg); -static int tifm_device_probe(struct device *dev) -{ - struct tifm_driver *drv; - struct tifm_dev *fm_dev; - int rc = 0; - const tifm_media_id *id; - - drv = container_of(dev->driver, struct tifm_driver, driver); - fm_dev = container_of(dev, struct tifm_dev, dev); - get_device(dev); - if (!fm_dev->drv && drv->probe && drv->id_table) { - rc = -ENODEV; - id = tifm_device_match(drv->id_table, fm_dev); - if (id) - rc = drv->probe(fm_dev); - if (rc >= 0) { - rc = 0; - fm_dev->drv = drv; - } - } - if (rc) - put_device(dev); - return rc; -} - -static int tifm_device_remove(struct device *dev) -{ - struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); - struct 
tifm_driver *drv = fm_dev->drv; - - if (drv) { - fm_dev->card_event = tifm_dummy_event; - fm_dev->data_event = tifm_dummy_event; - if (drv->remove) - drv->remove(fm_dev); - fm_dev->drv = NULL; - } - - put_device(dev); - return 0; -} - int tifm_register_driver(struct tifm_driver *drv) { drv->driver.bus = &tifm_bus_type; - drv->driver.probe = tifm_device_probe; - drv->driver.remove = tifm_device_remove; - drv->driver.suspend = tifm_device_suspend; - drv->driver.resume = tifm_device_resume; return driver_register(&drv->driver); } diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 4470961655c5..ee1056396b95 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -86,7 +86,6 @@ struct tifm_dev { void (*card_event)(struct tifm_dev *sock); void (*data_event)(struct tifm_dev *sock); - struct tifm_driver *drv; struct device dev; }; -- cgit v1.2.3 From e23f2b8a1a52c00f0150659eb0bfde3a73976ffe Mon Sep 17 00:00:00 2001 From: Alex Dubov Date: Thu, 12 Apr 2007 16:59:14 +1000 Subject: tifm: simplify bus match and uevent handlers Remove code duplicating the kernel functionality and clean up data structures involved in driver matching. Signed-off-by: Alex Dubov Signed-off-by: Pierre Ossman --- drivers/misc/tifm_7xx1.c | 14 ++++++------ drivers/misc/tifm_core.c | 57 +++++++++++++++++++++++++++++------------------- drivers/mmc/tifm_sd.c | 6 ++--- include/linux/tifm.h | 16 ++++++++------ 4 files changed, 53 insertions(+), 40 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index d6652b3301dc..fd7b8dadc821 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -73,7 +73,7 @@ static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, +static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is_x2) { unsigned int s_state; @@ -90,7 +90,7 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, s_state = readl(sock_addr + SOCK_PRESENT_STATE); if (!(TIFM_SOCK_STATE_OCCUPIED & s_state)) - return FM_NULL; + return 0; if (is_x2) { writel((s_state & 7) | 0x0c00, sock_addr + SOCK_CONTROL); @@ -129,7 +129,7 @@ static int tifm_7xx1_switch_media(void *data) { struct tifm_adapter *fm = data; unsigned long flags; - tifm_media_id media_id; + unsigned char media_id; char *card_name = "xx"; int cnt, rc; struct tifm_dev *sock; @@ -184,7 +184,7 @@ static int tifm_7xx1_switch_media(void *data) if (sock) { sock->addr = tifm_7xx1_sock_addr(fm->addr, cnt); - sock->media_id = media_id; + sock->type = media_id; sock->socket_id = cnt; switch (media_id) { case 1: @@ -266,7 +266,7 @@ static int tifm_7xx1_resume(struct pci_dev *dev) struct tifm_adapter *fm = pci_get_drvdata(dev); int cnt, rc; unsigned long flags; - tifm_media_id new_ids[fm->num_sockets]; + unsigned char new_ids[fm->num_sockets]; pci_set_power_state(dev, PCI_D0); pci_restore_state(dev); @@ -285,10 +285,10 @@ static int tifm_7xx1_resume(struct pci_dev *dev) fm->socket_change_set = 0; for (cnt = 0; cnt < fm->num_sockets; cnt++) { if (fm->sockets[cnt]) { - if (fm->sockets[cnt]->media_id == new_ids[cnt]) + if (fm->sockets[cnt]->type == new_ids[cnt]) fm->socket_change_set |= 1 << cnt; - fm->sockets[cnt]->media_id = new_ids[cnt]; + fm->sockets[cnt]->type = new_ids[cnt]; } } diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index dcff45a19bcb..6b2c447dc8ee 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -19,42 +19,53 
@@ static DEFINE_IDR(tifm_adapter_idr); static DEFINE_SPINLOCK(tifm_adapter_lock); -static tifm_media_id *tifm_device_match(tifm_media_id *ids, - struct tifm_dev *dev) +static const char *tifm_media_type_name(unsigned char type, unsigned char nt) { - while (*ids) { - if (dev->media_id == *ids) - return ids; - ids++; - } - return NULL; + const char *card_type_name[3][3] = { + { "SmartMedia/xD", "MemoryStick", "MMC/SD" }, + { "XD", "MS", "SD"}, + { "xd", "ms", "sd"} + }; + + if (nt > 2 || type < 1 || type > 3) + return NULL; + return card_type_name[nt][type - 1]; } -static int tifm_match(struct device *dev, struct device_driver *drv) +static int tifm_dev_match(struct tifm_dev *sock, struct tifm_device_id *id) { - struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); - struct tifm_driver *fm_drv; - - fm_drv = container_of(drv, struct tifm_driver, driver); - if (!fm_drv->id_table) - return -EINVAL; - if (tifm_device_match(fm_drv->id_table, fm_dev)) + if (sock->type == id->type) return 1; - return -ENODEV; + return 0; +} + +static int tifm_bus_match(struct device *dev, struct device_driver *drv) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *fm_drv = container_of(drv, struct tifm_driver, + driver); + struct tifm_device_id *ids = fm_drv->id_table; + + if (ids) { + while (ids->type) { + if (tifm_dev_match(sock, ids)) + return 1; + ++ids; + } + } + return 0; } static int tifm_uevent(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { - struct tifm_dev *fm_dev; + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); int i = 0; int length = 0; - const char *card_type_name[] = {"INV", "SM", "MS", "SD"}; - if (!dev || !(fm_dev = container_of(dev, struct tifm_dev, dev))) - return -ENODEV; if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, - "TIFM_CARD_TYPE=%s", card_type_name[fm_dev->media_id])) + "TIFM_CARD_TYPE=%s", + tifm_media_type_name(sock->type, 1))) return -ENOMEM; return 0; @@ -132,7 +143,7 @@ static int tifm_device_resume(struct device *dev) static struct bus_type tifm_bus_type = { .name = "tifm", - .match = tifm_match, + .match = tifm_bus_match, .uevent = tifm_uevent, .probe = tifm_device_probe, .remove = tifm_device_remove, diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c index 8905b129e4e1..4388ee9062a0 100644 --- a/drivers/mmc/tifm_sd.c +++ b/drivers/mmc/tifm_sd.c @@ -948,7 +948,7 @@ static int tifm_sd_resume(struct tifm_dev *sock) struct mmc_host *mmc = tifm_get_drvdata(sock); struct tifm_sd *host = mmc_priv(mmc); - if (sock->media_id != FM_SD + if (sock->type != TIFM_TYPE_SD || tifm_sd_initialize_host(host)) { tifm_eject(sock); return 0; @@ -964,8 +964,8 @@ static int tifm_sd_resume(struct tifm_dev *sock) #endif /* CONFIG_PM */ -static tifm_media_id tifm_sd_id_tbl[] = { - FM_SD, 0 +static struct tifm_device_id tifm_sd_id_tbl[] = { + { TIFM_TYPE_SD }, { } }; static struct tifm_driver tifm_sd_driver = { diff --git a/include/linux/tifm.h b/include/linux/tifm.h index ee1056396b95..57b2653494cf 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -74,13 +74,19 @@ enum { #define TIFM_DMA_TX 0x00008000 /* Meaning of this constant is unverified */ #define TIFM_DMA_EN 0x00000001 /* Meaning of this constant is unverified */ -typedef enum {FM_NULL = 0, FM_XD = 0x01, FM_MS = 0x02, FM_SD = 0x03} tifm_media_id; +#define TIFM_TYPE_XD 1 +#define TIFM_TYPE_MS 2 +#define TIFM_TYPE_SD 3 + +struct tifm_device_id { + unsigned char type; +}; struct tifm_driver; struct 
tifm_dev { char __iomem *addr; spinlock_t lock; - tifm_media_id media_id; + unsigned char type; unsigned int socket_id; void (*card_event)(struct tifm_dev *sock); @@ -90,7 +96,7 @@ struct tifm_dev { }; struct tifm_driver { - tifm_media_id *id_table; + struct tifm_device_id *id_table; int (*probe)(struct tifm_dev *dev); void (*remove)(struct tifm_dev *dev); int (*suspend)(struct tifm_dev *dev, @@ -141,8 +147,4 @@ static inline void tifm_set_drvdata(struct tifm_dev *dev, void *data) dev_set_drvdata(&dev->dev, data); } -struct tifm_device_id { - tifm_media_id media_id; -}; - #endif -- cgit v1.2.3 From 3540af8ffddcdbc7573451ac0b5cd57a2eaf8af5 Mon Sep 17 00:00:00 2001 From: Alex Dubov Date: Thu, 12 Apr 2007 16:59:15 +1000 Subject: tifm: replace per-adapter kthread with freezeable workqueue Freezeable workqueue makes sure that adapter work items (device insertions and removals) would be handled after the system is fully resumed. Previously this was achieved by explicit freezing of the kthread. Signed-off-by: Alex Dubov Signed-off-by: Pierre Ossman --- drivers/misc/tifm_7xx1.c | 89 ++++++++++++++++++------------------------------ drivers/misc/tifm_core.c | 51 ++++++++++++++++++--------- include/linux/tifm.h | 8 ++--- 3 files changed, 72 insertions(+), 76 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index fd7b8dadc821..e5655fef42d7 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -28,7 +28,7 @@ static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) spin_lock_irqsave(&fm->lock, flags); fm->socket_change_set |= 1 << sock->socket_id; - wake_up_all(&fm->change_set_notify); + tifm_queue_work(&fm->media_switcher); spin_unlock_irqrestore(&fm->lock, flags); } @@ -64,10 +64,12 @@ static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) } writel(irq_status, fm->addr + FM_INTERRUPT_STATUS); - if (!fm->socket_change_set) + if (fm->finish_me) + complete_all(fm->finish_me); + else if (!fm->socket_change_set) writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE); else - wake_up_all(&fm->change_set_notify); + tifm_queue_work(&fm->media_switcher); spin_unlock(&fm->lock); return IRQ_HANDLED; @@ -125,37 +127,29 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num) return base_addr + ((sock_num + 1) << 10); } -static int tifm_7xx1_switch_media(void *data) +static void tifm_7xx1_switch_media(struct work_struct *work) { - struct tifm_adapter *fm = data; + struct tifm_adapter *fm = container_of(work, struct tifm_adapter, + media_switcher); unsigned long flags; unsigned char media_id; char *card_name = "xx"; - int cnt, rc; + int cnt; struct tifm_dev *sock; unsigned int socket_change_set; - while (1) { - rc = wait_event_interruptible(fm->change_set_notify, - fm->socket_change_set); - if (rc == -ERESTARTSYS) - try_to_freeze(); - - spin_lock_irqsave(&fm->lock, flags); - socket_change_set = fm->socket_change_set; - fm->socket_change_set = 0; + spin_lock_irqsave(&fm->lock, flags); + socket_change_set = fm->socket_change_set; + fm->socket_change_set = 0; - dev_dbg(fm->dev, "checking media set %x\n", - socket_change_set); + dev_dbg(fm->dev, "checking media set %x\n", + socket_change_set); - if (kthread_should_stop()) - socket_change_set = (1 << fm->num_sockets) - 1; + if (!socket_change_set) { spin_unlock_irqrestore(&fm->lock, flags); + return; + } - if (!socket_change_set) - continue; - - spin_lock_irqsave(&fm->lock, flags); for (cnt = 0; cnt < fm->num_sockets; cnt++) { if (!(socket_change_set & (1 << 
cnt))) continue; @@ -172,8 +166,6 @@ static int tifm_7xx1_switch_media(void *data) tifm_7xx1_sock_addr(fm->addr, cnt) + SOCK_CONTROL); } - if (kthread_should_stop()) - continue; spin_unlock_irqrestore(&fm->lock, flags); media_id = tifm_7xx1_toggle_sock_power( @@ -222,30 +214,16 @@ static int tifm_7xx1_switch_media(void *data) } } - if (!kthread_should_stop()) { - writel(TIFM_IRQ_FIFOMASK(socket_change_set) - | TIFM_IRQ_CARDMASK(socket_change_set), - fm->addr + FM_CLEAR_INTERRUPT_ENABLE); - writel(TIFM_IRQ_FIFOMASK(socket_change_set) - | TIFM_IRQ_CARDMASK(socket_change_set), - fm->addr + FM_SET_INTERRUPT_ENABLE); - writel(TIFM_IRQ_ENABLE, - fm->addr + FM_SET_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&fm->lock, flags); - } else { - for (cnt = 0; cnt < fm->num_sockets; cnt++) { - if (fm->sockets[cnt]) - fm->socket_change_set |= 1 << cnt; - } - if (!fm->socket_change_set) { - spin_unlock_irqrestore(&fm->lock, flags); - return 0; - } else { - spin_unlock_irqrestore(&fm->lock, flags); - } - } - } - return 0; + writel(TIFM_IRQ_FIFOMASK(socket_change_set) + | TIFM_IRQ_CARDMASK(socket_change_set), + fm->addr + FM_CLEAR_INTERRUPT_ENABLE); + + writel(TIFM_IRQ_FIFOMASK(socket_change_set) + | TIFM_IRQ_CARDMASK(socket_change_set), + fm->addr + FM_SET_INTERRUPT_ENABLE); + + writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE); + spin_unlock_irqrestore(&fm->lock, flags); } #ifdef CONFIG_PM @@ -267,6 +245,7 @@ static int tifm_7xx1_resume(struct pci_dev *dev) int cnt, rc; unsigned long flags; unsigned char new_ids[fm->num_sockets]; + DECLARE_COMPLETION_ONSTACK(finish_resume); pci_set_power_state(dev, PCI_D0); pci_restore_state(dev); @@ -299,12 +278,14 @@ static int tifm_7xx1_resume(struct pci_dev *dev) return 0; } else { fm->socket_change_set = 0; + fm->finish_me = &finish_resume; spin_unlock_irqrestore(&fm->lock, flags); } - wait_event_timeout(fm->change_set_notify, fm->socket_change_set, HZ); + wait_for_completion_timeout(&finish_resume, HZ); spin_lock_irqsave(&fm->lock, flags); + fm->finish_me = NULL; writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set) | TIFM_IRQ_CARDMASK(fm->socket_change_set), fm->addr + FM_CLEAR_INTERRUPT_ENABLE); @@ -365,6 +346,7 @@ static int tifm_7xx1_probe(struct pci_dev *dev, if (!fm->sockets) goto err_out_free; + INIT_WORK(&fm->media_switcher, tifm_7xx1_switch_media); fm->eject = tifm_7xx1_eject; pci_set_drvdata(dev, fm); @@ -377,15 +359,14 @@ static int tifm_7xx1_probe(struct pci_dev *dev, if (rc) goto err_out_unmap; - init_waitqueue_head(&fm->change_set_notify); - rc = tifm_add_adapter(fm, tifm_7xx1_switch_media); + rc = tifm_add_adapter(fm); if (rc) goto err_out_irq; writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), fm->addr + FM_SET_INTERRUPT_ENABLE); - wake_up_process(fm->media_switcher); + return 0; err_out_irq: @@ -417,8 +398,6 @@ static void tifm_7xx1_remove(struct pci_dev *dev) fm->socket_change_set = (1 << fm->num_sockets) - 1; spin_unlock_irqrestore(&fm->lock, flags); - kthread_stop(fm->media_switcher); - tifm_remove_adapter(fm); pci_set_drvdata(dev, NULL); diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 6b2c447dc8ee..ef8a97b819dd 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -16,6 +16,7 @@ #define DRIVER_NAME "tifm_core" #define DRIVER_VERSION "0.8" +static struct workqueue_struct *workqueue; static DEFINE_IDR(tifm_adapter_idr); static DEFINE_SPINLOCK(tifm_adapter_lock); @@ -184,8 +185,7 @@ void tifm_free_adapter(struct 
tifm_adapter *fm) } EXPORT_SYMBOL(tifm_free_adapter); -int tifm_add_adapter(struct tifm_adapter *fm, - int (*mediathreadfn)(void *data)) +int tifm_add_adapter(struct tifm_adapter *fm) { int rc; @@ -197,16 +197,13 @@ int tifm_add_adapter(struct tifm_adapter *fm, spin_unlock(&tifm_adapter_lock); if (!rc) { snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id); - fm->media_switcher = kthread_create(mediathreadfn, - fm, "tifm/%u", fm->id); + rc = class_device_add(&fm->cdev); - if (!IS_ERR(fm->media_switcher)) - return class_device_add(&fm->cdev); - - spin_lock(&tifm_adapter_lock); - idr_remove(&tifm_adapter_idr, fm->id); - spin_unlock(&tifm_adapter_lock); - rc = -ENOMEM; + if (rc) { + spin_lock(&tifm_adapter_lock); + idr_remove(&tifm_adapter_idr, fm->id); + spin_unlock(&tifm_adapter_lock); + } } return rc; } @@ -214,6 +211,7 @@ EXPORT_SYMBOL(tifm_add_adapter); void tifm_remove_adapter(struct tifm_adapter *fm) { + flush_workqueue(workqueue); class_device_del(&fm->cdev); spin_lock(&tifm_adapter_lock); @@ -267,6 +265,12 @@ void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents, } EXPORT_SYMBOL(tifm_unmap_sg); +void tifm_queue_work(struct work_struct *work) +{ + queue_work(workqueue, work); +} +EXPORT_SYMBOL(tifm_queue_work); + int tifm_register_driver(struct tifm_driver *drv) { drv->driver.bus = &tifm_bus_type; @@ -283,13 +287,25 @@ EXPORT_SYMBOL(tifm_unregister_driver); static int __init tifm_init(void) { - int rc = bus_register(&tifm_bus_type); + int rc; - if (!rc) { - rc = class_register(&tifm_adapter_class); - if (rc) - bus_unregister(&tifm_bus_type); - } + workqueue = create_freezeable_workqueue("tifm"); + if (!workqueue) + return -ENOMEM; + + rc = bus_register(&tifm_bus_type); + + if (rc) + goto err_out_wq; + + rc = class_register(&tifm_adapter_class); + if (!rc) + return 0; + + bus_unregister(&tifm_bus_type); + +err_out_wq: + destroy_workqueue(workqueue); return rc; } @@ -298,6 +314,7 @@ static void __exit tifm_exit(void) { class_unregister(&tifm_adapter_class); bus_unregister(&tifm_bus_type); + destroy_workqueue(workqueue); } subsys_initcall(tifm_init); diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 57b2653494cf..d9de79275c21 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -111,11 +111,11 @@ struct tifm_adapter { spinlock_t lock; unsigned int irq_status; unsigned int socket_change_set; - wait_queue_head_t change_set_notify; unsigned int id; unsigned int num_sockets; + struct completion *finish_me; struct tifm_dev **sockets; - struct task_struct *media_switcher; + struct work_struct media_switcher; struct class_device cdev; struct device *dev; @@ -125,7 +125,7 @@ struct tifm_adapter { struct tifm_adapter *tifm_alloc_adapter(void); void tifm_free_device(struct device *dev); void tifm_free_adapter(struct tifm_adapter *fm); -int tifm_add_adapter(struct tifm_adapter *fm, int (*mediathreadfn)(void *data)); +int tifm_add_adapter(struct tifm_adapter *fm); void tifm_remove_adapter(struct tifm_adapter *fm); struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm); int tifm_register_driver(struct tifm_driver *drv); @@ -135,7 +135,7 @@ int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents, int direction); void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents, int direction); - +void tifm_queue_work(struct work_struct *work); static inline void *tifm_get_drvdata(struct tifm_dev *dev) { -- cgit v1.2.3 From 6113ed73e61a13db9da48831e1b35788b7f837cc Mon Sep 17 00:00:00 2001 From: Alex Dubov Date: Thu, 12 
Apr 2007 16:59:17 +1000 Subject: tifm: move common adapter management tasks from tifm_7xx1 to tifm_core Some details of the adapter management (create, add, remove) are really belong to the tifm_core, as they are not hardware specific. Signed-off-by: Alex Dubov Signed-off-by: Pierre Ossman --- drivers/misc/tifm_7xx1.c | 28 ++++++++++---------------- drivers/misc/tifm_core.c | 52 +++++++++++++++++++++++++++++------------------- include/linux/tifm.h | 16 +++++++++------ 3 files changed, 51 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index 34746798ba8e..e051f9da9c5c 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -22,6 +22,11 @@ #define TIFM_IRQ_FIFOMASK(x) ((x) << 16) #define TIFM_IRQ_SETALL 0xffffffff +static void tifm_7xx1_dummy_eject(struct tifm_adapter *fm, + struct tifm_dev *sock) +{ +} + static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) { unsigned long flags; @@ -140,7 +145,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work) socket_change_set = fm->socket_change_set; fm->socket_change_set = 0; - dev_dbg(fm->dev, "checking media set %x\n", + dev_dbg(fm->cdev.dev, "checking media set %x\n", socket_change_set); if (!socket_change_set) { @@ -328,20 +333,13 @@ static int tifm_7xx1_probe(struct pci_dev *dev, pci_intx(dev, 1); - fm = tifm_alloc_adapter(); + fm = tifm_alloc_adapter(dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM + ? 4 : 2, &dev->dev); if (!fm) { rc = -ENOMEM; goto err_out_int; } - fm->dev = &dev->dev; - fm->num_sockets = (dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM) - ? 4 : 2; - fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->num_sockets, - GFP_KERNEL); - if (!fm->sockets) - goto err_out_free; - INIT_WORK(&fm->media_switcher, tifm_7xx1_switch_media); fm->eject = tifm_7xx1_eject; pci_set_drvdata(dev, fm); @@ -351,7 +349,7 @@ static int tifm_7xx1_probe(struct pci_dev *dev, if (!fm->addr) goto err_out_free; - rc = request_irq(dev->irq, tifm_7xx1_isr, IRQF_SHARED, DRIVER_NAME, fm); + rc = request_irq(dev->irq, tifm_7xx1_isr, SA_SHIRQ, DRIVER_NAME, fm); if (rc) goto err_out_unmap; @@ -359,10 +357,8 @@ static int tifm_7xx1_probe(struct pci_dev *dev, if (rc) goto err_out_irq; - writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), fm->addr + FM_SET_INTERRUPT_ENABLE); - return 0; err_out_irq: @@ -384,16 +380,12 @@ err_out: static void tifm_7xx1_remove(struct pci_dev *dev) { struct tifm_adapter *fm = pci_get_drvdata(dev); - unsigned long flags; + fm->eject = tifm_7xx1_dummy_eject; writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); mmiowb(); free_irq(dev->irq, fm); - spin_lock_irqsave(&fm->lock, flags); - fm->socket_change_set = (1 << fm->num_sockets) - 1; - spin_unlock_irqrestore(&fm->lock, flags); - tifm_remove_adapter(fm); pci_set_drvdata(dev, NULL); diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index ef8a97b819dd..f0cce2a642df 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -156,7 +156,6 @@ static void tifm_free(struct class_device *cdev) { struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev); - kfree(fm->sockets); kfree(fm); } @@ -165,26 +164,24 @@ static struct class tifm_adapter_class = { .release = tifm_free }; -struct tifm_adapter *tifm_alloc_adapter(void) +struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, + struct device *dev) { struct tifm_adapter *fm; - fm = 
kzalloc(sizeof(struct tifm_adapter), GFP_KERNEL); + fm = kzalloc(sizeof(struct tifm_adapter) + + sizeof(struct tifm_dev*) * num_sockets, GFP_KERNEL); if (fm) { fm->cdev.class = &tifm_adapter_class; - spin_lock_init(&fm->lock); + fm->cdev.dev = dev; class_device_initialize(&fm->cdev); + spin_lock_init(&fm->lock); + fm->num_sockets = num_sockets; } return fm; } EXPORT_SYMBOL(tifm_alloc_adapter); -void tifm_free_adapter(struct tifm_adapter *fm) -{ - class_device_put(&fm->cdev); -} -EXPORT_SYMBOL(tifm_free_adapter); - int tifm_add_adapter(struct tifm_adapter *fm) { int rc; @@ -195,31 +192,44 @@ int tifm_add_adapter(struct tifm_adapter *fm) spin_lock(&tifm_adapter_lock); rc = idr_get_new(&tifm_adapter_idr, fm, &fm->id); spin_unlock(&tifm_adapter_lock); - if (!rc) { - snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id); - rc = class_device_add(&fm->cdev); - - if (rc) { - spin_lock(&tifm_adapter_lock); - idr_remove(&tifm_adapter_idr, fm->id); - spin_unlock(&tifm_adapter_lock); - } + if (rc) + return rc; + + snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id); + rc = class_device_add(&fm->cdev); + if (rc) { + spin_lock(&tifm_adapter_lock); + idr_remove(&tifm_adapter_idr, fm->id); + spin_unlock(&tifm_adapter_lock); } + return rc; } EXPORT_SYMBOL(tifm_add_adapter); void tifm_remove_adapter(struct tifm_adapter *fm) { + unsigned int cnt; + flush_workqueue(workqueue); - class_device_del(&fm->cdev); + for (cnt = 0; cnt < fm->num_sockets; ++cnt) { + if (fm->sockets[cnt]) + device_unregister(&fm->sockets[cnt]->dev); + } spin_lock(&tifm_adapter_lock); idr_remove(&tifm_adapter_idr, fm->id); spin_unlock(&tifm_adapter_lock); + class_device_del(&fm->cdev); } EXPORT_SYMBOL(tifm_remove_adapter); +void tifm_free_adapter(struct tifm_adapter *fm) +{ + class_device_put(&fm->cdev); +} +EXPORT_SYMBOL(tifm_free_adapter); + void tifm_free_device(struct device *dev) { struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); @@ -234,7 +244,7 @@ struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm) if (dev) { spin_lock_init(&dev->lock); - dev->dev.parent = fm->dev; + dev->dev.parent = fm->cdev.dev; dev->dev.bus = &tifm_bus_type; dev->dev.release = tifm_free_device; dev->card_event = tifm_dummy_event; diff --git a/include/linux/tifm.h b/include/linux/tifm.h index d9de79275c21..a7bd654e2ee5 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -114,19 +114,23 @@ struct tifm_adapter { unsigned int id; unsigned int num_sockets; struct completion *finish_me; - struct tifm_dev **sockets; + struct work_struct media_switcher; struct class_device cdev; - struct device *dev; - void (*eject)(struct tifm_adapter *fm, struct tifm_dev *sock); + void (*eject)(struct tifm_adapter *fm, + struct tifm_dev *sock); + + struct tifm_dev *sockets[0]; }; -struct tifm_adapter *tifm_alloc_adapter(void); -void tifm_free_device(struct device *dev); -void tifm_free_adapter(struct tifm_adapter *fm); +struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, + struct device *dev); int tifm_add_adapter(struct tifm_adapter *fm); void tifm_remove_adapter(struct tifm_adapter *fm); +void tifm_free_adapter(struct tifm_adapter *fm); + +void tifm_free_device(struct device *dev); struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm); int tifm_register_driver(struct tifm_driver *drv); void tifm_unregister_driver(struct tifm_driver *drv); -- cgit v1.2.3 From 2428a8fe2261e901e058d9ea8b6ed7e1b4268b79 Mon Sep 17 00:00:00 2001 From: Alex Dubov Date: Thu, 12 Apr 2007 16:59:18 +1000 Subject: tifm: move common device 
management tasks from tifm_7xx1 to tifm_core Some details of the device management (create, add, remove) are really belong to the tifm_core, as they are not hardware specific. Signed-off-by: Alex Dubov Signed-off-by: Pierre Ossman --- drivers/misc/tifm_7xx1.c | 94 ++++++++++++++++++------------------------------ drivers/misc/tifm_core.c | 39 +++++++++++++------- include/linux/tifm.h | 4 ++- 3 files changed, 65 insertions(+), 72 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index e051f9da9c5c..356386904a5d 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -136,7 +136,6 @@ static void tifm_7xx1_switch_media(struct work_struct *work) media_switcher); unsigned long flags; unsigned char media_id; - char *card_name = "xx"; int cnt; struct tifm_dev *sock; unsigned int socket_change_set; @@ -153,68 +152,45 @@ static void tifm_7xx1_switch_media(struct work_struct *work) return; } - for (cnt = 0; cnt < fm->num_sockets; cnt++) { - if (!(socket_change_set & (1 << cnt))) - continue; - sock = fm->sockets[cnt]; - if (sock) { - printk(KERN_INFO DRIVER_NAME - ": demand removing card from socket %d\n", - cnt); - fm->sockets[cnt] = NULL; - spin_unlock_irqrestore(&fm->lock, flags); - device_unregister(&sock->dev); - spin_lock_irqsave(&fm->lock, flags); - writel(0x0e00, - tifm_7xx1_sock_addr(fm->addr, cnt) - + SOCK_CONTROL); - } - + for (cnt = 0; cnt < fm->num_sockets; cnt++) { + if (!(socket_change_set & (1 << cnt))) + continue; + sock = fm->sockets[cnt]; + if (sock) { + printk(KERN_INFO + "%s : demand removing card from socket %u:%u\n", + fm->cdev.class_id, fm->id, cnt); + fm->sockets[cnt] = NULL; spin_unlock_irqrestore(&fm->lock, flags); - media_id = tifm_7xx1_toggle_sock_power( - tifm_7xx1_sock_addr(fm->addr, cnt)); - if (media_id) { - sock = tifm_alloc_device(fm); - if (sock) { - sock->addr = tifm_7xx1_sock_addr(fm->addr, - cnt); - sock->type = media_id; - sock->socket_id = cnt; - switch (media_id) { - case 1: - card_name = "xd"; - break; - case 2: - card_name = "ms"; - break; - case 3: - card_name = "sd"; - break; - default: - tifm_free_device(&sock->dev); - spin_lock_irqsave(&fm->lock, flags); - continue; - } - snprintf(sock->dev.bus_id, BUS_ID_SIZE, - "tifm_%s%u:%u", card_name, - fm->id, cnt); - printk(KERN_INFO DRIVER_NAME - ": %s card detected in socket %d\n", - card_name, cnt); - if (!device_register(&sock->dev)) { - spin_lock_irqsave(&fm->lock, flags); - if (!fm->sockets[cnt]) { - fm->sockets[cnt] = sock; - sock = NULL; - } - spin_unlock_irqrestore(&fm->lock, flags); - } - if (sock) - tifm_free_device(&sock->dev); - } + device_unregister(&sock->dev); + spin_lock_irqsave(&fm->lock, flags); + writel(0x0e00, tifm_7xx1_sock_addr(fm->addr, cnt) + + SOCK_CONTROL); + } + + spin_unlock_irqrestore(&fm->lock, flags); + + media_id = tifm_7xx1_toggle_sock_power( + tifm_7xx1_sock_addr(fm->addr, cnt)); + + // tifm_alloc_device will check if media_id is valid + sock = tifm_alloc_device(fm, cnt, media_id); + if (sock) { + sock->addr = tifm_7xx1_sock_addr(fm->addr, cnt); + + if (!device_register(&sock->dev)) { spin_lock_irqsave(&fm->lock, flags); + if (!fm->sockets[cnt]) { + fm->sockets[cnt] = sock; + sock = NULL; + } + spin_unlock_irqrestore(&fm->lock, flags); } + if (sock) + tifm_free_device(&sock->dev); } + spin_lock_irqsave(&fm->lock, flags); + } writel(TIFM_IRQ_FIFOMASK(socket_change_set) | TIFM_IRQ_CARDMASK(socket_change_set), diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index f0cce2a642df..1e591989835d 
100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -232,25 +232,40 @@ EXPORT_SYMBOL(tifm_free_adapter); void tifm_free_device(struct device *dev) { - struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); - kfree(fm_dev); + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + kfree(sock); } EXPORT_SYMBOL(tifm_free_device); -struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm) +struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id, + unsigned char type) { - struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL); + struct tifm_dev *sock = NULL; + + if (!tifm_media_type_name(type, 0)) + return sock; - if (dev) { - spin_lock_init(&dev->lock); + sock = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL); + if (sock) { + spin_lock_init(&sock->lock); + sock->type = type; + sock->socket_id = id; + sock->card_event = tifm_dummy_event; + sock->data_event = tifm_dummy_event; - dev->dev.parent = fm->cdev.dev; - dev->dev.bus = &tifm_bus_type; - dev->dev.release = tifm_free_device; - dev->card_event = tifm_dummy_event; - dev->data_event = tifm_dummy_event; + sock->dev.parent = fm->cdev.dev; + sock->dev.bus = &tifm_bus_type; + sock->dev.dma_mask = fm->cdev.dev->dma_mask; + sock->dev.release = tifm_free_device; + + snprintf(sock->dev.bus_id, BUS_ID_SIZE, + "tifm_%s%u:%u", tifm_media_type_name(type, 2), + fm->id, id); + printk(KERN_INFO DRIVER_NAME + ": %s card detected in socket %u:%u\n", + tifm_media_type_name(type, 0), fm->id, id); } - return dev; + return sock; } EXPORT_SYMBOL(tifm_alloc_device); diff --git a/include/linux/tifm.h b/include/linux/tifm.h index a7bd654e2ee5..82da028d8c07 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -131,7 +131,9 @@ void tifm_remove_adapter(struct tifm_adapter *fm); void tifm_free_adapter(struct tifm_adapter *fm); void tifm_free_device(struct device *dev); -struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm); +struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id, + unsigned char type); + int tifm_register_driver(struct tifm_driver *drv); void tifm_unregister_driver(struct tifm_driver *drv); void tifm_eject(struct tifm_dev *sock); -- cgit v1.2.3 From 72dc9d9619dd4682f4197e7a7f19af22fd6516a7 Mon Sep 17 00:00:00 2001 From: Alex Dubov Date: Thu, 12 Apr 2007 17:05:23 +1000 Subject: tifm_sd: replace command completion state machine with full checking State machine used to to track mmc command state was found to be fragile and unreliable, making many cards unusable. The safer solution is to perform all needed checks at every card event. 
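As a rough illustration of the approach this patch switches to, the sketch below shows the flag-accumulation idea in isolation. It is not the driver's actual tifm_sd_check_status() (which also deals with stop commands and card-busy handling); the flag values mirror the enum added by the patch, but the helper itself is only a simplified model of "re-derive completion from the accumulated bits on every event".

/* Illustrative only: each interrupt event ORs a readiness bit into
 * cmd_flags, and completion is re-evaluated from the full bit set on
 * every event instead of stepping a fragile state machine. */
enum {
	CMD_READY  = 0x0001,	/* command phase finished    */
	FIFO_READY = 0x0002,	/* DMA FIFO drained          */
	BRS_READY  = 0x0004,	/* all blocks received/sent  */
};

static int request_complete(unsigned int cmd_flags, int has_data, int no_dma)
{
	if (!(cmd_flags & CMD_READY))
		return 0;		/* command not yet done      */
	if (has_data) {
		if (!(cmd_flags & BRS_READY))
			return 0;	/* data phase not finished   */
		if (!no_dma && !(cmd_flags & FIFO_READY))
			return 0;	/* DMA FIFO still in flight  */
	}
	return 1;			/* safe to complete request  */
}

Because every card or data event simply sets a bit and re-runs a check like this, a missed or reordered interrupt no longer strands the host in a wrong state; at worst the check returns 0 and the next event completes the request.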
Signed-off-by: Alex Dubov Signed-off-by: Pierre Ossman --- drivers/mmc/tifm_sd.c | 259 ++++++++++++++++++++++++++------------------------ include/linux/tifm.h | 1 + 2 files changed, 138 insertions(+), 122 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c index 52499548abe8..103060f490a6 100644 --- a/drivers/mmc/tifm_sd.c +++ b/drivers/mmc/tifm_sd.c @@ -70,19 +70,14 @@ module_param(fixed_timeout, bool, 0644); #define TIFM_MMCSD_CMD_AC 0x2000 #define TIFM_MMCSD_CMD_ADTC 0x3000 -typedef enum { - IDLE = 0, - CMD, /* main command ended */ - BRS, /* block transfer finished */ - SCMD, /* stop command ended */ - CARD, /* card left busy state */ - FIFO, /* FIFO operation completed (uncertain) */ - READY -} card_state_t; - enum { - FIFO_RDY = 0x0001, /* hardware dependent value */ - CARD_BUSY = 0x0010 + CMD_READY = 0x0001, + FIFO_READY = 0x0002, + BRS_READY = 0x0004, + SCMD_ACTIVE = 0x0008, + SCMD_READY = 0x0010, + CARD_BUSY = 0x0020, + DATA_CARRY = 0x0040 }; struct tifm_sd { @@ -92,7 +87,7 @@ struct tifm_sd { open_drain:1, no_dma:1; unsigned short cmd_flags; - card_state_t state; + unsigned int clk_freq; unsigned int clk_div; unsigned long timeout_jiffies; @@ -234,87 +229,76 @@ static void tifm_sd_fetch_resp(struct mmc_command *cmd, struct tifm_dev *sock) | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x00); } -static void tifm_sd_process_cmd(struct tifm_dev *sock, struct tifm_sd *host, - unsigned int host_status) +static void tifm_sd_check_status(struct tifm_sd *host) { + struct tifm_dev *sock = host->dev; struct mmc_command *cmd = host->req->cmd; -change_state: - switch (host->state) { - case IDLE: + if (cmd->error != MMC_ERR_NONE) + goto finish_request; + + if (!(host->cmd_flags & CMD_READY)) return; - case CMD: - if (host_status & (TIFM_MMCSD_EOC | TIFM_MMCSD_CERR)) { - tifm_sd_fetch_resp(cmd, sock); - if (cmd->data) { - host->state = BRS; - } else { - host->state = READY; - } - goto change_state; - } - break; - case BRS: - if (tifm_sd_transfer_data(sock, host, host_status)) { - if (cmd->data->flags & MMC_DATA_WRITE) { - host->state = CARD; - } else { - if (host->no_dma) { - if (host->req->stop) { - tifm_sd_exec(host, host->req->stop); - host->state = SCMD; - } else { - host->state = READY; - } - } else { - host->state = FIFO; - } - } - goto change_state; - } - break; - case SCMD: - if (host_status & TIFM_MMCSD_EOC) { - tifm_sd_fetch_resp(host->req->stop, sock); - host->state = READY; - goto change_state; + + if (cmd->data) { + if (cmd->data->error != MMC_ERR_NONE) { + if ((host->cmd_flags & SCMD_ACTIVE) + && !(host->cmd_flags & SCMD_READY)) + return; + + goto finish_request; } - break; - case CARD: - dev_dbg(&sock->dev, "waiting for CARD, have %zd blocks\n", - host->written_blocks); - if (!(host->cmd_flags & CARD_BUSY) - && (host->written_blocks == cmd->data->blocks)) { - if (host->no_dma) { - if (host->req->stop) { + + if (!(host->cmd_flags & BRS_READY)) + return; + + if (!(host->no_dma || (host->cmd_flags & FIFO_READY))) + return; + + if (cmd->data->flags & MMC_DATA_WRITE) { + if (host->req->stop) { + if (!(host->cmd_flags & SCMD_ACTIVE)) { + host->cmd_flags |= SCMD_ACTIVE; + writel(TIFM_MMCSD_EOFB + | readl(sock->addr + + SOCK_MMCSD_INT_ENABLE), + sock->addr + + SOCK_MMCSD_INT_ENABLE); tifm_sd_exec(host, host->req->stop); - host->state = SCMD; + return; } else { - host->state = READY; + if (!(host->cmd_flags & SCMD_READY) + || (host->cmd_flags & CARD_BUSY)) + return; + writel((~TIFM_MMCSD_EOFB) + & readl(sock->addr + + SOCK_MMCSD_INT_ENABLE), + 
sock->addr + + SOCK_MMCSD_INT_ENABLE); } } else { - host->state = FIFO; + if (host->cmd_flags & CARD_BUSY) + return; + writel((~TIFM_MMCSD_EOFB) + & readl(sock->addr + + SOCK_MMCSD_INT_ENABLE), + sock->addr + SOCK_MMCSD_INT_ENABLE); } - goto change_state; - } - break; - case FIFO: - if (host->cmd_flags & FIFO_RDY) { - host->cmd_flags &= ~FIFO_RDY; + } else { if (host->req->stop) { - tifm_sd_exec(host, host->req->stop); - host->state = SCMD; - } else { - host->state = READY; + if (!(host->cmd_flags & SCMD_ACTIVE)) { + host->cmd_flags |= SCMD_ACTIVE; + tifm_sd_exec(host, host->req->stop); + return; + } else { + if (!(host->cmd_flags & SCMD_READY)) + return; + } } - goto change_state; } - break; - case READY: - tasklet_schedule(&host->finish_tasklet); - return; } - +finish_request: + tasklet_schedule(&host->finish_tasklet); } /* Called from interrupt handler */ @@ -322,21 +306,25 @@ static void tifm_sd_data_event(struct tifm_dev *sock) { struct tifm_sd *host; unsigned int fifo_status = 0; + struct mmc_data *r_data = NULL; spin_lock(&sock->lock); host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); - fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); - writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS); + dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n", + fifo_status, host->cmd_flags); - host->cmd_flags |= fifo_status & FIFO_RDY; + if (host->req) { + r_data = host->req->cmd->data; - if (host->req) - tifm_sd_process_cmd(sock, host, 0); + if (r_data && (fifo_status & TIFM_FIFO_READY)) { + host->cmd_flags |= FIFO_READY; + tifm_sd_check_status(host); + } + } - dev_dbg(&sock->dev, "fifo_status %x\n", fifo_status); + writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS); spin_unlock(&sock->lock); - } /* Called from interrupt handler */ @@ -344,60 +332,88 @@ static void tifm_sd_card_event(struct tifm_dev *sock) { struct tifm_sd *host; unsigned int host_status = 0; - int error_code = 0; + int cmd_error = MMC_ERR_NONE; + struct mmc_command *cmd = NULL; + unsigned long flags; spin_lock(&sock->lock); host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); + host_status = readl(sock->addr + SOCK_MMCSD_STATUS); + dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n", + host_status, host->cmd_flags); - - host_status = readl(sock->addr + SOCK_MMCSD_STATUS); - writel(host_status, sock->addr + SOCK_MMCSD_STATUS); - - if (!host->req) - goto done; + if (host->req) { + cmd = host->req->cmd; if (host_status & TIFM_MMCSD_ERRMASK) { - if (host_status & (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO)) - error_code = MMC_ERR_TIMEOUT; - else if (host_status - & (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC)) - error_code = MMC_ERR_BADCRC; + writel(host_status & TIFM_MMCSD_ERRMASK, + sock->addr + SOCK_MMCSD_STATUS); + if (host_status & TIFM_MMCSD_CTO) + cmd_error = MMC_ERR_TIMEOUT; + else if (host_status & TIFM_MMCSD_CCRC) + cmd_error = MMC_ERR_BADCRC; + + if (cmd->data) { + if (host_status & TIFM_MMCSD_DTO) + cmd->data->error = MMC_ERR_TIMEOUT; + else if (host_status & TIFM_MMCSD_DCRC) + cmd->data->error = MMC_ERR_BADCRC; + } writel(TIFM_FIFO_INT_SETALL, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); if (host->req->stop) { - if (host->state == SCMD) { - host->req->stop->error = error_code; - } else if (host->state == BRS - || host->state == CARD - || host->state == FIFO) { - host->req->cmd->error = error_code; + if (host->cmd_flags & SCMD_ACTIVE) { + host->req->stop->error = cmd_error; + host->cmd_flags |= SCMD_READY; + } else { + cmd->error = 
cmd_error; + host->cmd_flags |= SCMD_ACTIVE; tifm_sd_exec(host, host->req->stop); - host->state = SCMD; goto done; - } else { - host->req->cmd->error = error_code; } - } else { - host->req->cmd->error = error_code; + } else + cmd->error = cmd_error; + } else { + if (host_status & (TIFM_MMCSD_EOC | TIFM_MMCSD_CERR)) { + if (!(host->cmd_flags & CMD_READY)) { + host->cmd_flags |= CMD_READY; + tifm_sd_fetch_resp(cmd, sock); + } else if (host->cmd_flags & SCMD_ACTIVE) { + host->cmd_flags |= SCMD_READY; + tifm_sd_fetch_resp(host->req->stop, + sock); + } } - host->state = READY; + if (host_status & TIFM_MMCSD_BRS) + host->cmd_flags |= BRS_READY; } - if (host_status & TIFM_MMCSD_CB) - host->cmd_flags |= CARD_BUSY; - if ((host_status & TIFM_MMCSD_EOFB) - && (host->cmd_flags & CARD_BUSY)) { - host->written_blocks++; - host->cmd_flags &= ~CARD_BUSY; + if (host->no_dma && cmd->data) { + if (host_status & TIFM_MMCSD_AE) + writel(host_status & TIFM_MMCSD_AE, + sock->addr + SOCK_MMCSD_STATUS); + + if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF + | TIFM_MMCSD_BRS)) { + local_irq_save(flags); + tifm_sd_transfer_data(sock, host, host_status); + local_irq_restore(flags); + host_status &= ~TIFM_MMCSD_AE; + } } - if (host->req) - tifm_sd_process_cmd(sock, host, host_status); + if (host_status & TIFM_MMCSD_EOFB) + host->cmd_flags &= ~CARD_BUSY; + else if (host_status & TIFM_MMCSD_CB) + host->cmd_flags |= CARD_BUSY; + + tifm_sd_check_status(host); + } done: - dev_dbg(&sock->dev, "host_status %x\n", host_status); + writel(host_status, sock->addr + SOCK_MMCSD_STATUS); spin_unlock(&sock->lock); } @@ -522,7 +538,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) host->req = mrq; mod_timer(&host->timer, jiffies + host->timeout_jiffies); - host->state = CMD; + host->cmd_flags = 0; writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), sock->addr + SOCK_CONTROL); tifm_sd_exec(host, mrq->cmd); @@ -553,7 +569,6 @@ static void tifm_sd_end_cmd(unsigned long data) del_timer(&host->timer); mrq = host->req; host->req = NULL; - host->state = IDLE; if (!mrq) { printk(KERN_ERR DRIVER_NAME ": no request to complete?\n"); diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 82da028d8c07..c8449fcea0c7 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -67,6 +67,7 @@ enum { #define TIFM_SOCK_STATE_POWERED 0x00000080 #define TIFM_FIFO_ENABLE 0x00000001 /* Meaning of this constant is unverified */ +#define TIFM_FIFO_READY 0x00000001 /* Meaning of this constant is unverified */ #define TIFM_FIFO_INT_SETALL 0x0000ffff #define TIFM_FIFO_INTMASK 0x00000005 /* Meaning of this constant is unverified */ -- cgit v1.2.3 From 13cdf48ef15befbd36f8295091b9e0f9bd322963 Mon Sep 17 00:00:00 2001 From: Alex Dubov Date: Thu, 12 Apr 2007 17:05:25 +1000 Subject: tifm_sd: implement software scatter-gather It was found that delays associated with issue and completion of the commands severely limit performance of the new, fast SD cards. To alleviate this issue scatter-gather emulation in software is implemented for both dma and pio transfer modes. Non-block aligned and high memory sg entries are accounted for. 
Signed-off-by: Alex Dubov Signed-off-by: Pierre Ossman --- drivers/mmc/tifm_sd.c | 443 +++++++++++++++++++++++++++++++++----------------- include/linux/tifm.h | 1 + 2 files changed, 297 insertions(+), 147 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c index d20ccfcd911e..8e69514e415d 100644 --- a/drivers/mmc/tifm_sd.c +++ b/drivers/mmc/tifm_sd.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #define DRIVER_NAME "tifm_sd" @@ -69,6 +70,8 @@ module_param(fixed_timeout, bool, 0644); #define TIFM_MMCSD_CMD_AC 0x2000 #define TIFM_MMCSD_CMD_ADTC 0x3000 +#define TIFM_MMCSD_MAX_BLOCK_SIZE 0x0800UL + enum { CMD_READY = 0x0001, FIFO_READY = 0x0002, @@ -95,63 +98,227 @@ struct tifm_sd { struct timer_list timer; struct mmc_request *req; - size_t written_blocks; - size_t buffer_size; - size_t buffer_pos; - + int sg_len; + int sg_pos; + unsigned int block_pos; + struct scatterlist bounce_buf; + unsigned char bounce_buf_data[TIFM_MMCSD_MAX_BLOCK_SIZE]; }; -static char* tifm_sd_data_buffer(struct mmc_data *data) +/* for some reason, host won't respond correctly to readw/writew */ +static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg, + unsigned int off, unsigned int cnt) { - return page_address(data->sg->page) + data->sg->offset; + struct tifm_dev *sock = host->dev; + unsigned char *buf; + unsigned int pos = 0, val; + + buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + off; + if (host->cmd_flags & DATA_CARRY) { + buf[pos++] = host->bounce_buf_data[0]; + host->cmd_flags &= ~DATA_CARRY; + } + + while (pos < cnt) { + val = readl(sock->addr + SOCK_MMCSD_DATA); + buf[pos++] = val & 0xff; + if (pos == cnt) { + host->bounce_buf_data[0] = (val >> 8) & 0xff; + host->cmd_flags |= DATA_CARRY; + break; + } + buf[pos++] = (val >> 8) & 0xff; + } + kunmap_atomic(buf - off, KM_BIO_DST_IRQ); } -static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, - unsigned int host_status) +static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg, + unsigned int off, unsigned int cnt) { - struct mmc_command *cmd = host->req->cmd; - unsigned int t_val = 0, cnt = 0; - char *buffer; - - if (host_status & TIFM_MMCSD_BRS) { - /* in non-dma rx mode BRS fires when fifo is still not empty */ - if (host->no_dma && (cmd->data->flags & MMC_DATA_READ)) { - buffer = tifm_sd_data_buffer(host->req->data); - while (host->buffer_size > host->buffer_pos) { - t_val = readl(sock->addr + SOCK_MMCSD_DATA); - buffer[host->buffer_pos++] = t_val & 0xff; - buffer[host->buffer_pos++] = - (t_val >> 8) & 0xff; - } + struct tifm_dev *sock = host->dev; + unsigned char *buf; + unsigned int pos = 0, val; + + buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + off; + if (host->cmd_flags & DATA_CARRY) { + val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00); + writel(val, sock->addr + SOCK_MMCSD_DATA); + host->cmd_flags &= ~DATA_CARRY; + } + + while (pos < cnt) { + val = buf[pos++]; + if (pos == cnt) { + host->bounce_buf_data[0] = val & 0xff; + host->cmd_flags |= DATA_CARRY; + break; } - return 1; - } else if (host->no_dma) { - buffer = tifm_sd_data_buffer(host->req->data); - if ((cmd->data->flags & MMC_DATA_READ) && - (host_status & TIFM_MMCSD_AF)) { - for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { - t_val = readl(sock->addr + SOCK_MMCSD_DATA); - if (host->buffer_size > host->buffer_pos) { - buffer[host->buffer_pos++] = - t_val & 0xff; - buffer[host->buffer_pos++] = - (t_val >> 8) & 0xff; - } - } - } else if ((cmd->data->flags & MMC_DATA_WRITE) - && 
(host_status & TIFM_MMCSD_AE)) { - for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { - if (host->buffer_size > host->buffer_pos) { - t_val = buffer[host->buffer_pos++] - & 0x00ff; - t_val |= ((buffer[host->buffer_pos++]) - << 8) & 0xff00; - writel(t_val, - sock->addr + SOCK_MMCSD_DATA); - } + val |= (buf[pos++] << 8) & 0xff00; + writel(val, sock->addr + SOCK_MMCSD_DATA); + } + kunmap_atomic(buf - off, KM_BIO_SRC_IRQ); +} + +static void tifm_sd_transfer_data(struct tifm_sd *host) +{ + struct mmc_data *r_data = host->req->cmd->data; + struct scatterlist *sg = r_data->sg; + unsigned int off, cnt, t_size = TIFM_MMCSD_FIFO_SIZE * 2; + unsigned int p_off, p_cnt; + struct page *pg; + + if (host->sg_pos == host->sg_len) + return; + while (t_size) { + cnt = sg[host->sg_pos].length - host->block_pos; + if (!cnt) { + host->block_pos = 0; + host->sg_pos++; + if (host->sg_pos == host->sg_len) { + if ((r_data->flags & MMC_DATA_WRITE) + && DATA_CARRY) + writel(host->bounce_buf_data[0], + host->dev->addr + + SOCK_MMCSD_DATA); + + return; } + cnt = sg[host->sg_pos].length; } + off = sg[host->sg_pos].offset + host->block_pos; + + pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT); + p_off = offset_in_page(off); + p_cnt = PAGE_SIZE - p_off; + p_cnt = min(p_cnt, cnt); + p_cnt = min(p_cnt, t_size); + + if (r_data->flags & MMC_DATA_READ) + tifm_sd_read_fifo(host, pg, p_off, p_cnt); + else if (r_data->flags & MMC_DATA_WRITE) + tifm_sd_write_fifo(host, pg, p_off, p_cnt); + + t_size -= p_cnt; + host->block_pos += p_cnt; + } +} + +static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off, + struct page *src, unsigned int src_off, + unsigned int count) +{ + unsigned char *src_buf = kmap_atomic(src, KM_BIO_SRC_IRQ) + src_off; + unsigned char *dst_buf = kmap_atomic(dst, KM_BIO_DST_IRQ) + dst_off; + + memcpy(dst_buf, src_buf, count); + + kunmap_atomic(dst_buf - dst_off, KM_BIO_DST_IRQ); + kunmap_atomic(src_buf - src_off, KM_BIO_SRC_IRQ); +} + +static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data) +{ + struct scatterlist *sg = r_data->sg; + unsigned int t_size = r_data->blksz; + unsigned int off, cnt; + unsigned int p_off, p_cnt; + struct page *pg; + + dev_dbg(&host->dev->dev, "bouncing block\n"); + while (t_size) { + cnt = sg[host->sg_pos].length - host->block_pos; + if (!cnt) { + host->block_pos = 0; + host->sg_pos++; + if (host->sg_pos == host->sg_len) + return; + cnt = sg[host->sg_pos].length; + } + off = sg[host->sg_pos].offset + host->block_pos; + + pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT); + p_off = offset_in_page(off); + p_cnt = PAGE_SIZE - p_off; + p_cnt = min(p_cnt, cnt); + p_cnt = min(p_cnt, t_size); + + if (r_data->flags & MMC_DATA_WRITE) + tifm_sd_copy_page(host->bounce_buf.page, + r_data->blksz - t_size, + pg, p_off, p_cnt); + else if (r_data->flags & MMC_DATA_READ) + tifm_sd_copy_page(pg, p_off, host->bounce_buf.page, + r_data->blksz - t_size, p_cnt); + + t_size -= p_cnt; + host->block_pos += p_cnt; + } +} + +int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data) +{ + struct tifm_dev *sock = host->dev; + unsigned int t_size = TIFM_DMA_TSIZE * r_data->blksz; + unsigned int dma_len, dma_blk_cnt, dma_off; + struct scatterlist *sg = NULL; + unsigned long flags; + + if (host->sg_pos == host->sg_len) + return 1; + + if (host->cmd_flags & DATA_CARRY) { + host->cmd_flags &= ~DATA_CARRY; + local_irq_save(flags); + tifm_sd_bounce_block(host, r_data); + local_irq_restore(flags); + if (host->sg_pos == host->sg_len) + return 1; + } + + dma_len 
= sg_dma_len(&r_data->sg[host->sg_pos]) - host->block_pos; + if (!dma_len) { + host->block_pos = 0; + host->sg_pos++; + if (host->sg_pos == host->sg_len) + return 1; + dma_len = sg_dma_len(&r_data->sg[host->sg_pos]); + } + + if (dma_len < t_size) { + dma_blk_cnt = dma_len / r_data->blksz; + dma_off = host->block_pos; + host->block_pos += dma_blk_cnt * r_data->blksz; + } else { + dma_blk_cnt = TIFM_DMA_TSIZE; + dma_off = host->block_pos; + host->block_pos += t_size; } + + if (dma_blk_cnt) + sg = &r_data->sg[host->sg_pos]; + else if (dma_len) { + if (r_data->flags & MMC_DATA_WRITE) { + local_irq_save(flags); + tifm_sd_bounce_block(host, r_data); + local_irq_restore(flags); + } else + host->cmd_flags |= DATA_CARRY; + + sg = &host->bounce_buf; + dma_off = 0; + dma_blk_cnt = 1; + } else + return 1; + + dev_dbg(&sock->dev, "setting dma for %d blocks\n", dma_blk_cnt); + writel(sg_dma_address(sg) + dma_off, sock->addr + SOCK_DMA_ADDRESS); + if (r_data->flags & MMC_DATA_WRITE) + writel((dma_blk_cnt << 8) | TIFM_DMA_TX | TIFM_DMA_EN, + sock->addr + SOCK_DMA_CONTROL); + else + writel((dma_blk_cnt << 8) | TIFM_DMA_EN, + sock->addr + SOCK_DMA_CONTROL); + return 0; } @@ -317,8 +484,10 @@ static void tifm_sd_data_event(struct tifm_dev *sock) r_data = host->req->cmd->data; if (r_data && (fifo_status & TIFM_FIFO_READY)) { - host->cmd_flags |= FIFO_READY; - tifm_sd_check_status(host); + if (tifm_sd_set_dma_data(host, r_data)) { + host->cmd_flags |= FIFO_READY; + tifm_sd_check_status(host); + } } } @@ -398,7 +567,7 @@ static void tifm_sd_card_event(struct tifm_dev *sock) if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF | TIFM_MMCSD_BRS)) { local_irq_save(flags); - tifm_sd_transfer_data(sock, host, host_status); + tifm_sd_transfer_data(host); local_irq_restore(flags); host_status &= ~TIFM_MMCSD_AE; } @@ -416,38 +585,6 @@ done: spin_unlock(&sock->lock); } -static void tifm_sd_prepare_data(struct tifm_sd *host, struct mmc_command *cmd) -{ - struct tifm_dev *sock = host->dev; - unsigned int dest_cnt; - - /* DMA style IO */ - dev_dbg(&sock->dev, "setting dma for %d blocks\n", - cmd->data->blocks); - writel(TIFM_FIFO_INT_SETALL, - sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); - writel(ilog2(cmd->data->blksz) - 2, - sock->addr + SOCK_FIFO_PAGE_SIZE); - writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); - writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); - - dest_cnt = (cmd->data->blocks) << 8; - - writel(sg_dma_address(cmd->data->sg), sock->addr + SOCK_DMA_ADDRESS); - - writel(cmd->data->blocks - 1, sock->addr + SOCK_MMCSD_NUM_BLOCKS); - writel(cmd->data->blksz - 1, sock->addr + SOCK_MMCSD_BLOCK_LEN); - - if (cmd->data->flags & MMC_DATA_WRITE) { - writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); - writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN, - sock->addr + SOCK_DMA_CONTROL); - } else { - writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); - writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); - } -} - static void tifm_sd_set_data_timeout(struct tifm_sd *host, struct mmc_data *data) { @@ -481,7 +618,6 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) struct tifm_sd *host = mmc_priv(mmc); struct tifm_dev *sock = host->dev; unsigned long flags; - int sg_count = 0; struct mmc_data *r_data = mrq->cmd->data; spin_lock_irqsave(&sock->lock, flags); @@ -496,13 +632,19 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) goto err_out; } + host->cmd_flags = 0; + host->block_pos = 0; + host->sg_pos = 0; + if 
(r_data) { tifm_sd_set_data_timeout(host, r_data); - if (host->no_dma) { - host->buffer_size = mrq->cmd->data->blocks - * mrq->cmd->data->blksz; + if ((r_data->flags & MMC_DATA_WRITE) && !mrq->stop) + writel(TIFM_MMCSD_EOFB + | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), + sock->addr + SOCK_MMCSD_INT_ENABLE); + if (host->no_dma) { writel(TIFM_MMCSD_BUFINT | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), sock->addr + SOCK_MMCSD_INT_ENABLE); @@ -510,34 +652,64 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) | (TIFM_MMCSD_FIFO_SIZE - 1), sock->addr + SOCK_MMCSD_BUFFER_CONFIG); - host->written_blocks = 0; - host->cmd_flags &= ~CARD_BUSY; - host->buffer_pos = 0; - writel(r_data->blocks - 1, - sock->addr + SOCK_MMCSD_NUM_BLOCKS); - writel(r_data->blksz - 1, - sock->addr + SOCK_MMCSD_BLOCK_LEN); + host->sg_len = r_data->sg_len; } else { - sg_count = tifm_map_sg(sock, r_data->sg, r_data->sg_len, - mrq->cmd->flags & MMC_DATA_WRITE - ? PCI_DMA_TODEVICE - : PCI_DMA_FROMDEVICE); - if (sg_count != 1) { - printk(KERN_ERR DRIVER_NAME - ": scatterlist map failed\n"); + sg_init_one(&host->bounce_buf, host->bounce_buf_data, + r_data->blksz); + + if(1 != tifm_map_sg(sock, &host->bounce_buf, 1, + r_data->flags & MMC_DATA_WRITE + ? PCI_DMA_TODEVICE + : PCI_DMA_FROMDEVICE)) { + printk(KERN_ERR "%s : scatterlist map failed\n", + sock->dev.bus_id); + spin_unlock_irqrestore(&sock->lock, flags); + goto err_out; + } + host->sg_len = tifm_map_sg(sock, r_data->sg, + r_data->sg_len, + r_data->flags + & MMC_DATA_WRITE + ? PCI_DMA_TODEVICE + : PCI_DMA_FROMDEVICE); + if (host->sg_len < 1) { + printk(KERN_ERR "%s : scatterlist map failed\n", + sock->dev.bus_id); + tifm_unmap_sg(sock, &host->bounce_buf, 1, + r_data->flags & MMC_DATA_WRITE + ? PCI_DMA_TODEVICE + : PCI_DMA_FROMDEVICE); spin_unlock_irqrestore(&sock->lock, flags); goto err_out; } - host->written_blocks = 0; - host->cmd_flags &= ~CARD_BUSY; - tifm_sd_prepare_data(host, mrq->cmd); + writel(TIFM_FIFO_INT_SETALL, + sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); + writel(ilog2(r_data->blksz) - 2, + sock->addr + SOCK_FIFO_PAGE_SIZE); + writel(TIFM_FIFO_ENABLE, + sock->addr + SOCK_FIFO_CONTROL); + writel(TIFM_FIFO_INTMASK, + sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); + + if (r_data->flags & MMC_DATA_WRITE) + writel(TIFM_MMCSD_TXDE, + sock->addr + SOCK_MMCSD_BUFFER_CONFIG); + else + writel(TIFM_MMCSD_RXDE, + sock->addr + SOCK_MMCSD_BUFFER_CONFIG); + + tifm_sd_set_dma_data(host, r_data); } + + writel(r_data->blocks - 1, + sock->addr + SOCK_MMCSD_NUM_BLOCKS); + writel(r_data->blksz - 1, + sock->addr + SOCK_MMCSD_BLOCK_LEN); } host->req = mrq; mod_timer(&host->timer, jiffies + host->timeout_jiffies); - host->cmd_flags = 0; writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), sock->addr + SOCK_CONTROL); tifm_sd_exec(host, mrq->cmd); @@ -545,11 +717,6 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) return; err_out: - if (sg_count > 0) - tifm_unmap_sg(sock, r_data->sg, r_data->sg_len, - (r_data->flags & MMC_DATA_WRITE) - ? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); - mrq->cmd->error = MMC_ERR_TIMEOUT; mmc_request_done(mmc, mrq); } @@ -578,40 +745,23 @@ static void tifm_sd_end_cmd(unsigned long data) r_data = mrq->cmd->data; if (r_data) { if (host->no_dma) { - writel((~TIFM_MMCSD_BUFINT) & - readl(sock->addr + SOCK_MMCSD_INT_ENABLE), - sock->addr + SOCK_MMCSD_INT_ENABLE); - - if (r_data->flags & MMC_DATA_WRITE) { - r_data->bytes_xfered = host->written_blocks - * r_data->blksz; - } else { - r_data->bytes_xfered = r_data->blocks - - readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - - 1; - r_data->bytes_xfered *= r_data->blksz; - r_data->bytes_xfered += r_data->blksz - - readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) - + 1; - } - host->buffer_pos = 0; - host->buffer_size = 0; + writel((~TIFM_MMCSD_BUFINT) + & readl(sock->addr + SOCK_MMCSD_INT_ENABLE), + sock->addr + SOCK_MMCSD_INT_ENABLE); } else { - if (r_data->flags & MMC_DATA_WRITE) { - r_data->bytes_xfered = host->written_blocks - * r_data->blksz; - } else { - r_data->bytes_xfered = r_data->blocks - - readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; - r_data->bytes_xfered *= r_data->blksz; - r_data->bytes_xfered += r_data->blksz - - readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) - + 1; - } + tifm_unmap_sg(sock, &host->bounce_buf, 1, + (r_data->flags & MMC_DATA_WRITE) + ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); tifm_unmap_sg(sock, r_data->sg, r_data->sg_len, (r_data->flags & MMC_DATA_WRITE) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); } + + r_data->bytes_xfered = r_data->blocks + - readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; + r_data->bytes_xfered *= r_data->blksz; + r_data->bytes_xfered += r_data->blksz + - readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; } writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), @@ -810,15 +960,14 @@ static int tifm_sd_probe(struct tifm_dev *sock) mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; mmc->f_min = 20000000 / 60; mmc->f_max = 24000000; - mmc->max_hw_segs = 1; - mmc->max_phys_segs = 1; - // limited by DMA counter - it's safer to stick with - // block counter has 11 bits though - mmc->max_blk_count = 256; - // 2k maximum hw block length - mmc->max_blk_size = 2048; - mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; - mmc->max_seg_size = mmc->max_req_size; + + mmc->max_blk_count = 2048; + mmc->max_hw_segs = mmc->max_blk_count; + mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE); + mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size; + mmc->max_req_size = mmc->max_seg_size; + mmc->max_phys_segs = mmc->max_hw_segs; + sock->card_event = tifm_sd_card_event; sock->data_event = tifm_sd_data_event; rc = tifm_sd_initialize_host(host); diff --git a/include/linux/tifm.h b/include/linux/tifm.h index c8449fcea0c7..7ccad0795466 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -74,6 +74,7 @@ enum { #define TIFM_DMA_RESET 0x00000002 /* Meaning of this constant is unverified */ #define TIFM_DMA_TX 0x00008000 /* Meaning of this constant is unverified */ #define TIFM_DMA_EN 0x00000001 /* Meaning of this constant is unverified */ +#define TIFM_DMA_TSIZE 0x0000007f #define TIFM_TYPE_XD 1 #define TIFM_TYPE_MS 2 -- cgit v1.2.3 From 91f8d0118a0e1f25f809f3fde5a7616a1eaabc2b Mon Sep 17 00:00:00 2001 From: Alex Dubov Date: Thu, 12 Apr 2007 17:05:26 +1000 Subject: tifm: layout fixes, small changes to comments and printfs Cosmetic changes to the code. 
Signed-off-by: Alex Dubov Signed-off-by: Pierre Ossman --- drivers/misc/tifm_7xx1.c | 10 +++---- drivers/misc/tifm_core.c | 24 ++++++++--------- drivers/mmc/tifm_sd.c | 49 +++++++++++++++++++++------------- include/linux/tifm.h | 69 ++++++++++++++++++++++++------------------------ 4 files changed, 80 insertions(+), 72 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index eafa5575f312..9dcff14e752c 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -10,8 +10,6 @@ */ #include -#include -#include #define DRIVER_NAME "tifm_7xx1" #define DRIVER_VERSION "0.8" @@ -41,8 +39,7 @@ static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) { struct tifm_adapter *fm = dev_id; struct tifm_dev *sock; - unsigned int irq_status; - unsigned int cnt; + unsigned int irq_status, cnt; spin_lock(&fm->lock); irq_status = readl(fm->addr + FM_INTERRUPT_STATUS); @@ -134,11 +131,10 @@ static void tifm_7xx1_switch_media(struct work_struct *work) { struct tifm_adapter *fm = container_of(work, struct tifm_adapter, media_switcher); + struct tifm_dev *sock; unsigned long flags; unsigned char media_id; - int cnt; - struct tifm_dev *sock; - unsigned int socket_change_set; + unsigned int socket_change_set, cnt; spin_lock_irqsave(&fm->lock, flags); socket_change_set = fm->socket_change_set; diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 70220beb3e04..d195fb088f4a 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -115,23 +115,23 @@ static int tifm_device_remove(struct device *dev) static int tifm_device_suspend(struct device *dev, pm_message_t state) { - struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, driver); if (dev->driver && drv->suspend) - return drv->suspend(fm_dev, state); + return drv->suspend(sock, state); return 0; } static int tifm_device_resume(struct device *dev) { - struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, driver); if (dev->driver && drv->resume) - return drv->resume(fm_dev); + return drv->resume(sock); return 0; } @@ -155,14 +155,14 @@ static struct device_attribute tifm_dev_attrs[] = { }; static struct bus_type tifm_bus_type = { - .name = "tifm", - .dev_attrs = tifm_dev_attrs, - .match = tifm_bus_match, - .uevent = tifm_uevent, - .probe = tifm_device_probe, - .remove = tifm_device_remove, - .suspend = tifm_device_suspend, - .resume = tifm_device_resume + .name = "tifm", + .dev_attrs = tifm_dev_attrs, + .match = tifm_bus_match, + .uevent = tifm_uevent, + .probe = tifm_device_probe, + .remove = tifm_device_remove, + .suspend = tifm_device_suspend, + .resume = tifm_device_resume }; static void tifm_free(struct class_device *cdev) diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c index 8e69514e415d..f692a2ec09c5 100644 --- a/drivers/mmc/tifm_sd.c +++ b/drivers/mmc/tifm_sd.c @@ -7,6 +7,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * + * Special thanks to Brad Campbell for extensive testing of this driver. 
+ * */ @@ -39,6 +41,7 @@ module_param(fixed_timeout, bool, 0644); #define TIFM_MMCSD_ERRMASK 0x01e0 /* set bits: CCRC, CTO, DCRC, DTO */ #define TIFM_MMCSD_EOC 0x0001 /* end of command phase */ +#define TIFM_MMCSD_CD 0x0002 /* card detect */ #define TIFM_MMCSD_CB 0x0004 /* card enter busy state */ #define TIFM_MMCSD_BRS 0x0008 /* block received/sent */ #define TIFM_MMCSD_EOFB 0x0010 /* card exit busy state */ @@ -48,6 +51,8 @@ module_param(fixed_timeout, bool, 0644); #define TIFM_MMCSD_CCRC 0x0100 /* command crc error */ #define TIFM_MMCSD_AF 0x0400 /* fifo almost full */ #define TIFM_MMCSD_AE 0x0800 /* fifo almost empty */ +#define TIFM_MMCSD_OCRB 0x1000 /* OCR busy */ +#define TIFM_MMCSD_CIRQ 0x2000 /* card irq (cmd40/sdio) */ #define TIFM_MMCSD_CERR 0x4000 /* card status error */ #define TIFM_MMCSD_ODTO 0x0040 /* open drain / extended timeout */ @@ -83,16 +88,16 @@ enum { }; struct tifm_sd { - struct tifm_dev *dev; + struct tifm_dev *dev; - unsigned short eject:1, - open_drain:1, - no_dma:1; - unsigned short cmd_flags; + unsigned short eject:1, + open_drain:1, + no_dma:1; + unsigned short cmd_flags; - unsigned int clk_freq; - unsigned int clk_div; - unsigned long timeout_jiffies; + unsigned int clk_freq; + unsigned int clk_div; + unsigned long timeout_jiffies; struct tasklet_struct finish_tasklet; struct timer_list timer; @@ -627,7 +632,8 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) } if (host->req) { - printk(KERN_ERR DRIVER_NAME ": unfinished request detected\n"); + printk(KERN_ERR "%s : unfinished request detected\n", + sock->dev.bus_id); spin_unlock_irqrestore(&sock->lock, flags); goto err_out; } @@ -737,7 +743,8 @@ static void tifm_sd_end_cmd(unsigned long data) host->req = NULL; if (!mrq) { - printk(KERN_ERR DRIVER_NAME ": no request to complete?\n"); + printk(KERN_ERR " %s : no request to complete?\n", + sock->dev.bus_id); spin_unlock_irqrestore(&sock->lock, flags); return; } @@ -775,8 +782,10 @@ static void tifm_sd_abort(unsigned long data) { struct tifm_sd *host = (struct tifm_sd*)data; - printk(KERN_ERR DRIVER_NAME - ": card failed to respond for a long period of time\n"); + printk(KERN_ERR + "%s : card failed to respond for a long period of time " + "(%x, %x)\n", + host->dev->dev.bus_id, host->req->cmd->opcode, host->cmd_flags); tifm_eject(host->dev); } @@ -790,8 +799,11 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) spin_lock_irqsave(&sock->lock, flags); - dev_dbg(&sock->dev, "Setting bus width %d, power %d\n", ios->bus_width, - ios->power_mode); + dev_dbg(&sock->dev, "ios: clock = %u, vdd = %x, bus_mode = %x, " + "chip_select = %x, power_mode = %x, bus_width = %x\n", + ios->clock, ios->vdd, ios->bus_mode, ios->chip_select, + ios->power_mode, ios->bus_width); + if (ios->bus_width == MMC_BUS_WIDTH_4) { writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), sock->addr + SOCK_MMCSD_CONFIG); @@ -937,7 +949,8 @@ static int tifm_sd_probe(struct tifm_dev *sock) if (!(TIFM_SOCK_STATE_OCCUPIED & readl(sock->addr + SOCK_PRESENT_STATE))) { - printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n"); + printk(KERN_WARNING "%s : card gone, unexpectedly\n", + sock->dev.bus_id); return rc; } @@ -974,11 +987,9 @@ static int tifm_sd_probe(struct tifm_dev *sock) if (!rc) rc = mmc_add_host(mmc); - if (rc) - goto out_free_mmc; + if (!rc) + return 0; - return 0; -out_free_mmc: mmc_free_host(mmc); return rc; } diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 7ccad0795466..2a196982601f 100644 --- 
a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -14,16 +14,16 @@ #include #include -#include #include #include -#include +#include /* Host registers (relative to pci base address): */ enum { FM_SET_INTERRUPT_ENABLE = 0x008, FM_CLEAR_INTERRUPT_ENABLE = 0x00c, - FM_INTERRUPT_STATUS = 0x014 }; + FM_INTERRUPT_STATUS = 0x014 +}; /* Socket registers (relative to socket base address): */ enum { @@ -58,7 +58,8 @@ enum { SOCK_MS_DATA = 0x188, SOCK_MS_STATUS = 0x18c, SOCK_MS_SYSTEM = 0x190, - SOCK_FIFO_ACCESS = 0x200 }; + SOCK_FIFO_ACCESS = 0x200 +}; #define TIFM_CTRL_LED 0x00000040 #define TIFM_CTRL_FAST_CLK 0x00000100 @@ -66,14 +67,14 @@ enum { #define TIFM_SOCK_STATE_OCCUPIED 0x00000008 #define TIFM_SOCK_STATE_POWERED 0x00000080 -#define TIFM_FIFO_ENABLE 0x00000001 /* Meaning of this constant is unverified */ -#define TIFM_FIFO_READY 0x00000001 /* Meaning of this constant is unverified */ +#define TIFM_FIFO_ENABLE 0x00000001 +#define TIFM_FIFO_READY 0x00000001 #define TIFM_FIFO_INT_SETALL 0x0000ffff -#define TIFM_FIFO_INTMASK 0x00000005 /* Meaning of this constant is unverified */ +#define TIFM_FIFO_INTMASK 0x00000005 -#define TIFM_DMA_RESET 0x00000002 /* Meaning of this constant is unverified */ -#define TIFM_DMA_TX 0x00008000 /* Meaning of this constant is unverified */ -#define TIFM_DMA_EN 0x00000001 /* Meaning of this constant is unverified */ +#define TIFM_DMA_RESET 0x00000002 +#define TIFM_DMA_TX 0x00008000 +#define TIFM_DMA_EN 0x00000001 #define TIFM_DMA_TSIZE 0x0000007f #define TIFM_TYPE_XD 1 @@ -86,44 +87,44 @@ struct tifm_device_id { struct tifm_driver; struct tifm_dev { - char __iomem *addr; - spinlock_t lock; - unsigned char type; - unsigned int socket_id; + char __iomem *addr; + spinlock_t lock; + unsigned char type; + unsigned int socket_id; void (*card_event)(struct tifm_dev *sock); void (*data_event)(struct tifm_dev *sock); - struct device dev; + struct device dev; }; struct tifm_driver { struct tifm_device_id *id_table; - int (*probe)(struct tifm_dev *dev); - void (*remove)(struct tifm_dev *dev); - int (*suspend)(struct tifm_dev *dev, - pm_message_t state); - int (*resume)(struct tifm_dev *dev); + int (*probe)(struct tifm_dev *dev); + void (*remove)(struct tifm_dev *dev); + int (*suspend)(struct tifm_dev *dev, + pm_message_t state); + int (*resume)(struct tifm_dev *dev); - struct device_driver driver; + struct device_driver driver; }; struct tifm_adapter { - char __iomem *addr; - spinlock_t lock; - unsigned int irq_status; - unsigned int socket_change_set; - unsigned int id; - unsigned int num_sockets; - struct completion *finish_me; + char __iomem *addr; + spinlock_t lock; + unsigned int irq_status; + unsigned int socket_change_set; + unsigned int id; + unsigned int num_sockets; + struct completion *finish_me; - struct work_struct media_switcher; - struct class_device cdev; + struct work_struct media_switcher; + struct class_device cdev; - void (*eject)(struct tifm_adapter *fm, - struct tifm_dev *sock); + void (*eject)(struct tifm_adapter *fm, + struct tifm_dev *sock); - struct tifm_dev *sockets[0]; + struct tifm_dev *sockets[0]; }; struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, @@ -147,7 +148,7 @@ void tifm_queue_work(struct work_struct *work); static inline void *tifm_get_drvdata(struct tifm_dev *dev) { - return dev_get_drvdata(&dev->dev); + return dev_get_drvdata(&dev->dev); } static inline void tifm_set_drvdata(struct tifm_dev *dev, void *data) -- cgit v1.2.3 From 85a18ad93ec66888d85758630019b10a84257f3c Mon Sep 17 00:00:00 2001 From: Pierre Ossman 
Date: Sat, 17 Feb 2007 22:15:27 +0100 Subject: mmc: MMC sector based cards Support for MMC 4.2 sector based cards. This tweaks the init a bit and reads a new field out of the EXT_CSD. Signed-off-by: Pierre Ossman --- drivers/mmc/mmc.c | 25 ++++++++++++++++++++++--- drivers/mmc/mmc_block.c | 19 ++++++++++++++----- include/linux/mmc/card.h | 1 + include/linux/mmc/protocol.h | 1 + 4 files changed, 38 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index 2ba46273496b..f772df93a398 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c @@ -1106,11 +1106,29 @@ static void mmc_process_ext_csds(struct mmc_host *host) mmc_wait_for_req(host, &mrq); if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { - printk("%s: unable to read EXT_CSD, performance " - "might suffer.\n", mmc_hostname(card->host)); + if (card->csd.capacity == (4096 * 512)) { + printk(KERN_ERR "%s: unable to read EXT_CSD " + "on a possible high capacity card. " + "Card will be ignored.\n", + mmc_hostname(card->host)); + mmc_card_set_dead(card); + } else { + printk(KERN_WARNING "%s: unable to read " + "EXT_CSD, performance might " + "suffer.\n", + mmc_hostname(card->host)); + } continue; } + card->ext_csd.sectors = + ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | + ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | + ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | + ext_csd[EXT_CSD_SEC_CNT + 3] << 24; + if (card->ext_csd.sectors) + mmc_card_set_blockaddr(card); + switch (ext_csd[EXT_CSD_CARD_TYPE]) { case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: card->ext_csd.hs_max_dtr = 52000000; @@ -1499,7 +1517,8 @@ static void mmc_setup(struct mmc_host *host) mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL); } } else { - mmc_send_op_cond(host, host->ocr, NULL); + /* The extra bit indicates that we support high capacity */ + mmc_send_op_cond(host, host->ocr | (1 << 30), NULL); } mmc_discover_cards(host); diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c index 95b0da6abe87..63fbde8756ac 100644 --- a/drivers/mmc/mmc_block.c +++ b/drivers/mmc/mmc_block.c @@ -491,11 +491,20 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits); - /* - * The CSD capacity field is in units of read_blkbits. - * set_capacity takes units of 512 bytes. - */ - set_capacity(md->disk, card->csd.capacity << (card->csd.read_blkbits - 9)); + if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { + /* + * The EXT_CSD sector count is in number or 512 byte + * sectors. + */ + set_capacity(md->disk, card->ext_csd.sectors); + } else { + /* + * The CSD capacity field is in units of read_blkbits. + * set_capacity takes units of 512 bytes. 
+ */ + set_capacity(md->disk, + card->csd.capacity << (card->csd.read_blkbits - 9)); + } return md; err_putdisk: diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index e45712acfac5..5d9896c260a2 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -41,6 +41,7 @@ struct mmc_csd { struct mmc_ext_csd { unsigned int hs_max_dtr; + unsigned int sectors; }; struct sd_scr { diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h index c90b6768329d..d740ab94fa25 100644 --- a/include/linux/mmc/protocol.h +++ b/include/linux/mmc/protocol.h @@ -284,6 +284,7 @@ struct _mmc_csd { #define EXT_CSD_BUS_WIDTH 183 /* R/W */ #define EXT_CSD_HS_TIMING 185 /* R/W */ #define EXT_CSD_CARD_TYPE 196 /* RO */ +#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ /* * EXT_CSD field definitions -- cgit v1.2.3 From 9c2c0af950345e63ef86f28eca44333a1e1e709b Mon Sep 17 00:00:00 2001 From: Pierre Ossman Date: Tue, 26 Dec 2006 15:25:58 +0100 Subject: mmc: add type field to cards Split out the type of card into its own field as it hardly qualifies as a state. Signed-off-by: Pierre Ossman --- drivers/mmc/mmc.c | 3 ++- include/linux/mmc/card.h | 15 +++++++++------ 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index f772df93a398..ccd3037da249 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c @@ -980,7 +980,7 @@ static void mmc_discover_cards(struct mmc_host *host) card->state &= ~MMC_STATE_DEAD; if (host->mode == MMC_MODE_SD) { - mmc_card_set_sd(card); + card->type = MMC_TYPE_SD; cmd.opcode = SD_SEND_RELATIVE_ADDR; cmd.arg = 0; @@ -1003,6 +1003,7 @@ static void mmc_discover_cards(struct mmc_host *host) } } } else { + card->type = MMC_TYPE_MMC; cmd.opcode = MMC_SET_RELATIVE_ADDR; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 5d9896c260a2..1ca50542ce19 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -65,14 +65,16 @@ struct mmc_card { struct mmc_host *host; /* the host this device belongs to */ struct device dev; /* the device */ unsigned int rca; /* relative card address of device */ + unsigned int type; /* card type */ +#define MMC_TYPE_MMC 0 /* MMC card */ +#define MMC_TYPE_SD 1 /* SD card */ unsigned int state; /* (our) card state */ #define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ #define MMC_STATE_DEAD (1<<1) /* device no longer in stack */ #define MMC_STATE_BAD (1<<2) /* unrecognised device */ -#define MMC_STATE_SDCARD (1<<3) /* is an SD card */ -#define MMC_STATE_READONLY (1<<4) /* card is read-only */ -#define MMC_STATE_HIGHSPEED (1<<5) /* card is in high speed mode */ -#define MMC_STATE_BLOCKADDR (1<<6) /* card uses block-addressing */ +#define MMC_STATE_READONLY (1<<3) /* card is read-only */ +#define MMC_STATE_HIGHSPEED (1<<4) /* card is in high speed mode */ +#define MMC_STATE_BLOCKADDR (1<<5) /* card uses block-addressing */ u32 raw_cid[4]; /* raw card CID */ u32 raw_csd[4]; /* raw card CSD */ u32 raw_scr[2]; /* raw card SCR */ @@ -83,10 +85,12 @@ struct mmc_card { struct sd_switch_caps sw_caps; /* switch (CMD6) caps */ }; +#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) +#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) + #define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) #define mmc_card_dead(c) ((c)->state & MMC_STATE_DEAD) #define mmc_card_bad(c) ((c)->state & MMC_STATE_BAD) -#define mmc_card_sd(c) ((c)->state & MMC_STATE_SDCARD) #define 
mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) #define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) @@ -94,7 +98,6 @@ struct mmc_card { #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) #define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD) #define mmc_card_set_bad(c) ((c)->state |= MMC_STATE_BAD) -#define mmc_card_set_sd(c) ((c)->state |= MMC_STATE_SDCARD) #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) -- cgit v1.2.3 From f74d132cec60b686bce1f284822c1a496700bd3c Mon Sep 17 00:00:00 2001 From: Pierre Ossman Date: Fri, 9 Feb 2007 22:49:31 +0100 Subject: mmc: Move OCR bit defines All host drivers were #include:ing mmc/protocol.h just to get access to the OCR bit defines. Move these to host.h instead. Signed-off-by: Pierre Ossman --- drivers/mmc/at91_mci.c | 1 - drivers/mmc/au1xmmc.c | 1 - drivers/mmc/imxmmc.c | 1 - drivers/mmc/mmci.c | 1 - drivers/mmc/omap.c | 1 - drivers/mmc/pxamci.c | 1 - drivers/mmc/sdhci.c | 1 - drivers/mmc/tifm_sd.c | 1 - drivers/mmc/wbsd.c | 1 - include/asm-arm/arch-imx/mmc.h | 2 +- include/asm-arm/arch-pxa/mmc.h | 2 +- include/asm-arm/mach/mmc.h | 2 +- include/linux/mmc/host.h | 25 +++++++++++++++++++++++++ include/linux/mmc/protocol.h | 27 +++------------------------ 14 files changed, 31 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c index 459f4b4feded..e37943c314cb 100644 --- a/drivers/mmc/at91_mci.c +++ b/drivers/mmc/at91_mci.c @@ -67,7 +67,6 @@ #include #include -#include #include #include diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c index b834be261ab7..b7156a4555b5 100644 --- a/drivers/mmc/au1xmmc.c +++ b/drivers/mmc/au1xmmc.c @@ -42,7 +42,6 @@ #include #include -#include #include #include #include diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c index 0de5c9e94e74..7ee2045acbef 100644 --- a/drivers/mmc/imxmmc.c +++ b/drivers/mmc/imxmmc.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c index 5941dd951e82..d11c2d23ceea 100644 --- a/drivers/mmc/mmci.c +++ b/drivers/mmc/mmci.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c index 1e96a2f65022..e851384e51f4 100644 --- a/drivers/mmc/omap.c +++ b/drivers/mmc/omap.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c index 9774fc68b61a..a98ff98fa567 100644 --- a/drivers/mmc/pxamci.c +++ b/drivers/mmc/pxamci.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c index 587dccf95f8e..579142a7904b 100644 --- a/drivers/mmc/sdhci.c +++ b/drivers/mmc/sdhci.c @@ -15,7 +15,6 @@ #include #include -#include #include diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c index f692a2ec09c5..b0d77d298412 100644 --- a/drivers/mmc/tifm_sd.c +++ b/drivers/mmc/tifm_sd.c @@ -13,7 +13,6 @@ #include -#include #include #include #include diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c index 673c64661d36..9f7518b37c33 100644 --- a/drivers/mmc/wbsd.c +++ b/drivers/mmc/wbsd.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include diff --git 
a/include/asm-arm/arch-imx/mmc.h b/include/asm-arm/arch-imx/mmc.h index 1937151665c7..84c726934ace 100644 --- a/include/asm-arm/arch-imx/mmc.h +++ b/include/asm-arm/arch-imx/mmc.h @@ -1,7 +1,7 @@ #ifndef ASMARM_ARCH_MMC_H #define ASMARM_ARCH_MMC_H -#include +#include struct imxmmc_platform_data { int (*card_present)(void); diff --git a/include/asm-arm/arch-pxa/mmc.h b/include/asm-arm/arch-pxa/mmc.h index a38a28c4bbd8..ef4f570381d1 100644 --- a/include/asm-arm/arch-pxa/mmc.h +++ b/include/asm-arm/arch-pxa/mmc.h @@ -1,7 +1,7 @@ #ifndef ASMARM_ARCH_MMC_H #define ASMARM_ARCH_MMC_H -#include +#include #include struct device; diff --git a/include/asm-arm/mach/mmc.h b/include/asm-arm/mach/mmc.h index 1b3555d4b41e..eb91145c00c4 100644 --- a/include/asm-arm/mach/mmc.h +++ b/include/asm-arm/mach/mmc.h @@ -4,7 +4,7 @@ #ifndef ASMARM_MACH_MMC_H #define ASMARM_MACH_MMC_H -#include +#include struct mmc_platform_data { unsigned int ocr_mask; /* available voltages */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index bfcef8a1ad8b..c89f41091304 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -88,6 +88,31 @@ struct mmc_host { unsigned int f_max; u32 ocr_avail; +#define MMC_VDD_145_150 0x00000001 /* VDD voltage 1.45 - 1.50 */ +#define MMC_VDD_150_155 0x00000002 /* VDD voltage 1.50 - 1.55 */ +#define MMC_VDD_155_160 0x00000004 /* VDD voltage 1.55 - 1.60 */ +#define MMC_VDD_160_165 0x00000008 /* VDD voltage 1.60 - 1.65 */ +#define MMC_VDD_165_170 0x00000010 /* VDD voltage 1.65 - 1.70 */ +#define MMC_VDD_17_18 0x00000020 /* VDD voltage 1.7 - 1.8 */ +#define MMC_VDD_18_19 0x00000040 /* VDD voltage 1.8 - 1.9 */ +#define MMC_VDD_19_20 0x00000080 /* VDD voltage 1.9 - 2.0 */ +#define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ +#define MMC_VDD_21_22 0x00000200 /* VDD voltage 2.1 ~ 2.2 */ +#define MMC_VDD_22_23 0x00000400 /* VDD voltage 2.2 ~ 2.3 */ +#define MMC_VDD_23_24 0x00000800 /* VDD voltage 2.3 ~ 2.4 */ +#define MMC_VDD_24_25 0x00001000 /* VDD voltage 2.4 ~ 2.5 */ +#define MMC_VDD_25_26 0x00002000 /* VDD voltage 2.5 ~ 2.6 */ +#define MMC_VDD_26_27 0x00004000 /* VDD voltage 2.6 ~ 2.7 */ +#define MMC_VDD_27_28 0x00008000 /* VDD voltage 2.7 ~ 2.8 */ +#define MMC_VDD_28_29 0x00010000 /* VDD voltage 2.8 ~ 2.9 */ +#define MMC_VDD_29_30 0x00020000 /* VDD voltage 2.9 ~ 3.0 */ +#define MMC_VDD_30_31 0x00040000 /* VDD voltage 3.0 ~ 3.1 */ +#define MMC_VDD_31_32 0x00080000 /* VDD voltage 3.1 ~ 3.2 */ +#define MMC_VDD_32_33 0x00100000 /* VDD voltage 3.2 ~ 3.3 */ +#define MMC_VDD_33_34 0x00200000 /* VDD voltage 3.3 ~ 3.4 */ +#define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */ +#define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */ + unsigned long caps; /* Host capabilities */ #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h index d740ab94fa25..3ca91a6fc23f 100644 --- a/include/linux/mmc/protocol.h +++ b/include/linux/mmc/protocol.h @@ -208,30 +208,9 @@ struct _mmc_csd { u8 ecc; }; -#define MMC_VDD_145_150 0x00000001 /* VDD voltage 1.45 - 1.50 */ -#define MMC_VDD_150_155 0x00000002 /* VDD voltage 1.50 - 1.55 */ -#define MMC_VDD_155_160 0x00000004 /* VDD voltage 1.55 - 1.60 */ -#define MMC_VDD_160_165 0x00000008 /* VDD voltage 1.60 - 1.65 */ -#define MMC_VDD_165_170 0x00000010 /* VDD voltage 1.65 - 1.70 */ -#define MMC_VDD_17_18 0x00000020 /* VDD voltage 1.7 - 1.8 */ -#define MMC_VDD_18_19 0x00000040 /* VDD voltage 1.8 - 1.9 */ -#define MMC_VDD_19_20 0x00000080 
/* VDD voltage 1.9 - 2.0 */ -#define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ -#define MMC_VDD_21_22 0x00000200 /* VDD voltage 2.1 ~ 2.2 */ -#define MMC_VDD_22_23 0x00000400 /* VDD voltage 2.2 ~ 2.3 */ -#define MMC_VDD_23_24 0x00000800 /* VDD voltage 2.3 ~ 2.4 */ -#define MMC_VDD_24_25 0x00001000 /* VDD voltage 2.4 ~ 2.5 */ -#define MMC_VDD_25_26 0x00002000 /* VDD voltage 2.5 ~ 2.6 */ -#define MMC_VDD_26_27 0x00004000 /* VDD voltage 2.6 ~ 2.7 */ -#define MMC_VDD_27_28 0x00008000 /* VDD voltage 2.7 ~ 2.8 */ -#define MMC_VDD_28_29 0x00010000 /* VDD voltage 2.8 ~ 2.9 */ -#define MMC_VDD_29_30 0x00020000 /* VDD voltage 2.9 ~ 3.0 */ -#define MMC_VDD_30_31 0x00040000 /* VDD voltage 3.0 ~ 3.1 */ -#define MMC_VDD_31_32 0x00080000 /* VDD voltage 3.1 ~ 3.2 */ -#define MMC_VDD_32_33 0x00100000 /* VDD voltage 3.2 ~ 3.3 */ -#define MMC_VDD_33_34 0x00200000 /* VDD voltage 3.3 ~ 3.4 */ -#define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */ -#define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */ +/* + * OCR bits are mostly in host.h + */ #define MMC_CARD_BUSY 0x80000000 /* Card Power up status bit */ /* -- cgit v1.2.3 From 3b91e5507cddaca53bccf1524ff11a0ac5c85531 Mon Sep 17 00:00:00 2001 From: Pierre Ossman Date: Sun, 11 Feb 2007 20:43:19 +0100 Subject: mmc: Flush pending detects on host removal Make sure we kill of any pending detection runs when the host is removed instead of when it is freed. Also add some debugging to make sure the driver doesn't queue up more detection after it has removed the host. Signed-off-by: Pierre Ossman --- drivers/mmc/mmc.c | 15 ++++++++++++++- include/linux/mmc/host.h | 3 +++ 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index 9ffeeb2cba45..0242c6a21daf 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c @@ -1550,6 +1550,12 @@ static void mmc_setup(struct mmc_host *host) */ void mmc_detect_change(struct mmc_host *host, unsigned long delay) { +#ifdef CONFIG_MMC_DEBUG + mmc_claim_host(host); + BUG_ON(host->removed); + mmc_release_host(host); +#endif + mmc_schedule_delayed_work(&host->detect, delay); } @@ -1690,6 +1696,14 @@ void mmc_remove_host(struct mmc_host *host) { struct list_head *l, *n; +#ifdef CONFIG_MMC_DEBUG + mmc_claim_host(host); + host->removed = 1; + mmc_release_host(host); +#endif + + mmc_flush_scheduled_work(); + list_for_each_safe(l, n, &host->cards) { struct mmc_card *card = mmc_list_to_card(l); @@ -1710,7 +1724,6 @@ EXPORT_SYMBOL(mmc_remove_host); */ void mmc_free_host(struct mmc_host *host) { - mmc_flush_scheduled_work(); mmc_free_host_sysfs(host); } diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index c89f41091304..92efe8e5be7e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -147,6 +147,9 @@ struct mmc_host { struct mmc_card *card_selected; /* the selected MMC card */ struct delayed_work detect; +#ifdef CONFIG_MMC_DEBUG + unsigned int removed:1; /* host is being removed */ +#endif unsigned long private[0] ____cacheline_aligned; }; -- cgit v1.2.3 From b855885e3b60cf6f9452848712a62517b94583eb Mon Sep 17 00:00:00 2001 From: Pierre Ossman Date: Wed, 3 Jan 2007 19:47:29 +0100 Subject: mmc: deprecate mmc bus topology The classic MMC bus was defined as multi card bus system, which is reflected in the design in the MMC layer. When SD showed up, the bus topology was abandoned and a star topology (one card per host) was mandated. MMC version 4 has followed this, officially deprecating the bus topology. 
As we do not have any known users of the bus topology we can remove support for it. This will simplify the code and rectify some incorrect assumptions in the newer additions. Signed-off-by: Pierre Ossman --- drivers/mmc/card/block.c | 13 +- drivers/mmc/mmc.c | 921 ++++++++++++++++++++--------------------------- include/linux/mmc/card.h | 8 - include/linux/mmc/host.h | 4 +- include/linux/mmc/mmc.h | 9 +- 5 files changed, 400 insertions(+), 555 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 8eba037a18e0..8a84e4dc1b2a 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -226,8 +226,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) struct mmc_blk_request brq; int ret = 1, sg_pos, data_size; - if (mmc_card_claim_host(card)) - goto flush_queue; + mmc_claim_host(card->host); do { struct mmc_command cmd; @@ -357,7 +356,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) spin_unlock_irq(&md->lock); } while (ret); - mmc_card_release_host(card); + mmc_release_host(card->host); return 1; @@ -393,9 +392,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) spin_unlock_irq(&md->lock); } -flush_queue: - - mmc_card_release_host(card); + mmc_release_host(card->host); spin_lock_irq(&md->lock); while (ret) { @@ -526,12 +523,12 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) if (mmc_card_blockaddr(card)) return 0; - mmc_card_claim_host(card); + mmc_claim_host(card->host); cmd.opcode = MMC_SET_BLOCKLEN; cmd.arg = 1 << md->block_bits; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 5); - mmc_card_release_host(card); + mmc_release_host(card->host); if (err) { printk(KERN_ERR "%s: unable to set block size to %d: %d\n", diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index 94c04725726d..3f50b8882c89 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c @@ -3,7 +3,7 @@ * * Copyright (C) 2003-2004 Russell King, All Rights Reserved. * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. - * SD support Copyright (C) 2005 Pierre Ossman, All Rights Reserved. + * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify @@ -316,8 +316,6 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card, } EXPORT_SYMBOL(mmc_set_data_timeout); -static int mmc_select_card(struct mmc_host *host, struct mmc_card *card); - /** * __mmc_claim_host - exclusively claim a host * @host: mmc host to claim @@ -329,11 +327,10 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card); * * Note: you should use mmc_card_claim_host or mmc_claim_host. 
*/ -int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card) +void mmc_claim_host(struct mmc_host *host) { DECLARE_WAITQUEUE(wait, current); unsigned long flags; - int err = 0; add_wait_queue(&host->wq, &wait); spin_lock_irqsave(&host->lock, flags); @@ -349,17 +346,9 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card) host->claimed = 1; spin_unlock_irqrestore(&host->lock, flags); remove_wait_queue(&host->wq, &wait); - - if (card != (void *)-1) { - err = mmc_select_card(host, card); - if (err != MMC_ERR_NONE) - return err; - } - - return err; } -EXPORT_SYMBOL(__mmc_claim_host); +EXPORT_SYMBOL(mmc_claim_host); /** * mmc_release_host - release a host @@ -396,23 +385,18 @@ static inline void mmc_set_ios(struct mmc_host *host) host->ops->set_ios(host, ios); } -static int mmc_select_card(struct mmc_host *host, struct mmc_card *card) +static int mmc_select_card(struct mmc_card *card) { int err; struct mmc_command cmd; - BUG_ON(!host->claimed); - - if (host->card_selected == card) - return MMC_ERR_NONE; - - host->card_selected = card; + BUG_ON(!card->host->claimed); cmd.opcode = MMC_SELECT_CARD; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + err = mmc_wait_for_cmd(card->host, &cmd, CMD_RETRIES); if (err != MMC_ERR_NONE) return err; @@ -426,49 +410,24 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card) * wider version. */ if (mmc_card_sd(card) && - (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { + (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) && + (card->host->caps & MMC_CAP_4_BIT_DATA)) { - /* - * Default bus width is 1 bit. - */ - host->ios.bus_width = MMC_BUS_WIDTH_1; - - if (host->caps & MMC_CAP_4_BIT_DATA) { - struct mmc_command cmd; - cmd.opcode = SD_APP_SET_BUS_WIDTH; - cmd.arg = SD_BUS_WIDTH_4; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_app_cmd(host, card->rca, &cmd, - CMD_RETRIES); - if (err != MMC_ERR_NONE) - return err; - - host->ios.bus_width = MMC_BUS_WIDTH_4; - } - } - - mmc_set_ios(host); - - return MMC_ERR_NONE; -} - -/* - * Ensure that no card is selected. - */ -static void mmc_deselect_cards(struct mmc_host *host) -{ - struct mmc_command cmd; - - if (host->card_selected) { - host->card_selected = NULL; + struct mmc_command cmd; + cmd.opcode = SD_APP_SET_BUS_WIDTH; + cmd.arg = SD_BUS_WIDTH_4; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - cmd.opcode = MMC_SELECT_CARD; - cmd.arg = 0; - cmd.flags = MMC_RSP_NONE | MMC_CMD_AC; + err = mmc_wait_for_app_cmd(card->host, card->rca, + &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; - mmc_wait_for_cmd(host, &cmd, 0); + card->host->ios.bus_width = MMC_BUS_WIDTH_4; + mmc_set_ios(card->host); } + + return MMC_ERR_NONE; } @@ -732,27 +691,12 @@ static void mmc_decode_scr(struct mmc_card *card) } /* - * Locate a MMC card on this MMC host given a raw CID. - */ -static struct mmc_card *mmc_find_card(struct mmc_host *host, u32 *raw_cid) -{ - struct mmc_card *card; - - list_for_each_entry(card, &host->cards, node) { - if (memcmp(card->raw_cid, raw_cid, sizeof(card->raw_cid)) == 0) - return card; - } - return NULL; -} - -/* - * Allocate a new MMC card, and assign a unique RCA. 
+ * Allocate a new MMC card */ static struct mmc_card * -mmc_alloc_card(struct mmc_host *host, u32 *raw_cid, unsigned int *frca) +mmc_alloc_card(struct mmc_host *host, u32 *raw_cid) { - struct mmc_card *card, *c; - unsigned int rca = *frca; + struct mmc_card *card; card = kmalloc(sizeof(struct mmc_card), GFP_KERNEL); if (!card) @@ -761,17 +705,6 @@ mmc_alloc_card(struct mmc_host *host, u32 *raw_cid, unsigned int *frca) mmc_init_card(card, host); memcpy(card->raw_cid, raw_cid, sizeof(card->raw_cid)); - again: - list_for_each_entry(c, &host->cards, node) - if (c->rca == rca) { - rca++; - goto again; - } - - card->rca = rca; - - *frca = rca; - return card; } @@ -937,128 +870,128 @@ static int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2) } /* - * Discover cards by requesting their CID. If this command - * times out, it is not an error; there are no further cards - * to be discovered. Add new cards to the list. + * Discover the card by requesting its CID. * - * Create a mmc_card entry for each discovered card, assigning + * Create a mmc_card entry for the discovered card, assigning * it an RCA, and save the raw CID for decoding later. */ -static void mmc_discover_cards(struct mmc_host *host) +static void mmc_discover_card(struct mmc_host *host) { - struct mmc_card *card; - unsigned int first_rca = 1, err; + unsigned int err; - while (1) { - struct mmc_command cmd; + struct mmc_command cmd; - cmd.opcode = MMC_ALL_SEND_CID; + BUG_ON(host->card); + + cmd.opcode = MMC_ALL_SEND_CID; + cmd.arg = 0; + cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; + + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err == MMC_ERR_TIMEOUT) { + err = MMC_ERR_NONE; + return; + } + if (err != MMC_ERR_NONE) { + printk(KERN_ERR "%s: error requesting CID: %d\n", + mmc_hostname(host), err); + return; + } + + host->card = mmc_alloc_card(host, cmd.resp); + if (IS_ERR(host->card)) { + err = PTR_ERR(host->card); + host->card = NULL; + return; + } + + if (host->mode == MMC_MODE_SD) { + host->card->type = MMC_TYPE_SD; + + cmd.opcode = SD_SEND_RELATIVE_ADDR; cmd.arg = 0; - cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; + cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err == MMC_ERR_TIMEOUT) { - err = MMC_ERR_NONE; - break; - } - if (err != MMC_ERR_NONE) { - printk(KERN_ERR "%s: error requesting CID: %d\n", - mmc_hostname(host), err); - break; - } - - card = mmc_find_card(host, cmd.resp); - if (!card) { - card = mmc_alloc_card(host, cmd.resp, &first_rca); - if (IS_ERR(card)) { - err = PTR_ERR(card); - break; + if (err != MMC_ERR_NONE) + mmc_card_set_dead(host->card); + else { + host->card->rca = cmd.resp[0] >> 16; + + if (!host->ops->get_ro) { + printk(KERN_WARNING "%s: host does not " + "support reading read-only " + "switch. assuming write-enable.\n", + mmc_hostname(host)); + } else { + if (host->ops->get_ro(host)) + mmc_card_set_readonly(host->card); } - list_add(&card->node, &host->cards); } + } else { + host->card->type = MMC_TYPE_MMC; + host->card->rca = 1; - card->state &= ~MMC_STATE_DEAD; - - if (host->mode == MMC_MODE_SD) { - card->type = MMC_TYPE_SD; - - cmd.opcode = SD_SEND_RELATIVE_ADDR; - cmd.arg = 0; - cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) - mmc_card_set_dead(card); - else { - card->rca = cmd.resp[0] >> 16; - - if (!host->ops->get_ro) { - printk(KERN_WARNING "%s: host does not " - "support reading read-only " - "switch. 
assuming write-enable.\n", - mmc_hostname(host)); - } else { - if (host->ops->get_ro(host)) - mmc_card_set_readonly(card); - } - } - } else { - card->type = MMC_TYPE_MMC; - cmd.opcode = MMC_SET_RELATIVE_ADDR; - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) - mmc_card_set_dead(card); - } + cmd.opcode = MMC_SET_RELATIVE_ADDR; + cmd.arg = host->card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) + mmc_card_set_dead(host->card); } } -static void mmc_read_csds(struct mmc_host *host) +static void mmc_read_csd(struct mmc_host *host) { - struct mmc_card *card; - - list_for_each_entry(card, &host->cards, node) { - struct mmc_command cmd; - int err; + struct mmc_command cmd; + int err; - if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT)) - continue; + if (!host->card) + return; + if (mmc_card_dead(host->card)) + return; - cmd.opcode = MMC_SEND_CSD; - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; + cmd.opcode = MMC_SEND_CSD; + cmd.arg = host->card->rca << 16; + cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) { - mmc_card_set_dead(card); - continue; - } + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) { + mmc_card_set_dead(host->card); + return; + } - memcpy(card->raw_csd, cmd.resp, sizeof(card->raw_csd)); + memcpy(host->card->raw_csd, cmd.resp, sizeof(host->card->raw_csd)); - mmc_decode_csd(card); - mmc_decode_cid(card); - } + mmc_decode_csd(host->card); + mmc_decode_cid(host->card); } -static void mmc_process_ext_csds(struct mmc_host *host) +static void mmc_process_ext_csd(struct mmc_host *host) { int err; - struct mmc_card *card; struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; + u8 *ext_csd; struct scatterlist sg; + if (!host->card) + return; + if (mmc_card_dead(host->card)) + return; + if (mmc_card_sd(host->card)) + return; + if (host->card->csd.mmca_vsn < CSD_SPEC_VER_4) + return; + /* * As the ext_csd is so large and mostly unused, we don't store the * raw block in mmc_card. */ - u8 *ext_csd; ext_csd = kmalloc(512, GFP_KERNEL); if (!ext_csd) { printk("%s: could not allocate a buffer to receive the ext_csd." 
@@ -1067,211 +1000,184 @@ static void mmc_process_ext_csds(struct mmc_host *host) return; } - list_for_each_entry(card, &host->cards, node) { - if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT)) - continue; - if (mmc_card_sd(card)) - continue; - if (card->csd.mmca_vsn < CSD_SPEC_VER_4) - continue; - - err = mmc_select_card(host, card); - if (err != MMC_ERR_NONE) { - mmc_card_set_dead(card); - continue; - } - - memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_SEND_EXT_CSD; - cmd.arg = 0; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + cmd.opcode = MMC_SEND_EXT_CSD; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - memset(&data, 0, sizeof(struct mmc_data)); + memset(&data, 0, sizeof(struct mmc_data)); - mmc_set_data_timeout(&data, card, 0); + mmc_set_data_timeout(&data, host->card, 0); - data.blksz = 512; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; + data.blksz = 512; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; - memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&mrq, 0, sizeof(struct mmc_request)); - mrq.cmd = &cmd; - mrq.data = &data; + mrq.cmd = &cmd; + mrq.data = &data; - sg_init_one(&sg, ext_csd, 512); + sg_init_one(&sg, ext_csd, 512); - mmc_wait_for_req(host, &mrq); + mmc_wait_for_req(host, &mrq); - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { - if (card->csd.capacity == (4096 * 512)) { - printk(KERN_ERR "%s: unable to read EXT_CSD " - "on a possible high capacity card. " - "Card will be ignored.\n", - mmc_hostname(card->host)); - mmc_card_set_dead(card); - } else { - printk(KERN_WARNING "%s: unable to read " - "EXT_CSD, performance might " - "suffer.\n", - mmc_hostname(card->host)); - } - continue; + if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { + if (host->card->csd.capacity == (4096 * 512)) { + printk(KERN_ERR "%s: unable to read EXT_CSD " + "on a possible high capacity card. 
" + "Card will be ignored.\n", + mmc_hostname(host)); + mmc_card_set_dead(host->card); + } else { + printk(KERN_WARNING "%s: unable to read " + "EXT_CSD, performance might " + "suffer.\n", + mmc_hostname(host)); } + goto out; + } - card->ext_csd.sectors = - ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | - ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | - ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | - ext_csd[EXT_CSD_SEC_CNT + 3] << 24; - if (card->ext_csd.sectors) - mmc_card_set_blockaddr(card); + host->card->ext_csd.sectors = + ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | + ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | + ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | + ext_csd[EXT_CSD_SEC_CNT + 3] << 24; + if (host->card->ext_csd.sectors) + mmc_card_set_blockaddr(host->card); + + switch (ext_csd[EXT_CSD_CARD_TYPE]) { + case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: + host->card->ext_csd.hs_max_dtr = 52000000; + break; + case EXT_CSD_CARD_TYPE_26: + host->card->ext_csd.hs_max_dtr = 26000000; + break; + default: + /* MMC v4 spec says this cannot happen */ + printk("%s: card is mmc v4 but doesn't support " + "any high-speed modes.\n", + mmc_hostname(host)); + goto out; + } - switch (ext_csd[EXT_CSD_CARD_TYPE]) { - case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: - card->ext_csd.hs_max_dtr = 52000000; - break; - case EXT_CSD_CARD_TYPE_26: - card->ext_csd.hs_max_dtr = 26000000; - break; - default: - /* MMC v4 spec says this cannot happen */ - printk("%s: card is mmc v4 but doesn't support " - "any high-speed modes.\n", - mmc_hostname(card->host)); - continue; - } + if (host->caps & MMC_CAP_MMC_HIGHSPEED) { + /* Activate highspeed support. */ + cmd.opcode = MMC_SWITCH; + cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | + (EXT_CSD_HS_TIMING << 16) | + (1 << 8) | + EXT_CSD_CMD_SET_NORMAL; + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; - if (host->caps & MMC_CAP_MMC_HIGHSPEED) { - /* Activate highspeed support. */ - cmd.opcode = MMC_SWITCH; - cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | - (EXT_CSD_HS_TIMING << 16) | - (1 << 8) | - EXT_CSD_CMD_SET_NORMAL; - cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) { - printk("%s: failed to switch card to mmc v4 " - "high-speed mode.\n", - mmc_hostname(card->host)); - continue; - } + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) { + printk("%s: failed to switch card to mmc v4 " + "high-speed mode.\n", + mmc_hostname(host)); + goto out; + } - mmc_card_set_highspeed(card); + mmc_card_set_highspeed(host->card); - host->ios.timing = MMC_TIMING_MMC_HS; - mmc_set_ios(host); - } + host->ios.timing = MMC_TIMING_MMC_HS; + mmc_set_ios(host); + } - /* Check for host support for wide-bus modes. */ - if (host->caps & MMC_CAP_4_BIT_DATA) { - /* Activate 4-bit support. */ - cmd.opcode = MMC_SWITCH; - cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | - (EXT_CSD_BUS_WIDTH << 16) | - (EXT_CSD_BUS_WIDTH_4 << 8) | - EXT_CSD_CMD_SET_NORMAL; - cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) { - printk("%s: failed to switch card to " - "mmc v4 4-bit bus mode.\n", - mmc_hostname(card->host)); - continue; - } + /* Check for host support for wide-bus modes. */ + if (host->caps & MMC_CAP_4_BIT_DATA) { + /* Activate 4-bit support. 
*/ + cmd.opcode = MMC_SWITCH; + cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | + (EXT_CSD_BUS_WIDTH << 16) | + (EXT_CSD_BUS_WIDTH_4 << 8) | + EXT_CSD_CMD_SET_NORMAL; + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; - host->ios.bus_width = MMC_BUS_WIDTH_4; - mmc_set_ios(host); + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) { + printk("%s: failed to switch card to " + "mmc v4 4-bit bus mode.\n", + mmc_hostname(host)); + goto out; } + + host->ios.bus_width = MMC_BUS_WIDTH_4; + mmc_set_ios(host); } +out: kfree(ext_csd); - - mmc_deselect_cards(host); } -static void mmc_read_scrs(struct mmc_host *host) +static void mmc_read_scr(struct mmc_host *host) { int err; - struct mmc_card *card; struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; struct scatterlist sg; - list_for_each_entry(card, &host->cards, node) { - if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT)) - continue; - if (!mmc_card_sd(card)) - continue; - - err = mmc_select_card(host, card); - if (err != MMC_ERR_NONE) { - mmc_card_set_dead(card); - continue; - } - - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = MMC_APP_CMD; - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + if (!host->card) + return; + if (mmc_card_dead(host->card)) + return; + if (!mmc_card_sd(host->card)) + return; - err = mmc_wait_for_cmd(host, &cmd, 0); - if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) { - mmc_card_set_dead(card); - continue; - } + memset(&cmd, 0, sizeof(struct mmc_command)); - memset(&cmd, 0, sizeof(struct mmc_command)); + cmd.opcode = MMC_APP_CMD; + cmd.arg = host->card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - cmd.opcode = SD_APP_SEND_SCR; - cmd.arg = 0; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + err = mmc_wait_for_cmd(host, &cmd, 0); + if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) { + mmc_card_set_dead(host->card); + return; + } - memset(&data, 0, sizeof(struct mmc_data)); + memset(&cmd, 0, sizeof(struct mmc_command)); - mmc_set_data_timeout(&data, card, 0); + cmd.opcode = SD_APP_SEND_SCR; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - data.blksz = 1 << 3; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; + memset(&data, 0, sizeof(struct mmc_data)); - memset(&mrq, 0, sizeof(struct mmc_request)); + mmc_set_data_timeout(&data, host->card, 0); - mrq.cmd = &cmd; - mrq.data = &data; + data.blksz = 1 << 3; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; - sg_init_one(&sg, (u8*)card->raw_scr, 8); + memset(&mrq, 0, sizeof(struct mmc_request)); - mmc_wait_for_req(host, &mrq); + mrq.cmd = &cmd; + mrq.data = &data; - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { - mmc_card_set_dead(card); - continue; - } + sg_init_one(&sg, (u8*)host->card->raw_scr, 8); - card->raw_scr[0] = ntohl(card->raw_scr[0]); - card->raw_scr[1] = ntohl(card->raw_scr[1]); + mmc_wait_for_req(host, &mrq); - mmc_decode_scr(card); + if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { + mmc_card_set_dead(host->card); + return; } - mmc_deselect_cards(host); + host->card->raw_scr[0] = ntohl(host->card->raw_scr[0]); + host->card->raw_scr[1] = ntohl(host->card->raw_scr[1]); + + mmc_decode_scr(host->card); } static void mmc_read_switch_caps(struct mmc_host *host) { - int err; - struct mmc_card *card; struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; @@ -1281,6 +1187,15 @@ static void mmc_read_switch_caps(struct mmc_host *host) if (!(host->caps & 
MMC_CAP_SD_HIGHSPEED)) return; + if (!host->card) + return; + if (mmc_card_dead(host->card)) + return; + if (!mmc_card_sd(host->card)) + return; + if (host->card->scr.sda_vsn < SCR_SPEC_VER_1) + return; + status = kmalloc(64, GFP_KERNEL); if (!status) { printk(KERN_WARNING "%s: Unable to allocate buffer for " @@ -1289,116 +1204,98 @@ static void mmc_read_switch_caps(struct mmc_host *host) return; } - list_for_each_entry(card, &host->cards, node) { - if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT)) - continue; - if (!mmc_card_sd(card)) - continue; - if (card->scr.sda_vsn < SCR_SPEC_VER_1) - continue; - - err = mmc_select_card(host, card); - if (err != MMC_ERR_NONE) { - mmc_card_set_dead(card); - continue; - } - - memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = SD_SWITCH; - cmd.arg = 0x00FFFFF1; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + cmd.opcode = SD_SWITCH; + cmd.arg = 0x00FFFFF1; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - memset(&data, 0, sizeof(struct mmc_data)); + memset(&data, 0, sizeof(struct mmc_data)); - mmc_set_data_timeout(&data, card, 0); + mmc_set_data_timeout(&data, host->card, 0); - data.blksz = 64; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; + data.blksz = 64; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; - memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&mrq, 0, sizeof(struct mmc_request)); - mrq.cmd = &cmd; - mrq.data = &data; + mrq.cmd = &cmd; + mrq.data = &data; - sg_init_one(&sg, status, 64); + sg_init_one(&sg, status, 64); - mmc_wait_for_req(host, &mrq); + mmc_wait_for_req(host, &mrq); - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { - printk("%s: unable to read switch capabilities, " - "performance might suffer.\n", - mmc_hostname(card->host)); - continue; - } + if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { + printk("%s: unable to read switch capabilities, " + "performance might suffer.\n", + mmc_hostname(host)); + goto out; + } - if (status[13] & 0x02) - card->sw_caps.hs_max_dtr = 50000000; + if (status[13] & 0x02) + host->card->sw_caps.hs_max_dtr = 50000000; - memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = SD_SWITCH; - cmd.arg = 0x80FFFFF1; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + cmd.opcode = SD_SWITCH; + cmd.arg = 0x80FFFFF1; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - memset(&data, 0, sizeof(struct mmc_data)); + memset(&data, 0, sizeof(struct mmc_data)); - mmc_set_data_timeout(&data, card, 0); + mmc_set_data_timeout(&data, host->card, 0); - data.blksz = 64; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; + data.blksz = 64; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; - memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&mrq, 0, sizeof(struct mmc_request)); - mrq.cmd = &cmd; - mrq.data = &data; + mrq.cmd = &cmd; + mrq.data = &data; - sg_init_one(&sg, status, 64); + sg_init_one(&sg, status, 64); - mmc_wait_for_req(host, &mrq); + mmc_wait_for_req(host, &mrq); - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE || - (status[16] & 0xF) != 1) { - printk(KERN_WARNING "%s: Problem switching card " - "into high-speed mode!\n", - mmc_hostname(host)); - continue; - } + if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE || + (status[16] & 0xF) != 1) { + printk(KERN_WARNING "%s: Problem switching card " + "into high-speed mode!\n", + 
mmc_hostname(host)); + goto out; + } - mmc_card_set_highspeed(card); + mmc_card_set_highspeed(host->card); - host->ios.timing = MMC_TIMING_SD_HS; - mmc_set_ios(host); - } + host->ios.timing = MMC_TIMING_SD_HS; + mmc_set_ios(host); +out: kfree(status); - - mmc_deselect_cards(host); } static unsigned int mmc_calculate_clock(struct mmc_host *host) { - struct mmc_card *card; unsigned int max_dtr = host->f_max; - list_for_each_entry(card, &host->cards, node) - if (!mmc_card_dead(card)) { - if (mmc_card_highspeed(card) && mmc_card_sd(card)) { - if (max_dtr > card->sw_caps.hs_max_dtr) - max_dtr = card->sw_caps.hs_max_dtr; - } else if (mmc_card_highspeed(card) && !mmc_card_sd(card)) { - if (max_dtr > card->ext_csd.hs_max_dtr) - max_dtr = card->ext_csd.hs_max_dtr; - } else if (max_dtr > card->csd.max_dtr) { - max_dtr = card->csd.max_dtr; - } + if (host->card && !mmc_card_dead(host->card)) { + if (mmc_card_highspeed(host->card) && mmc_card_sd(host->card)) { + if (max_dtr > host->card->sw_caps.hs_max_dtr) + max_dtr = host->card->sw_caps.hs_max_dtr; + } else if (mmc_card_highspeed(host->card) && !mmc_card_sd(host->card)) { + if (max_dtr > host->card->ext_csd.hs_max_dtr) + max_dtr = host->card->ext_csd.hs_max_dtr; + } else if (max_dtr > host->card->csd.max_dtr) { + max_dtr = host->card->csd.max_dtr; } + } pr_debug("%s: selected %d.%03dMHz transfer rate\n", mmc_hostname(host), @@ -1415,91 +1312,65 @@ static unsigned int mmc_calculate_clock(struct mmc_host *host) * A request for status does not cause a state change in data * transfer mode. */ -static void mmc_check_cards(struct mmc_host *host) +static void mmc_check_card(struct mmc_card *card) { - struct list_head *l, *n; - - mmc_deselect_cards(host); + struct mmc_command cmd; + int err; - list_for_each_safe(l, n, &host->cards) { - struct mmc_card *card = mmc_list_to_card(l); - struct mmc_command cmd; - int err; + BUG_ON(!card); - cmd.opcode = MMC_SEND_STATUS; - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + cmd.opcode = MMC_SEND_STATUS; + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err == MMC_ERR_NONE) - continue; + err = mmc_wait_for_cmd(card->host, &cmd, CMD_RETRIES); + if (err == MMC_ERR_NONE) + return; - mmc_card_set_dead(card); - } + mmc_card_set_dead(card); } static void mmc_setup(struct mmc_host *host) { - if (host->ios.power_mode != MMC_POWER_ON) { - int err; - u32 ocr; - - host->mode = MMC_MODE_SD; - - mmc_power_up(host); - mmc_idle_cards(host); + int err; + u32 ocr; - err = mmc_send_if_cond(host, host->ocr_avail, NULL); - if (err != MMC_ERR_NONE) { - return; - } - err = mmc_send_app_op_cond(host, 0, &ocr); + host->mode = MMC_MODE_SD; - /* - * If we fail to detect any SD cards then try - * searching for MMC cards. - */ - if (err != MMC_ERR_NONE) { - host->mode = MMC_MODE_MMC; - - err = mmc_send_op_cond(host, 0, &ocr); - if (err != MMC_ERR_NONE) - return; - } + mmc_power_up(host); + mmc_idle_cards(host); - host->ocr = mmc_select_voltage(host, ocr); + err = mmc_send_if_cond(host, host->ocr_avail, NULL); + if (err != MMC_ERR_NONE) { + return; + } + err = mmc_send_app_op_cond(host, 0, &ocr); - /* - * Since we're changing the OCR value, we seem to - * need to tell some cards to go back to the idle - * state. We wait 1ms to give cards time to - * respond. 
- */ - if (host->ocr) - mmc_idle_cards(host); - } else { - host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; - host->ios.clock = host->f_min; - mmc_set_ios(host); + /* + * If we fail to detect any SD cards then try + * searching for MMC cards. + */ + if (err != MMC_ERR_NONE) { + host->mode = MMC_MODE_MMC; - /* - * We should remember the OCR mask from the existing - * cards, and detect the new cards OCR mask, combine - * the two and re-select the VDD. However, if we do - * change VDD, we should do an idle, and then do a - * full re-initialisation. We would need to notify - * drivers so that they can re-setup the cards as - * well, while keeping their queues at bay. - * - * For the moment, we take the easy way out - if the - * new cards don't like our currently selected VDD, - * they drop off the bus. - */ + err = mmc_send_op_cond(host, 0, &ocr); + if (err != MMC_ERR_NONE) + return; } + host->ocr = mmc_select_voltage(host, ocr); + if (host->ocr == 0) return; + /* + * Since we're changing the OCR value, we seem to + * need to tell some cards to go back to the idle + * state. We wait 1ms to give cards time to + * respond. + */ + mmc_idle_cards(host); + /* * Send the selected OCR multiple times... until the cards * all get the idea that they should be ready for CMD2. @@ -1522,7 +1393,7 @@ static void mmc_setup(struct mmc_host *host) mmc_send_op_cond(host, host->ocr | (1 << 30), NULL); } - mmc_discover_cards(host); + mmc_discover_card(host); /* * Ok, now switch to push-pull mode. @@ -1530,13 +1401,19 @@ static void mmc_setup(struct mmc_host *host) host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; mmc_set_ios(host); - mmc_read_csds(host); + mmc_read_csd(host); + + if (host->card && !mmc_card_dead(host->card)) { + err = mmc_select_card(host->card); + if (err != MMC_ERR_NONE) + mmc_card_set_dead(host->card); + } if (host->mode == MMC_MODE_SD) { - mmc_read_scrs(host); + mmc_read_scr(host); mmc_read_switch_caps(host); } else - mmc_process_ext_csds(host); + mmc_process_ext_csd(host); } @@ -1566,31 +1443,29 @@ static void mmc_rescan(struct work_struct *work) { struct mmc_host *host = container_of(work, struct mmc_host, detect.work); - struct list_head *l, *n; - unsigned char power_mode; mmc_claim_host(host); /* - * Check for removed cards and newly inserted ones. We check for + * Check for removed card and newly inserted ones. We check for * removed cards first so we can intelligently re-select the VDD. */ - power_mode = host->ios.power_mode; - if (power_mode == MMC_POWER_ON) - mmc_check_cards(host); + if (host->card) { + mmc_check_card(host->card); - mmc_setup(host); + mmc_release_host(host); - /* - * Some broken cards process CMD1 even in stand-by state. There is - * no reply, but an ILLEGAL_COMMAND error is cached and returned - * after next command. We poll for card status here to clear any - * possibly pending error. - */ - if (power_mode == MMC_POWER_ON) - mmc_check_cards(host); + if (mmc_card_dead(host->card)) { + mmc_remove_card(host->card); + host->card = NULL; + } + + goto out; + } - if (!list_empty(&host->cards)) { + mmc_setup(host); + + if (host->card && !mmc_card_dead(host->card)) { /* * (Re-)calculate the fastest clock rate which the * attached cards and the host support. @@ -1601,31 +1476,28 @@ static void mmc_rescan(struct work_struct *work) mmc_release_host(host); - list_for_each_safe(l, n, &host->cards) { - struct mmc_card *card = mmc_list_to_card(l); - - /* - * If this is a new and good card, register it. 
- */ - if (!mmc_card_present(card) && !mmc_card_dead(card)) { - if (mmc_register_card(card)) - mmc_card_set_dead(card); - } + /* + * If this is a new and good card, register it. + */ + if (host->card && !mmc_card_dead(host->card)) { + if (mmc_register_card(host->card)) + mmc_card_set_dead(host->card); + } - /* - * If this card is dead, destroy it. - */ - if (mmc_card_dead(card)) { - list_del(&card->node); - mmc_remove_card(card); - } + /* + * If this card is dead, destroy it. + */ + if (host->card && mmc_card_dead(host->card)) { + mmc_remove_card(host->card); + host->card = NULL; } +out: /* * If we discover that there are no cards on the * bus, turn off the clock and power down. */ - if (list_empty(&host->cards)) + if (!host->card) mmc_power_off(host); } @@ -1645,7 +1517,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) if (host) { spin_lock_init(&host->lock); init_waitqueue_head(&host->wq); - INIT_LIST_HEAD(&host->cards); INIT_DELAYED_WORK(&host->detect, mmc_rescan); /* @@ -1694,8 +1565,6 @@ EXPORT_SYMBOL(mmc_add_host); */ void mmc_remove_host(struct mmc_host *host) { - struct list_head *l, *n; - #ifdef CONFIG_MMC_DEBUG mmc_claim_host(host); host->removed = 1; @@ -1704,10 +1573,9 @@ void mmc_remove_host(struct mmc_host *host) mmc_flush_scheduled_work(); - list_for_each_safe(l, n, &host->cards) { - struct mmc_card *card = mmc_list_to_card(l); - - mmc_remove_card(card); + if (host->card) { + mmc_remove_card(host->card); + host->card = NULL; } mmc_power_off(host); @@ -1738,14 +1606,11 @@ EXPORT_SYMBOL(mmc_free_host); */ int mmc_suspend_host(struct mmc_host *host, pm_message_t state) { - struct list_head *l, *n; - mmc_flush_scheduled_work(); - list_for_each_safe(l, n, &host->cards) { - struct mmc_card *card = mmc_list_to_card(l); - - mmc_remove_card(card); + if (host->card) { + mmc_remove_card(host->card); + host->card = NULL; } mmc_power_off(host); diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 1ca50542ce19..7d98990ac94e 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -61,7 +61,6 @@ struct mmc_host; * MMC device */ struct mmc_card { - struct list_head node; /* node in hosts devices list */ struct mmc_host *host; /* the host this device belongs to */ struct device dev; /* the device */ unsigned int rca; /* relative card address of device */ @@ -123,11 +122,4 @@ struct mmc_driver { extern int mmc_register_driver(struct mmc_driver *); extern void mmc_unregister_driver(struct mmc_driver *); -static inline int mmc_card_claim_host(struct mmc_card *card) -{ - return __mmc_claim_host(card->host, card); -} - -#define mmc_card_release_host(c) mmc_release_host((c)->host) - #endif diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 92efe8e5be7e..6ea3c0ea3e15 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -138,14 +138,12 @@ struct mmc_host { #define MMC_MODE_MMC 0 #define MMC_MODE_SD 1 - struct list_head cards; /* devices attached to this host */ + struct mmc_card *card; /* device attached to this host */ wait_queue_head_t wq; spinlock_t lock; /* claimed lock */ unsigned int claimed:1; /* host exclusively claimed */ - struct mmc_card *card_selected; /* the selected MMC card */ - struct delayed_work detect; #ifdef CONFIG_MMC_DEBUG unsigned int removed:1; /* host is being removed */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index cdc54be804f1..b3d80efc6434 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -8,7 +8,6 @@ #ifndef MMC_H #define MMC_H -#include 
#include #include @@ -107,13 +106,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, unsigned int, extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *, int); -extern int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card); - -static inline void mmc_claim_host(struct mmc_host *host) -{ - __mmc_claim_host(host, (struct mmc_card *)-1); -} - +extern void mmc_claim_host(struct mmc_host *host); extern void mmc_release_host(struct mmc_host *host); #endif -- cgit v1.2.3 From aaac1b470bd0dccb30912356617069dc6199cc80 Mon Sep 17 00:00:00 2001 From: Pierre Ossman Date: Wed, 28 Feb 2007 15:33:10 +0100 Subject: mmc: Move core functions to subdir Create a "core" subdirectory to house the central bus handling functions. Signed-off-by: Pierre Ossman --- drivers/mmc/Makefile | 7 +- drivers/mmc/core/Makefile | 11 + drivers/mmc/core/core.c | 1638 +++++++++++++++++++++++++++++++++++++++++++++ drivers/mmc/core/core.h | 25 + drivers/mmc/core/sysfs.c | 361 ++++++++++ drivers/mmc/mmc.c | 1638 --------------------------------------------- drivers/mmc/mmc.h | 25 - drivers/mmc/mmc_sysfs.c | 361 ---------- include/linux/mmc/card.h | 2 +- include/linux/mmc/core.h | 112 ++++ include/linux/mmc/host.h | 2 +- include/linux/mmc/mmc.h | 112 ---- 12 files changed, 2150 insertions(+), 2144 deletions(-) create mode 100644 drivers/mmc/core/Makefile create mode 100644 drivers/mmc/core/core.c create mode 100644 drivers/mmc/core/core.h create mode 100644 drivers/mmc/core/sysfs.c delete mode 100644 drivers/mmc/mmc.c delete mode 100644 drivers/mmc/mmc.h delete mode 100644 drivers/mmc/mmc_sysfs.c create mode 100644 include/linux/mmc/core.h delete mode 100644 include/linux/mmc/mmc.h (limited to 'include/linux') diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile index 4d2bdfeb8d76..9979f5e9765b 100644 --- a/drivers/mmc/Makefile +++ b/drivers/mmc/Makefile @@ -6,12 +6,7 @@ ifeq ($(CONFIG_MMC_DEBUG),y) EXTRA_CFLAGS += -DDEBUG endif -# -# Core -# -obj-$(CONFIG_MMC) += mmc_core.o -mmc_core-y := mmc.o mmc_sysfs.o - +obj-$(CONFIG_MMC) += core/ obj-$(CONFIG_MMC) += card/ obj-$(CONFIG_MMC) += host/ diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile new file mode 100644 index 000000000000..f911fbd2845b --- /dev/null +++ b/drivers/mmc/core/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the kernel mmc core. +# + +ifeq ($(CONFIG_MMC_DEBUG),y) + EXTRA_CFLAGS += -DDEBUG +endif + +obj-$(CONFIG_MMC) += mmc_core.o +mmc_core-y := core.o sysfs.o + diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c new file mode 100644 index 000000000000..334e663e465b --- /dev/null +++ b/drivers/mmc/core/core.c @@ -0,0 +1,1638 @@ +/* + * linux/drivers/mmc/core/core.c + * + * Copyright (C) 2003-2004 Russell King, All Rights Reserved. + * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. + * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. + * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "core.h" + +#define CMD_RETRIES 3 + +/* + * OCR Bit positions to 10s of Vdd mV. 
+ */ +static const unsigned short mmc_ocr_bit_to_vdd[] = { + 150, 155, 160, 165, 170, 180, 190, 200, + 210, 220, 230, 240, 250, 260, 270, 280, + 290, 300, 310, 320, 330, 340, 350, 360 +}; + +static const unsigned int tran_exp[] = { + 10000, 100000, 1000000, 10000000, + 0, 0, 0, 0 +}; + +static const unsigned char tran_mant[] = { + 0, 10, 12, 13, 15, 20, 25, 30, + 35, 40, 45, 50, 55, 60, 70, 80, +}; + +static const unsigned int tacc_exp[] = { + 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, +}; + +static const unsigned int tacc_mant[] = { + 0, 10, 12, 13, 15, 20, 25, 30, + 35, 40, 45, 50, 55, 60, 70, 80, +}; + + +/** + * mmc_request_done - finish processing an MMC request + * @host: MMC host which completed request + * @mrq: MMC request which request + * + * MMC drivers should call this function when they have completed + * their processing of a request. + */ +void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) +{ + struct mmc_command *cmd = mrq->cmd; + int err = cmd->error; + + pr_debug("%s: req done (CMD%u): %d/%d/%d: %08x %08x %08x %08x\n", + mmc_hostname(host), cmd->opcode, err, + mrq->data ? mrq->data->error : 0, + mrq->stop ? mrq->stop->error : 0, + cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); + + if (err && cmd->retries) { + cmd->retries--; + cmd->error = 0; + host->ops->request(host, mrq); + } else if (mrq->done) { + mrq->done(mrq); + } +} + +EXPORT_SYMBOL(mmc_request_done); + +/** + * mmc_start_request - start a command on a host + * @host: MMC host to start command on + * @mrq: MMC request to start + * + * Queue a command on the specified host. We expect the + * caller to be holding the host lock with interrupts disabled. + */ +void +mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) +{ +#ifdef CONFIG_MMC_DEBUG + unsigned int i, sz; +#endif + + pr_debug("%s: starting CMD%u arg %08x flags %08x\n", + mmc_hostname(host), mrq->cmd->opcode, + mrq->cmd->arg, mrq->cmd->flags); + + WARN_ON(!host->claimed); + + mrq->cmd->error = 0; + mrq->cmd->mrq = mrq; + if (mrq->data) { + BUG_ON(mrq->data->blksz > host->max_blk_size); + BUG_ON(mrq->data->blocks > host->max_blk_count); + BUG_ON(mrq->data->blocks * mrq->data->blksz > + host->max_req_size); + +#ifdef CONFIG_MMC_DEBUG + sz = 0; + for (i = 0;i < mrq->data->sg_len;i++) + sz += mrq->data->sg[i].length; + BUG_ON(sz != mrq->data->blocks * mrq->data->blksz); +#endif + + mrq->cmd->data = mrq->data; + mrq->data->error = 0; + mrq->data->mrq = mrq; + if (mrq->stop) { + mrq->data->stop = mrq->stop; + mrq->stop->error = 0; + mrq->stop->mrq = mrq; + } + } + host->ops->request(host, mrq); +} + +EXPORT_SYMBOL(mmc_start_request); + +static void mmc_wait_done(struct mmc_request *mrq) +{ + complete(mrq->done_data); +} + +int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) +{ + DECLARE_COMPLETION_ONSTACK(complete); + + mrq->done_data = &complete; + mrq->done = mmc_wait_done; + + mmc_start_request(host, mrq); + + wait_for_completion(&complete); + + return 0; +} + +EXPORT_SYMBOL(mmc_wait_for_req); + +/** + * mmc_wait_for_cmd - start a command and wait for completion + * @host: MMC host to start command + * @cmd: MMC command to start + * @retries: maximum number of retries + * + * Start a new MMC command for a host, and wait for the command + * to complete. Return any error that occurred while the command + * was executing. Do not attempt to parse the response. 
+ */ +int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) +{ + struct mmc_request mrq; + + BUG_ON(!host->claimed); + + memset(&mrq, 0, sizeof(struct mmc_request)); + + memset(cmd->resp, 0, sizeof(cmd->resp)); + cmd->retries = retries; + + mrq.cmd = cmd; + cmd->data = NULL; + + mmc_wait_for_req(host, &mrq); + + return cmd->error; +} + +EXPORT_SYMBOL(mmc_wait_for_cmd); + +/** + * mmc_wait_for_app_cmd - start an application command and wait for + completion + * @host: MMC host to start command + * @rca: RCA to send MMC_APP_CMD to + * @cmd: MMC command to start + * @retries: maximum number of retries + * + * Sends a MMC_APP_CMD, checks the card response, sends the command + * in the parameter and waits for it to complete. Return any error + * that occurred while the command was executing. Do not attempt to + * parse the response. + */ +int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca, + struct mmc_command *cmd, int retries) +{ + struct mmc_request mrq; + struct mmc_command appcmd; + + int i, err; + + BUG_ON(!host->claimed); + BUG_ON(retries < 0); + + err = MMC_ERR_INVALID; + + /* + * We have to resend MMC_APP_CMD for each attempt so + * we cannot use the retries field in mmc_command. + */ + for (i = 0;i <= retries;i++) { + memset(&mrq, 0, sizeof(struct mmc_request)); + + appcmd.opcode = MMC_APP_CMD; + appcmd.arg = rca << 16; + appcmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + appcmd.retries = 0; + memset(appcmd.resp, 0, sizeof(appcmd.resp)); + appcmd.data = NULL; + + mrq.cmd = &appcmd; + appcmd.data = NULL; + + mmc_wait_for_req(host, &mrq); + + if (appcmd.error) { + err = appcmd.error; + continue; + } + + /* Check that card supported application commands */ + if (!(appcmd.resp[0] & R1_APP_CMD)) + return MMC_ERR_FAILED; + + memset(&mrq, 0, sizeof(struct mmc_request)); + + memset(cmd->resp, 0, sizeof(cmd->resp)); + cmd->retries = 0; + + mrq.cmd = cmd; + cmd->data = NULL; + + mmc_wait_for_req(host, &mrq); + + err = cmd->error; + if (cmd->error == MMC_ERR_NONE) + break; + } + + return err; +} + +EXPORT_SYMBOL(mmc_wait_for_app_cmd); + +/** + * mmc_set_data_timeout - set the timeout for a data command + * @data: data phase for command + * @card: the MMC card associated with the data transfer + * @write: flag to differentiate reads from writes + */ +void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card, + int write) +{ + unsigned int mult; + + /* + * SD cards use a 100 multiplier rather than 10 + */ + mult = mmc_card_sd(card) ? 100 : 10; + + /* + * Scale up the multiplier (and therefore the timeout) by + * the r2w factor for writes. + */ + if (write) + mult <<= card->csd.r2w_factor; + + data->timeout_ns = card->csd.tacc_ns * mult; + data->timeout_clks = card->csd.tacc_clks * mult; + + /* + * SD cards also have an upper limit on the timeout. + */ + if (mmc_card_sd(card)) { + unsigned int timeout_us, limit_us; + + timeout_us = data->timeout_ns / 1000; + timeout_us += data->timeout_clks * 1000 / + (card->host->ios.clock / 1000); + + if (write) + limit_us = 250000; + else + limit_us = 100000; + + /* + * SDHC cards always use these fixed values. + */ + if (timeout_us > limit_us || mmc_card_blockaddr(card)) { + data->timeout_ns = limit_us * 1000; + data->timeout_clks = 0; + } + } +} +EXPORT_SYMBOL(mmc_set_data_timeout); + +/** + * __mmc_claim_host - exclusively claim a host + * @host: mmc host to claim + * @card: mmc card to claim host for + * + * Claim a host for a set of operations. 
If a valid card + * is passed and this wasn't the last card selected, select + * the card before returning. + * + * Note: you should use mmc_card_claim_host or mmc_claim_host. + */ +void mmc_claim_host(struct mmc_host *host) +{ + DECLARE_WAITQUEUE(wait, current); + unsigned long flags; + + add_wait_queue(&host->wq, &wait); + spin_lock_irqsave(&host->lock, flags); + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (!host->claimed) + break; + spin_unlock_irqrestore(&host->lock, flags); + schedule(); + spin_lock_irqsave(&host->lock, flags); + } + set_current_state(TASK_RUNNING); + host->claimed = 1; + spin_unlock_irqrestore(&host->lock, flags); + remove_wait_queue(&host->wq, &wait); +} + +EXPORT_SYMBOL(mmc_claim_host); + +/** + * mmc_release_host - release a host + * @host: mmc host to release + * + * Release a MMC host, allowing others to claim the host + * for their operations. + */ +void mmc_release_host(struct mmc_host *host) +{ + unsigned long flags; + + BUG_ON(!host->claimed); + + spin_lock_irqsave(&host->lock, flags); + host->claimed = 0; + spin_unlock_irqrestore(&host->lock, flags); + + wake_up(&host->wq); +} + +EXPORT_SYMBOL(mmc_release_host); + +static inline void mmc_set_ios(struct mmc_host *host) +{ + struct mmc_ios *ios = &host->ios; + + pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " + "width %u timing %u\n", + mmc_hostname(host), ios->clock, ios->bus_mode, + ios->power_mode, ios->chip_select, ios->vdd, + ios->bus_width, ios->timing); + + host->ops->set_ios(host, ios); +} + +static int mmc_select_card(struct mmc_card *card) +{ + int err; + struct mmc_command cmd; + + BUG_ON(!card->host->claimed); + + cmd.opcode = MMC_SELECT_CARD; + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(card->host, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; + + /* + * We can only change the bus width of SD cards when + * they are selected so we have to put the handling + * here. + * + * The card is in 1 bit mode by default so + * we only need to change if it supports the + * wider version. + */ + if (mmc_card_sd(card) && + (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) && + (card->host->caps & MMC_CAP_4_BIT_DATA)) { + + struct mmc_command cmd; + cmd.opcode = SD_APP_SET_BUS_WIDTH; + cmd.arg = SD_BUS_WIDTH_4; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + + err = mmc_wait_for_app_cmd(card->host, card->rca, + &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; + + card->host->ios.bus_width = MMC_BUS_WIDTH_4; + mmc_set_ios(card->host); + } + + return MMC_ERR_NONE; +} + + +static inline void mmc_delay(unsigned int ms) +{ + if (ms < 1000 / HZ) { + cond_resched(); + mdelay(ms); + } else { + msleep(ms); + } +} + +/* + * Mask off any voltages we don't support and select + * the lowest voltage + */ +static u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) +{ + int bit; + + ocr &= host->ocr_avail; + + bit = ffs(ocr); + if (bit) { + bit -= 1; + + ocr &= 3 << bit; + + host->ios.vdd = bit; + mmc_set_ios(host); + } else { + ocr = 0; + } + + return ocr; +} + +#define UNSTUFF_BITS(resp,start,size) \ + ({ \ + const int __size = size; \ + const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \ + const int __off = 3 - ((start) / 32); \ + const int __shft = (start) & 31; \ + u32 __res; \ + \ + __res = resp[__off] >> __shft; \ + if (__size + __shft > 32) \ + __res |= resp[__off-1] << ((32 - __shft) % 32); \ + __res & __mask; \ + }) + +/* + * Given the decoded CSD structure, decode the raw CID to our CID structure. 
+ */ +static void mmc_decode_cid(struct mmc_card *card) +{ + u32 *resp = card->raw_cid; + + memset(&card->cid, 0, sizeof(struct mmc_cid)); + + if (mmc_card_sd(card)) { + /* + * SD doesn't currently have a version field so we will + * have to assume we can parse this. + */ + card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); + card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); + card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); + card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); + card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); + card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); + card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); + card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4); + card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4); + card->cid.serial = UNSTUFF_BITS(resp, 24, 32); + card->cid.year = UNSTUFF_BITS(resp, 12, 8); + card->cid.month = UNSTUFF_BITS(resp, 8, 4); + + card->cid.year += 2000; /* SD cards year offset */ + } else { + /* + * The selection of the format here is based upon published + * specs from sandisk and from what people have reported. + */ + switch (card->csd.mmca_vsn) { + case 0: /* MMC v1.0 - v1.2 */ + case 1: /* MMC v1.4 */ + card->cid.manfid = UNSTUFF_BITS(resp, 104, 24); + card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); + card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); + card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); + card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); + card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); + card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); + card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); + card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); + card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); + card->cid.serial = UNSTUFF_BITS(resp, 16, 24); + card->cid.month = UNSTUFF_BITS(resp, 12, 4); + card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; + break; + + case 2: /* MMC v2.0 - v2.2 */ + case 3: /* MMC v3.1 - v3.3 */ + case 4: /* MMC v4 */ + card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); + card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); + card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); + card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); + card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); + card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); + card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); + card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); + card->cid.serial = UNSTUFF_BITS(resp, 16, 32); + card->cid.month = UNSTUFF_BITS(resp, 12, 4); + card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; + break; + + default: + printk("%s: card has unknown MMCA version %d\n", + mmc_hostname(card->host), card->csd.mmca_vsn); + mmc_card_set_bad(card); + break; + } + } +} + +/* + * Given a 128-bit response, decode to our card CSD structure. 
+ */ +static void mmc_decode_csd(struct mmc_card *card) +{ + struct mmc_csd *csd = &card->csd; + unsigned int e, m, csd_struct; + u32 *resp = card->raw_csd; + + if (mmc_card_sd(card)) { + csd_struct = UNSTUFF_BITS(resp, 126, 2); + + switch (csd_struct) { + case 0: + m = UNSTUFF_BITS(resp, 115, 4); + e = UNSTUFF_BITS(resp, 112, 3); + csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; + csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; + + m = UNSTUFF_BITS(resp, 99, 4); + e = UNSTUFF_BITS(resp, 96, 3); + csd->max_dtr = tran_exp[e] * tran_mant[m]; + csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); + + e = UNSTUFF_BITS(resp, 47, 3); + m = UNSTUFF_BITS(resp, 62, 12); + csd->capacity = (1 + m) << (e + 2); + + csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); + csd->read_partial = UNSTUFF_BITS(resp, 79, 1); + csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); + csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); + csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); + csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); + csd->write_partial = UNSTUFF_BITS(resp, 21, 1); + break; + case 1: + /* + * This is a block-addressed SDHC card. Most + * interesting fields are unused and have fixed + * values. To avoid getting tripped by buggy cards, + * we assume those fixed values ourselves. + */ + mmc_card_set_blockaddr(card); + + csd->tacc_ns = 0; /* Unused */ + csd->tacc_clks = 0; /* Unused */ + + m = UNSTUFF_BITS(resp, 99, 4); + e = UNSTUFF_BITS(resp, 96, 3); + csd->max_dtr = tran_exp[e] * tran_mant[m]; + csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); + + m = UNSTUFF_BITS(resp, 48, 22); + csd->capacity = (1 + m) << 10; + + csd->read_blkbits = 9; + csd->read_partial = 0; + csd->write_misalign = 0; + csd->read_misalign = 0; + csd->r2w_factor = 4; /* Unused */ + csd->write_blkbits = 9; + csd->write_partial = 0; + break; + default: + printk("%s: unrecognised CSD structure version %d\n", + mmc_hostname(card->host), csd_struct); + mmc_card_set_bad(card); + return; + } + } else { + /* + * We only understand CSD structure v1.1 and v1.2. + * v1.2 has extra information in bits 15, 11 and 10. + */ + csd_struct = UNSTUFF_BITS(resp, 126, 2); + if (csd_struct != 1 && csd_struct != 2) { + printk("%s: unrecognised CSD structure version %d\n", + mmc_hostname(card->host), csd_struct); + mmc_card_set_bad(card); + return; + } + + csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); + m = UNSTUFF_BITS(resp, 115, 4); + e = UNSTUFF_BITS(resp, 112, 3); + csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; + csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; + + m = UNSTUFF_BITS(resp, 99, 4); + e = UNSTUFF_BITS(resp, 96, 3); + csd->max_dtr = tran_exp[e] * tran_mant[m]; + csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); + + e = UNSTUFF_BITS(resp, 47, 3); + m = UNSTUFF_BITS(resp, 62, 12); + csd->capacity = (1 + m) << (e + 2); + + csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); + csd->read_partial = UNSTUFF_BITS(resp, 79, 1); + csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); + csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); + csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); + csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); + csd->write_partial = UNSTUFF_BITS(resp, 21, 1); + } +} + +/* + * Given a 64-bit response, decode to our card SCR structure. 
+ */ +static void mmc_decode_scr(struct mmc_card *card) +{ + struct sd_scr *scr = &card->scr; + unsigned int scr_struct; + u32 resp[4]; + + BUG_ON(!mmc_card_sd(card)); + + resp[3] = card->raw_scr[1]; + resp[2] = card->raw_scr[0]; + + scr_struct = UNSTUFF_BITS(resp, 60, 4); + if (scr_struct != 0) { + printk("%s: unrecognised SCR structure version %d\n", + mmc_hostname(card->host), scr_struct); + mmc_card_set_bad(card); + return; + } + + scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); + scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); +} + +/* + * Allocate a new MMC card + */ +static struct mmc_card * +mmc_alloc_card(struct mmc_host *host, u32 *raw_cid) +{ + struct mmc_card *card; + + card = kmalloc(sizeof(struct mmc_card), GFP_KERNEL); + if (!card) + return ERR_PTR(-ENOMEM); + + mmc_init_card(card, host); + memcpy(card->raw_cid, raw_cid, sizeof(card->raw_cid)); + + return card; +} + +/* + * Tell attached cards to go to IDLE state + */ +static void mmc_idle_cards(struct mmc_host *host) +{ + struct mmc_command cmd; + + host->ios.chip_select = MMC_CS_HIGH; + mmc_set_ios(host); + + mmc_delay(1); + + cmd.opcode = MMC_GO_IDLE_STATE; + cmd.arg = 0; + cmd.flags = MMC_RSP_NONE | MMC_CMD_BC; + + mmc_wait_for_cmd(host, &cmd, 0); + + mmc_delay(1); + + host->ios.chip_select = MMC_CS_DONTCARE; + mmc_set_ios(host); + + mmc_delay(1); +} + +/* + * Apply power to the MMC stack. This is a two-stage process. + * First, we enable power to the card without the clock running. + * We then wait a bit for the power to stabilise. Finally, + * enable the bus drivers and clock to the card. + * + * We must _NOT_ enable the clock prior to power stablising. + * + * If a host does all the power sequencing itself, ignore the + * initial MMC_POWER_UP stage. + */ +static void mmc_power_up(struct mmc_host *host) +{ + int bit = fls(host->ocr_avail) - 1; + + host->ios.vdd = bit; + host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; + host->ios.chip_select = MMC_CS_DONTCARE; + host->ios.power_mode = MMC_POWER_UP; + host->ios.bus_width = MMC_BUS_WIDTH_1; + host->ios.timing = MMC_TIMING_LEGACY; + mmc_set_ios(host); + + mmc_delay(1); + + host->ios.clock = host->f_min; + host->ios.power_mode = MMC_POWER_ON; + mmc_set_ios(host); + + mmc_delay(2); +} + +static void mmc_power_off(struct mmc_host *host) +{ + host->ios.clock = 0; + host->ios.vdd = 0; + host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; + host->ios.chip_select = MMC_CS_DONTCARE; + host->ios.power_mode = MMC_POWER_OFF; + host->ios.bus_width = MMC_BUS_WIDTH_1; + host->ios.timing = MMC_TIMING_LEGACY; + mmc_set_ios(host); +} + +static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) +{ + struct mmc_command cmd; + int i, err = 0; + + cmd.opcode = MMC_SEND_OP_COND; + cmd.arg = ocr; + cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; + + for (i = 100; i; i--) { + err = mmc_wait_for_cmd(host, &cmd, 0); + if (err != MMC_ERR_NONE) + break; + + if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0) + break; + + err = MMC_ERR_TIMEOUT; + + mmc_delay(10); + } + + if (rocr) + *rocr = cmd.resp[0]; + + return err; +} + +static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) +{ + struct mmc_command cmd; + int i, err = 0; + + cmd.opcode = SD_APP_OP_COND; + cmd.arg = ocr; + cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; + + for (i = 100; i; i--) { + err = mmc_wait_for_app_cmd(host, 0, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) + break; + + if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0) + break; + + err = MMC_ERR_TIMEOUT; + + mmc_delay(10); + } + + if (rocr) + *rocr = cmd.resp[0]; + + return err; +} + +static 
int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2) +{ + struct mmc_command cmd; + int err, sd2; + static const u8 test_pattern = 0xAA; + + /* + * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND + * before SD_APP_OP_COND. This command will harmlessly fail for + * SD 1.0 cards. + */ + cmd.opcode = SD_SEND_IF_COND; + cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern; + cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR; + + err = mmc_wait_for_cmd(host, &cmd, 0); + if (err == MMC_ERR_NONE) { + if ((cmd.resp[0] & 0xFF) == test_pattern) { + sd2 = 1; + } else { + sd2 = 0; + err = MMC_ERR_FAILED; + } + } else { + /* + * Treat errors as SD 1.0 card. + */ + sd2 = 0; + err = MMC_ERR_NONE; + } + if (rsd2) + *rsd2 = sd2; + return err; +} + +/* + * Discover the card by requesting its CID. + * + * Create a mmc_card entry for the discovered card, assigning + * it an RCA, and save the raw CID for decoding later. + */ +static void mmc_discover_card(struct mmc_host *host) +{ + unsigned int err; + + struct mmc_command cmd; + + BUG_ON(host->card); + + cmd.opcode = MMC_ALL_SEND_CID; + cmd.arg = 0; + cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; + + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err == MMC_ERR_TIMEOUT) { + err = MMC_ERR_NONE; + return; + } + if (err != MMC_ERR_NONE) { + printk(KERN_ERR "%s: error requesting CID: %d\n", + mmc_hostname(host), err); + return; + } + + host->card = mmc_alloc_card(host, cmd.resp); + if (IS_ERR(host->card)) { + err = PTR_ERR(host->card); + host->card = NULL; + return; + } + + if (host->mode == MMC_MODE_SD) { + host->card->type = MMC_TYPE_SD; + + cmd.opcode = SD_SEND_RELATIVE_ADDR; + cmd.arg = 0; + cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; + + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) + mmc_card_set_dead(host->card); + else { + host->card->rca = cmd.resp[0] >> 16; + + if (!host->ops->get_ro) { + printk(KERN_WARNING "%s: host does not " + "support reading read-only " + "switch. assuming write-enable.\n", + mmc_hostname(host)); + } else { + if (host->ops->get_ro(host)) + mmc_card_set_readonly(host->card); + } + } + } else { + host->card->type = MMC_TYPE_MMC; + host->card->rca = 1; + + cmd.opcode = MMC_SET_RELATIVE_ADDR; + cmd.arg = host->card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) + mmc_card_set_dead(host->card); + } +} + +static void mmc_read_csd(struct mmc_host *host) +{ + struct mmc_command cmd; + int err; + + if (!host->card) + return; + if (mmc_card_dead(host->card)) + return; + + cmd.opcode = MMC_SEND_CSD; + cmd.arg = host->card->rca << 16; + cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) { + mmc_card_set_dead(host->card); + return; + } + + memcpy(host->card->raw_csd, cmd.resp, sizeof(host->card->raw_csd)); + + mmc_decode_csd(host->card); + mmc_decode_cid(host->card); +} + +static void mmc_process_ext_csd(struct mmc_host *host) +{ + int err; + + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_data data; + + u8 *ext_csd; + struct scatterlist sg; + + if (!host->card) + return; + if (mmc_card_dead(host->card)) + return; + if (mmc_card_sd(host->card)) + return; + if (host->card->csd.mmca_vsn < CSD_SPEC_VER_4) + return; + + /* + * As the ext_csd is so large and mostly unused, we don't store the + * raw block in mmc_card. + */ + ext_csd = kmalloc(512, GFP_KERNEL); + if (!ext_csd) { + printk("%s: could not allocate a buffer to receive the ext_csd." 
+ "mmc v4 cards will be treated as v3.\n", + mmc_hostname(host)); + return; + } + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_SEND_EXT_CSD; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + memset(&data, 0, sizeof(struct mmc_data)); + + mmc_set_data_timeout(&data, host->card, 0); + + data.blksz = 512; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; + + memset(&mrq, 0, sizeof(struct mmc_request)); + + mrq.cmd = &cmd; + mrq.data = &data; + + sg_init_one(&sg, ext_csd, 512); + + mmc_wait_for_req(host, &mrq); + + if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { + if (host->card->csd.capacity == (4096 * 512)) { + printk(KERN_ERR "%s: unable to read EXT_CSD " + "on a possible high capacity card. " + "Card will be ignored.\n", + mmc_hostname(host)); + mmc_card_set_dead(host->card); + } else { + printk(KERN_WARNING "%s: unable to read " + "EXT_CSD, performance might " + "suffer.\n", + mmc_hostname(host)); + } + goto out; + } + + host->card->ext_csd.sectors = + ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | + ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | + ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | + ext_csd[EXT_CSD_SEC_CNT + 3] << 24; + if (host->card->ext_csd.sectors) + mmc_card_set_blockaddr(host->card); + + switch (ext_csd[EXT_CSD_CARD_TYPE]) { + case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: + host->card->ext_csd.hs_max_dtr = 52000000; + break; + case EXT_CSD_CARD_TYPE_26: + host->card->ext_csd.hs_max_dtr = 26000000; + break; + default: + /* MMC v4 spec says this cannot happen */ + printk("%s: card is mmc v4 but doesn't support " + "any high-speed modes.\n", + mmc_hostname(host)); + goto out; + } + + if (host->caps & MMC_CAP_MMC_HIGHSPEED) { + /* Activate highspeed support. */ + cmd.opcode = MMC_SWITCH; + cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | + (EXT_CSD_HS_TIMING << 16) | + (1 << 8) | + EXT_CSD_CMD_SET_NORMAL; + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; + + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) { + printk("%s: failed to switch card to mmc v4 " + "high-speed mode.\n", + mmc_hostname(host)); + goto out; + } + + mmc_card_set_highspeed(host->card); + + host->ios.timing = MMC_TIMING_MMC_HS; + mmc_set_ios(host); + } + + /* Check for host support for wide-bus modes. */ + if (host->caps & MMC_CAP_4_BIT_DATA) { + /* Activate 4-bit support. 
*/ + cmd.opcode = MMC_SWITCH; + cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | + (EXT_CSD_BUS_WIDTH << 16) | + (EXT_CSD_BUS_WIDTH_4 << 8) | + EXT_CSD_CMD_SET_NORMAL; + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; + + err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + if (err != MMC_ERR_NONE) { + printk("%s: failed to switch card to " + "mmc v4 4-bit bus mode.\n", + mmc_hostname(host)); + goto out; + } + + host->ios.bus_width = MMC_BUS_WIDTH_4; + mmc_set_ios(host); + } + +out: + kfree(ext_csd); +} + +static void mmc_read_scr(struct mmc_host *host) +{ + int err; + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_data data; + struct scatterlist sg; + + if (!host->card) + return; + if (mmc_card_dead(host->card)) + return; + if (!mmc_card_sd(host->card)) + return; + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_APP_CMD; + cmd.arg = host->card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(host, &cmd, 0); + if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) { + mmc_card_set_dead(host->card); + return; + } + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = SD_APP_SEND_SCR; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + memset(&data, 0, sizeof(struct mmc_data)); + + mmc_set_data_timeout(&data, host->card, 0); + + data.blksz = 1 << 3; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; + + memset(&mrq, 0, sizeof(struct mmc_request)); + + mrq.cmd = &cmd; + mrq.data = &data; + + sg_init_one(&sg, (u8*)host->card->raw_scr, 8); + + mmc_wait_for_req(host, &mrq); + + if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { + mmc_card_set_dead(host->card); + return; + } + + host->card->raw_scr[0] = ntohl(host->card->raw_scr[0]); + host->card->raw_scr[1] = ntohl(host->card->raw_scr[1]); + + mmc_decode_scr(host->card); +} + +static void mmc_read_switch_caps(struct mmc_host *host) +{ + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_data data; + unsigned char *status; + struct scatterlist sg; + + if (!(host->caps & MMC_CAP_SD_HIGHSPEED)) + return; + + if (!host->card) + return; + if (mmc_card_dead(host->card)) + return; + if (!mmc_card_sd(host->card)) + return; + if (host->card->scr.sda_vsn < SCR_SPEC_VER_1) + return; + + status = kmalloc(64, GFP_KERNEL); + if (!status) { + printk(KERN_WARNING "%s: Unable to allocate buffer for " + "reading switch capabilities.\n", + mmc_hostname(host)); + return; + } + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = SD_SWITCH; + cmd.arg = 0x00FFFFF1; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + memset(&data, 0, sizeof(struct mmc_data)); + + mmc_set_data_timeout(&data, host->card, 0); + + data.blksz = 64; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; + + memset(&mrq, 0, sizeof(struct mmc_request)); + + mrq.cmd = &cmd; + mrq.data = &data; + + sg_init_one(&sg, status, 64); + + mmc_wait_for_req(host, &mrq); + + if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { + printk("%s: unable to read switch capabilities, " + "performance might suffer.\n", + mmc_hostname(host)); + goto out; + } + + if (status[13] & 0x02) + host->card->sw_caps.hs_max_dtr = 50000000; + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = SD_SWITCH; + cmd.arg = 0x80FFFFF1; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + memset(&data, 0, sizeof(struct mmc_data)); + + mmc_set_data_timeout(&data, host->card, 0); + + data.blksz = 64; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = 
&sg; + data.sg_len = 1; + + memset(&mrq, 0, sizeof(struct mmc_request)); + + mrq.cmd = &cmd; + mrq.data = &data; + + sg_init_one(&sg, status, 64); + + mmc_wait_for_req(host, &mrq); + + if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE || + (status[16] & 0xF) != 1) { + printk(KERN_WARNING "%s: Problem switching card " + "into high-speed mode!\n", + mmc_hostname(host)); + goto out; + } + + mmc_card_set_highspeed(host->card); + + host->ios.timing = MMC_TIMING_SD_HS; + mmc_set_ios(host); + +out: + kfree(status); +} + +static unsigned int mmc_calculate_clock(struct mmc_host *host) +{ + unsigned int max_dtr = host->f_max; + + if (host->card && !mmc_card_dead(host->card)) { + if (mmc_card_highspeed(host->card) && mmc_card_sd(host->card)) { + if (max_dtr > host->card->sw_caps.hs_max_dtr) + max_dtr = host->card->sw_caps.hs_max_dtr; + } else if (mmc_card_highspeed(host->card) && !mmc_card_sd(host->card)) { + if (max_dtr > host->card->ext_csd.hs_max_dtr) + max_dtr = host->card->ext_csd.hs_max_dtr; + } else if (max_dtr > host->card->csd.max_dtr) { + max_dtr = host->card->csd.max_dtr; + } + } + + pr_debug("%s: selected %d.%03dMHz transfer rate\n", + mmc_hostname(host), + max_dtr / 1000000, (max_dtr / 1000) % 1000); + + return max_dtr; +} + +/* + * Check whether cards we already know about are still present. + * We do this by requesting status, and checking whether a card + * responds. + * + * A request for status does not cause a state change in data + * transfer mode. + */ +static void mmc_check_card(struct mmc_card *card) +{ + struct mmc_command cmd; + int err; + + BUG_ON(!card); + + cmd.opcode = MMC_SEND_STATUS; + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(card->host, &cmd, CMD_RETRIES); + if (err == MMC_ERR_NONE) + return; + + mmc_card_set_dead(card); +} + +static void mmc_setup(struct mmc_host *host) +{ + int err; + u32 ocr; + + host->mode = MMC_MODE_SD; + + mmc_power_up(host); + mmc_idle_cards(host); + + err = mmc_send_if_cond(host, host->ocr_avail, NULL); + if (err != MMC_ERR_NONE) { + return; + } + err = mmc_send_app_op_cond(host, 0, &ocr); + + /* + * If we fail to detect any SD cards then try + * searching for MMC cards. + */ + if (err != MMC_ERR_NONE) { + host->mode = MMC_MODE_MMC; + + err = mmc_send_op_cond(host, 0, &ocr); + if (err != MMC_ERR_NONE) + return; + } + + host->ocr = mmc_select_voltage(host, ocr); + + if (host->ocr == 0) + return; + + /* + * Since we're changing the OCR value, we seem to + * need to tell some cards to go back to the idle + * state. We wait 1ms to give cards time to + * respond. + */ + mmc_idle_cards(host); + + /* + * Send the selected OCR multiple times... until the cards + * all get the idea that they should be ready for CMD2. + * (My SanDisk card seems to need this.) + */ + if (host->mode == MMC_MODE_SD) { + int err, sd2; + err = mmc_send_if_cond(host, host->ocr, &sd2); + if (err == MMC_ERR_NONE) { + /* + * If SD_SEND_IF_COND indicates an SD 2.0 + * compliant card and we should set bit 30 + * of the ocr to indicate that we can handle + * block-addressed SDHC cards. + */ + mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL); + } + } else { + /* The extra bit indicates that we support high capacity */ + mmc_send_op_cond(host, host->ocr | (1 << 30), NULL); + } + + mmc_discover_card(host); + + /* + * Ok, now switch to push-pull mode. 
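mmc_calculate_clock() above boils down to clamping the host's maximum clock (f_max) to whatever limit the card advertises: hs_max_dtr from the EXT_CSD or SD switch status when high-speed mode is active, otherwise TRAN_SPEED from the CSD. A user-space sketch of that selection, with invented host and card limits:

#include <stdio.h>

/*
 * Invented limits for the example; in the kernel they come from the host
 * driver (f_max) and from the decoded CSD/EXT_CSD/switch status.
 */
struct card_limits {
        unsigned int highspeed;         /* non-zero once high-speed mode is on */
        unsigned int hs_max_dtr;        /* e.g. 50000000 for high-speed SD */
        unsigned int csd_max_dtr;       /* TRAN_SPEED from the CSD, e.g. 25000000 */
};

static unsigned int pick_clock(unsigned int host_f_max,
                               const struct card_limits *c)
{
        unsigned int max_dtr = host_f_max;
        unsigned int card_max = c->highspeed ? c->hs_max_dtr : c->csd_max_dtr;

        if (max_dtr > card_max)
                max_dtr = card_max;
        return max_dtr;
}

int main(void)
{
        struct card_limits sd = { 1, 50000000, 25000000 };
        unsigned int clk = pick_clock(48000000, &sd);   /* host tops out at 48 MHz */

        /* Same MHz formatting as the pr_debug() in mmc_calculate_clock(). */
        printf("selected %d.%03dMHz transfer rate\n",
               clk / 1000000, (clk / 1000) % 1000);
        return 0;
}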
+ */ + host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; + mmc_set_ios(host); + + mmc_read_csd(host); + + if (host->card && !mmc_card_dead(host->card)) { + err = mmc_select_card(host->card); + if (err != MMC_ERR_NONE) + mmc_card_set_dead(host->card); + } + + if (host->mode == MMC_MODE_SD) { + mmc_read_scr(host); + mmc_read_switch_caps(host); + } else + mmc_process_ext_csd(host); +} + + +/** + * mmc_detect_change - process change of state on a MMC socket + * @host: host which changed state. + * @delay: optional delay to wait before detection (jiffies) + * + * All we know is that card(s) have been inserted or removed + * from the socket(s). We don't know which socket or cards. + */ +void mmc_detect_change(struct mmc_host *host, unsigned long delay) +{ +#ifdef CONFIG_MMC_DEBUG + mmc_claim_host(host); + BUG_ON(host->removed); + mmc_release_host(host); +#endif + + mmc_schedule_delayed_work(&host->detect, delay); +} + +EXPORT_SYMBOL(mmc_detect_change); + + +static void mmc_rescan(struct work_struct *work) +{ + struct mmc_host *host = + container_of(work, struct mmc_host, detect.work); + + mmc_claim_host(host); + + /* + * Check for removed card and newly inserted ones. We check for + * removed cards first so we can intelligently re-select the VDD. + */ + if (host->card) { + mmc_check_card(host->card); + + mmc_release_host(host); + + if (mmc_card_dead(host->card)) { + mmc_remove_card(host->card); + host->card = NULL; + } + + goto out; + } + + mmc_setup(host); + + if (host->card && !mmc_card_dead(host->card)) { + /* + * (Re-)calculate the fastest clock rate which the + * attached cards and the host support. + */ + host->ios.clock = mmc_calculate_clock(host); + mmc_set_ios(host); + } + + mmc_release_host(host); + + /* + * If this is a new and good card, register it. + */ + if (host->card && !mmc_card_dead(host->card)) { + if (mmc_register_card(host->card)) + mmc_card_set_dead(host->card); + } + + /* + * If this card is dead, destroy it. + */ + if (host->card && mmc_card_dead(host->card)) { + mmc_remove_card(host->card); + host->card = NULL; + } + +out: + /* + * If we discover that there are no cards on the + * bus, turn off the clock and power down. + */ + if (!host->card) + mmc_power_off(host); +} + + +/** + * mmc_alloc_host - initialise the per-host structure. + * @extra: sizeof private data structure + * @dev: pointer to host device model structure + * + * Initialise the per-host structure. + */ +struct mmc_host *mmc_alloc_host(int extra, struct device *dev) +{ + struct mmc_host *host; + + host = mmc_alloc_host_sysfs(extra, dev); + if (host) { + spin_lock_init(&host->lock); + init_waitqueue_head(&host->wq); + INIT_DELAYED_WORK(&host->detect, mmc_rescan); + + /* + * By default, hosts do not support SGIO or large requests. + * They have to set these according to their abilities. + */ + host->max_hw_segs = 1; + host->max_phys_segs = 1; + host->max_seg_size = PAGE_CACHE_SIZE; + + host->max_req_size = PAGE_CACHE_SIZE; + host->max_blk_size = 512; + host->max_blk_count = PAGE_CACHE_SIZE / 512; + } + + return host; +} + +EXPORT_SYMBOL(mmc_alloc_host); + +/** + * mmc_add_host - initialise host hardware + * @host: mmc host + */ +int mmc_add_host(struct mmc_host *host) +{ + int ret; + + ret = mmc_add_host_sysfs(host); + if (ret == 0) { + mmc_power_off(host); + mmc_detect_change(host, 0); + } + + return ret; +} + +EXPORT_SYMBOL(mmc_add_host); + +/** + * mmc_remove_host - remove host hardware + * @host: mmc host + * + * Unregister and remove all cards associated with this host, + * and power down the MMC bus. 
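Taken together, mmc_alloc_host(), mmc_add_host() and the mmc_remove_host()/mmc_free_host() pair just below give a host controller driver a fixed bracket for its probe and remove paths. The following is a heavily trimmed, hypothetical sketch of that pairing; my_host, my_host_ops, the platform-device plumbing, the header paths and OCR macros, and the omitted error handling are all assumptions for illustration, not part of this patch.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>

struct my_host {
        void __iomem *base;             /* controller-specific state lives here */
};

static const struct mmc_host_ops my_host_ops;   /* request/set_ios/get_ro callbacks
                                                 * would be filled in for real */

static int my_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        struct my_host *priv;

        mmc = mmc_alloc_host(sizeof(struct my_host), &pdev->dev);
        if (!mmc)
                return -ENOMEM;

        priv = mmc_priv(mmc);           /* the 'extra' area requested above */
        priv->base = NULL;              /* map controller registers here */

        /* Capabilities the core consults during mmc_rescan(). */
        mmc->ops = &my_host_ops;
        mmc->f_min = 400000;
        mmc->f_max = 25000000;
        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        platform_set_drvdata(pdev, mmc);
        return mmc_add_host(mmc);       /* powers off, then schedules detection */
}

static int my_remove(struct platform_device *pdev)
{
        struct mmc_host *mmc = platform_get_drvdata(pdev);

        mmc_remove_host(mmc);           /* drops any card, powers the bus down */
        mmc_free_host(mmc);
        return 0;
}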
+ */ +void mmc_remove_host(struct mmc_host *host) +{ +#ifdef CONFIG_MMC_DEBUG + mmc_claim_host(host); + host->removed = 1; + mmc_release_host(host); +#endif + + mmc_flush_scheduled_work(); + + if (host->card) { + mmc_remove_card(host->card); + host->card = NULL; + } + + mmc_power_off(host); + mmc_remove_host_sysfs(host); +} + +EXPORT_SYMBOL(mmc_remove_host); + +/** + * mmc_free_host - free the host structure + * @host: mmc host + * + * Free the host once all references to it have been dropped. + */ +void mmc_free_host(struct mmc_host *host) +{ + mmc_free_host_sysfs(host); +} + +EXPORT_SYMBOL(mmc_free_host); + +#ifdef CONFIG_PM + +/** + * mmc_suspend_host - suspend a host + * @host: mmc host + * @state: suspend mode (PM_SUSPEND_xxx) + */ +int mmc_suspend_host(struct mmc_host *host, pm_message_t state) +{ + mmc_flush_scheduled_work(); + + if (host->card) { + mmc_remove_card(host->card); + host->card = NULL; + } + + mmc_power_off(host); + + return 0; +} + +EXPORT_SYMBOL(mmc_suspend_host); + +/** + * mmc_resume_host - resume a previously suspended host + * @host: mmc host + */ +int mmc_resume_host(struct mmc_host *host) +{ + mmc_rescan(&host->detect.work); + + return 0; +} + +EXPORT_SYMBOL(mmc_resume_host); + +#endif + +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h new file mode 100644 index 000000000000..076cb2f49a0f --- /dev/null +++ b/drivers/mmc/core/core.h @@ -0,0 +1,25 @@ +/* + * linux/drivers/mmc/core/core.h + * + * Copyright (C) 2003 Russell King, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _MMC_CORE_H +#define _MMC_CORE_H +/* core-internal functions */ +void mmc_init_card(struct mmc_card *card, struct mmc_host *host); +int mmc_register_card(struct mmc_card *card); +void mmc_remove_card(struct mmc_card *card); + +struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev); +int mmc_add_host_sysfs(struct mmc_host *host); +void mmc_remove_host_sysfs(struct mmc_host *host); +void mmc_free_host_sysfs(struct mmc_host *host); + +int mmc_schedule_work(struct work_struct *work); +int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay); +void mmc_flush_scheduled_work(void); +#endif diff --git a/drivers/mmc/core/sysfs.c b/drivers/mmc/core/sysfs.c new file mode 100644 index 000000000000..bf9a5f8beb86 --- /dev/null +++ b/drivers/mmc/core/sysfs.c @@ -0,0 +1,361 @@ +/* + * linux/drivers/mmc/core/sysfs.c + * + * Copyright (C) 2003 Russell King, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * MMC sysfs/driver model support. + */ +#include +#include +#include +#include +#include + +#include +#include + +#include "core.h" + +#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev) +#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) +#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev) + +#define MMC_ATTR(name, fmt, args...) 
\ +static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \ +{ \ + struct mmc_card *card = dev_to_mmc_card(dev); \ + return sprintf(buf, fmt, args); \ +} + +MMC_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1], + card->raw_cid[2], card->raw_cid[3]); +MMC_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], + card->raw_csd[2], card->raw_csd[3]); +MMC_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]); +MMC_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); +MMC_ATTR(fwrev, "0x%x\n", card->cid.fwrev); +MMC_ATTR(hwrev, "0x%x\n", card->cid.hwrev); +MMC_ATTR(manfid, "0x%06x\n", card->cid.manfid); +MMC_ATTR(name, "%s\n", card->cid.prod_name); +MMC_ATTR(oemid, "0x%04x\n", card->cid.oemid); +MMC_ATTR(serial, "0x%08x\n", card->cid.serial); + +#define MMC_ATTR_RO(name) __ATTR(name, S_IRUGO, mmc_##name##_show, NULL) + +static struct device_attribute mmc_dev_attrs[] = { + MMC_ATTR_RO(cid), + MMC_ATTR_RO(csd), + MMC_ATTR_RO(date), + MMC_ATTR_RO(fwrev), + MMC_ATTR_RO(hwrev), + MMC_ATTR_RO(manfid), + MMC_ATTR_RO(name), + MMC_ATTR_RO(oemid), + MMC_ATTR_RO(serial), + __ATTR_NULL +}; + +static struct device_attribute mmc_dev_attr_scr = MMC_ATTR_RO(scr); + + +static void mmc_release_card(struct device *dev) +{ + struct mmc_card *card = dev_to_mmc_card(dev); + + kfree(card); +} + +/* + * This currently matches any MMC driver to any MMC card - drivers + * themselves make the decision whether to drive this card in their + * probe method. However, we force "bad" cards to fail. + */ +static int mmc_bus_match(struct device *dev, struct device_driver *drv) +{ + struct mmc_card *card = dev_to_mmc_card(dev); + return !mmc_card_bad(card); +} + +static int +mmc_bus_uevent(struct device *dev, char **envp, int num_envp, char *buf, + int buf_size) +{ + struct mmc_card *card = dev_to_mmc_card(dev); + char ccc[13]; + int retval = 0, i = 0, length = 0; + +#define add_env(fmt,val) do { \ + retval = add_uevent_var(envp, num_envp, &i, \ + buf, buf_size, &length, \ + fmt, val); \ + if (retval) \ + return retval; \ +} while (0); + + for (i = 0; i < 12; i++) + ccc[i] = card->csd.cmdclass & (1 << i) ? 
'1' : '0'; + ccc[12] = '\0'; + + add_env("MMC_CCC=%s", ccc); + add_env("MMC_MANFID=%06x", card->cid.manfid); + add_env("MMC_NAME=%s", mmc_card_name(card)); + add_env("MMC_OEMID=%04x", card->cid.oemid); +#undef add_env + envp[i] = NULL; + + return 0; +} + +static int mmc_bus_suspend(struct device *dev, pm_message_t state) +{ + struct mmc_driver *drv = to_mmc_driver(dev->driver); + struct mmc_card *card = dev_to_mmc_card(dev); + int ret = 0; + + if (dev->driver && drv->suspend) + ret = drv->suspend(card, state); + return ret; +} + +static int mmc_bus_resume(struct device *dev) +{ + struct mmc_driver *drv = to_mmc_driver(dev->driver); + struct mmc_card *card = dev_to_mmc_card(dev); + int ret = 0; + + if (dev->driver && drv->resume) + ret = drv->resume(card); + return ret; +} + +static int mmc_bus_probe(struct device *dev) +{ + struct mmc_driver *drv = to_mmc_driver(dev->driver); + struct mmc_card *card = dev_to_mmc_card(dev); + + return drv->probe(card); +} + +static int mmc_bus_remove(struct device *dev) +{ + struct mmc_driver *drv = to_mmc_driver(dev->driver); + struct mmc_card *card = dev_to_mmc_card(dev); + + drv->remove(card); + + return 0; +} + +static struct bus_type mmc_bus_type = { + .name = "mmc", + .dev_attrs = mmc_dev_attrs, + .match = mmc_bus_match, + .uevent = mmc_bus_uevent, + .probe = mmc_bus_probe, + .remove = mmc_bus_remove, + .suspend = mmc_bus_suspend, + .resume = mmc_bus_resume, +}; + +/** + * mmc_register_driver - register a media driver + * @drv: MMC media driver + */ +int mmc_register_driver(struct mmc_driver *drv) +{ + drv->drv.bus = &mmc_bus_type; + return driver_register(&drv->drv); +} + +EXPORT_SYMBOL(mmc_register_driver); + +/** + * mmc_unregister_driver - unregister a media driver + * @drv: MMC media driver + */ +void mmc_unregister_driver(struct mmc_driver *drv) +{ + drv->drv.bus = &mmc_bus_type; + driver_unregister(&drv->drv); +} + +EXPORT_SYMBOL(mmc_unregister_driver); + + +/* + * Internal function. Initialise a MMC card structure. + */ +void mmc_init_card(struct mmc_card *card, struct mmc_host *host) +{ + memset(card, 0, sizeof(struct mmc_card)); + card->host = host; + device_initialize(&card->dev); + card->dev.parent = mmc_classdev(host); + card->dev.bus = &mmc_bus_type; + card->dev.release = mmc_release_card; +} + +/* + * Internal function. Register a new MMC card with the driver model. + */ +int mmc_register_card(struct mmc_card *card) +{ + int ret; + + snprintf(card->dev.bus_id, sizeof(card->dev.bus_id), + "%s:%04x", mmc_hostname(card->host), card->rca); + + ret = device_add(&card->dev); + if (ret == 0) { + if (mmc_card_sd(card)) { + ret = device_create_file(&card->dev, &mmc_dev_attr_scr); + if (ret) + device_del(&card->dev); + } + } + if (ret == 0) + mmc_card_set_present(card); + return ret; +} + +/* + * Internal function. Unregister a new MMC card with the + * driver model, and (eventually) free it. + */ +void mmc_remove_card(struct mmc_card *card) +{ + if (mmc_card_present(card)) { + if (mmc_card_sd(card)) + device_remove_file(&card->dev, &mmc_dev_attr_scr); + + device_del(&card->dev); + } + + put_device(&card->dev); +} + + +static void mmc_host_classdev_release(struct device *dev) +{ + struct mmc_host *host = cls_dev_to_mmc_host(dev); + kfree(host); +} + +static struct class mmc_host_class = { + .name = "mmc_host", + .dev_release = mmc_host_classdev_release, +}; + +static DEFINE_IDR(mmc_host_idr); +static DEFINE_SPINLOCK(mmc_host_lock); + +/* + * Internal function. Allocate a new MMC host. 
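The mmc_bus_type above is what card (media) drivers such as mmc_block bind against: mmc_register_driver() hooks a struct mmc_driver into the bus, and the probe/remove shims hand the bound driver a struct mmc_card. A minimal, hypothetical media driver registration might look like the following; the mmc_driver field layout is assumed from the MMC headers of this period, and my_card_probe/my_card_remove are placeholders.

#include <linux/module.h>
#include <linux/mmc/card.h>

static int my_card_probe(struct mmc_card *card)
{
        /* Called once the core has registered a usable card on the bus. */
        dev_info(&card->dev, "bound to %s\n", mmc_card_name(card));
        return 0;
}

static void my_card_remove(struct mmc_card *card)
{
        dev_info(&card->dev, "card going away\n");
}

static struct mmc_driver my_mmc_driver = {
        .drv    = {
                .name = "my_mmc_driver",
        },
        .probe  = my_card_probe,
        .remove = my_card_remove,
};

static int __init my_init(void)
{
        return mmc_register_driver(&my_mmc_driver);
}

static void __exit my_exit(void)
{
        mmc_unregister_driver(&my_mmc_driver);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");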
+ */ +struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev) +{ + struct mmc_host *host; + + host = kmalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL); + if (host) { + memset(host, 0, sizeof(struct mmc_host) + extra); + + host->parent = dev; + host->class_dev.parent = dev; + host->class_dev.class = &mmc_host_class; + device_initialize(&host->class_dev); + } + + return host; +} + +/* + * Internal function. Register a new MMC host with the MMC class. + */ +int mmc_add_host_sysfs(struct mmc_host *host) +{ + int err; + + if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL)) + return -ENOMEM; + + spin_lock(&mmc_host_lock); + err = idr_get_new(&mmc_host_idr, host, &host->index); + spin_unlock(&mmc_host_lock); + if (err) + return err; + + snprintf(host->class_dev.bus_id, BUS_ID_SIZE, + "mmc%d", host->index); + + return device_add(&host->class_dev); +} + +/* + * Internal function. Unregister a MMC host with the MMC class. + */ +void mmc_remove_host_sysfs(struct mmc_host *host) +{ + device_del(&host->class_dev); + + spin_lock(&mmc_host_lock); + idr_remove(&mmc_host_idr, host->index); + spin_unlock(&mmc_host_lock); +} + +/* + * Internal function. Free a MMC host. + */ +void mmc_free_host_sysfs(struct mmc_host *host) +{ + put_device(&host->class_dev); +} + +static struct workqueue_struct *workqueue; + +/* + * Internal function. Schedule delayed work in the MMC work queue. + */ +int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) +{ + return queue_delayed_work(workqueue, work, delay); +} + +/* + * Internal function. Flush all scheduled work from the MMC work queue. + */ +void mmc_flush_scheduled_work(void) +{ + flush_workqueue(workqueue); +} + +static int __init mmc_init(void) +{ + int ret; + + workqueue = create_singlethread_workqueue("kmmcd"); + if (!workqueue) + return -ENOMEM; + + ret = bus_register(&mmc_bus_type); + if (ret == 0) { + ret = class_register(&mmc_host_class); + if (ret) + bus_unregister(&mmc_bus_type); + } + return ret; +} + +static void __exit mmc_exit(void) +{ + class_unregister(&mmc_host_class); + bus_unregister(&mmc_bus_type); + destroy_workqueue(workqueue); +} + +module_init(mmc_init); +module_exit(mmc_exit); diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c deleted file mode 100644 index 3f50b8882c89..000000000000 --- a/drivers/mmc/mmc.c +++ /dev/null @@ -1,1638 +0,0 @@ -/* - * linux/drivers/mmc/mmc.c - * - * Copyright (C) 2003-2004 Russell King, All Rights Reserved. - * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. - * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. - * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "mmc.h" - -#define CMD_RETRIES 3 - -/* - * OCR Bit positions to 10s of Vdd mV. 
- */ -static const unsigned short mmc_ocr_bit_to_vdd[] = { - 150, 155, 160, 165, 170, 180, 190, 200, - 210, 220, 230, 240, 250, 260, 270, 280, - 290, 300, 310, 320, 330, 340, 350, 360 -}; - -static const unsigned int tran_exp[] = { - 10000, 100000, 1000000, 10000000, - 0, 0, 0, 0 -}; - -static const unsigned char tran_mant[] = { - 0, 10, 12, 13, 15, 20, 25, 30, - 35, 40, 45, 50, 55, 60, 70, 80, -}; - -static const unsigned int tacc_exp[] = { - 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, -}; - -static const unsigned int tacc_mant[] = { - 0, 10, 12, 13, 15, 20, 25, 30, - 35, 40, 45, 50, 55, 60, 70, 80, -}; - - -/** - * mmc_request_done - finish processing an MMC request - * @host: MMC host which completed request - * @mrq: MMC request which request - * - * MMC drivers should call this function when they have completed - * their processing of a request. - */ -void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) -{ - struct mmc_command *cmd = mrq->cmd; - int err = cmd->error; - - pr_debug("%s: req done (CMD%u): %d/%d/%d: %08x %08x %08x %08x\n", - mmc_hostname(host), cmd->opcode, err, - mrq->data ? mrq->data->error : 0, - mrq->stop ? mrq->stop->error : 0, - cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); - - if (err && cmd->retries) { - cmd->retries--; - cmd->error = 0; - host->ops->request(host, mrq); - } else if (mrq->done) { - mrq->done(mrq); - } -} - -EXPORT_SYMBOL(mmc_request_done); - -/** - * mmc_start_request - start a command on a host - * @host: MMC host to start command on - * @mrq: MMC request to start - * - * Queue a command on the specified host. We expect the - * caller to be holding the host lock with interrupts disabled. - */ -void -mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) -{ -#ifdef CONFIG_MMC_DEBUG - unsigned int i, sz; -#endif - - pr_debug("%s: starting CMD%u arg %08x flags %08x\n", - mmc_hostname(host), mrq->cmd->opcode, - mrq->cmd->arg, mrq->cmd->flags); - - WARN_ON(!host->claimed); - - mrq->cmd->error = 0; - mrq->cmd->mrq = mrq; - if (mrq->data) { - BUG_ON(mrq->data->blksz > host->max_blk_size); - BUG_ON(mrq->data->blocks > host->max_blk_count); - BUG_ON(mrq->data->blocks * mrq->data->blksz > - host->max_req_size); - -#ifdef CONFIG_MMC_DEBUG - sz = 0; - for (i = 0;i < mrq->data->sg_len;i++) - sz += mrq->data->sg[i].length; - BUG_ON(sz != mrq->data->blocks * mrq->data->blksz); -#endif - - mrq->cmd->data = mrq->data; - mrq->data->error = 0; - mrq->data->mrq = mrq; - if (mrq->stop) { - mrq->data->stop = mrq->stop; - mrq->stop->error = 0; - mrq->stop->mrq = mrq; - } - } - host->ops->request(host, mrq); -} - -EXPORT_SYMBOL(mmc_start_request); - -static void mmc_wait_done(struct mmc_request *mrq) -{ - complete(mrq->done_data); -} - -int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) -{ - DECLARE_COMPLETION_ONSTACK(complete); - - mrq->done_data = &complete; - mrq->done = mmc_wait_done; - - mmc_start_request(host, mrq); - - wait_for_completion(&complete); - - return 0; -} - -EXPORT_SYMBOL(mmc_wait_for_req); - -/** - * mmc_wait_for_cmd - start a command and wait for completion - * @host: MMC host to start command - * @cmd: MMC command to start - * @retries: maximum number of retries - * - * Start a new MMC command for a host, and wait for the command - * to complete. Return any error that occurred while the command - * was executing. Do not attempt to parse the response. 
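The calling convention for mmc_wait_for_cmd(), unchanged by this move, is always the same three-step fill-in: opcode, argument, response/command-type flags, then wait. Below is a trimmed sketch of issuing SEND_STATUS (CMD13) to an already-discovered card, mirroring mmc_check_card() elsewhere in this file; card is assumed to be a valid struct mmc_card *, the host is assumed to be claimed, and CMD_RETRIES is the file-local retry count (3).

static int my_check_status(struct mmc_card *card)
{
        struct mmc_command cmd;
        int err;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SEND_STATUS;           /* CMD13 */
        cmd.arg    = card->rca << 16;           /* addressed (AC) commands carry the RCA */
        cmd.flags  = MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, CMD_RETRIES);
        if (err == MMC_ERR_NONE)
                pr_debug("card status 0x%08x\n", cmd.resp[0]);

        return err;
}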
- */ -int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) -{ - struct mmc_request mrq; - - BUG_ON(!host->claimed); - - memset(&mrq, 0, sizeof(struct mmc_request)); - - memset(cmd->resp, 0, sizeof(cmd->resp)); - cmd->retries = retries; - - mrq.cmd = cmd; - cmd->data = NULL; - - mmc_wait_for_req(host, &mrq); - - return cmd->error; -} - -EXPORT_SYMBOL(mmc_wait_for_cmd); - -/** - * mmc_wait_for_app_cmd - start an application command and wait for - completion - * @host: MMC host to start command - * @rca: RCA to send MMC_APP_CMD to - * @cmd: MMC command to start - * @retries: maximum number of retries - * - * Sends a MMC_APP_CMD, checks the card response, sends the command - * in the parameter and waits for it to complete. Return any error - * that occurred while the command was executing. Do not attempt to - * parse the response. - */ -int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca, - struct mmc_command *cmd, int retries) -{ - struct mmc_request mrq; - struct mmc_command appcmd; - - int i, err; - - BUG_ON(!host->claimed); - BUG_ON(retries < 0); - - err = MMC_ERR_INVALID; - - /* - * We have to resend MMC_APP_CMD for each attempt so - * we cannot use the retries field in mmc_command. - */ - for (i = 0;i <= retries;i++) { - memset(&mrq, 0, sizeof(struct mmc_request)); - - appcmd.opcode = MMC_APP_CMD; - appcmd.arg = rca << 16; - appcmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - appcmd.retries = 0; - memset(appcmd.resp, 0, sizeof(appcmd.resp)); - appcmd.data = NULL; - - mrq.cmd = &appcmd; - appcmd.data = NULL; - - mmc_wait_for_req(host, &mrq); - - if (appcmd.error) { - err = appcmd.error; - continue; - } - - /* Check that card supported application commands */ - if (!(appcmd.resp[0] & R1_APP_CMD)) - return MMC_ERR_FAILED; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - memset(cmd->resp, 0, sizeof(cmd->resp)); - cmd->retries = 0; - - mrq.cmd = cmd; - cmd->data = NULL; - - mmc_wait_for_req(host, &mrq); - - err = cmd->error; - if (cmd->error == MMC_ERR_NONE) - break; - } - - return err; -} - -EXPORT_SYMBOL(mmc_wait_for_app_cmd); - -/** - * mmc_set_data_timeout - set the timeout for a data command - * @data: data phase for command - * @card: the MMC card associated with the data transfer - * @write: flag to differentiate reads from writes - */ -void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card, - int write) -{ - unsigned int mult; - - /* - * SD cards use a 100 multiplier rather than 10 - */ - mult = mmc_card_sd(card) ? 100 : 10; - - /* - * Scale up the multiplier (and therefore the timeout) by - * the r2w factor for writes. - */ - if (write) - mult <<= card->csd.r2w_factor; - - data->timeout_ns = card->csd.tacc_ns * mult; - data->timeout_clks = card->csd.tacc_clks * mult; - - /* - * SD cards also have an upper limit on the timeout. - */ - if (mmc_card_sd(card)) { - unsigned int timeout_us, limit_us; - - timeout_us = data->timeout_ns / 1000; - timeout_us += data->timeout_clks * 1000 / - (card->host->ios.clock / 1000); - - if (write) - limit_us = 250000; - else - limit_us = 100000; - - /* - * SDHC cards always use these fixed values. - */ - if (timeout_us > limit_us || mmc_card_blockaddr(card)) { - data->timeout_ns = limit_us * 1000; - data->timeout_clks = 0; - } - } -} -EXPORT_SYMBOL(mmc_set_data_timeout); - -/** - * __mmc_claim_host - exclusively claim a host - * @host: mmc host to claim - * @card: mmc card to claim host for - * - * Claim a host for a set of operations. 
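This claim/release pair is the locking contract for everything in this file: every command sequence must run with the host claimed, which is why mmc_start_request() warns when host->claimed is unset. A minimal sketch of the bracket follows, with my_send_commands() standing in for whatever requests the caller wants to issue without interleaving from other claimants.

static void my_do_io(struct mmc_host *host)
{
        mmc_claim_host(host);           /* sleeps until we own the host */

        my_send_commands(host);         /* placeholder: issue mmc_wait_for_cmd() /
                                         * mmc_wait_for_req() calls here */

        mmc_release_host(host);         /* wakes the next waiter on host->wq */
}

The open-coded wait queue (host->wq plus the claimed flag) rather than a plain mutex lets the owner sleep across an arbitrary number of requests while later claimants queue up behind it.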
If a valid card - * is passed and this wasn't the last card selected, select - * the card before returning. - * - * Note: you should use mmc_card_claim_host or mmc_claim_host. - */ -void mmc_claim_host(struct mmc_host *host) -{ - DECLARE_WAITQUEUE(wait, current); - unsigned long flags; - - add_wait_queue(&host->wq, &wait); - spin_lock_irqsave(&host->lock, flags); - while (1) { - set_current_state(TASK_UNINTERRUPTIBLE); - if (!host->claimed) - break; - spin_unlock_irqrestore(&host->lock, flags); - schedule(); - spin_lock_irqsave(&host->lock, flags); - } - set_current_state(TASK_RUNNING); - host->claimed = 1; - spin_unlock_irqrestore(&host->lock, flags); - remove_wait_queue(&host->wq, &wait); -} - -EXPORT_SYMBOL(mmc_claim_host); - -/** - * mmc_release_host - release a host - * @host: mmc host to release - * - * Release a MMC host, allowing others to claim the host - * for their operations. - */ -void mmc_release_host(struct mmc_host *host) -{ - unsigned long flags; - - BUG_ON(!host->claimed); - - spin_lock_irqsave(&host->lock, flags); - host->claimed = 0; - spin_unlock_irqrestore(&host->lock, flags); - - wake_up(&host->wq); -} - -EXPORT_SYMBOL(mmc_release_host); - -static inline void mmc_set_ios(struct mmc_host *host) -{ - struct mmc_ios *ios = &host->ios; - - pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " - "width %u timing %u\n", - mmc_hostname(host), ios->clock, ios->bus_mode, - ios->power_mode, ios->chip_select, ios->vdd, - ios->bus_width, ios->timing); - - host->ops->set_ios(host, ios); -} - -static int mmc_select_card(struct mmc_card *card) -{ - int err; - struct mmc_command cmd; - - BUG_ON(!card->host->claimed); - - cmd.opcode = MMC_SELECT_CARD; - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(card->host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) - return err; - - /* - * We can only change the bus width of SD cards when - * they are selected so we have to put the handling - * here. - * - * The card is in 1 bit mode by default so - * we only need to change if it supports the - * wider version. - */ - if (mmc_card_sd(card) && - (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) && - (card->host->caps & MMC_CAP_4_BIT_DATA)) { - - struct mmc_command cmd; - cmd.opcode = SD_APP_SET_BUS_WIDTH; - cmd.arg = SD_BUS_WIDTH_4; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_app_cmd(card->host, card->rca, - &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) - return err; - - card->host->ios.bus_width = MMC_BUS_WIDTH_4; - mmc_set_ios(card->host); - } - - return MMC_ERR_NONE; -} - - -static inline void mmc_delay(unsigned int ms) -{ - if (ms < 1000 / HZ) { - cond_resched(); - mdelay(ms); - } else { - msleep(ms); - } -} - -/* - * Mask off any voltages we don't support and select - * the lowest voltage - */ -static u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) -{ - int bit; - - ocr &= host->ocr_avail; - - bit = ffs(ocr); - if (bit) { - bit -= 1; - - ocr &= 3 << bit; - - host->ios.vdd = bit; - mmc_set_ios(host); - } else { - ocr = 0; - } - - return ocr; -} - -#define UNSTUFF_BITS(resp,start,size) \ - ({ \ - const int __size = size; \ - const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \ - const int __off = 3 - ((start) / 32); \ - const int __shft = (start) & 31; \ - u32 __res; \ - \ - __res = resp[__off] >> __shft; \ - if (__size + __shft > 32) \ - __res |= resp[__off-1] << ((32 - __shft) % 32); \ - __res & __mask; \ - }) - -/* - * Given the decoded CSD structure, decode the raw CID to our CID structure. 
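One more note on the OCR handling a little further up: mmc_select_voltage() intersects the card's OCR with the host's ocr_avail and then keeps only the lowest common voltage window (and its neighbour bit) via ffs(). The arithmetic is easy to check in user space; the masks below are invented for the example, with OCR bit 20 taken to mean the 3.2-3.3V window.

#include <stdio.h>
#include <stdint.h>
#include <strings.h>            /* ffs() */

/*
 * User-space rendition of mmc_select_voltage(); bit numbers follow the
 * OCR register layout (bit 20 = 3.2-3.3V, bit 21 = 3.3-3.4V, and so on).
 */
static uint32_t select_voltage(uint32_t host_ocr_avail, uint32_t card_ocr)
{
        uint32_t ocr = card_ocr & host_ocr_avail;
        int bit = ffs(ocr);

        if (!bit)
                return 0;               /* no common voltage window */

        bit -= 1;
        return ocr & (3u << bit);       /* keep the lowest window plus its neighbour */
}

int main(void)
{
        /* Invented masks: host does 3.2-3.4V, card advertises 2.7-3.6V. */
        uint32_t host_avail = (1u << 20) | (1u << 21);
        uint32_t card_ocr   = 0x00ff8000;

        printf("selected ocr mask: 0x%08x\n",
               select_voltage(host_avail, card_ocr));
        return 0;
}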
- */ -static void mmc_decode_cid(struct mmc_card *card) -{ - u32 *resp = card->raw_cid; - - memset(&card->cid, 0, sizeof(struct mmc_cid)); - - if (mmc_card_sd(card)) { - /* - * SD doesn't currently have a version field so we will - * have to assume we can parse this. - */ - card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); - card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); - card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); - card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); - card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); - card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); - card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); - card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4); - card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4); - card->cid.serial = UNSTUFF_BITS(resp, 24, 32); - card->cid.year = UNSTUFF_BITS(resp, 12, 8); - card->cid.month = UNSTUFF_BITS(resp, 8, 4); - - card->cid.year += 2000; /* SD cards year offset */ - } else { - /* - * The selection of the format here is based upon published - * specs from sandisk and from what people have reported. - */ - switch (card->csd.mmca_vsn) { - case 0: /* MMC v1.0 - v1.2 */ - case 1: /* MMC v1.4 */ - card->cid.manfid = UNSTUFF_BITS(resp, 104, 24); - card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); - card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); - card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); - card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); - card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); - card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); - card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); - card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); - card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); - card->cid.serial = UNSTUFF_BITS(resp, 16, 24); - card->cid.month = UNSTUFF_BITS(resp, 12, 4); - card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; - break; - - case 2: /* MMC v2.0 - v2.2 */ - case 3: /* MMC v3.1 - v3.3 */ - case 4: /* MMC v4 */ - card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); - card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); - card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); - card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); - card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); - card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); - card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); - card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); - card->cid.serial = UNSTUFF_BITS(resp, 16, 32); - card->cid.month = UNSTUFF_BITS(resp, 12, 4); - card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; - break; - - default: - printk("%s: card has unknown MMCA version %d\n", - mmc_hostname(card->host), card->csd.mmca_vsn); - mmc_card_set_bad(card); - break; - } - } -} - -/* - * Given a 128-bit response, decode to our card CSD structure. 
- */ -static void mmc_decode_csd(struct mmc_card *card) -{ - struct mmc_csd *csd = &card->csd; - unsigned int e, m, csd_struct; - u32 *resp = card->raw_csd; - - if (mmc_card_sd(card)) { - csd_struct = UNSTUFF_BITS(resp, 126, 2); - - switch (csd_struct) { - case 0: - m = UNSTUFF_BITS(resp, 115, 4); - e = UNSTUFF_BITS(resp, 112, 3); - csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; - csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; - - m = UNSTUFF_BITS(resp, 99, 4); - e = UNSTUFF_BITS(resp, 96, 3); - csd->max_dtr = tran_exp[e] * tran_mant[m]; - csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); - - e = UNSTUFF_BITS(resp, 47, 3); - m = UNSTUFF_BITS(resp, 62, 12); - csd->capacity = (1 + m) << (e + 2); - - csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); - csd->read_partial = UNSTUFF_BITS(resp, 79, 1); - csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); - csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); - csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); - csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); - csd->write_partial = UNSTUFF_BITS(resp, 21, 1); - break; - case 1: - /* - * This is a block-addressed SDHC card. Most - * interesting fields are unused and have fixed - * values. To avoid getting tripped by buggy cards, - * we assume those fixed values ourselves. - */ - mmc_card_set_blockaddr(card); - - csd->tacc_ns = 0; /* Unused */ - csd->tacc_clks = 0; /* Unused */ - - m = UNSTUFF_BITS(resp, 99, 4); - e = UNSTUFF_BITS(resp, 96, 3); - csd->max_dtr = tran_exp[e] * tran_mant[m]; - csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); - - m = UNSTUFF_BITS(resp, 48, 22); - csd->capacity = (1 + m) << 10; - - csd->read_blkbits = 9; - csd->read_partial = 0; - csd->write_misalign = 0; - csd->read_misalign = 0; - csd->r2w_factor = 4; /* Unused */ - csd->write_blkbits = 9; - csd->write_partial = 0; - break; - default: - printk("%s: unrecognised CSD structure version %d\n", - mmc_hostname(card->host), csd_struct); - mmc_card_set_bad(card); - return; - } - } else { - /* - * We only understand CSD structure v1.1 and v1.2. - * v1.2 has extra information in bits 15, 11 and 10. - */ - csd_struct = UNSTUFF_BITS(resp, 126, 2); - if (csd_struct != 1 && csd_struct != 2) { - printk("%s: unrecognised CSD structure version %d\n", - mmc_hostname(card->host), csd_struct); - mmc_card_set_bad(card); - return; - } - - csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); - m = UNSTUFF_BITS(resp, 115, 4); - e = UNSTUFF_BITS(resp, 112, 3); - csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; - csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; - - m = UNSTUFF_BITS(resp, 99, 4); - e = UNSTUFF_BITS(resp, 96, 3); - csd->max_dtr = tran_exp[e] * tran_mant[m]; - csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); - - e = UNSTUFF_BITS(resp, 47, 3); - m = UNSTUFF_BITS(resp, 62, 12); - csd->capacity = (1 + m) << (e + 2); - - csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); - csd->read_partial = UNSTUFF_BITS(resp, 79, 1); - csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); - csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); - csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); - csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); - csd->write_partial = UNSTUFF_BITS(resp, 21, 1); - } -} - -/* - * Given a 64-bit response, decode to our card SCR structure. 
- */ -static void mmc_decode_scr(struct mmc_card *card) -{ - struct sd_scr *scr = &card->scr; - unsigned int scr_struct; - u32 resp[4]; - - BUG_ON(!mmc_card_sd(card)); - - resp[3] = card->raw_scr[1]; - resp[2] = card->raw_scr[0]; - - scr_struct = UNSTUFF_BITS(resp, 60, 4); - if (scr_struct != 0) { - printk("%s: unrecognised SCR structure version %d\n", - mmc_hostname(card->host), scr_struct); - mmc_card_set_bad(card); - return; - } - - scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); - scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); -} - -/* - * Allocate a new MMC card - */ -static struct mmc_card * -mmc_alloc_card(struct mmc_host *host, u32 *raw_cid) -{ - struct mmc_card *card; - - card = kmalloc(sizeof(struct mmc_card), GFP_KERNEL); - if (!card) - return ERR_PTR(-ENOMEM); - - mmc_init_card(card, host); - memcpy(card->raw_cid, raw_cid, sizeof(card->raw_cid)); - - return card; -} - -/* - * Tell attached cards to go to IDLE state - */ -static void mmc_idle_cards(struct mmc_host *host) -{ - struct mmc_command cmd; - - host->ios.chip_select = MMC_CS_HIGH; - mmc_set_ios(host); - - mmc_delay(1); - - cmd.opcode = MMC_GO_IDLE_STATE; - cmd.arg = 0; - cmd.flags = MMC_RSP_NONE | MMC_CMD_BC; - - mmc_wait_for_cmd(host, &cmd, 0); - - mmc_delay(1); - - host->ios.chip_select = MMC_CS_DONTCARE; - mmc_set_ios(host); - - mmc_delay(1); -} - -/* - * Apply power to the MMC stack. This is a two-stage process. - * First, we enable power to the card without the clock running. - * We then wait a bit for the power to stabilise. Finally, - * enable the bus drivers and clock to the card. - * - * We must _NOT_ enable the clock prior to power stablising. - * - * If a host does all the power sequencing itself, ignore the - * initial MMC_POWER_UP stage. - */ -static void mmc_power_up(struct mmc_host *host) -{ - int bit = fls(host->ocr_avail) - 1; - - host->ios.vdd = bit; - host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; - host->ios.chip_select = MMC_CS_DONTCARE; - host->ios.power_mode = MMC_POWER_UP; - host->ios.bus_width = MMC_BUS_WIDTH_1; - host->ios.timing = MMC_TIMING_LEGACY; - mmc_set_ios(host); - - mmc_delay(1); - - host->ios.clock = host->f_min; - host->ios.power_mode = MMC_POWER_ON; - mmc_set_ios(host); - - mmc_delay(2); -} - -static void mmc_power_off(struct mmc_host *host) -{ - host->ios.clock = 0; - host->ios.vdd = 0; - host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; - host->ios.chip_select = MMC_CS_DONTCARE; - host->ios.power_mode = MMC_POWER_OFF; - host->ios.bus_width = MMC_BUS_WIDTH_1; - host->ios.timing = MMC_TIMING_LEGACY; - mmc_set_ios(host); -} - -static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) -{ - struct mmc_command cmd; - int i, err = 0; - - cmd.opcode = MMC_SEND_OP_COND; - cmd.arg = ocr; - cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; - - for (i = 100; i; i--) { - err = mmc_wait_for_cmd(host, &cmd, 0); - if (err != MMC_ERR_NONE) - break; - - if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0) - break; - - err = MMC_ERR_TIMEOUT; - - mmc_delay(10); - } - - if (rocr) - *rocr = cmd.resp[0]; - - return err; -} - -static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) -{ - struct mmc_command cmd; - int i, err = 0; - - cmd.opcode = SD_APP_OP_COND; - cmd.arg = ocr; - cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; - - for (i = 100; i; i--) { - err = mmc_wait_for_app_cmd(host, 0, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) - break; - - if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0) - break; - - err = MMC_ERR_TIMEOUT; - - mmc_delay(10); - } - - if (rocr) - *rocr = cmd.resp[0]; - - return err; -} - -static 
int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2) -{ - struct mmc_command cmd; - int err, sd2; - static const u8 test_pattern = 0xAA; - - /* - * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND - * before SD_APP_OP_COND. This command will harmlessly fail for - * SD 1.0 cards. - */ - cmd.opcode = SD_SEND_IF_COND; - cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern; - cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR; - - err = mmc_wait_for_cmd(host, &cmd, 0); - if (err == MMC_ERR_NONE) { - if ((cmd.resp[0] & 0xFF) == test_pattern) { - sd2 = 1; - } else { - sd2 = 0; - err = MMC_ERR_FAILED; - } - } else { - /* - * Treat errors as SD 1.0 card. - */ - sd2 = 0; - err = MMC_ERR_NONE; - } - if (rsd2) - *rsd2 = sd2; - return err; -} - -/* - * Discover the card by requesting its CID. - * - * Create a mmc_card entry for the discovered card, assigning - * it an RCA, and save the raw CID for decoding later. - */ -static void mmc_discover_card(struct mmc_host *host) -{ - unsigned int err; - - struct mmc_command cmd; - - BUG_ON(host->card); - - cmd.opcode = MMC_ALL_SEND_CID; - cmd.arg = 0; - cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err == MMC_ERR_TIMEOUT) { - err = MMC_ERR_NONE; - return; - } - if (err != MMC_ERR_NONE) { - printk(KERN_ERR "%s: error requesting CID: %d\n", - mmc_hostname(host), err); - return; - } - - host->card = mmc_alloc_card(host, cmd.resp); - if (IS_ERR(host->card)) { - err = PTR_ERR(host->card); - host->card = NULL; - return; - } - - if (host->mode == MMC_MODE_SD) { - host->card->type = MMC_TYPE_SD; - - cmd.opcode = SD_SEND_RELATIVE_ADDR; - cmd.arg = 0; - cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) - mmc_card_set_dead(host->card); - else { - host->card->rca = cmd.resp[0] >> 16; - - if (!host->ops->get_ro) { - printk(KERN_WARNING "%s: host does not " - "support reading read-only " - "switch. assuming write-enable.\n", - mmc_hostname(host)); - } else { - if (host->ops->get_ro(host)) - mmc_card_set_readonly(host->card); - } - } - } else { - host->card->type = MMC_TYPE_MMC; - host->card->rca = 1; - - cmd.opcode = MMC_SET_RELATIVE_ADDR; - cmd.arg = host->card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) - mmc_card_set_dead(host->card); - } -} - -static void mmc_read_csd(struct mmc_host *host) -{ - struct mmc_command cmd; - int err; - - if (!host->card) - return; - if (mmc_card_dead(host->card)) - return; - - cmd.opcode = MMC_SEND_CSD; - cmd.arg = host->card->rca << 16; - cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) { - mmc_card_set_dead(host->card); - return; - } - - memcpy(host->card->raw_csd, cmd.resp, sizeof(host->card->raw_csd)); - - mmc_decode_csd(host->card); - mmc_decode_cid(host->card); -} - -static void mmc_process_ext_csd(struct mmc_host *host) -{ - int err; - - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; - - u8 *ext_csd; - struct scatterlist sg; - - if (!host->card) - return; - if (mmc_card_dead(host->card)) - return; - if (mmc_card_sd(host->card)) - return; - if (host->card->csd.mmca_vsn < CSD_SPEC_VER_4) - return; - - /* - * As the ext_csd is so large and mostly unused, we don't store the - * raw block in mmc_card. - */ - ext_csd = kmalloc(512, GFP_KERNEL); - if (!ext_csd) { - printk("%s: could not allocate a buffer to receive the ext_csd." 
- "mmc v4 cards will be treated as v3.\n", - mmc_hostname(host)); - return; - } - - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = MMC_SEND_EXT_CSD; - cmd.arg = 0; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - - memset(&data, 0, sizeof(struct mmc_data)); - - mmc_set_data_timeout(&data, host->card, 0); - - data.blksz = 512; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - mrq.cmd = &cmd; - mrq.data = &data; - - sg_init_one(&sg, ext_csd, 512); - - mmc_wait_for_req(host, &mrq); - - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { - if (host->card->csd.capacity == (4096 * 512)) { - printk(KERN_ERR "%s: unable to read EXT_CSD " - "on a possible high capacity card. " - "Card will be ignored.\n", - mmc_hostname(host)); - mmc_card_set_dead(host->card); - } else { - printk(KERN_WARNING "%s: unable to read " - "EXT_CSD, performance might " - "suffer.\n", - mmc_hostname(host)); - } - goto out; - } - - host->card->ext_csd.sectors = - ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | - ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | - ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | - ext_csd[EXT_CSD_SEC_CNT + 3] << 24; - if (host->card->ext_csd.sectors) - mmc_card_set_blockaddr(host->card); - - switch (ext_csd[EXT_CSD_CARD_TYPE]) { - case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: - host->card->ext_csd.hs_max_dtr = 52000000; - break; - case EXT_CSD_CARD_TYPE_26: - host->card->ext_csd.hs_max_dtr = 26000000; - break; - default: - /* MMC v4 spec says this cannot happen */ - printk("%s: card is mmc v4 but doesn't support " - "any high-speed modes.\n", - mmc_hostname(host)); - goto out; - } - - if (host->caps & MMC_CAP_MMC_HIGHSPEED) { - /* Activate highspeed support. */ - cmd.opcode = MMC_SWITCH; - cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | - (EXT_CSD_HS_TIMING << 16) | - (1 << 8) | - EXT_CSD_CMD_SET_NORMAL; - cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) { - printk("%s: failed to switch card to mmc v4 " - "high-speed mode.\n", - mmc_hostname(host)); - goto out; - } - - mmc_card_set_highspeed(host->card); - - host->ios.timing = MMC_TIMING_MMC_HS; - mmc_set_ios(host); - } - - /* Check for host support for wide-bus modes. */ - if (host->caps & MMC_CAP_4_BIT_DATA) { - /* Activate 4-bit support. 
*/ - cmd.opcode = MMC_SWITCH; - cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | - (EXT_CSD_BUS_WIDTH << 16) | - (EXT_CSD_BUS_WIDTH_4 << 8) | - EXT_CSD_CMD_SET_NORMAL; - cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) { - printk("%s: failed to switch card to " - "mmc v4 4-bit bus mode.\n", - mmc_hostname(host)); - goto out; - } - - host->ios.bus_width = MMC_BUS_WIDTH_4; - mmc_set_ios(host); - } - -out: - kfree(ext_csd); -} - -static void mmc_read_scr(struct mmc_host *host) -{ - int err; - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; - struct scatterlist sg; - - if (!host->card) - return; - if (mmc_card_dead(host->card)) - return; - if (!mmc_card_sd(host->card)) - return; - - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = MMC_APP_CMD; - cmd.arg = host->card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, 0); - if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) { - mmc_card_set_dead(host->card); - return; - } - - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = SD_APP_SEND_SCR; - cmd.arg = 0; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - - memset(&data, 0, sizeof(struct mmc_data)); - - mmc_set_data_timeout(&data, host->card, 0); - - data.blksz = 1 << 3; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - mrq.cmd = &cmd; - mrq.data = &data; - - sg_init_one(&sg, (u8*)host->card->raw_scr, 8); - - mmc_wait_for_req(host, &mrq); - - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { - mmc_card_set_dead(host->card); - return; - } - - host->card->raw_scr[0] = ntohl(host->card->raw_scr[0]); - host->card->raw_scr[1] = ntohl(host->card->raw_scr[1]); - - mmc_decode_scr(host->card); -} - -static void mmc_read_switch_caps(struct mmc_host *host) -{ - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; - unsigned char *status; - struct scatterlist sg; - - if (!(host->caps & MMC_CAP_SD_HIGHSPEED)) - return; - - if (!host->card) - return; - if (mmc_card_dead(host->card)) - return; - if (!mmc_card_sd(host->card)) - return; - if (host->card->scr.sda_vsn < SCR_SPEC_VER_1) - return; - - status = kmalloc(64, GFP_KERNEL); - if (!status) { - printk(KERN_WARNING "%s: Unable to allocate buffer for " - "reading switch capabilities.\n", - mmc_hostname(host)); - return; - } - - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = SD_SWITCH; - cmd.arg = 0x00FFFFF1; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - - memset(&data, 0, sizeof(struct mmc_data)); - - mmc_set_data_timeout(&data, host->card, 0); - - data.blksz = 64; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - mrq.cmd = &cmd; - mrq.data = &data; - - sg_init_one(&sg, status, 64); - - mmc_wait_for_req(host, &mrq); - - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { - printk("%s: unable to read switch capabilities, " - "performance might suffer.\n", - mmc_hostname(host)); - goto out; - } - - if (status[13] & 0x02) - host->card->sw_caps.hs_max_dtr = 50000000; - - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = SD_SWITCH; - cmd.arg = 0x80FFFFF1; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - - memset(&data, 0, sizeof(struct mmc_data)); - - mmc_set_data_timeout(&data, host->card, 0); - - data.blksz = 64; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = 
&sg; - data.sg_len = 1; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - mrq.cmd = &cmd; - mrq.data = &data; - - sg_init_one(&sg, status, 64); - - mmc_wait_for_req(host, &mrq); - - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE || - (status[16] & 0xF) != 1) { - printk(KERN_WARNING "%s: Problem switching card " - "into high-speed mode!\n", - mmc_hostname(host)); - goto out; - } - - mmc_card_set_highspeed(host->card); - - host->ios.timing = MMC_TIMING_SD_HS; - mmc_set_ios(host); - -out: - kfree(status); -} - -static unsigned int mmc_calculate_clock(struct mmc_host *host) -{ - unsigned int max_dtr = host->f_max; - - if (host->card && !mmc_card_dead(host->card)) { - if (mmc_card_highspeed(host->card) && mmc_card_sd(host->card)) { - if (max_dtr > host->card->sw_caps.hs_max_dtr) - max_dtr = host->card->sw_caps.hs_max_dtr; - } else if (mmc_card_highspeed(host->card) && !mmc_card_sd(host->card)) { - if (max_dtr > host->card->ext_csd.hs_max_dtr) - max_dtr = host->card->ext_csd.hs_max_dtr; - } else if (max_dtr > host->card->csd.max_dtr) { - max_dtr = host->card->csd.max_dtr; - } - } - - pr_debug("%s: selected %d.%03dMHz transfer rate\n", - mmc_hostname(host), - max_dtr / 1000000, (max_dtr / 1000) % 1000); - - return max_dtr; -} - -/* - * Check whether cards we already know about are still present. - * We do this by requesting status, and checking whether a card - * responds. - * - * A request for status does not cause a state change in data - * transfer mode. - */ -static void mmc_check_card(struct mmc_card *card) -{ - struct mmc_command cmd; - int err; - - BUG_ON(!card); - - cmd.opcode = MMC_SEND_STATUS; - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(card->host, &cmd, CMD_RETRIES); - if (err == MMC_ERR_NONE) - return; - - mmc_card_set_dead(card); -} - -static void mmc_setup(struct mmc_host *host) -{ - int err; - u32 ocr; - - host->mode = MMC_MODE_SD; - - mmc_power_up(host); - mmc_idle_cards(host); - - err = mmc_send_if_cond(host, host->ocr_avail, NULL); - if (err != MMC_ERR_NONE) { - return; - } - err = mmc_send_app_op_cond(host, 0, &ocr); - - /* - * If we fail to detect any SD cards then try - * searching for MMC cards. - */ - if (err != MMC_ERR_NONE) { - host->mode = MMC_MODE_MMC; - - err = mmc_send_op_cond(host, 0, &ocr); - if (err != MMC_ERR_NONE) - return; - } - - host->ocr = mmc_select_voltage(host, ocr); - - if (host->ocr == 0) - return; - - /* - * Since we're changing the OCR value, we seem to - * need to tell some cards to go back to the idle - * state. We wait 1ms to give cards time to - * respond. - */ - mmc_idle_cards(host); - - /* - * Send the selected OCR multiple times... until the cards - * all get the idea that they should be ready for CMD2. - * (My SanDisk card seems to need this.) - */ - if (host->mode == MMC_MODE_SD) { - int err, sd2; - err = mmc_send_if_cond(host, host->ocr, &sd2); - if (err == MMC_ERR_NONE) { - /* - * If SD_SEND_IF_COND indicates an SD 2.0 - * compliant card and we should set bit 30 - * of the ocr to indicate that we can handle - * block-addressed SDHC cards. - */ - mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL); - } - } else { - /* The extra bit indicates that we support high capacity */ - mmc_send_op_cond(host, host->ocr | (1 << 30), NULL); - } - - mmc_discover_card(host); - - /* - * Ok, now switch to push-pull mode. 
- */ - host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; - mmc_set_ios(host); - - mmc_read_csd(host); - - if (host->card && !mmc_card_dead(host->card)) { - err = mmc_select_card(host->card); - if (err != MMC_ERR_NONE) - mmc_card_set_dead(host->card); - } - - if (host->mode == MMC_MODE_SD) { - mmc_read_scr(host); - mmc_read_switch_caps(host); - } else - mmc_process_ext_csd(host); -} - - -/** - * mmc_detect_change - process change of state on a MMC socket - * @host: host which changed state. - * @delay: optional delay to wait before detection (jiffies) - * - * All we know is that card(s) have been inserted or removed - * from the socket(s). We don't know which socket or cards. - */ -void mmc_detect_change(struct mmc_host *host, unsigned long delay) -{ -#ifdef CONFIG_MMC_DEBUG - mmc_claim_host(host); - BUG_ON(host->removed); - mmc_release_host(host); -#endif - - mmc_schedule_delayed_work(&host->detect, delay); -} - -EXPORT_SYMBOL(mmc_detect_change); - - -static void mmc_rescan(struct work_struct *work) -{ - struct mmc_host *host = - container_of(work, struct mmc_host, detect.work); - - mmc_claim_host(host); - - /* - * Check for removed card and newly inserted ones. We check for - * removed cards first so we can intelligently re-select the VDD. - */ - if (host->card) { - mmc_check_card(host->card); - - mmc_release_host(host); - - if (mmc_card_dead(host->card)) { - mmc_remove_card(host->card); - host->card = NULL; - } - - goto out; - } - - mmc_setup(host); - - if (host->card && !mmc_card_dead(host->card)) { - /* - * (Re-)calculate the fastest clock rate which the - * attached cards and the host support. - */ - host->ios.clock = mmc_calculate_clock(host); - mmc_set_ios(host); - } - - mmc_release_host(host); - - /* - * If this is a new and good card, register it. - */ - if (host->card && !mmc_card_dead(host->card)) { - if (mmc_register_card(host->card)) - mmc_card_set_dead(host->card); - } - - /* - * If this card is dead, destroy it. - */ - if (host->card && mmc_card_dead(host->card)) { - mmc_remove_card(host->card); - host->card = NULL; - } - -out: - /* - * If we discover that there are no cards on the - * bus, turn off the clock and power down. - */ - if (!host->card) - mmc_power_off(host); -} - - -/** - * mmc_alloc_host - initialise the per-host structure. - * @extra: sizeof private data structure - * @dev: pointer to host device model structure - * - * Initialise the per-host structure. - */ -struct mmc_host *mmc_alloc_host(int extra, struct device *dev) -{ - struct mmc_host *host; - - host = mmc_alloc_host_sysfs(extra, dev); - if (host) { - spin_lock_init(&host->lock); - init_waitqueue_head(&host->wq); - INIT_DELAYED_WORK(&host->detect, mmc_rescan); - - /* - * By default, hosts do not support SGIO or large requests. - * They have to set these according to their abilities. - */ - host->max_hw_segs = 1; - host->max_phys_segs = 1; - host->max_seg_size = PAGE_CACHE_SIZE; - - host->max_req_size = PAGE_CACHE_SIZE; - host->max_blk_size = 512; - host->max_blk_count = PAGE_CACHE_SIZE / 512; - } - - return host; -} - -EXPORT_SYMBOL(mmc_alloc_host); - -/** - * mmc_add_host - initialise host hardware - * @host: mmc host - */ -int mmc_add_host(struct mmc_host *host) -{ - int ret; - - ret = mmc_add_host_sysfs(host); - if (ret == 0) { - mmc_power_off(host); - mmc_detect_change(host, 0); - } - - return ret; -} - -EXPORT_SYMBOL(mmc_add_host); - -/** - * mmc_remove_host - remove host hardware - * @host: mmc host - * - * Unregister and remove all cards associated with this host, - * and power down the MMC bus. 
- */ -void mmc_remove_host(struct mmc_host *host) -{ -#ifdef CONFIG_MMC_DEBUG - mmc_claim_host(host); - host->removed = 1; - mmc_release_host(host); -#endif - - mmc_flush_scheduled_work(); - - if (host->card) { - mmc_remove_card(host->card); - host->card = NULL; - } - - mmc_power_off(host); - mmc_remove_host_sysfs(host); -} - -EXPORT_SYMBOL(mmc_remove_host); - -/** - * mmc_free_host - free the host structure - * @host: mmc host - * - * Free the host once all references to it have been dropped. - */ -void mmc_free_host(struct mmc_host *host) -{ - mmc_free_host_sysfs(host); -} - -EXPORT_SYMBOL(mmc_free_host); - -#ifdef CONFIG_PM - -/** - * mmc_suspend_host - suspend a host - * @host: mmc host - * @state: suspend mode (PM_SUSPEND_xxx) - */ -int mmc_suspend_host(struct mmc_host *host, pm_message_t state) -{ - mmc_flush_scheduled_work(); - - if (host->card) { - mmc_remove_card(host->card); - host->card = NULL; - } - - mmc_power_off(host); - - return 0; -} - -EXPORT_SYMBOL(mmc_suspend_host); - -/** - * mmc_resume_host - resume a previously suspended host - * @host: mmc host - */ -int mmc_resume_host(struct mmc_host *host) -{ - mmc_rescan(&host->detect.work); - - return 0; -} - -EXPORT_SYMBOL(mmc_resume_host); - -#endif - -MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h deleted file mode 100644 index 149affe0b686..000000000000 --- a/drivers/mmc/mmc.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * linux/drivers/mmc/mmc.h - * - * Copyright (C) 2003 Russell King, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef _MMC_H -#define _MMC_H -/* core-internal functions */ -void mmc_init_card(struct mmc_card *card, struct mmc_host *host); -int mmc_register_card(struct mmc_card *card); -void mmc_remove_card(struct mmc_card *card); - -struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev); -int mmc_add_host_sysfs(struct mmc_host *host); -void mmc_remove_host_sysfs(struct mmc_host *host); -void mmc_free_host_sysfs(struct mmc_host *host); - -int mmc_schedule_work(struct work_struct *work); -int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay); -void mmc_flush_scheduled_work(void); -#endif diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c deleted file mode 100644 index 06f264b2f79c..000000000000 --- a/drivers/mmc/mmc_sysfs.c +++ /dev/null @@ -1,361 +0,0 @@ -/* - * linux/drivers/mmc/mmc_sysfs.c - * - * Copyright (C) 2003 Russell King, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * MMC sysfs/driver model support. - */ -#include -#include -#include -#include -#include - -#include -#include - -#include "mmc.h" - -#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev) -#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) -#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev) - -#define MMC_ATTR(name, fmt, args...) 
\ -static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \ -{ \ - struct mmc_card *card = dev_to_mmc_card(dev); \ - return sprintf(buf, fmt, args); \ -} - -MMC_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1], - card->raw_cid[2], card->raw_cid[3]); -MMC_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], - card->raw_csd[2], card->raw_csd[3]); -MMC_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]); -MMC_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); -MMC_ATTR(fwrev, "0x%x\n", card->cid.fwrev); -MMC_ATTR(hwrev, "0x%x\n", card->cid.hwrev); -MMC_ATTR(manfid, "0x%06x\n", card->cid.manfid); -MMC_ATTR(name, "%s\n", card->cid.prod_name); -MMC_ATTR(oemid, "0x%04x\n", card->cid.oemid); -MMC_ATTR(serial, "0x%08x\n", card->cid.serial); - -#define MMC_ATTR_RO(name) __ATTR(name, S_IRUGO, mmc_##name##_show, NULL) - -static struct device_attribute mmc_dev_attrs[] = { - MMC_ATTR_RO(cid), - MMC_ATTR_RO(csd), - MMC_ATTR_RO(date), - MMC_ATTR_RO(fwrev), - MMC_ATTR_RO(hwrev), - MMC_ATTR_RO(manfid), - MMC_ATTR_RO(name), - MMC_ATTR_RO(oemid), - MMC_ATTR_RO(serial), - __ATTR_NULL -}; - -static struct device_attribute mmc_dev_attr_scr = MMC_ATTR_RO(scr); - - -static void mmc_release_card(struct device *dev) -{ - struct mmc_card *card = dev_to_mmc_card(dev); - - kfree(card); -} - -/* - * This currently matches any MMC driver to any MMC card - drivers - * themselves make the decision whether to drive this card in their - * probe method. However, we force "bad" cards to fail. - */ -static int mmc_bus_match(struct device *dev, struct device_driver *drv) -{ - struct mmc_card *card = dev_to_mmc_card(dev); - return !mmc_card_bad(card); -} - -static int -mmc_bus_uevent(struct device *dev, char **envp, int num_envp, char *buf, - int buf_size) -{ - struct mmc_card *card = dev_to_mmc_card(dev); - char ccc[13]; - int retval = 0, i = 0, length = 0; - -#define add_env(fmt,val) do { \ - retval = add_uevent_var(envp, num_envp, &i, \ - buf, buf_size, &length, \ - fmt, val); \ - if (retval) \ - return retval; \ -} while (0); - - for (i = 0; i < 12; i++) - ccc[i] = card->csd.cmdclass & (1 << i) ? 
'1' : '0'; - ccc[12] = '\0'; - - add_env("MMC_CCC=%s", ccc); - add_env("MMC_MANFID=%06x", card->cid.manfid); - add_env("MMC_NAME=%s", mmc_card_name(card)); - add_env("MMC_OEMID=%04x", card->cid.oemid); -#undef add_env - envp[i] = NULL; - - return 0; -} - -static int mmc_bus_suspend(struct device *dev, pm_message_t state) -{ - struct mmc_driver *drv = to_mmc_driver(dev->driver); - struct mmc_card *card = dev_to_mmc_card(dev); - int ret = 0; - - if (dev->driver && drv->suspend) - ret = drv->suspend(card, state); - return ret; -} - -static int mmc_bus_resume(struct device *dev) -{ - struct mmc_driver *drv = to_mmc_driver(dev->driver); - struct mmc_card *card = dev_to_mmc_card(dev); - int ret = 0; - - if (dev->driver && drv->resume) - ret = drv->resume(card); - return ret; -} - -static int mmc_bus_probe(struct device *dev) -{ - struct mmc_driver *drv = to_mmc_driver(dev->driver); - struct mmc_card *card = dev_to_mmc_card(dev); - - return drv->probe(card); -} - -static int mmc_bus_remove(struct device *dev) -{ - struct mmc_driver *drv = to_mmc_driver(dev->driver); - struct mmc_card *card = dev_to_mmc_card(dev); - - drv->remove(card); - - return 0; -} - -static struct bus_type mmc_bus_type = { - .name = "mmc", - .dev_attrs = mmc_dev_attrs, - .match = mmc_bus_match, - .uevent = mmc_bus_uevent, - .probe = mmc_bus_probe, - .remove = mmc_bus_remove, - .suspend = mmc_bus_suspend, - .resume = mmc_bus_resume, -}; - -/** - * mmc_register_driver - register a media driver - * @drv: MMC media driver - */ -int mmc_register_driver(struct mmc_driver *drv) -{ - drv->drv.bus = &mmc_bus_type; - return driver_register(&drv->drv); -} - -EXPORT_SYMBOL(mmc_register_driver); - -/** - * mmc_unregister_driver - unregister a media driver - * @drv: MMC media driver - */ -void mmc_unregister_driver(struct mmc_driver *drv) -{ - drv->drv.bus = &mmc_bus_type; - driver_unregister(&drv->drv); -} - -EXPORT_SYMBOL(mmc_unregister_driver); - - -/* - * Internal function. Initialise a MMC card structure. - */ -void mmc_init_card(struct mmc_card *card, struct mmc_host *host) -{ - memset(card, 0, sizeof(struct mmc_card)); - card->host = host; - device_initialize(&card->dev); - card->dev.parent = mmc_classdev(host); - card->dev.bus = &mmc_bus_type; - card->dev.release = mmc_release_card; -} - -/* - * Internal function. Register a new MMC card with the driver model. - */ -int mmc_register_card(struct mmc_card *card) -{ - int ret; - - snprintf(card->dev.bus_id, sizeof(card->dev.bus_id), - "%s:%04x", mmc_hostname(card->host), card->rca); - - ret = device_add(&card->dev); - if (ret == 0) { - if (mmc_card_sd(card)) { - ret = device_create_file(&card->dev, &mmc_dev_attr_scr); - if (ret) - device_del(&card->dev); - } - } - if (ret == 0) - mmc_card_set_present(card); - return ret; -} - -/* - * Internal function. Unregister a new MMC card with the - * driver model, and (eventually) free it. - */ -void mmc_remove_card(struct mmc_card *card) -{ - if (mmc_card_present(card)) { - if (mmc_card_sd(card)) - device_remove_file(&card->dev, &mmc_dev_attr_scr); - - device_del(&card->dev); - } - - put_device(&card->dev); -} - - -static void mmc_host_classdev_release(struct device *dev) -{ - struct mmc_host *host = cls_dev_to_mmc_host(dev); - kfree(host); -} - -static struct class mmc_host_class = { - .name = "mmc_host", - .dev_release = mmc_host_classdev_release, -}; - -static DEFINE_IDR(mmc_host_idr); -static DEFINE_SPINLOCK(mmc_host_lock); - -/* - * Internal function. Allocate a new MMC host. 
- */ -struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev) -{ - struct mmc_host *host; - - host = kmalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL); - if (host) { - memset(host, 0, sizeof(struct mmc_host) + extra); - - host->parent = dev; - host->class_dev.parent = dev; - host->class_dev.class = &mmc_host_class; - device_initialize(&host->class_dev); - } - - return host; -} - -/* - * Internal function. Register a new MMC host with the MMC class. - */ -int mmc_add_host_sysfs(struct mmc_host *host) -{ - int err; - - if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL)) - return -ENOMEM; - - spin_lock(&mmc_host_lock); - err = idr_get_new(&mmc_host_idr, host, &host->index); - spin_unlock(&mmc_host_lock); - if (err) - return err; - - snprintf(host->class_dev.bus_id, BUS_ID_SIZE, - "mmc%d", host->index); - - return device_add(&host->class_dev); -} - -/* - * Internal function. Unregister a MMC host with the MMC class. - */ -void mmc_remove_host_sysfs(struct mmc_host *host) -{ - device_del(&host->class_dev); - - spin_lock(&mmc_host_lock); - idr_remove(&mmc_host_idr, host->index); - spin_unlock(&mmc_host_lock); -} - -/* - * Internal function. Free a MMC host. - */ -void mmc_free_host_sysfs(struct mmc_host *host) -{ - put_device(&host->class_dev); -} - -static struct workqueue_struct *workqueue; - -/* - * Internal function. Schedule delayed work in the MMC work queue. - */ -int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) -{ - return queue_delayed_work(workqueue, work, delay); -} - -/* - * Internal function. Flush all scheduled work from the MMC work queue. - */ -void mmc_flush_scheduled_work(void) -{ - flush_workqueue(workqueue); -} - -static int __init mmc_init(void) -{ - int ret; - - workqueue = create_singlethread_workqueue("kmmcd"); - if (!workqueue) - return -ENOMEM; - - ret = bus_register(&mmc_bus_type); - if (ret == 0) { - ret = class_register(&mmc_host_class); - if (ret) - bus_unregister(&mmc_bus_type); - } - return ret; -} - -static void __exit mmc_exit(void) -{ - class_unregister(&mmc_host_class); - bus_unregister(&mmc_bus_type); - destroy_workqueue(workqueue); -} - -module_init(mmc_init); -module_exit(mmc_exit); diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 7d98990ac94e..800425e05165 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -10,7 +10,7 @@ #ifndef LINUX_MMC_CARD_H #define LINUX_MMC_CARD_H -#include +#include struct mmc_cid { unsigned int manfid; diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h new file mode 100644 index 000000000000..d8fd66cf28be --- /dev/null +++ b/include/linux/mmc/core.h @@ -0,0 +1,112 @@ +/* + * linux/include/linux/mmc/core.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef LINUX_MMC_CORE_H +#define LINUX_MMC_CORE_H + +#include +#include + +struct request; +struct mmc_data; +struct mmc_request; + +struct mmc_command { + u32 opcode; + u32 arg; + u32 resp[4]; + unsigned int flags; /* expected response type */ +#define MMC_RSP_PRESENT (1 << 0) +#define MMC_RSP_136 (1 << 1) /* 136 bit response */ +#define MMC_RSP_CRC (1 << 2) /* expect valid crc */ +#define MMC_RSP_BUSY (1 << 3) /* card may send busy */ +#define MMC_RSP_OPCODE (1 << 4) /* response contains opcode */ +#define MMC_CMD_MASK (3 << 5) /* command type */ +#define MMC_CMD_AC (0 << 5) +#define MMC_CMD_ADTC (1 << 5) +#define MMC_CMD_BC (2 << 5) +#define MMC_CMD_BCR (3 << 5) + +/* + * These are the response types, and correspond to valid bit + * patterns of the above flags. One additional valid pattern + * is all zeros, which means we don't expect a response. + */ +#define MMC_RSP_NONE (0) +#define MMC_RSP_R1 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) +#define MMC_RSP_R1B (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY) +#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC) +#define MMC_RSP_R3 (MMC_RSP_PRESENT) +#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) +#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) + +#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE)) + +/* + * These are the command types. + */ +#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK) + + unsigned int retries; /* max number of retries */ + unsigned int error; /* command error */ + +#define MMC_ERR_NONE 0 +#define MMC_ERR_TIMEOUT 1 +#define MMC_ERR_BADCRC 2 +#define MMC_ERR_FIFO 3 +#define MMC_ERR_FAILED 4 +#define MMC_ERR_INVALID 5 + + struct mmc_data *data; /* data segment associated with cmd */ + struct mmc_request *mrq; /* associated request */ +}; + +struct mmc_data { + unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */ + unsigned int timeout_clks; /* data timeout (in clocks) */ + unsigned int blksz; /* data block size */ + unsigned int blocks; /* number of blocks */ + unsigned int error; /* data error */ + unsigned int flags; + +#define MMC_DATA_WRITE (1 << 8) +#define MMC_DATA_READ (1 << 9) +#define MMC_DATA_STREAM (1 << 10) +#define MMC_DATA_MULTI (1 << 11) + + unsigned int bytes_xfered; + + struct mmc_command *stop; /* stop command */ + struct mmc_request *mrq; /* associated request */ + + unsigned int sg_len; /* size of scatter list */ + struct scatterlist *sg; /* I/O scatter list */ +}; + +struct mmc_request { + struct mmc_command *cmd; + struct mmc_data *data; + struct mmc_command *stop; + + void *done_data; /* completion data */ + void (*done)(struct mmc_request *);/* completion function */ +}; + +struct mmc_host; +struct mmc_card; + +extern int mmc_wait_for_req(struct mmc_host *, struct mmc_request *); +extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); +extern int mmc_wait_for_app_cmd(struct mmc_host *, unsigned int, + struct mmc_command *, int); + +extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *, int); + +extern void mmc_claim_host(struct mmc_host *host); +extern void mmc_release_host(struct mmc_host *host); + +#endif diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 6ea3c0ea3e15..43bf6a5c398d 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -10,7 +10,7 @@ #ifndef LINUX_MMC_HOST_H #define LINUX_MMC_HOST_H -#include +#include struct mmc_ios { unsigned int clock; /* clock rate */ diff 
--git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h deleted file mode 100644 index b3d80efc6434..000000000000 --- a/include/linux/mmc/mmc.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * linux/include/linux/mmc/mmc.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef MMC_H -#define MMC_H - -#include -#include - -struct request; -struct mmc_data; -struct mmc_request; - -struct mmc_command { - u32 opcode; - u32 arg; - u32 resp[4]; - unsigned int flags; /* expected response type */ -#define MMC_RSP_PRESENT (1 << 0) -#define MMC_RSP_136 (1 << 1) /* 136 bit response */ -#define MMC_RSP_CRC (1 << 2) /* expect valid crc */ -#define MMC_RSP_BUSY (1 << 3) /* card may send busy */ -#define MMC_RSP_OPCODE (1 << 4) /* response contains opcode */ -#define MMC_CMD_MASK (3 << 5) /* command type */ -#define MMC_CMD_AC (0 << 5) -#define MMC_CMD_ADTC (1 << 5) -#define MMC_CMD_BC (2 << 5) -#define MMC_CMD_BCR (3 << 5) - -/* - * These are the response types, and correspond to valid bit - * patterns of the above flags. One additional valid pattern - * is all zeros, which means we don't expect a response. - */ -#define MMC_RSP_NONE (0) -#define MMC_RSP_R1 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) -#define MMC_RSP_R1B (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY) -#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC) -#define MMC_RSP_R3 (MMC_RSP_PRESENT) -#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) -#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) - -#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE)) - -/* - * These are the command types. 
- */ -#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK) - - unsigned int retries; /* max number of retries */ - unsigned int error; /* command error */ - -#define MMC_ERR_NONE 0 -#define MMC_ERR_TIMEOUT 1 -#define MMC_ERR_BADCRC 2 -#define MMC_ERR_FIFO 3 -#define MMC_ERR_FAILED 4 -#define MMC_ERR_INVALID 5 - - struct mmc_data *data; /* data segment associated with cmd */ - struct mmc_request *mrq; /* associated request */ -}; - -struct mmc_data { - unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */ - unsigned int timeout_clks; /* data timeout (in clocks) */ - unsigned int blksz; /* data block size */ - unsigned int blocks; /* number of blocks */ - unsigned int error; /* data error */ - unsigned int flags; - -#define MMC_DATA_WRITE (1 << 8) -#define MMC_DATA_READ (1 << 9) -#define MMC_DATA_STREAM (1 << 10) -#define MMC_DATA_MULTI (1 << 11) - - unsigned int bytes_xfered; - - struct mmc_command *stop; /* stop command */ - struct mmc_request *mrq; /* associated request */ - - unsigned int sg_len; /* size of scatter list */ - struct scatterlist *sg; /* I/O scatter list */ -}; - -struct mmc_request { - struct mmc_command *cmd; - struct mmc_data *data; - struct mmc_command *stop; - - void *done_data; /* completion data */ - void (*done)(struct mmc_request *);/* completion function */ -}; - -struct mmc_host; -struct mmc_card; - -extern int mmc_wait_for_req(struct mmc_host *, struct mmc_request *); -extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); -extern int mmc_wait_for_app_cmd(struct mmc_host *, unsigned int, - struct mmc_command *, int); - -extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *, int); - -extern void mmc_claim_host(struct mmc_host *host); -extern void mmc_release_host(struct mmc_host *host); - -#endif -- cgit v1.2.3 From da7fbe58d2d347e95af699ddf04d885be6362bbe Mon Sep 17 00:00:00 2001 From: Pierre Ossman Date: Sun, 24 Dec 2006 22:46:55 +0100 Subject: mmc: Separate out protocol ops Move protocol operations and definitions into their own files in an effort to separate protocol handling and bus arbitration more clearly. 
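For illustration, an abridged sketch of one such conversion (the status poll in mmc_check_card(), shown in full in the diff below): instead of open-coding the command in core.c,

	struct mmc_command cmd;
	int err;

	/* Build CMD13 (SEND_STATUS) by hand, addressed to the card's RCA. */
	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, CMD_RETRIES);

the core now calls the protocol helper from mmc_ops.c:

	/* Same CMD13, issued through the new protocol op; passing NULL discards the status word. */
	err = mmc_send_status(card, NULL);

This keeps wire-level command construction next to the MMC/SD command definitions, while core.c is left with bus arbitration and card state handling.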
Signed-off-by: Pierre Ossman --- drivers/mmc/card/block.c | 4 +- drivers/mmc/core/Makefile | 2 +- drivers/mmc/core/core.c | 511 ++++++------------------------------------- drivers/mmc/core/core.h | 32 +-- drivers/mmc/core/mmc_ops.c | 276 +++++++++++++++++++++++ drivers/mmc/core/mmc_ops.h | 27 +++ drivers/mmc/core/sd_ops.c | 316 ++++++++++++++++++++++++++ drivers/mmc/core/sd_ops.h | 25 +++ drivers/mmc/core/sysfs.c | 2 +- drivers/mmc/core/sysfs.h | 27 +++ include/linux/mmc/core.h | 2 +- include/linux/mmc/mmc.h | 257 ++++++++++++++++++++++ include/linux/mmc/protocol.h | 307 -------------------------- include/linux/mmc/sd.h | 83 +++++++ 14 files changed, 1099 insertions(+), 772 deletions(-) create mode 100644 drivers/mmc/core/mmc_ops.c create mode 100644 drivers/mmc/core/mmc_ops.h create mode 100644 drivers/mmc/core/sd_ops.c create mode 100644 drivers/mmc/core/sd_ops.h create mode 100644 drivers/mmc/core/sysfs.h create mode 100644 include/linux/mmc/mmc.h delete mode 100644 include/linux/mmc/protocol.h create mode 100644 include/linux/mmc/sd.h (limited to 'include/linux') diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 8a84e4dc1b2a..d24ab234394c 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -32,8 +32,8 @@ #include #include -#include -#include +#include +#include #include #include diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index f911fbd2845b..5977abf3e41b 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile @@ -7,5 +7,5 @@ ifeq ($(CONFIG_MMC_DEBUG),y) endif obj-$(CONFIG_MMC) += mmc_core.o -mmc_core-y := core.o sysfs.o +mmc_core-y := core.o sysfs.o mmc_ops.o sd_ops.o diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 334e663e465b..310be2fe1944 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -23,9 +23,14 @@ #include #include -#include +#include +#include #include "core.h" +#include "sysfs.h" + +#include "mmc_ops.h" +#include "sd_ops.h" #define CMD_RETRIES 3 @@ -190,80 +195,6 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries EXPORT_SYMBOL(mmc_wait_for_cmd); -/** - * mmc_wait_for_app_cmd - start an application command and wait for - completion - * @host: MMC host to start command - * @rca: RCA to send MMC_APP_CMD to - * @cmd: MMC command to start - * @retries: maximum number of retries - * - * Sends a MMC_APP_CMD, checks the card response, sends the command - * in the parameter and waits for it to complete. Return any error - * that occurred while the command was executing. Do not attempt to - * parse the response. - */ -int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca, - struct mmc_command *cmd, int retries) -{ - struct mmc_request mrq; - struct mmc_command appcmd; - - int i, err; - - BUG_ON(!host->claimed); - BUG_ON(retries < 0); - - err = MMC_ERR_INVALID; - - /* - * We have to resend MMC_APP_CMD for each attempt so - * we cannot use the retries field in mmc_command. 
- */ - for (i = 0;i <= retries;i++) { - memset(&mrq, 0, sizeof(struct mmc_request)); - - appcmd.opcode = MMC_APP_CMD; - appcmd.arg = rca << 16; - appcmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - appcmd.retries = 0; - memset(appcmd.resp, 0, sizeof(appcmd.resp)); - appcmd.data = NULL; - - mrq.cmd = &appcmd; - appcmd.data = NULL; - - mmc_wait_for_req(host, &mrq); - - if (appcmd.error) { - err = appcmd.error; - continue; - } - - /* Check that card supported application commands */ - if (!(appcmd.resp[0] & R1_APP_CMD)) - return MMC_ERR_FAILED; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - memset(cmd->resp, 0, sizeof(cmd->resp)); - cmd->retries = 0; - - mrq.cmd = cmd; - cmd->data = NULL; - - mmc_wait_for_req(host, &mrq); - - err = cmd->error; - if (cmd->error == MMC_ERR_NONE) - break; - } - - return err; -} - -EXPORT_SYMBOL(mmc_wait_for_app_cmd); - /** * mmc_set_data_timeout - set the timeout for a data command * @data: data phase for command @@ -385,60 +316,10 @@ static inline void mmc_set_ios(struct mmc_host *host) host->ops->set_ios(host, ios); } -static int mmc_select_card(struct mmc_card *card) -{ - int err; - struct mmc_command cmd; - - BUG_ON(!card->host->claimed); - - cmd.opcode = MMC_SELECT_CARD; - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(card->host, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) - return err; - - /* - * We can only change the bus width of SD cards when - * they are selected so we have to put the handling - * here. - * - * The card is in 1 bit mode by default so - * we only need to change if it supports the - * wider version. - */ - if (mmc_card_sd(card) && - (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) && - (card->host->caps & MMC_CAP_4_BIT_DATA)) { - - struct mmc_command cmd; - cmd.opcode = SD_APP_SET_BUS_WIDTH; - cmd.arg = SD_BUS_WIDTH_4; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_app_cmd(card->host, card->rca, - &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) - return err; - - card->host->ios.bus_width = MMC_BUS_WIDTH_4; - mmc_set_ios(card->host); - } - - return MMC_ERR_NONE; -} - - -static inline void mmc_delay(unsigned int ms) +void mmc_set_chip_select(struct mmc_host *host, int mode) { - if (ms < 1000 / HZ) { - cond_resched(); - mdelay(ms); - } else { - msleep(ms); - } + host->ios.chip_select = mode; + mmc_set_ios(host); } /* @@ -708,32 +589,6 @@ mmc_alloc_card(struct mmc_host *host, u32 *raw_cid) return card; } -/* - * Tell attached cards to go to IDLE state - */ -static void mmc_idle_cards(struct mmc_host *host) -{ - struct mmc_command cmd; - - host->ios.chip_select = MMC_CS_HIGH; - mmc_set_ios(host); - - mmc_delay(1); - - cmd.opcode = MMC_GO_IDLE_STATE; - cmd.arg = 0; - cmd.flags = MMC_RSP_NONE | MMC_CMD_BC; - - mmc_wait_for_cmd(host, &cmd, 0); - - mmc_delay(1); - - host->ios.chip_select = MMC_CS_DONTCARE; - mmc_set_ios(host); - - mmc_delay(1); -} - /* * Apply power to the MMC stack. This is a two-stage process. * First, we enable power to the card without the clock running. 
@@ -778,97 +633,6 @@ static void mmc_power_off(struct mmc_host *host) mmc_set_ios(host); } -static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) -{ - struct mmc_command cmd; - int i, err = 0; - - cmd.opcode = MMC_SEND_OP_COND; - cmd.arg = ocr; - cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; - - for (i = 100; i; i--) { - err = mmc_wait_for_cmd(host, &cmd, 0); - if (err != MMC_ERR_NONE) - break; - - if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0) - break; - - err = MMC_ERR_TIMEOUT; - - mmc_delay(10); - } - - if (rocr) - *rocr = cmd.resp[0]; - - return err; -} - -static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) -{ - struct mmc_command cmd; - int i, err = 0; - - cmd.opcode = SD_APP_OP_COND; - cmd.arg = ocr; - cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; - - for (i = 100; i; i--) { - err = mmc_wait_for_app_cmd(host, 0, &cmd, CMD_RETRIES); - if (err != MMC_ERR_NONE) - break; - - if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0) - break; - - err = MMC_ERR_TIMEOUT; - - mmc_delay(10); - } - - if (rocr) - *rocr = cmd.resp[0]; - - return err; -} - -static int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2) -{ - struct mmc_command cmd; - int err, sd2; - static const u8 test_pattern = 0xAA; - - /* - * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND - * before SD_APP_OP_COND. This command will harmlessly fail for - * SD 1.0 cards. - */ - cmd.opcode = SD_SEND_IF_COND; - cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern; - cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR; - - err = mmc_wait_for_cmd(host, &cmd, 0); - if (err == MMC_ERR_NONE) { - if ((cmd.resp[0] & 0xFF) == test_pattern) { - sd2 = 1; - } else { - sd2 = 0; - err = MMC_ERR_FAILED; - } - } else { - /* - * Treat errors as SD 1.0 card. - */ - sd2 = 0; - err = MMC_ERR_NONE; - } - if (rsd2) - *rsd2 = sd2; - return err; -} - /* * Discover the card by requesting its CID. 
* @@ -878,27 +642,18 @@ static int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2) static void mmc_discover_card(struct mmc_host *host) { unsigned int err; - - struct mmc_command cmd; + u32 cid[4]; BUG_ON(host->card); - cmd.opcode = MMC_ALL_SEND_CID; - cmd.arg = 0; - cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); - if (err == MMC_ERR_TIMEOUT) { - err = MMC_ERR_NONE; - return; - } + err = mmc_all_send_cid(host, cid); if (err != MMC_ERR_NONE) { printk(KERN_ERR "%s: error requesting CID: %d\n", mmc_hostname(host), err); return; } - host->card = mmc_alloc_card(host, cmd.resp); + host->card = mmc_alloc_card(host, cid); if (IS_ERR(host->card)) { err = PTR_ERR(host->card); host->card = NULL; @@ -908,16 +663,10 @@ static void mmc_discover_card(struct mmc_host *host) if (host->mode == MMC_MODE_SD) { host->card->type = MMC_TYPE_SD; - cmd.opcode = SD_SEND_RELATIVE_ADDR; - cmd.arg = 0; - cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + err = mmc_send_relative_addr(host, &host->card->rca); if (err != MMC_ERR_NONE) mmc_card_set_dead(host->card); else { - host->card->rca = cmd.resp[0] >> 16; - if (!host->ops->get_ro) { printk(KERN_WARNING "%s: host does not " "support reading read-only " @@ -932,11 +681,7 @@ static void mmc_discover_card(struct mmc_host *host) host->card->type = MMC_TYPE_MMC; host->card->rca = 1; - cmd.opcode = MMC_SET_RELATIVE_ADDR; - cmd.arg = host->card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + err = mmc_set_relative_addr(host->card); if (err != MMC_ERR_NONE) mmc_card_set_dead(host->card); } @@ -944,7 +689,6 @@ static void mmc_discover_card(struct mmc_host *host) static void mmc_read_csd(struct mmc_host *host) { - struct mmc_command cmd; int err; if (!host->card) @@ -952,18 +696,12 @@ static void mmc_read_csd(struct mmc_host *host) if (mmc_card_dead(host->card)) return; - cmd.opcode = MMC_SEND_CSD; - cmd.arg = host->card->rca << 16; - cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + err = mmc_send_csd(host->card, host->card->raw_csd); if (err != MMC_ERR_NONE) { mmc_card_set_dead(host->card); return; } - memcpy(host->card->raw_csd, cmd.resp, sizeof(host->card->raw_csd)); - mmc_decode_csd(host->card); mmc_decode_cid(host->card); } @@ -971,13 +709,7 @@ static void mmc_read_csd(struct mmc_host *host) static void mmc_process_ext_csd(struct mmc_host *host) { int err; - - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; - u8 *ext_csd; - struct scatterlist sg; if (!host->card) return; @@ -1000,32 +732,8 @@ static void mmc_process_ext_csd(struct mmc_host *host) return; } - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = MMC_SEND_EXT_CSD; - cmd.arg = 0; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - - memset(&data, 0, sizeof(struct mmc_data)); - - mmc_set_data_timeout(&data, host->card, 0); - - data.blksz = 512; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - mrq.cmd = &cmd; - mrq.data = &data; - - sg_init_one(&sg, ext_csd, 512); - - mmc_wait_for_req(host, &mrq); - - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { + err = mmc_send_ext_csd(host->card, ext_csd); + if (err != MMC_ERR_NONE) { if (host->card->csd.capacity == (4096 * 512)) { printk(KERN_ERR "%s: unable to read EXT_CSD " "on a possible high capacity card. 
" @@ -1066,14 +774,8 @@ static void mmc_process_ext_csd(struct mmc_host *host) if (host->caps & MMC_CAP_MMC_HIGHSPEED) { /* Activate highspeed support. */ - cmd.opcode = MMC_SWITCH; - cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | - (EXT_CSD_HS_TIMING << 16) | - (1 << 8) | - EXT_CSD_CMD_SET_NORMAL; - cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + err = mmc_switch(host->card, MMC_SWITCH_MODE_WRITE_BYTE, + EXT_CSD_HS_TIMING, 1); if (err != MMC_ERR_NONE) { printk("%s: failed to switch card to mmc v4 " "high-speed mode.\n", @@ -1090,14 +792,9 @@ static void mmc_process_ext_csd(struct mmc_host *host) /* Check for host support for wide-bus modes. */ if (host->caps & MMC_CAP_4_BIT_DATA) { /* Activate 4-bit support. */ - cmd.opcode = MMC_SWITCH; - cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | - (EXT_CSD_BUS_WIDTH << 16) | - (EXT_CSD_BUS_WIDTH_4 << 8) | - EXT_CSD_CMD_SET_NORMAL; - cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); + err = mmc_switch(host->card, MMC_SWITCH_MODE_WRITE_BYTE, + EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4 | + EXT_CSD_CMD_SET_NORMAL); if (err != MMC_ERR_NONE) { printk("%s: failed to switch card to " "mmc v4 4-bit bus mode.\n", @@ -1116,10 +813,6 @@ out: static void mmc_read_scr(struct mmc_host *host) { int err; - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; - struct scatterlist sg; if (!host->card) return; @@ -1128,61 +821,19 @@ static void mmc_read_scr(struct mmc_host *host) if (!mmc_card_sd(host->card)) return; - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = MMC_APP_CMD; - cmd.arg = host->card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(host, &cmd, 0); - if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) { - mmc_card_set_dead(host->card); - return; - } - - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = SD_APP_SEND_SCR; - cmd.arg = 0; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - - memset(&data, 0, sizeof(struct mmc_data)); - - mmc_set_data_timeout(&data, host->card, 0); - - data.blksz = 1 << 3; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - mrq.cmd = &cmd; - mrq.data = &data; - - sg_init_one(&sg, (u8*)host->card->raw_scr, 8); - - mmc_wait_for_req(host, &mrq); - - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { + err = mmc_app_send_scr(host->card, host->card->raw_scr); + if (err != MMC_ERR_NONE) { mmc_card_set_dead(host->card); return; } - host->card->raw_scr[0] = ntohl(host->card->raw_scr[0]); - host->card->raw_scr[1] = ntohl(host->card->raw_scr[1]); - mmc_decode_scr(host->card); } static void mmc_read_switch_caps(struct mmc_host *host) { - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; + int err; unsigned char *status; - struct scatterlist sg; if (!(host->caps & MMC_CAP_SD_HIGHSPEED)) return; @@ -1204,32 +855,9 @@ static void mmc_read_switch_caps(struct mmc_host *host) return; } - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = SD_SWITCH; - cmd.arg = 0x00FFFFF1; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - - memset(&data, 0, sizeof(struct mmc_data)); - - mmc_set_data_timeout(&data, host->card, 0); - - data.blksz = 64; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - mrq.cmd = &cmd; - mrq.data = &data; - - sg_init_one(&sg, status, 64); - - mmc_wait_for_req(host, 
&mrq); - - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { + err = mmc_sd_switch(host->card, SD_SWITCH_CHECK, + SD_SWITCH_GRP_ACCESS, SD_SWITCH_ACCESS_HS, status); + if (err != MMC_ERR_NONE) { printk("%s: unable to read switch capabilities, " "performance might suffer.\n", mmc_hostname(host)); @@ -1239,33 +867,9 @@ static void mmc_read_switch_caps(struct mmc_host *host) if (status[13] & 0x02) host->card->sw_caps.hs_max_dtr = 50000000; - memset(&cmd, 0, sizeof(struct mmc_command)); - - cmd.opcode = SD_SWITCH; - cmd.arg = 0x80FFFFF1; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; - - memset(&data, 0, sizeof(struct mmc_data)); - - mmc_set_data_timeout(&data, host->card, 0); - - data.blksz = 64; - data.blocks = 1; - data.flags = MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - mrq.cmd = &cmd; - mrq.data = &data; - - sg_init_one(&sg, status, 64); - - mmc_wait_for_req(host, &mrq); - - if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE || - (status[16] & 0xF) != 1) { + err = mmc_sd_switch(host->card, SD_SWITCH_SET, + SD_SWITCH_GRP_ACCESS, SD_SWITCH_ACCESS_HS, status); + if (err != MMC_ERR_NONE || (status[16] & 0xF) != 1) { printk(KERN_WARNING "%s: Problem switching card " "into high-speed mode!\n", mmc_hostname(host)); @@ -1314,16 +918,11 @@ static unsigned int mmc_calculate_clock(struct mmc_host *host) */ static void mmc_check_card(struct mmc_card *card) { - struct mmc_command cmd; int err; BUG_ON(!card); - cmd.opcode = MMC_SEND_STATUS; - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(card->host, &cmd, CMD_RETRIES); + err = mmc_send_status(card, NULL); if (err == MMC_ERR_NONE) return; @@ -1338,9 +937,9 @@ static void mmc_setup(struct mmc_host *host) host->mode = MMC_MODE_SD; mmc_power_up(host); - mmc_idle_cards(host); + mmc_go_idle(host); - err = mmc_send_if_cond(host, host->ocr_avail, NULL); + err = mmc_send_if_cond(host, host->ocr_avail); if (err != MMC_ERR_NONE) { return; } @@ -1369,7 +968,7 @@ static void mmc_setup(struct mmc_host *host) * state. We wait 1ms to give cards time to * respond. */ - mmc_idle_cards(host); + mmc_go_idle(host); /* * Send the selected OCR multiple times... until the cards @@ -1377,17 +976,17 @@ static void mmc_setup(struct mmc_host *host) * (My SanDisk card seems to need this.) */ if (host->mode == MMC_MODE_SD) { - int err, sd2; - err = mmc_send_if_cond(host, host->ocr, &sd2); - if (err == MMC_ERR_NONE) { - /* - * If SD_SEND_IF_COND indicates an SD 2.0 - * compliant card and we should set bit 30 - * of the ocr to indicate that we can handle - * block-addressed SDHC cards. - */ - mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL); - } + /* + * If SD_SEND_IF_COND indicates an SD 2.0 + * compliant card and we should set bit 30 + * of the ocr to indicate that we can handle + * block-addressed SDHC cards. + */ + err = mmc_send_if_cond(host, host->ocr); + if (err == MMC_ERR_NONE) + ocr = host->ocr | (1 << 30); + + mmc_send_app_op_cond(host, ocr, NULL); } else { /* The extra bit indicates that we support high capacity */ mmc_send_op_cond(host, host->ocr | (1 << 30), NULL); @@ -1409,6 +1008,24 @@ static void mmc_setup(struct mmc_host *host) mmc_card_set_dead(host->card); } + /* + * The card is in 1 bit mode by default so + * we only need to change if it supports the + * wider version. 
+ */ + if (host->card && !mmc_card_dead(host->card) && + mmc_card_sd(host->card) && + (host->card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) && + (host->card->host->caps & MMC_CAP_4_BIT_DATA)) { + err = mmc_app_set_bus_width(host->card, SD_BUS_WIDTH_4); + if (err != MMC_ERR_NONE) + mmc_card_set_dead(host->card); + else { + host->ios.bus_width = MMC_BUS_WIDTH_4; + mmc_set_ios(host); + } + } + if (host->mode == MMC_MODE_SD) { mmc_read_scr(host); mmc_read_switch_caps(host); diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 076cb2f49a0f..1c1066342fba 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -2,24 +2,30 @@ * linux/drivers/mmc/core/core.h * * Copyright (C) 2003 Russell King, All Rights Reserved. + * Copyright 2007 Pierre Ossman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#ifndef _MMC_CORE_H -#define _MMC_CORE_H -/* core-internal functions */ -void mmc_init_card(struct mmc_card *card, struct mmc_host *host); -int mmc_register_card(struct mmc_card *card); -void mmc_remove_card(struct mmc_card *card); +#ifndef _MMC_CORE_CORE_H +#define _MMC_CORE_CORE_H -struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev); -int mmc_add_host_sysfs(struct mmc_host *host); -void mmc_remove_host_sysfs(struct mmc_host *host); -void mmc_free_host_sysfs(struct mmc_host *host); +#include + +#define MMC_CMD_RETRIES 3 + +void mmc_set_chip_select(struct mmc_host *host, int mode); + +static inline void mmc_delay(unsigned int ms) +{ + if (ms < 1000 / HZ) { + cond_resched(); + mdelay(ms); + } else { + msleep(ms); + } +} -int mmc_schedule_work(struct work_struct *work); -int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay); -void mmc_flush_scheduled_work(void); #endif + diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c new file mode 100644 index 000000000000..7dd720fa5895 --- /dev/null +++ b/drivers/mmc/core/mmc_ops.c @@ -0,0 +1,276 @@ +/* + * linux/drivers/mmc/mmc_ops.h + * + * Copyright 2006-2007 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + +#include +#include +#include + +#include +#include +#include + +#include "core.h" +#include "mmc_ops.h" + +static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) +{ + int err; + struct mmc_command cmd; + + BUG_ON(!host); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_SELECT_CARD; + + if (card) { + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + } else { + cmd.arg = 0; + cmd.flags = MMC_RSP_NONE | MMC_CMD_AC; + } + + err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; + + return MMC_ERR_NONE; +} + +int mmc_select_card(struct mmc_card *card) +{ + BUG_ON(!card); + + return _mmc_select_card(card->host, card); +} + +int mmc_deselect_cards(struct mmc_host *host) +{ + return _mmc_select_card(host, NULL); +} + +int mmc_go_idle(struct mmc_host *host) +{ + int err; + struct mmc_command cmd; + + mmc_set_chip_select(host, MMC_CS_HIGH); + + mmc_delay(1); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_GO_IDLE_STATE; + cmd.arg = 0; + cmd.flags = MMC_RSP_NONE | MMC_CMD_BC; + + err = mmc_wait_for_cmd(host, &cmd, 0); + + mmc_delay(1); + + mmc_set_chip_select(host, MMC_CS_DONTCARE); + + mmc_delay(1); + + return err; +} + +int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) +{ + struct mmc_command cmd; + int i, err = 0; + + BUG_ON(!host); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_SEND_OP_COND; + cmd.arg = ocr; + cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; + + for (i = 100; i; i--) { + err = mmc_wait_for_cmd(host, &cmd, 0); + if (err != MMC_ERR_NONE) + break; + + if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0) + break; + + err = MMC_ERR_TIMEOUT; + + mmc_delay(10); + } + + if (rocr) + *rocr = cmd.resp[0]; + + return err; +} + +int mmc_all_send_cid(struct mmc_host *host, u32 *cid) +{ + int err; + struct mmc_command cmd; + + BUG_ON(!host); + BUG_ON(!cid); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_ALL_SEND_CID; + cmd.arg = 0; + cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; + + err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; + + memcpy(cid, cmd.resp, sizeof(u32) * 4); + + return MMC_ERR_NONE; +} + +int mmc_set_relative_addr(struct mmc_card *card) +{ + int err; + struct mmc_command cmd; + + BUG_ON(!card); + BUG_ON(!card->host); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_SET_RELATIVE_ADDR; + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; + + return MMC_ERR_NONE; +} + +int mmc_send_csd(struct mmc_card *card, u32 *csd) +{ + int err; + struct mmc_command cmd; + + BUG_ON(!card); + BUG_ON(!card->host); + BUG_ON(!csd); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_SEND_CSD; + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; + + memcpy(csd, cmd.resp, sizeof(u32) * 4); + + return MMC_ERR_NONE; +} + +int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) +{ + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_data data; + struct scatterlist sg; + + BUG_ON(!card); + BUG_ON(!card->host); + BUG_ON(!ext_csd); + + memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + + mrq.cmd = &cmd; + mrq.data = &data; + + 
cmd.opcode = MMC_SEND_EXT_CSD; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + data.blksz = 512; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; + + sg_init_one(&sg, ext_csd, 512); + + mmc_set_data_timeout(&data, card, 0); + + mmc_wait_for_req(card->host, &mrq); + + if (cmd.error != MMC_ERR_NONE) + return cmd.error; + if (data.error != MMC_ERR_NONE) + return data.error; + + return MMC_ERR_NONE; +} + +int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value) +{ + int err; + struct mmc_command cmd; + + BUG_ON(!card); + BUG_ON(!card->host); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_SWITCH; + cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | + (index << 16) | + (value << 8) | + set; + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; + + err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; + + return MMC_ERR_NONE; +} + +int mmc_send_status(struct mmc_card *card, u32 *status) +{ + int err; + struct mmc_command cmd; + + BUG_ON(!card); + BUG_ON(!card->host); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_SEND_STATUS; + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; + + if (status) + *status = cmd.resp[0]; + + return MMC_ERR_NONE; +} + diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h new file mode 100644 index 000000000000..7a481e8ca5ea --- /dev/null +++ b/drivers/mmc/core/mmc_ops.h @@ -0,0 +1,27 @@ +/* + * linux/drivers/mmc/mmc_ops.h + * + * Copyright 2006-2007 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef _MMC_MMC_OPS_H +#define _MMC_MMC_OPS_H + +int mmc_select_card(struct mmc_card *card); +int mmc_deselect_cards(struct mmc_host *host); +int mmc_go_idle(struct mmc_host *host); +int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr); +int mmc_all_send_cid(struct mmc_host *host, u32 *cid); +int mmc_set_relative_addr(struct mmc_card *card); +int mmc_send_csd(struct mmc_card *card, u32 *csd); +int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); +int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value); +int mmc_send_status(struct mmc_card *card, u32 *status); + +#endif + diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c new file mode 100644 index 000000000000..9697ce581101 --- /dev/null +++ b/drivers/mmc/core/sd_ops.c @@ -0,0 +1,316 @@ +/* + * linux/drivers/mmc/sd_ops.h + * + * Copyright 2006-2007 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include "core.h" +#include "sd_ops.h" + +/** + * mmc_wait_for_app_cmd - start an application command and wait for + completion + * @host: MMC host to start command + * @rca: RCA to send MMC_APP_CMD to + * @cmd: MMC command to start + * @retries: maximum number of retries + * + * Sends a MMC_APP_CMD, checks the card response, sends the command + * in the parameter and waits for it to complete. 
Return any error + * that occurred while the command was executing. Do not attempt to + * parse the response. + */ +int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, + struct mmc_command *cmd, int retries) +{ + struct mmc_request mrq; + + int i, err; + + BUG_ON(!cmd); + BUG_ON(retries < 0); + + err = MMC_ERR_INVALID; + + /* + * We have to resend MMC_APP_CMD for each attempt so + * we cannot use the retries field in mmc_command. + */ + for (i = 0;i <= retries;i++) { + memset(&mrq, 0, sizeof(struct mmc_request)); + + err = mmc_app_cmd(host, card); + if (err != MMC_ERR_NONE) + continue; + + memset(&mrq, 0, sizeof(struct mmc_request)); + + memset(cmd->resp, 0, sizeof(cmd->resp)); + cmd->retries = 0; + + mrq.cmd = cmd; + cmd->data = NULL; + + mmc_wait_for_req(host, &mrq); + + err = cmd->error; + if (cmd->error == MMC_ERR_NONE) + break; + } + + return err; +} + +EXPORT_SYMBOL(mmc_wait_for_app_cmd); + +int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) +{ + int err; + struct mmc_command cmd; + + BUG_ON(!host); + BUG_ON(card && (card->host != host)); + + cmd.opcode = MMC_APP_CMD; + + if (card) { + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + } else { + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_BCR; + } + + err = mmc_wait_for_cmd(host, &cmd, 0); + if (err != MMC_ERR_NONE) + return err; + + /* Check that card supported application commands */ + if (!(cmd.resp[0] & R1_APP_CMD)) + return MMC_ERR_FAILED; + + return MMC_ERR_NONE; +} + +int mmc_app_set_bus_width(struct mmc_card *card, int width) +{ + int err; + struct mmc_command cmd; + + BUG_ON(!card); + BUG_ON(!card->host); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = SD_APP_SET_BUS_WIDTH; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + + switch (width) { + case MMC_BUS_WIDTH_1: + cmd.arg = SD_BUS_WIDTH_1; + break; + case MMC_BUS_WIDTH_4: + cmd.arg = SD_BUS_WIDTH_4; + break; + default: + return MMC_ERR_INVALID; + } + + err = mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; + + return MMC_ERR_NONE; +} + +int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) +{ + struct mmc_command cmd; + int i, err = 0; + + BUG_ON(!host); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = SD_APP_OP_COND; + cmd.arg = ocr; + cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; + + for (i = 100; i; i--) { + err = mmc_wait_for_app_cmd(host, NULL, &cmd, MMC_CMD_RETRIES); + if (err != MMC_ERR_NONE) + break; + + if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0) + break; + + err = MMC_ERR_TIMEOUT; + + mmc_delay(10); + } + + if (rocr) + *rocr = cmd.resp[0]; + + return err; +} + +int mmc_send_if_cond(struct mmc_host *host, u32 ocr) +{ + struct mmc_command cmd; + int err; + static const u8 test_pattern = 0xAA; + + /* + * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND + * before SD_APP_OP_COND. This command will harmlessly fail for + * SD 1.0 cards. 
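/*
 * Worked example for mmc_send_if_cond() (host voltages assumed): if the
 * host supports any voltage in the 2.7-3.6V window, (ocr & 0xFF8000) is
 * non-zero and the CMD8 argument becomes (1 << 8) | 0xAA = 0x1AA. An SD
 * 2.0 card echoes the 0xAA check pattern in the low byte of its R7
 * response; anything else makes the function return MMC_ERR_FAILED.
 */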
+ */ + cmd.opcode = SD_SEND_IF_COND; + cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern; + cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR; + + err = mmc_wait_for_cmd(host, &cmd, 0); + if (err != MMC_ERR_NONE) + return err; + + if ((cmd.resp[0] & 0xFF) != test_pattern) + return MMC_ERR_FAILED; + + return MMC_ERR_NONE; +} + +int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca) +{ + int err; + struct mmc_command cmd; + + BUG_ON(!host); + BUG_ON(!rca); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = SD_SEND_RELATIVE_ADDR; + cmd.arg = 0; + cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; + + err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); + if (err != MMC_ERR_NONE) + return err; + + *rca = cmd.resp[0] >> 16; + + return MMC_ERR_NONE; +} + +int mmc_app_send_scr(struct mmc_card *card, u32 *scr) +{ + int err; + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_data data; + struct scatterlist sg; + + BUG_ON(!card); + BUG_ON(!card->host); + BUG_ON(!scr); + + err = mmc_app_cmd(card->host, card); + if (err != MMC_ERR_NONE) + return err; + + memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + + mrq.cmd = &cmd; + mrq.data = &data; + + cmd.opcode = SD_APP_SEND_SCR; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + data.blksz = 8; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; + + sg_init_one(&sg, scr, 8); + + mmc_set_data_timeout(&data, card, 0); + + mmc_wait_for_req(card->host, &mrq); + + if (cmd.error != MMC_ERR_NONE) + return cmd.error; + if (data.error != MMC_ERR_NONE) + return data.error; + + scr[0] = ntohl(scr[0]); + scr[1] = ntohl(scr[1]); + + return MMC_ERR_NONE; +} + +int mmc_sd_switch(struct mmc_card *card, int mode, int group, + u8 value, u8 *resp) +{ + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_data data; + struct scatterlist sg; + + BUG_ON(!card); + BUG_ON(!card->host); + + mode = !!mode; + value &= 0xF; + + memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + + mrq.cmd = &cmd; + mrq.data = &data; + + cmd.opcode = SD_SWITCH; + cmd.arg = mode << 31 | 0x00FFFFFF; + cmd.arg &= ~(0xF << (group * 4)); + cmd.arg |= value << (group * 4); + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + data.blksz = 64; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; + + sg_init_one(&sg, resp, 64); + + mmc_set_data_timeout(&data, card, 0); + + mmc_wait_for_req(card->host, &mrq); + + if (cmd.error != MMC_ERR_NONE) + return cmd.error; + if (data.error != MMC_ERR_NONE) + return data.error; + + return MMC_ERR_NONE; +} + diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h new file mode 100644 index 000000000000..1240fddba5e3 --- /dev/null +++ b/drivers/mmc/core/sd_ops.h @@ -0,0 +1,25 @@ +/* + * linux/drivers/mmc/sd_ops.h + * + * Copyright 2006-2007 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
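/*
 * Worked example for mmc_sd_switch() above, using constants from the new
 * include/linux/mmc/sd.h added later in this series: switching the
 * access-mode function group to high speed uses mode = SD_SWITCH_SET (1),
 * group = SD_SWITCH_GRP_ACCESS (0) and value = SD_SWITCH_ACCESS_HS (1),
 * so the CMD6 argument is built as
 *
 *	(1 << 31) | 0x00FFFFFF	->  0x80FFFFFF
 *	clear nibble 0		->  0x80FFFFF0
 *	value 1 into nibble 0	->  0x80FFFFF1
 *
 * All other function groups stay at 0xF ("no change").
 */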
+ */ + +#ifndef _MMC_SD_OPS_H +#define _MMC_SD_OPS_H + +int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card); +int mmc_app_set_bus_width(struct mmc_card *card, int width); +int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr); +int mmc_send_if_cond(struct mmc_host *host, u32 ocr); +int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca); +int mmc_app_send_scr(struct mmc_card *card, u32 *scr); +int mmc_sd_switch(struct mmc_card *card, int mode, int group, + u8 value, u8 *resp); + +#endif + diff --git a/drivers/mmc/core/sysfs.c b/drivers/mmc/core/sysfs.c index bf9a5f8beb86..5c9ce02e7e55 100644 --- a/drivers/mmc/core/sysfs.c +++ b/drivers/mmc/core/sysfs.c @@ -18,7 +18,7 @@ #include #include -#include "core.h" +#include "sysfs.h" #define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev) #define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) diff --git a/drivers/mmc/core/sysfs.h b/drivers/mmc/core/sysfs.h new file mode 100644 index 000000000000..80e29b358282 --- /dev/null +++ b/drivers/mmc/core/sysfs.h @@ -0,0 +1,27 @@ +/* + * linux/drivers/mmc/core/sysfs.h + * + * Copyright (C) 2003 Russell King, All Rights Reserved. + * Copyright 2007 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _MMC_CORE_SYSFS_H +#define _MMC_CORE_SYSFS_H + +void mmc_init_card(struct mmc_card *card, struct mmc_host *host); +int mmc_register_card(struct mmc_card *card); +void mmc_remove_card(struct mmc_card *card); + +struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev); +int mmc_add_host_sysfs(struct mmc_host *host); +void mmc_remove_host_sysfs(struct mmc_host *host); +void mmc_free_host_sysfs(struct mmc_host *host); + +int mmc_schedule_work(struct work_struct *work); +int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay); +void mmc_flush_scheduled_work(void); + +#endif diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index d8fd66cf28be..04bbe12fae8d 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -101,7 +101,7 @@ struct mmc_card; extern int mmc_wait_for_req(struct mmc_host *, struct mmc_request *); extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); -extern int mmc_wait_for_app_cmd(struct mmc_host *, unsigned int, +extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, struct mmc_command *, int); extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *, int); diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h new file mode 100644 index 000000000000..e3ed9b95040e --- /dev/null +++ b/include/linux/mmc/mmc.h @@ -0,0 +1,257 @@ +/* + * Header for MultiMediaCard (MMC) + * + * Copyright 2002 Hewlett-Packard Company + * + * Use consistent with the GNU GPL is permitted, + * provided that this copyright notice is + * preserved in its entirety in all copies and derived works. + * + * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, + * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS + * FITNESS FOR ANY PARTICULAR PURPOSE. + * + * Many thanks to Alessandro Rubini and Jonathan Corbet! 
+ * + * Based strongly on code by: + * + * Author: Yong-iL Joh + * Date : $Date: 2002/06/18 12:37:30 $ + * + * Author: Andrew Christian + * 15 May 2002 + */ + +#ifndef MMC_MMC_H +#define MMC_MMC_H + +/* Standard MMC commands (4.1) type argument response */ + /* class 1 */ +#define MMC_GO_IDLE_STATE 0 /* bc */ +#define MMC_SEND_OP_COND 1 /* bcr [31:0] OCR R3 */ +#define MMC_ALL_SEND_CID 2 /* bcr R2 */ +#define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */ +#define MMC_SET_DSR 4 /* bc [31:16] RCA */ +#define MMC_SWITCH 6 /* ac [31:0] See below R1b */ +#define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */ +#define MMC_SEND_EXT_CSD 8 /* adtc R1 */ +#define MMC_SEND_CSD 9 /* ac [31:16] RCA R2 */ +#define MMC_SEND_CID 10 /* ac [31:16] RCA R2 */ +#define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */ +#define MMC_STOP_TRANSMISSION 12 /* ac R1b */ +#define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */ +#define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */ + + /* class 2 */ +#define MMC_SET_BLOCKLEN 16 /* ac [31:0] block len R1 */ +#define MMC_READ_SINGLE_BLOCK 17 /* adtc [31:0] data addr R1 */ +#define MMC_READ_MULTIPLE_BLOCK 18 /* adtc [31:0] data addr R1 */ + + /* class 3 */ +#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ + + /* class 4 */ +#define MMC_SET_BLOCK_COUNT 23 /* adtc [31:0] data addr R1 */ +#define MMC_WRITE_BLOCK 24 /* adtc [31:0] data addr R1 */ +#define MMC_WRITE_MULTIPLE_BLOCK 25 /* adtc R1 */ +#define MMC_PROGRAM_CID 26 /* adtc R1 */ +#define MMC_PROGRAM_CSD 27 /* adtc R1 */ + + /* class 6 */ +#define MMC_SET_WRITE_PROT 28 /* ac [31:0] data addr R1b */ +#define MMC_CLR_WRITE_PROT 29 /* ac [31:0] data addr R1b */ +#define MMC_SEND_WRITE_PROT 30 /* adtc [31:0] wpdata addr R1 */ + + /* class 5 */ +#define MMC_ERASE_GROUP_START 35 /* ac [31:0] data addr R1 */ +#define MMC_ERASE_GROUP_END 36 /* ac [31:0] data addr R1 */ +#define MMC_ERASE 38 /* ac R1b */ + + /* class 9 */ +#define MMC_FAST_IO 39 /* ac R4 */ +#define MMC_GO_IRQ_STATE 40 /* bcr R5 */ + + /* class 7 */ +#define MMC_LOCK_UNLOCK 42 /* adtc R1b */ + + /* class 8 */ +#define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ +#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ + +/* + * MMC_SWITCH argument format: + * + * [31:26] Always 0 + * [25:24] Access Mode + * [23:16] Location of target Byte in EXT_CSD + * [15:08] Value Byte + * [07:03] Always 0 + * [02:00] Command Set + */ + +/* + MMC status in R1 + Type + e : error bit + s : status bit + r : detected and set for the actual command response + x : detected and set during command execution. the host must poll + the card by sending status command in order to read these bits. + Clear condition + a : according to the card state + b : always related to the previous command. 
Reception of + a valid command will clear it (with a delay of one command) + c : clear by read + */ + +#define R1_OUT_OF_RANGE (1 << 31) /* er, c */ +#define R1_ADDRESS_ERROR (1 << 30) /* erx, c */ +#define R1_BLOCK_LEN_ERROR (1 << 29) /* er, c */ +#define R1_ERASE_SEQ_ERROR (1 << 28) /* er, c */ +#define R1_ERASE_PARAM (1 << 27) /* ex, c */ +#define R1_WP_VIOLATION (1 << 26) /* erx, c */ +#define R1_CARD_IS_LOCKED (1 << 25) /* sx, a */ +#define R1_LOCK_UNLOCK_FAILED (1 << 24) /* erx, c */ +#define R1_COM_CRC_ERROR (1 << 23) /* er, b */ +#define R1_ILLEGAL_COMMAND (1 << 22) /* er, b */ +#define R1_CARD_ECC_FAILED (1 << 21) /* ex, c */ +#define R1_CC_ERROR (1 << 20) /* erx, c */ +#define R1_ERROR (1 << 19) /* erx, c */ +#define R1_UNDERRUN (1 << 18) /* ex, c */ +#define R1_OVERRUN (1 << 17) /* ex, c */ +#define R1_CID_CSD_OVERWRITE (1 << 16) /* erx, c, CID/CSD overwrite */ +#define R1_WP_ERASE_SKIP (1 << 15) /* sx, c */ +#define R1_CARD_ECC_DISABLED (1 << 14) /* sx, a */ +#define R1_ERASE_RESET (1 << 13) /* sr, c */ +#define R1_STATUS(x) (x & 0xFFFFE000) +#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ +#define R1_READY_FOR_DATA (1 << 8) /* sx, a */ +#define R1_APP_CMD (1 << 5) /* sr, c */ + +/* These are unpacked versions of the actual responses */ + +struct _mmc_csd { + u8 csd_structure; + u8 spec_vers; + u8 taac; + u8 nsac; + u8 tran_speed; + u16 ccc; + u8 read_bl_len; + u8 read_bl_partial; + u8 write_blk_misalign; + u8 read_blk_misalign; + u8 dsr_imp; + u16 c_size; + u8 vdd_r_curr_min; + u8 vdd_r_curr_max; + u8 vdd_w_curr_min; + u8 vdd_w_curr_max; + u8 c_size_mult; + union { + struct { /* MMC system specification version 3.1 */ + u8 erase_grp_size; + u8 erase_grp_mult; + } v31; + struct { /* MMC system specification version 2.2 */ + u8 sector_size; + u8 erase_grp_size; + } v22; + } erase; + u8 wp_grp_size; + u8 wp_grp_enable; + u8 default_ecc; + u8 r2w_factor; + u8 write_bl_len; + u8 write_bl_partial; + u8 file_format_grp; + u8 copy; + u8 perm_write_protect; + u8 tmp_write_protect; + u8 file_format; + u8 ecc; +}; + +/* + * OCR bits are mostly in host.h + */ +#define MMC_CARD_BUSY 0x80000000 /* Card Power up status bit */ + +/* + * Card Command Classes (CCC) + */ +#define CCC_BASIC (1<<0) /* (0) Basic protocol functions */ + /* (CMD0,1,2,3,4,7,9,10,12,13,15) */ +#define CCC_STREAM_READ (1<<1) /* (1) Stream read commands */ + /* (CMD11) */ +#define CCC_BLOCK_READ (1<<2) /* (2) Block read commands */ + /* (CMD16,17,18) */ +#define CCC_STREAM_WRITE (1<<3) /* (3) Stream write commands */ + /* (CMD20) */ +#define CCC_BLOCK_WRITE (1<<4) /* (4) Block write commands */ + /* (CMD16,24,25,26,27) */ +#define CCC_ERASE (1<<5) /* (5) Ability to erase blocks */ + /* (CMD32,33,34,35,36,37,38,39) */ +#define CCC_WRITE_PROT (1<<6) /* (6) Able to write protect blocks */ + /* (CMD28,29,30) */ +#define CCC_LOCK_CARD (1<<7) /* (7) Able to lock down card */ + /* (CMD16,CMD42) */ +#define CCC_APP_SPEC (1<<8) /* (8) Application specific */ + /* (CMD55,56,57,ACMD*) */ +#define CCC_IO_MODE (1<<9) /* (9) I/O mode */ + /* (CMD5,39,40,52,53) */ +#define CCC_SWITCH (1<<10) /* (10) High speed switch */ + /* (CMD6,34,35,36,37,50) */ + /* (11) Reserved */ + /* (CMD?) 
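/*
 * Illustrative helper (not part of the patch) showing how the R1 macros
 * above are meant to be combined: a status word of 0x00000900 decodes to
 * R1_CURRENT_STATE() == 4 ("tran") with R1_READY_FOR_DATA set, i.e. the
 * card is selected and idle.
 */
static inline int example_card_ready_for_data(u32 status)
{
	return R1_CURRENT_STATE(status) == 4 && (status & R1_READY_FOR_DATA);
}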
*/ + +/* + * CSD field definitions + */ + +#define CSD_STRUCT_VER_1_0 0 /* Valid for system specification 1.0 - 1.2 */ +#define CSD_STRUCT_VER_1_1 1 /* Valid for system specification 1.4 - 2.2 */ +#define CSD_STRUCT_VER_1_2 2 /* Valid for system specification 3.1 - 3.2 - 3.31 - 4.0 - 4.1 */ +#define CSD_STRUCT_EXT_CSD 3 /* Version is coded in CSD_STRUCTURE in EXT_CSD */ + +#define CSD_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.2 */ +#define CSD_SPEC_VER_1 1 /* Implements system specification 1.4 */ +#define CSD_SPEC_VER_2 2 /* Implements system specification 2.0 - 2.2 */ +#define CSD_SPEC_VER_3 3 /* Implements system specification 3.1 - 3.2 - 3.31 */ +#define CSD_SPEC_VER_4 4 /* Implements system specification 4.0 - 4.1 */ + +/* + * EXT_CSD fields + */ + +#define EXT_CSD_BUS_WIDTH 183 /* R/W */ +#define EXT_CSD_HS_TIMING 185 /* R/W */ +#define EXT_CSD_CARD_TYPE 196 /* RO */ +#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ + +/* + * EXT_CSD field definitions + */ + +#define EXT_CSD_CMD_SET_NORMAL (1<<0) +#define EXT_CSD_CMD_SET_SECURE (1<<1) +#define EXT_CSD_CMD_SET_CPSECURE (1<<2) + +#define EXT_CSD_CARD_TYPE_26 (1<<0) /* Card can run at 26MHz */ +#define EXT_CSD_CARD_TYPE_52 (1<<1) /* Card can run at 52MHz */ + +#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ +#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ +#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */ + +/* + * MMC_SWITCH access modes + */ + +#define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */ +#define MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */ +#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */ +#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */ + +#endif /* MMC_MMC_PROTOCOL_H */ + diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h deleted file mode 100644 index 3ca91a6fc23f..000000000000 --- a/include/linux/mmc/protocol.h +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Header for MultiMediaCard (MMC) - * - * Copyright 2002 Hewlett-Packard Company - * - * Use consistent with the GNU GPL is permitted, - * provided that this copyright notice is - * preserved in its entirety in all copies and derived works. - * - * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, - * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS - * FITNESS FOR ANY PARTICULAR PURPOSE. - * - * Many thanks to Alessandro Rubini and Jonathan Corbet! 
- * - * Based strongly on code by: - * - * Author: Yong-iL Joh - * Date : $Date: 2002/06/18 12:37:30 $ - * - * Author: Andrew Christian - * 15 May 2002 - */ - -#ifndef MMC_MMC_PROTOCOL_H -#define MMC_MMC_PROTOCOL_H - -/* Standard MMC commands (4.1) type argument response */ - /* class 1 */ -#define MMC_GO_IDLE_STATE 0 /* bc */ -#define MMC_SEND_OP_COND 1 /* bcr [31:0] OCR R3 */ -#define MMC_ALL_SEND_CID 2 /* bcr R2 */ -#define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */ -#define MMC_SET_DSR 4 /* bc [31:16] RCA */ -#define MMC_SWITCH 6 /* ac [31:0] See below R1b */ -#define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */ -#define MMC_SEND_EXT_CSD 8 /* adtc R1 */ -#define MMC_SEND_CSD 9 /* ac [31:16] RCA R2 */ -#define MMC_SEND_CID 10 /* ac [31:16] RCA R2 */ -#define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */ -#define MMC_STOP_TRANSMISSION 12 /* ac R1b */ -#define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */ -#define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */ - - /* class 2 */ -#define MMC_SET_BLOCKLEN 16 /* ac [31:0] block len R1 */ -#define MMC_READ_SINGLE_BLOCK 17 /* adtc [31:0] data addr R1 */ -#define MMC_READ_MULTIPLE_BLOCK 18 /* adtc [31:0] data addr R1 */ - - /* class 3 */ -#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ - - /* class 4 */ -#define MMC_SET_BLOCK_COUNT 23 /* adtc [31:0] data addr R1 */ -#define MMC_WRITE_BLOCK 24 /* adtc [31:0] data addr R1 */ -#define MMC_WRITE_MULTIPLE_BLOCK 25 /* adtc R1 */ -#define MMC_PROGRAM_CID 26 /* adtc R1 */ -#define MMC_PROGRAM_CSD 27 /* adtc R1 */ - - /* class 6 */ -#define MMC_SET_WRITE_PROT 28 /* ac [31:0] data addr R1b */ -#define MMC_CLR_WRITE_PROT 29 /* ac [31:0] data addr R1b */ -#define MMC_SEND_WRITE_PROT 30 /* adtc [31:0] wpdata addr R1 */ - - /* class 5 */ -#define MMC_ERASE_GROUP_START 35 /* ac [31:0] data addr R1 */ -#define MMC_ERASE_GROUP_END 36 /* ac [31:0] data addr R1 */ -#define MMC_ERASE 38 /* ac R1b */ - - /* class 9 */ -#define MMC_FAST_IO 39 /* ac R4 */ -#define MMC_GO_IRQ_STATE 40 /* bcr R5 */ - - /* class 7 */ -#define MMC_LOCK_UNLOCK 42 /* adtc R1b */ - - /* class 8 */ -#define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ -#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ - -/* SD commands type argument response */ - /* class 0 */ -/* This is basically the same command as for MMC with some quirks. */ -#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */ -#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */ - - /* class 10 */ -#define SD_SWITCH 6 /* adtc [31:0] See below R1 */ - - /* Application commands */ -#define SD_APP_SET_BUS_WIDTH 6 /* ac [1:0] bus width R1 */ -#define SD_APP_SEND_NUM_WR_BLKS 22 /* adtc R1 */ -#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ -#define SD_APP_SEND_SCR 51 /* adtc R1 */ - -/* - * MMC_SWITCH argument format: - * - * [31:26] Always 0 - * [25:24] Access Mode - * [23:16] Location of target Byte in EXT_CSD - * [15:08] Value Byte - * [07:03] Always 0 - * [02:00] Command Set - */ - -/* - * SD_SWITCH argument format: - * - * [31] Check (0) or switch (1) - * [30:24] Reserved (0) - * [23:20] Function group 6 - * [19:16] Function group 5 - * [15:12] Function group 4 - * [11:8] Function group 3 - * [7:4] Function group 2 - * [3:0] Function group 1 - */ - -/* - * SD_SEND_IF_COND argument format: - * - * [31:12] Reserved (0) - * [11:8] Host Voltage Supply Flags - * [7:0] Check Pattern (0xAA) - */ - -/* - MMC status in R1 - Type - e : error bit - s : status bit - r : detected and set for the actual command response - x : detected and set during command execution. 
the host must poll - the card by sending status command in order to read these bits. - Clear condition - a : according to the card state - b : always related to the previous command. Reception of - a valid command will clear it (with a delay of one command) - c : clear by read - */ - -#define R1_OUT_OF_RANGE (1 << 31) /* er, c */ -#define R1_ADDRESS_ERROR (1 << 30) /* erx, c */ -#define R1_BLOCK_LEN_ERROR (1 << 29) /* er, c */ -#define R1_ERASE_SEQ_ERROR (1 << 28) /* er, c */ -#define R1_ERASE_PARAM (1 << 27) /* ex, c */ -#define R1_WP_VIOLATION (1 << 26) /* erx, c */ -#define R1_CARD_IS_LOCKED (1 << 25) /* sx, a */ -#define R1_LOCK_UNLOCK_FAILED (1 << 24) /* erx, c */ -#define R1_COM_CRC_ERROR (1 << 23) /* er, b */ -#define R1_ILLEGAL_COMMAND (1 << 22) /* er, b */ -#define R1_CARD_ECC_FAILED (1 << 21) /* ex, c */ -#define R1_CC_ERROR (1 << 20) /* erx, c */ -#define R1_ERROR (1 << 19) /* erx, c */ -#define R1_UNDERRUN (1 << 18) /* ex, c */ -#define R1_OVERRUN (1 << 17) /* ex, c */ -#define R1_CID_CSD_OVERWRITE (1 << 16) /* erx, c, CID/CSD overwrite */ -#define R1_WP_ERASE_SKIP (1 << 15) /* sx, c */ -#define R1_CARD_ECC_DISABLED (1 << 14) /* sx, a */ -#define R1_ERASE_RESET (1 << 13) /* sr, c */ -#define R1_STATUS(x) (x & 0xFFFFE000) -#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ -#define R1_READY_FOR_DATA (1 << 8) /* sx, a */ -#define R1_APP_CMD (1 << 5) /* sr, c */ - -/* These are unpacked versions of the actual responses */ - -struct _mmc_csd { - u8 csd_structure; - u8 spec_vers; - u8 taac; - u8 nsac; - u8 tran_speed; - u16 ccc; - u8 read_bl_len; - u8 read_bl_partial; - u8 write_blk_misalign; - u8 read_blk_misalign; - u8 dsr_imp; - u16 c_size; - u8 vdd_r_curr_min; - u8 vdd_r_curr_max; - u8 vdd_w_curr_min; - u8 vdd_w_curr_max; - u8 c_size_mult; - union { - struct { /* MMC system specification version 3.1 */ - u8 erase_grp_size; - u8 erase_grp_mult; - } v31; - struct { /* MMC system specification version 2.2 */ - u8 sector_size; - u8 erase_grp_size; - } v22; - } erase; - u8 wp_grp_size; - u8 wp_grp_enable; - u8 default_ecc; - u8 r2w_factor; - u8 write_bl_len; - u8 write_bl_partial; - u8 file_format_grp; - u8 copy; - u8 perm_write_protect; - u8 tmp_write_protect; - u8 file_format; - u8 ecc; -}; - -/* - * OCR bits are mostly in host.h - */ -#define MMC_CARD_BUSY 0x80000000 /* Card Power up status bit */ - -/* - * Card Command Classes (CCC) - */ -#define CCC_BASIC (1<<0) /* (0) Basic protocol functions */ - /* (CMD0,1,2,3,4,7,9,10,12,13,15) */ -#define CCC_STREAM_READ (1<<1) /* (1) Stream read commands */ - /* (CMD11) */ -#define CCC_BLOCK_READ (1<<2) /* (2) Block read commands */ - /* (CMD16,17,18) */ -#define CCC_STREAM_WRITE (1<<3) /* (3) Stream write commands */ - /* (CMD20) */ -#define CCC_BLOCK_WRITE (1<<4) /* (4) Block write commands */ - /* (CMD16,24,25,26,27) */ -#define CCC_ERASE (1<<5) /* (5) Ability to erase blocks */ - /* (CMD32,33,34,35,36,37,38,39) */ -#define CCC_WRITE_PROT (1<<6) /* (6) Able to write protect blocks */ - /* (CMD28,29,30) */ -#define CCC_LOCK_CARD (1<<7) /* (7) Able to lock down card */ - /* (CMD16,CMD42) */ -#define CCC_APP_SPEC (1<<8) /* (8) Application specific */ - /* (CMD55,56,57,ACMD*) */ -#define CCC_IO_MODE (1<<9) /* (9) I/O mode */ - /* (CMD5,39,40,52,53) */ -#define CCC_SWITCH (1<<10) /* (10) High speed switch */ - /* (CMD6,34,35,36,37,50) */ - /* (11) Reserved */ - /* (CMD?) 
*/ - -/* - * CSD field definitions - */ - -#define CSD_STRUCT_VER_1_0 0 /* Valid for system specification 1.0 - 1.2 */ -#define CSD_STRUCT_VER_1_1 1 /* Valid for system specification 1.4 - 2.2 */ -#define CSD_STRUCT_VER_1_2 2 /* Valid for system specification 3.1 - 3.2 - 3.31 - 4.0 - 4.1 */ -#define CSD_STRUCT_EXT_CSD 3 /* Version is coded in CSD_STRUCTURE in EXT_CSD */ - -#define CSD_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.2 */ -#define CSD_SPEC_VER_1 1 /* Implements system specification 1.4 */ -#define CSD_SPEC_VER_2 2 /* Implements system specification 2.0 - 2.2 */ -#define CSD_SPEC_VER_3 3 /* Implements system specification 3.1 - 3.2 - 3.31 */ -#define CSD_SPEC_VER_4 4 /* Implements system specification 4.0 - 4.1 */ - -/* - * EXT_CSD fields - */ - -#define EXT_CSD_BUS_WIDTH 183 /* R/W */ -#define EXT_CSD_HS_TIMING 185 /* R/W */ -#define EXT_CSD_CARD_TYPE 196 /* RO */ -#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ - -/* - * EXT_CSD field definitions - */ - -#define EXT_CSD_CMD_SET_NORMAL (1<<0) -#define EXT_CSD_CMD_SET_SECURE (1<<1) -#define EXT_CSD_CMD_SET_CPSECURE (1<<2) - -#define EXT_CSD_CARD_TYPE_26 (1<<0) /* Card can run at 26MHz */ -#define EXT_CSD_CARD_TYPE_52 (1<<1) /* Card can run at 52MHz */ - -#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ -#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ -#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */ - -/* - * MMC_SWITCH access modes - */ - -#define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */ -#define MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */ -#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */ -#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */ - -/* - * SCR field definitions - */ - -#define SCR_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.01 */ -#define SCR_SPEC_VER_1 1 /* Implements system specification 1.10 */ -#define SCR_SPEC_VER_2 2 /* Implements system specification 2.00 */ - -/* - * SD bus widths - */ -#define SD_BUS_WIDTH_1 0 -#define SD_BUS_WIDTH_4 2 - -#endif /* MMC_MMC_PROTOCOL_H */ - diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h new file mode 100644 index 000000000000..f310062cffb4 --- /dev/null +++ b/include/linux/mmc/sd.h @@ -0,0 +1,83 @@ +/* + * include/linux/mmc/sd.h + * + * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef MMC_SD_H +#define MMC_SD_H + +/* SD commands type argument response */ + /* class 0 */ +/* This is basically the same command as for MMC with some quirks. 
*/ +#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */ +#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */ + + /* class 10 */ +#define SD_SWITCH 6 /* adtc [31:0] See below R1 */ + + /* Application commands */ +#define SD_APP_SET_BUS_WIDTH 6 /* ac [1:0] bus width R1 */ +#define SD_APP_SEND_NUM_WR_BLKS 22 /* adtc R1 */ +#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ +#define SD_APP_SEND_SCR 51 /* adtc R1 */ + +/* + * SD_SWITCH argument format: + * + * [31] Check (0) or switch (1) + * [30:24] Reserved (0) + * [23:20] Function group 6 + * [19:16] Function group 5 + * [15:12] Function group 4 + * [11:8] Function group 3 + * [7:4] Function group 2 + * [3:0] Function group 1 + */ + +/* + * SD_SEND_IF_COND argument format: + * + * [31:12] Reserved (0) + * [11:8] Host Voltage Supply Flags + * [7:0] Check Pattern (0xAA) + */ + +/* + * SCR field definitions + */ + +#define SCR_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.01 */ +#define SCR_SPEC_VER_1 1 /* Implements system specification 1.10 */ +#define SCR_SPEC_VER_2 2 /* Implements system specification 2.00 */ + +/* + * SD bus widths + */ +#define SD_BUS_WIDTH_1 0 +#define SD_BUS_WIDTH_4 2 + +/* + * SD_SWITCH mode + */ +#define SD_SWITCH_CHECK 0 +#define SD_SWITCH_SET 1 + +/* + * SD_SWITCH function groups + */ +#define SD_SWITCH_GRP_ACCESS 0 + +/* + * SD_SWITCH access modes + */ +#define SD_SWITCH_ACCESS_DEF 0 +#define SD_SWITCH_ACCESS_HS 1 + +#endif + -- cgit v1.2.3 From 7ea239d9e6d6993469a6a8ca83ff23834dfc3fce Mon Sep 17 00:00:00 2001 From: Pierre Ossman Date: Sun, 31 Dec 2006 00:11:32 +0100 Subject: mmc: add bus handler Delegate protocol handling to "bus handlers". This allows the core to just handle the task of arbitrating the bus. Initialisation and pampering of cards is now done by the different bus handlers. This design also allows MMC and SD (and later SDIO) to be more cleanly separated, allowing easier maintenance. Signed-off-by: Pierre Ossman --- drivers/mmc/core/Makefile | 2 +- drivers/mmc/core/core.c | 817 ++++++++-------------------------------------- drivers/mmc/core/core.h | 37 +++ drivers/mmc/core/mmc.c | 430 ++++++++++++++++++++++++ drivers/mmc/core/sd.c | 431 ++++++++++++++++++++++++ include/linux/mmc/host.h | 7 +- 6 files changed, 1041 insertions(+), 683 deletions(-) create mode 100644 drivers/mmc/core/mmc.c create mode 100644 drivers/mmc/core/sd.c (limited to 'include/linux') diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index 5977abf3e41b..1075b02ae754 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile @@ -7,5 +7,5 @@ ifeq ($(CONFIG_MMC_DEBUG),y) endif obj-$(CONFIG_MMC) += mmc_core.o -mmc_core-y := core.o sysfs.o mmc_ops.o sd_ops.o +mmc_core-y := core.o sysfs.o mmc.o mmc_ops.o sd.o sd_ops.o diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 310be2fe1944..75333a2461df 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -32,36 +32,8 @@ #include "mmc_ops.h" #include "sd_ops.h" -#define CMD_RETRIES 3 - -/* - * OCR Bit positions to 10s of Vdd mV. 
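/*
 * Minimal sketch of the "bus handler" design described in the commit
 * message above (hypothetical names; the real handlers are the mmc.c and
 * sd.c files added by this patch). A handler supplies remove/detect
 * callbacks and registers them with mmc_attach_bus() while holding the
 * host claim.
 */
static void example_remove(struct mmc_host *host)
{
	/* Free any cards this handler created. */
}

static void example_detect(struct mmc_host *host)
{
	/* Poll the card; call mmc_detach_bus() if it has gone away. */
}

static const struct mmc_bus_ops example_bus_ops = {
	.remove	= example_remove,
	.detect	= example_detect,
};

/*
 * An attach function would do: mmc_claim_host(host);
 * mmc_attach_bus(host, &example_bus_ops); then initialise the card.
 */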
- */ -static const unsigned short mmc_ocr_bit_to_vdd[] = { - 150, 155, 160, 165, 170, 180, 190, 200, - 210, 220, 230, 240, 250, 260, 270, 280, - 290, 300, 310, 320, 330, 340, 350, 360 -}; - -static const unsigned int tran_exp[] = { - 10000, 100000, 1000000, 10000000, - 0, 0, 0, 0 -}; - -static const unsigned char tran_mant[] = { - 0, 10, 12, 13, 15, 20, 25, 30, - 35, 40, 45, 50, 55, 60, 70, 80, -}; - -static const unsigned int tacc_exp[] = { - 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, -}; - -static const unsigned int tacc_mant[] = { - 0, 10, 12, 13, 15, 20, 25, 30, - 35, 40, 45, 50, 55, 60, 70, 80, -}; - +extern int mmc_attach_mmc(struct mmc_host *host, u32 ocr); +extern int mmc_attach_sd(struct mmc_host *host, u32 ocr); /** * mmc_request_done - finish processing an MMC request @@ -303,6 +275,10 @@ void mmc_release_host(struct mmc_host *host) EXPORT_SYMBOL(mmc_release_host); +/* + * Internal function that does the actual ios call to the host driver, + * optionally printing some debug output. + */ static inline void mmc_set_ios(struct mmc_host *host) { struct mmc_ios *ios = &host->ios; @@ -316,17 +292,53 @@ static inline void mmc_set_ios(struct mmc_host *host) host->ops->set_ios(host, ios); } +/* + * Control chip select pin on a host. + */ void mmc_set_chip_select(struct mmc_host *host, int mode) { host->ios.chip_select = mode; mmc_set_ios(host); } +/* + * Sets the host clock to the highest possible frequency that + * is below "hz". + */ +void mmc_set_clock(struct mmc_host *host, unsigned int hz) +{ + WARN_ON(hz < host->f_min); + + if (hz > host->f_max) + hz = host->f_max; + + host->ios.clock = hz; + mmc_set_ios(host); +} + +/* + * Change the bus mode (open drain/push-pull) of a host. + */ +void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) +{ + host->ios.bus_mode = mode; + mmc_set_ios(host); +} + +/* + * Change data bus width of a host. + */ +void mmc_set_bus_width(struct mmc_host *host, unsigned int width) +{ + host->ios.bus_width = width; + mmc_set_ios(host); +} + /* * Mask off any voltages we don't support and select * the lowest voltage */ -static u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) +u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) { int bit; @@ -347,235 +359,19 @@ static u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) return ocr; } -#define UNSTUFF_BITS(resp,start,size) \ - ({ \ - const int __size = size; \ - const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \ - const int __off = 3 - ((start) / 32); \ - const int __shft = (start) & 31; \ - u32 __res; \ - \ - __res = resp[__off] >> __shft; \ - if (__size + __shft > 32) \ - __res |= resp[__off-1] << ((32 - __shft) % 32); \ - __res & __mask; \ - }) - -/* - * Given the decoded CSD structure, decode the raw CID to our CID structure. - */ -static void mmc_decode_cid(struct mmc_card *card) -{ - u32 *resp = card->raw_cid; - - memset(&card->cid, 0, sizeof(struct mmc_cid)); - - if (mmc_card_sd(card)) { - /* - * SD doesn't currently have a version field so we will - * have to assume we can parse this. 
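/*
 * Worked example for the UNSTUFF_BITS() macro above (response words are
 * stored most-significant first, so resp[0] holds bits 127..96): asking
 * for bits [127:120] with UNSTUFF_BITS(resp, 120, 8) gives __off = 0 and
 * __shft = 24, i.e. (resp[0] >> 24) & 0xFF - exactly the CID manufacturer
 * id field decoded by mmc_decode_cid().
 */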
- */ - card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); - card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); - card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); - card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); - card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); - card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); - card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); - card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4); - card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4); - card->cid.serial = UNSTUFF_BITS(resp, 24, 32); - card->cid.year = UNSTUFF_BITS(resp, 12, 8); - card->cid.month = UNSTUFF_BITS(resp, 8, 4); - - card->cid.year += 2000; /* SD cards year offset */ - } else { - /* - * The selection of the format here is based upon published - * specs from sandisk and from what people have reported. - */ - switch (card->csd.mmca_vsn) { - case 0: /* MMC v1.0 - v1.2 */ - case 1: /* MMC v1.4 */ - card->cid.manfid = UNSTUFF_BITS(resp, 104, 24); - card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); - card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); - card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); - card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); - card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); - card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); - card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); - card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); - card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); - card->cid.serial = UNSTUFF_BITS(resp, 16, 24); - card->cid.month = UNSTUFF_BITS(resp, 12, 4); - card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; - break; - - case 2: /* MMC v2.0 - v2.2 */ - case 3: /* MMC v3.1 - v3.3 */ - case 4: /* MMC v4 */ - card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); - card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); - card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); - card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); - card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); - card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); - card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); - card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); - card->cid.serial = UNSTUFF_BITS(resp, 16, 32); - card->cid.month = UNSTUFF_BITS(resp, 12, 4); - card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; - break; - - default: - printk("%s: card has unknown MMCA version %d\n", - mmc_hostname(card->host), card->csd.mmca_vsn); - mmc_card_set_bad(card); - break; - } - } -} - -/* - * Given a 128-bit response, decode to our card CSD structure. 
- */ -static void mmc_decode_csd(struct mmc_card *card) -{ - struct mmc_csd *csd = &card->csd; - unsigned int e, m, csd_struct; - u32 *resp = card->raw_csd; - - if (mmc_card_sd(card)) { - csd_struct = UNSTUFF_BITS(resp, 126, 2); - - switch (csd_struct) { - case 0: - m = UNSTUFF_BITS(resp, 115, 4); - e = UNSTUFF_BITS(resp, 112, 3); - csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; - csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; - - m = UNSTUFF_BITS(resp, 99, 4); - e = UNSTUFF_BITS(resp, 96, 3); - csd->max_dtr = tran_exp[e] * tran_mant[m]; - csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); - - e = UNSTUFF_BITS(resp, 47, 3); - m = UNSTUFF_BITS(resp, 62, 12); - csd->capacity = (1 + m) << (e + 2); - - csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); - csd->read_partial = UNSTUFF_BITS(resp, 79, 1); - csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); - csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); - csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); - csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); - csd->write_partial = UNSTUFF_BITS(resp, 21, 1); - break; - case 1: - /* - * This is a block-addressed SDHC card. Most - * interesting fields are unused and have fixed - * values. To avoid getting tripped by buggy cards, - * we assume those fixed values ourselves. - */ - mmc_card_set_blockaddr(card); - - csd->tacc_ns = 0; /* Unused */ - csd->tacc_clks = 0; /* Unused */ - - m = UNSTUFF_BITS(resp, 99, 4); - e = UNSTUFF_BITS(resp, 96, 3); - csd->max_dtr = tran_exp[e] * tran_mant[m]; - csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); - - m = UNSTUFF_BITS(resp, 48, 22); - csd->capacity = (1 + m) << 10; - - csd->read_blkbits = 9; - csd->read_partial = 0; - csd->write_misalign = 0; - csd->read_misalign = 0; - csd->r2w_factor = 4; /* Unused */ - csd->write_blkbits = 9; - csd->write_partial = 0; - break; - default: - printk("%s: unrecognised CSD structure version %d\n", - mmc_hostname(card->host), csd_struct); - mmc_card_set_bad(card); - return; - } - } else { - /* - * We only understand CSD structure v1.1 and v1.2. - * v1.2 has extra information in bits 15, 11 and 10. - */ - csd_struct = UNSTUFF_BITS(resp, 126, 2); - if (csd_struct != 1 && csd_struct != 2) { - printk("%s: unrecognised CSD structure version %d\n", - mmc_hostname(card->host), csd_struct); - mmc_card_set_bad(card); - return; - } - - csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); - m = UNSTUFF_BITS(resp, 115, 4); - e = UNSTUFF_BITS(resp, 112, 3); - csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; - csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; - - m = UNSTUFF_BITS(resp, 99, 4); - e = UNSTUFF_BITS(resp, 96, 3); - csd->max_dtr = tran_exp[e] * tran_mant[m]; - csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); - - e = UNSTUFF_BITS(resp, 47, 3); - m = UNSTUFF_BITS(resp, 62, 12); - csd->capacity = (1 + m) << (e + 2); - - csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); - csd->read_partial = UNSTUFF_BITS(resp, 79, 1); - csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); - csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); - csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); - csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); - csd->write_partial = UNSTUFF_BITS(resp, 21, 1); - } -} - /* - * Given a 64-bit response, decode to our card SCR structure. + * Select timing parameters for host. 
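/*
 * Worked example for the capacity arithmetic in the CSD decode above
 * (register values assumed): a standard-capacity card reporting
 * C_SIZE = 4095, C_SIZE_MULT = 7 and READ_BL_LEN = 9 yields
 *
 *	capacity = (1 + 4095) << (7 + 2) = 2097152 blocks of 512 bytes = 1 GiB
 *
 * while a block-addressed SDHC card (CSD structure version 1) always uses
 * 512-byte sectors and capacity = (1 + C_SIZE) << 10.
 */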
*/ -static void mmc_decode_scr(struct mmc_card *card) +void mmc_set_timing(struct mmc_host *host, unsigned int timing) { - struct sd_scr *scr = &card->scr; - unsigned int scr_struct; - u32 resp[4]; - - BUG_ON(!mmc_card_sd(card)); - - resp[3] = card->raw_scr[1]; - resp[2] = card->raw_scr[0]; - - scr_struct = UNSTUFF_BITS(resp, 60, 4); - if (scr_struct != 0) { - printk("%s: unrecognised SCR structure version %d\n", - mmc_hostname(card->host), scr_struct); - mmc_card_set_bad(card); - return; - } - - scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); - scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); + host->ios.timing = timing; + mmc_set_ios(host); } /* * Allocate a new MMC card */ -static struct mmc_card * -mmc_alloc_card(struct mmc_host *host, u32 *raw_cid) +struct mmc_card *mmc_alloc_card(struct mmc_host *host) { struct mmc_card *card; @@ -584,7 +380,6 @@ mmc_alloc_card(struct mmc_host *host, u32 *raw_cid) return ERR_PTR(-ENOMEM); mmc_init_card(card, host); - memcpy(card->raw_cid, raw_cid, sizeof(card->raw_cid)); return card; } @@ -634,406 +429,66 @@ static void mmc_power_off(struct mmc_host *host) } /* - * Discover the card by requesting its CID. - * - * Create a mmc_card entry for the discovered card, assigning - * it an RCA, and save the raw CID for decoding later. + * Assign a mmc bus handler to a host. Only one bus handler may control a + * host at any given time. */ -static void mmc_discover_card(struct mmc_host *host) +void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops) { - unsigned int err; - u32 cid[4]; - - BUG_ON(host->card); - - err = mmc_all_send_cid(host, cid); - if (err != MMC_ERR_NONE) { - printk(KERN_ERR "%s: error requesting CID: %d\n", - mmc_hostname(host), err); - return; - } - - host->card = mmc_alloc_card(host, cid); - if (IS_ERR(host->card)) { - err = PTR_ERR(host->card); - host->card = NULL; - return; - } - - if (host->mode == MMC_MODE_SD) { - host->card->type = MMC_TYPE_SD; - - err = mmc_send_relative_addr(host, &host->card->rca); - if (err != MMC_ERR_NONE) - mmc_card_set_dead(host->card); - else { - if (!host->ops->get_ro) { - printk(KERN_WARNING "%s: host does not " - "support reading read-only " - "switch. assuming write-enable.\n", - mmc_hostname(host)); - } else { - if (host->ops->get_ro(host)) - mmc_card_set_readonly(host->card); - } - } - } else { - host->card->type = MMC_TYPE_MMC; - host->card->rca = 1; - - err = mmc_set_relative_addr(host->card); - if (err != MMC_ERR_NONE) - mmc_card_set_dead(host->card); - } -} - -static void mmc_read_csd(struct mmc_host *host) -{ - int err; - - if (!host->card) - return; - if (mmc_card_dead(host->card)) - return; - - err = mmc_send_csd(host->card, host->card->raw_csd); - if (err != MMC_ERR_NONE) { - mmc_card_set_dead(host->card); - return; - } - - mmc_decode_csd(host->card); - mmc_decode_cid(host->card); -} - -static void mmc_process_ext_csd(struct mmc_host *host) -{ - int err; - u8 *ext_csd; - - if (!host->card) - return; - if (mmc_card_dead(host->card)) - return; - if (mmc_card_sd(host->card)) - return; - if (host->card->csd.mmca_vsn < CSD_SPEC_VER_4) - return; - - /* - * As the ext_csd is so large and mostly unused, we don't store the - * raw block in mmc_card. - */ - ext_csd = kmalloc(512, GFP_KERNEL); - if (!ext_csd) { - printk("%s: could not allocate a buffer to receive the ext_csd." 
- "mmc v4 cards will be treated as v3.\n", - mmc_hostname(host)); - return; - } - - err = mmc_send_ext_csd(host->card, ext_csd); - if (err != MMC_ERR_NONE) { - if (host->card->csd.capacity == (4096 * 512)) { - printk(KERN_ERR "%s: unable to read EXT_CSD " - "on a possible high capacity card. " - "Card will be ignored.\n", - mmc_hostname(host)); - mmc_card_set_dead(host->card); - } else { - printk(KERN_WARNING "%s: unable to read " - "EXT_CSD, performance might " - "suffer.\n", - mmc_hostname(host)); - } - goto out; - } - - host->card->ext_csd.sectors = - ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | - ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | - ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | - ext_csd[EXT_CSD_SEC_CNT + 3] << 24; - if (host->card->ext_csd.sectors) - mmc_card_set_blockaddr(host->card); - - switch (ext_csd[EXT_CSD_CARD_TYPE]) { - case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: - host->card->ext_csd.hs_max_dtr = 52000000; - break; - case EXT_CSD_CARD_TYPE_26: - host->card->ext_csd.hs_max_dtr = 26000000; - break; - default: - /* MMC v4 spec says this cannot happen */ - printk("%s: card is mmc v4 but doesn't support " - "any high-speed modes.\n", - mmc_hostname(host)); - goto out; - } - - if (host->caps & MMC_CAP_MMC_HIGHSPEED) { - /* Activate highspeed support. */ - err = mmc_switch(host->card, MMC_SWITCH_MODE_WRITE_BYTE, - EXT_CSD_HS_TIMING, 1); - if (err != MMC_ERR_NONE) { - printk("%s: failed to switch card to mmc v4 " - "high-speed mode.\n", - mmc_hostname(host)); - goto out; - } - - mmc_card_set_highspeed(host->card); - - host->ios.timing = MMC_TIMING_MMC_HS; - mmc_set_ios(host); - } + unsigned long flags; - /* Check for host support for wide-bus modes. */ - if (host->caps & MMC_CAP_4_BIT_DATA) { - /* Activate 4-bit support. */ - err = mmc_switch(host->card, MMC_SWITCH_MODE_WRITE_BYTE, - EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4 | - EXT_CSD_CMD_SET_NORMAL); - if (err != MMC_ERR_NONE) { - printk("%s: failed to switch card to " - "mmc v4 4-bit bus mode.\n", - mmc_hostname(host)); - goto out; - } + BUG_ON(!host); + BUG_ON(!ops); - host->ios.bus_width = MMC_BUS_WIDTH_4; - mmc_set_ios(host); - } + BUG_ON(!host->claimed); -out: - kfree(ext_csd); -} + spin_lock_irqsave(&host->lock, flags); -static void mmc_read_scr(struct mmc_host *host) -{ - int err; + BUG_ON(host->bus_ops); + BUG_ON(host->bus_refs); - if (!host->card) - return; - if (mmc_card_dead(host->card)) - return; - if (!mmc_card_sd(host->card)) - return; - - err = mmc_app_send_scr(host->card, host->card->raw_scr); - if (err != MMC_ERR_NONE) { - mmc_card_set_dead(host->card); - return; - } + host->bus_ops = ops; + host->bus_refs = 1; + host->bus_dead = 0; - mmc_decode_scr(host->card); + spin_unlock_irqrestore(&host->lock, flags); } -static void mmc_read_switch_caps(struct mmc_host *host) +/* + * Remove the current bus handler from a host. Assumes that there are + * no interesting cards left, so the bus is powered down. 
+ */ +void mmc_detach_bus(struct mmc_host *host) { - int err; - unsigned char *status; - - if (!(host->caps & MMC_CAP_SD_HIGHSPEED)) - return; - - if (!host->card) - return; - if (mmc_card_dead(host->card)) - return; - if (!mmc_card_sd(host->card)) - return; - if (host->card->scr.sda_vsn < SCR_SPEC_VER_1) - return; - - status = kmalloc(64, GFP_KERNEL); - if (!status) { - printk(KERN_WARNING "%s: Unable to allocate buffer for " - "reading switch capabilities.\n", - mmc_hostname(host)); - return; - } - - err = mmc_sd_switch(host->card, SD_SWITCH_CHECK, - SD_SWITCH_GRP_ACCESS, SD_SWITCH_ACCESS_HS, status); - if (err != MMC_ERR_NONE) { - printk("%s: unable to read switch capabilities, " - "performance might suffer.\n", - mmc_hostname(host)); - goto out; - } - - if (status[13] & 0x02) - host->card->sw_caps.hs_max_dtr = 50000000; + unsigned long flags; - err = mmc_sd_switch(host->card, SD_SWITCH_SET, - SD_SWITCH_GRP_ACCESS, SD_SWITCH_ACCESS_HS, status); - if (err != MMC_ERR_NONE || (status[16] & 0xF) != 1) { - printk(KERN_WARNING "%s: Problem switching card " - "into high-speed mode!\n", - mmc_hostname(host)); - goto out; - } + BUG_ON(!host); - mmc_card_set_highspeed(host->card); + BUG_ON(!host->claimed); + BUG_ON(!host->bus_ops); - host->ios.timing = MMC_TIMING_SD_HS; - mmc_set_ios(host); + spin_lock_irqsave(&host->lock, flags); -out: - kfree(status); -} + host->bus_dead = 1; -static unsigned int mmc_calculate_clock(struct mmc_host *host) -{ - unsigned int max_dtr = host->f_max; - - if (host->card && !mmc_card_dead(host->card)) { - if (mmc_card_highspeed(host->card) && mmc_card_sd(host->card)) { - if (max_dtr > host->card->sw_caps.hs_max_dtr) - max_dtr = host->card->sw_caps.hs_max_dtr; - } else if (mmc_card_highspeed(host->card) && !mmc_card_sd(host->card)) { - if (max_dtr > host->card->ext_csd.hs_max_dtr) - max_dtr = host->card->ext_csd.hs_max_dtr; - } else if (max_dtr > host->card->csd.max_dtr) { - max_dtr = host->card->csd.max_dtr; - } - } + spin_unlock_irqrestore(&host->lock, flags); - pr_debug("%s: selected %d.%03dMHz transfer rate\n", - mmc_hostname(host), - max_dtr / 1000000, (max_dtr / 1000) % 1000); + mmc_power_off(host); - return max_dtr; + mmc_bus_put(host); } /* - * Check whether cards we already know about are still present. - * We do this by requesting status, and checking whether a card - * responds. - * - * A request for status does not cause a state change in data - * transfer mode. + * Cleanup when the last reference to the bus operator is dropped. */ -static void mmc_check_card(struct mmc_card *card) +void __mmc_release_bus(struct mmc_host *host) { - int err; - - BUG_ON(!card); + BUG_ON(!host); + BUG_ON(host->bus_refs); + BUG_ON(!host->bus_dead); - err = mmc_send_status(card, NULL); - if (err == MMC_ERR_NONE) - return; - - mmc_card_set_dead(card); + host->bus_ops = NULL; } -static void mmc_setup(struct mmc_host *host) -{ - int err; - u32 ocr; - - host->mode = MMC_MODE_SD; - - mmc_power_up(host); - mmc_go_idle(host); - - err = mmc_send_if_cond(host, host->ocr_avail); - if (err != MMC_ERR_NONE) { - return; - } - err = mmc_send_app_op_cond(host, 0, &ocr); - - /* - * If we fail to detect any SD cards then try - * searching for MMC cards. - */ - if (err != MMC_ERR_NONE) { - host->mode = MMC_MODE_MMC; - - err = mmc_send_op_cond(host, 0, &ocr); - if (err != MMC_ERR_NONE) - return; - } - - host->ocr = mmc_select_voltage(host, ocr); - - if (host->ocr == 0) - return; - - /* - * Since we're changing the OCR value, we seem to - * need to tell some cards to go back to the idle - * state. 
We wait 1ms to give cards time to - * respond. - */ - mmc_go_idle(host); - - /* - * Send the selected OCR multiple times... until the cards - * all get the idea that they should be ready for CMD2. - * (My SanDisk card seems to need this.) - */ - if (host->mode == MMC_MODE_SD) { - /* - * If SD_SEND_IF_COND indicates an SD 2.0 - * compliant card and we should set bit 30 - * of the ocr to indicate that we can handle - * block-addressed SDHC cards. - */ - err = mmc_send_if_cond(host, host->ocr); - if (err == MMC_ERR_NONE) - ocr = host->ocr | (1 << 30); - - mmc_send_app_op_cond(host, ocr, NULL); - } else { - /* The extra bit indicates that we support high capacity */ - mmc_send_op_cond(host, host->ocr | (1 << 30), NULL); - } - - mmc_discover_card(host); - - /* - * Ok, now switch to push-pull mode. - */ - host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; - mmc_set_ios(host); - - mmc_read_csd(host); - - if (host->card && !mmc_card_dead(host->card)) { - err = mmc_select_card(host->card); - if (err != MMC_ERR_NONE) - mmc_card_set_dead(host->card); - } - - /* - * The card is in 1 bit mode by default so - * we only need to change if it supports the - * wider version. - */ - if (host->card && !mmc_card_dead(host->card) && - mmc_card_sd(host->card) && - (host->card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) && - (host->card->host->caps & MMC_CAP_4_BIT_DATA)) { - err = mmc_app_set_bus_width(host->card, SD_BUS_WIDTH_4); - if (err != MMC_ERR_NONE) - mmc_card_set_dead(host->card); - else { - host->ios.bus_width = MMC_BUS_WIDTH_4; - mmc_set_ios(host); - } - } - - if (host->mode == MMC_MODE_SD) { - mmc_read_scr(host); - mmc_read_switch_caps(host); - } else - mmc_process_ext_csd(host); -} - - /** * mmc_detect_change - process change of state on a MMC socket * @host: host which changed state. @@ -1060,62 +515,49 @@ static void mmc_rescan(struct work_struct *work) { struct mmc_host *host = container_of(work, struct mmc_host, detect.work); + u32 ocr; + int err; - mmc_claim_host(host); - - /* - * Check for removed card and newly inserted ones. We check for - * removed cards first so we can intelligently re-select the VDD. - */ - if (host->card) { - mmc_check_card(host->card); - - mmc_release_host(host); - - if (mmc_card_dead(host->card)) { - mmc_remove_card(host->card); - host->card = NULL; - } - - goto out; - } - - mmc_setup(host); + mmc_bus_get(host); - if (host->card && !mmc_card_dead(host->card)) { + if (host->bus_ops == NULL) { /* - * (Re-)calculate the fastest clock rate which the - * attached cards and the host support. + * Only we can add a new handler, so it's safe to + * release the lock here. */ - host->ios.clock = mmc_calculate_clock(host); - mmc_set_ios(host); - } + mmc_bus_put(host); - mmc_release_host(host); + mmc_claim_host(host); - /* - * If this is a new and good card, register it. - */ - if (host->card && !mmc_card_dead(host->card)) { - if (mmc_register_card(host->card)) - mmc_card_set_dead(host->card); - } + mmc_power_up(host); + mmc_go_idle(host); - /* - * If this card is dead, destroy it. - */ - if (host->card && mmc_card_dead(host->card)) { - mmc_remove_card(host->card); - host->card = NULL; - } + mmc_send_if_cond(host, host->ocr_avail); -out: - /* - * If we discover that there are no cards on the - * bus, turn off the clock and power down. 
- */ - if (!host->card) - mmc_power_off(host); + err = mmc_send_app_op_cond(host, 0, &ocr); + if (err == MMC_ERR_NONE) { + if (mmc_attach_sd(host, ocr)) + mmc_power_off(host); + } else { + /* + * If we fail to detect any SD cards then try + * searching for MMC cards. + */ + err = mmc_send_op_cond(host, 0, &ocr); + if (err == MMC_ERR_NONE) { + if (mmc_attach_mmc(host, ocr)) + mmc_power_off(host); + } else { + mmc_power_off(host); + mmc_release_host(host); + } + } + } else { + if (host->bus_ops->detect && !host->bus_dead) + host->bus_ops->detect(host); + + mmc_bus_put(host); + } } @@ -1190,10 +632,18 @@ void mmc_remove_host(struct mmc_host *host) mmc_flush_scheduled_work(); - if (host->card) { - mmc_remove_card(host->card); - host->card = NULL; + mmc_bus_get(host); + if (host->bus_ops && !host->bus_dead) { + if (host->bus_ops->remove) + host->bus_ops->remove(host); + + mmc_claim_host(host); + mmc_detach_bus(host); + mmc_release_host(host); } + mmc_bus_put(host); + + BUG_ON(host->card); mmc_power_off(host); mmc_remove_host_sysfs(host); @@ -1225,10 +675,15 @@ int mmc_suspend_host(struct mmc_host *host, pm_message_t state) { mmc_flush_scheduled_work(); - if (host->card) { - mmc_remove_card(host->card); - host->card = NULL; + mmc_bus_get(host); + if (host->bus_ops && !host->bus_dead) { + if (host->bus_ops->remove) + host->bus_ops->remove(host); + mmc_detach_bus(host); } + mmc_bus_put(host); + + BUG_ON(host->card); mmc_power_off(host); diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 1c1066342fba..fad8edc38099 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -15,7 +15,44 @@ #define MMC_CMD_RETRIES 3 +struct mmc_bus_ops { + void (*remove)(struct mmc_host *); + void (*detect)(struct mmc_host *); +}; + +void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); +void mmc_detach_bus(struct mmc_host *host); + +void __mmc_release_bus(struct mmc_host *host); + +static inline void mmc_bus_get(struct mmc_host *host) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->bus_refs++; + spin_unlock_irqrestore(&host->lock, flags); +} + +static inline void mmc_bus_put(struct mmc_host *host) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->bus_refs--; + if ((host->bus_refs == 0) && host->bus_ops) + __mmc_release_bus(host); + spin_unlock_irqrestore(&host->lock, flags); +} + void mmc_set_chip_select(struct mmc_host *host, int mode); +void mmc_set_clock(struct mmc_host *host, unsigned int hz); +void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); +void mmc_set_bus_width(struct mmc_host *host, unsigned int width); +u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); +void mmc_set_timing(struct mmc_host *host, unsigned int timing); + +struct mmc_card *mmc_alloc_card(struct mmc_host *host); static inline void mmc_delay(unsigned int ms) { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c new file mode 100644 index 000000000000..c528017d6128 --- /dev/null +++ b/drivers/mmc/core/mmc.c @@ -0,0 +1,430 @@ +/* + * linux/drivers/mmc/mmc.c + * + * Copyright (C) 2003-2004 Russell King, All Rights Reserved. + * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. + * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
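/*
 * Sketch (assumed, mirroring mmc_rescan() above) of the reference-counting
 * pattern the new mmc_bus_get()/mmc_bus_put() helpers enforce: every use
 * of host->bus_ops is bracketed by a get/put pair so the handler cannot be
 * released while one of its callbacks is running.
 */
static void example_poll_bus(struct mmc_host *host)
{
	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead && host->bus_ops->detect)
		host->bus_ops->detect(host);
	mmc_bus_put(host);
}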
+ */ + +#include + +#include +#include +#include + +#include "core.h" +#include "sysfs.h" +#include "mmc_ops.h" + +static const unsigned int tran_exp[] = { + 10000, 100000, 1000000, 10000000, + 0, 0, 0, 0 +}; + +static const unsigned char tran_mant[] = { + 0, 10, 12, 13, 15, 20, 25, 30, + 35, 40, 45, 50, 55, 60, 70, 80, +}; + +static const unsigned int tacc_exp[] = { + 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, +}; + +static const unsigned int tacc_mant[] = { + 0, 10, 12, 13, 15, 20, 25, 30, + 35, 40, 45, 50, 55, 60, 70, 80, +}; + +#define UNSTUFF_BITS(resp,start,size) \ + ({ \ + const int __size = size; \ + const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \ + const int __off = 3 - ((start) / 32); \ + const int __shft = (start) & 31; \ + u32 __res; \ + \ + __res = resp[__off] >> __shft; \ + if (__size + __shft > 32) \ + __res |= resp[__off-1] << ((32 - __shft) % 32); \ + __res & __mask; \ + }) + +/* + * Given the decoded CSD structure, decode the raw CID to our CID structure. + */ +static void mmc_decode_cid(struct mmc_card *card) +{ + u32 *resp = card->raw_cid; + + /* + * The selection of the format here is based upon published + * specs from sandisk and from what people have reported. + */ + switch (card->csd.mmca_vsn) { + case 0: /* MMC v1.0 - v1.2 */ + case 1: /* MMC v1.4 */ + card->cid.manfid = UNSTUFF_BITS(resp, 104, 24); + card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); + card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); + card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); + card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); + card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); + card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); + card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); + card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); + card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); + card->cid.serial = UNSTUFF_BITS(resp, 16, 24); + card->cid.month = UNSTUFF_BITS(resp, 12, 4); + card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; + break; + + case 2: /* MMC v2.0 - v2.2 */ + case 3: /* MMC v3.1 - v3.3 */ + case 4: /* MMC v4 */ + card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); + card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); + card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); + card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); + card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); + card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); + card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); + card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); + card->cid.serial = UNSTUFF_BITS(resp, 16, 32); + card->cid.month = UNSTUFF_BITS(resp, 12, 4); + card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; + break; + + default: + printk("%s: card has unknown MMCA version %d\n", + mmc_hostname(card->host), card->csd.mmca_vsn); + mmc_card_set_bad(card); + break; + } +} + +/* + * Given a 128-bit response, decode to our card CSD structure. + */ +static void mmc_decode_csd(struct mmc_card *card) +{ + struct mmc_csd *csd = &card->csd; + unsigned int e, m, csd_struct; + u32 *resp = card->raw_csd; + + /* + * We only understand CSD structure v1.1 and v1.2. + * v1.2 has extra information in bits 15, 11 and 10. 
+ */ + csd_struct = UNSTUFF_BITS(resp, 126, 2); + if (csd_struct != 1 && csd_struct != 2) { + printk("%s: unrecognised CSD structure version %d\n", + mmc_hostname(card->host), csd_struct); + mmc_card_set_bad(card); + return; + } + + csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); + m = UNSTUFF_BITS(resp, 115, 4); + e = UNSTUFF_BITS(resp, 112, 3); + csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; + csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; + + m = UNSTUFF_BITS(resp, 99, 4); + e = UNSTUFF_BITS(resp, 96, 3); + csd->max_dtr = tran_exp[e] * tran_mant[m]; + csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); + + e = UNSTUFF_BITS(resp, 47, 3); + m = UNSTUFF_BITS(resp, 62, 12); + csd->capacity = (1 + m) << (e + 2); + + csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); + csd->read_partial = UNSTUFF_BITS(resp, 79, 1); + csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); + csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); + csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); + csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); + csd->write_partial = UNSTUFF_BITS(resp, 21, 1); +} + +/* + * Read and decode extended CSD. Switch to high-speed and wide bus + * if supported. + */ +static int mmc_process_ext_csd(struct mmc_card *card) +{ + int err; + u8 *ext_csd; + + BUG_ON(!card); + + err = MMC_ERR_FAILED; + + if (card->csd.mmca_vsn < CSD_SPEC_VER_4) + return MMC_ERR_NONE; + + /* + * As the ext_csd is so large and mostly unused, we don't store the + * raw block in mmc_card. + */ + ext_csd = kmalloc(512, GFP_KERNEL); + if (!ext_csd) { + printk(KERN_ERR "%s: could not allocate a buffer to " + "receive the ext_csd. mmc v4 cards will be " + "treated as v3.\n", mmc_hostname(card->host)); + return MMC_ERR_FAILED; + } + + err = mmc_send_ext_csd(card, ext_csd); + if (err != MMC_ERR_NONE) { + /* + * High capacity cards should have this "magic" size + * stored in their CSD. + */ + if (card->csd.capacity == (4096 * 512)) { + printk(KERN_ERR "%s: unable to read EXT_CSD " + "on a possible high capacity card. " + "Card will be ignored.\n", + mmc_hostname(card->host)); + } else { + printk(KERN_WARNING "%s: unable to read " + "EXT_CSD, performance might " + "suffer.\n", + mmc_hostname(card->host)); + err = MMC_ERR_NONE; + } + goto out; + } + + card->ext_csd.sectors = + ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | + ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | + ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | + ext_csd[EXT_CSD_SEC_CNT + 3] << 24; + if (card->ext_csd.sectors) + mmc_card_set_blockaddr(card); + + switch (ext_csd[EXT_CSD_CARD_TYPE]) { + case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: + card->ext_csd.hs_max_dtr = 52000000; + break; + case EXT_CSD_CARD_TYPE_26: + card->ext_csd.hs_max_dtr = 26000000; + break; + default: + /* MMC v4 spec says this cannot happen */ + printk(KERN_WARNING "%s: card is mmc v4 but doesn't " + "support any high-speed modes.\n", + mmc_hostname(card->host)); + goto out; + } + + if (card->host->caps & MMC_CAP_MMC_HIGHSPEED) { + /* Activate highspeed support. */ + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_HS_TIMING, 1); + if (err != MMC_ERR_NONE) { + printk(KERN_WARNING "%s: failed to switch " + "card to mmc v4 high-speed mode.\n", + mmc_hostname(card->host)); + err = MMC_ERR_NONE; + goto out; + } + + mmc_card_set_highspeed(card); + + mmc_set_timing(card->host, MMC_TIMING_MMC_HS); + } + + /* Check for host support for wide-bus modes. */ + if (card->host->caps & MMC_CAP_4_BIT_DATA) { + /* Activate 4-bit support. 
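+ * As with the high-speed switch above, a failure here is only
+ * logged and the card keeps working on a 1-bit bus.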
*/ + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4); + if (err != MMC_ERR_NONE) { + printk(KERN_WARNING "%s: failed to switch " + "card to mmc v4 4-bit bus mode.\n", + mmc_hostname(card->host)); + err = MMC_ERR_NONE; + goto out; + } + + mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); + } + +out: + kfree(ext_csd); + + return err; +} + +/* + * Host is being removed. Free up the current card. + */ +static void mmc_remove(struct mmc_host *host) +{ + BUG_ON(!host); + BUG_ON(!host->card); + + mmc_remove_card(host->card); + host->card = NULL; +} + +/* + * Card detection callback from host. + */ +static void mmc_detect(struct mmc_host *host) +{ + int err; + + BUG_ON(!host); + BUG_ON(!host->card); + + mmc_claim_host(host); + + /* + * Just check if our card has been removed. + */ + err = mmc_send_status(host->card, NULL); + + mmc_release_host(host); + + if (err != MMC_ERR_NONE) { + mmc_remove_card(host->card); + host->card = NULL; + + mmc_claim_host(host); + mmc_detach_bus(host); + mmc_release_host(host); + } +} + +static const struct mmc_bus_ops mmc_ops = { + .remove = mmc_remove, + .detect = mmc_detect, +}; + +/* + * Starting point for MMC card init. + */ +int mmc_attach_mmc(struct mmc_host *host, u32 ocr) +{ + struct mmc_card *card; + int err; + u32 cid[4]; + unsigned int max_dtr; + + BUG_ON(!host); + BUG_ON(!host->claimed); + + mmc_attach_bus(host, &mmc_ops); + + host->ocr = mmc_select_voltage(host, ocr); + + /* + * Can we support the voltage of the card? + */ + if (!host->ocr) + goto err; + + /* + * Since we're changing the OCR value, we seem to + * need to tell some cards to go back to the idle + * state. We wait 1ms to give cards time to + * respond. + */ + mmc_go_idle(host); + + /* The extra bit indicates that we support high capacity */ + mmc_send_op_cond(host, host->ocr | (1 << 30), NULL); + + /* + * Fetch CID from card. + */ + err = mmc_all_send_cid(host, cid); + if (err != MMC_ERR_NONE) + goto err; + + /* + * Allocate card structure. + */ + card = mmc_alloc_card(host); + if (IS_ERR(card)) + goto err; + + card->type = MMC_TYPE_MMC; + card->rca = 1; + memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); + + /* + * Set card RCA. + */ + err = mmc_set_relative_addr(card); + if (err != MMC_ERR_NONE) + goto free_card; + + mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); + + /* + * Fetch CSD from card. + */ + err = mmc_send_csd(card, card->raw_csd); + if (err != MMC_ERR_NONE) + goto free_card; + + mmc_decode_csd(card); + mmc_decode_cid(card); + + /* + * Fetch and process extened CSD. + * This will switch into high-speed and wide bus modes, + * as available. + */ + err = mmc_select_card(card); + if (err != MMC_ERR_NONE) + goto free_card; + + err = mmc_process_ext_csd(card); + if (err != MMC_ERR_NONE) + goto free_card; + + /* + * Compute bus speed. 
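+ * Start from the highest possible rate and clamp it to the card's
+ * limit: the EXT_CSD high-speed rate if the switch above succeeded,
+ * otherwise the maximum rate decoded from the CSD.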
+ */ + max_dtr = (unsigned int)-1; + + if (mmc_card_highspeed(card)) { + if (max_dtr > card->ext_csd.hs_max_dtr) + max_dtr = card->ext_csd.hs_max_dtr; + } else if (max_dtr > card->csd.max_dtr) { + max_dtr = card->csd.max_dtr; + } + + mmc_set_clock(host, max_dtr); + + host->card = card; + + mmc_release_host(host); + + err = mmc_register_card(card); + if (err) + goto reclaim_host; + + return 0; + +reclaim_host: + mmc_claim_host(host); +free_card: + mmc_remove_card(card); + host->card = NULL; +err: + mmc_detach_bus(host); + mmc_release_host(host); + + return 0; +} + diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c new file mode 100644 index 000000000000..6c6beb48f3a8 --- /dev/null +++ b/drivers/mmc/core/sd.c @@ -0,0 +1,431 @@ +/* + * linux/drivers/mmc/sd.c + * + * Copyright (C) 2003-2004 Russell King, All Rights Reserved. + * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. + * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#include +#include +#include + +#include "core.h" +#include "sysfs.h" +#include "mmc_ops.h" +#include "sd_ops.h" + +#include "core.h" + +static const unsigned int tran_exp[] = { + 10000, 100000, 1000000, 10000000, + 0, 0, 0, 0 +}; + +static const unsigned char tran_mant[] = { + 0, 10, 12, 13, 15, 20, 25, 30, + 35, 40, 45, 50, 55, 60, 70, 80, +}; + +static const unsigned int tacc_exp[] = { + 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, +}; + +static const unsigned int tacc_mant[] = { + 0, 10, 12, 13, 15, 20, 25, 30, + 35, 40, 45, 50, 55, 60, 70, 80, +}; + +#define UNSTUFF_BITS(resp,start,size) \ + ({ \ + const int __size = size; \ + const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \ + const int __off = 3 - ((start) / 32); \ + const int __shft = (start) & 31; \ + u32 __res; \ + \ + __res = resp[__off] >> __shft; \ + if (__size + __shft > 32) \ + __res |= resp[__off-1] << ((32 - __shft) % 32); \ + __res & __mask; \ + }) + +/* + * Given the decoded CSD structure, decode the raw CID to our CID structure. + */ +static void mmc_decode_cid(struct mmc_card *card) +{ + u32 *resp = card->raw_cid; + + memset(&card->cid, 0, sizeof(struct mmc_cid)); + + /* + * SD doesn't currently have a version field so we will + * have to assume we can parse this. + */ + card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); + card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); + card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); + card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); + card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); + card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); + card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); + card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4); + card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4); + card->cid.serial = UNSTUFF_BITS(resp, 24, 32); + card->cid.year = UNSTUFF_BITS(resp, 12, 8); + card->cid.month = UNSTUFF_BITS(resp, 8, 4); + + card->cid.year += 2000; /* SD cards year offset */ +} + +/* + * Given a 128-bit response, decode to our card CSD structure. 
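+ * CSD structure version 0 describes standard-capacity cards;
+ * version 1 is the block-addressed SDHC layout handled below.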
+ */ +static void mmc_decode_csd(struct mmc_card *card) +{ + struct mmc_csd *csd = &card->csd; + unsigned int e, m, csd_struct; + u32 *resp = card->raw_csd; + + csd_struct = UNSTUFF_BITS(resp, 126, 2); + + switch (csd_struct) { + case 0: + m = UNSTUFF_BITS(resp, 115, 4); + e = UNSTUFF_BITS(resp, 112, 3); + csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; + csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; + + m = UNSTUFF_BITS(resp, 99, 4); + e = UNSTUFF_BITS(resp, 96, 3); + csd->max_dtr = tran_exp[e] * tran_mant[m]; + csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); + + e = UNSTUFF_BITS(resp, 47, 3); + m = UNSTUFF_BITS(resp, 62, 12); + csd->capacity = (1 + m) << (e + 2); + + csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); + csd->read_partial = UNSTUFF_BITS(resp, 79, 1); + csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); + csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); + csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); + csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); + csd->write_partial = UNSTUFF_BITS(resp, 21, 1); + break; + case 1: + /* + * This is a block-addressed SDHC card. Most + * interesting fields are unused and have fixed + * values. To avoid getting tripped by buggy cards, + * we assume those fixed values ourselves. + */ + mmc_card_set_blockaddr(card); + + csd->tacc_ns = 0; /* Unused */ + csd->tacc_clks = 0; /* Unused */ + + m = UNSTUFF_BITS(resp, 99, 4); + e = UNSTUFF_BITS(resp, 96, 3); + csd->max_dtr = tran_exp[e] * tran_mant[m]; + csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); + + m = UNSTUFF_BITS(resp, 48, 22); + csd->capacity = (1 + m) << 10; + + csd->read_blkbits = 9; + csd->read_partial = 0; + csd->write_misalign = 0; + csd->read_misalign = 0; + csd->r2w_factor = 4; /* Unused */ + csd->write_blkbits = 9; + csd->write_partial = 0; + break; + default: + printk("%s: unrecognised CSD structure version %d\n", + mmc_hostname(card->host), csd_struct); + mmc_card_set_bad(card); + return; + } +} + +/* + * Given a 64-bit response, decode to our card SCR structure. + */ +static void mmc_decode_scr(struct mmc_card *card) +{ + struct sd_scr *scr = &card->scr; + unsigned int scr_struct; + u32 resp[4]; + + BUG_ON(!mmc_card_sd(card)); + + resp[3] = card->raw_scr[1]; + resp[2] = card->raw_scr[0]; + + scr_struct = UNSTUFF_BITS(resp, 60, 4); + if (scr_struct != 0) { + printk("%s: unrecognised SCR structure version %d\n", + mmc_hostname(card->host), scr_struct); + mmc_card_set_bad(card); + return; + } + + scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); + scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); +} + +/* + * Test if the card supports high-speed mode and, if so, switch to it. + */ +static int mmc_switch_hs(struct mmc_card *card) +{ + int err; + u8 *status; + + if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED)) + return MMC_ERR_NONE; + + err = MMC_ERR_FAILED; + + status = kmalloc(64, GFP_KERNEL); + if (!status) { + printk("%s: could not allocate a buffer for switch " + "capabilities.\n", + mmc_hostname(card->host)); + return err; + } + + err = mmc_sd_switch(card, 0, 0, 1, status); + if (err != MMC_ERR_NONE) { + /* + * Card not supporting high-speed will ignore the + * command. 
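+ * A timeout here therefore just means the card has no high-speed
+ * mode and is not treated as a fatal error.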
+ */ + if (err == MMC_ERR_TIMEOUT) + err = MMC_ERR_NONE; + goto out; + } + + if (status[13] & 0x02) + card->sw_caps.hs_max_dtr = 50000000; + + err = mmc_sd_switch(card, 1, 0, 1, status); + if (err != MMC_ERR_NONE) + goto out; + + if ((status[16] & 0xF) != 1) { + printk(KERN_WARNING "%s: Problem switching card " + "into high-speed mode!\n", + mmc_hostname(card->host)); + } else { + mmc_card_set_highspeed(card); + mmc_set_timing(card->host, MMC_TIMING_SD_HS); + } + +out: + kfree(status); + + return err; +} + +/* + * Host is being removed. Free up the current card. + */ +static void mmc_sd_remove(struct mmc_host *host) +{ + BUG_ON(!host); + BUG_ON(!host->card); + + mmc_remove_card(host->card); + host->card = NULL; +} + +/* + * Card detection callback from host. + */ +static void mmc_sd_detect(struct mmc_host *host) +{ + int err; + + BUG_ON(!host); + BUG_ON(!host->card); + + mmc_claim_host(host); + + /* + * Just check if our card has been removed. + */ + err = mmc_send_status(host->card, NULL); + + mmc_release_host(host); + + if (err != MMC_ERR_NONE) { + mmc_remove_card(host->card); + host->card = NULL; + + mmc_claim_host(host); + mmc_detach_bus(host); + mmc_release_host(host); + } +} + +static const struct mmc_bus_ops mmc_sd_ops = { + .remove = mmc_sd_remove, + .detect = mmc_sd_detect, +}; + +/* + * Starting point for SD card init. + */ +int mmc_attach_sd(struct mmc_host *host, u32 ocr) +{ + struct mmc_card *card; + int err; + u32 cid[4]; + unsigned int max_dtr; + + BUG_ON(!host); + BUG_ON(!host->claimed); + + mmc_attach_bus(host, &mmc_sd_ops); + + host->ocr = mmc_select_voltage(host, ocr); + + /* + * Can we support the voltage(s) of the card(s)? + */ + if (!host->ocr) + goto err; + + /* + * Since we're changing the OCR value, we seem to + * need to tell some cards to go back to the idle + * state. We wait 1ms to give cards time to + * respond. + */ + mmc_go_idle(host); + + /* + * If SD_SEND_IF_COND indicates an SD 2.0 + * compliant card and we should set bit 30 + * of the ocr to indicate that we can handle + * block-addressed SDHC cards. + */ + err = mmc_send_if_cond(host, host->ocr); + if (err == MMC_ERR_NONE) + ocr = host->ocr | (1 << 30); + + mmc_send_app_op_cond(host, ocr, NULL); + + /* + * Fetch CID from card. + */ + err = mmc_all_send_cid(host, cid); + if (err != MMC_ERR_NONE) + goto err; + + /* + * Allocate card structure. + */ + card = mmc_alloc_card(host); + if (IS_ERR(card)) + goto err; + + card->type = MMC_TYPE_SD; + memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); + + /* + * Set card RCA. + */ + err = mmc_send_relative_addr(host, &card->rca); + if (err != MMC_ERR_NONE) + goto free_card; + + mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); + + /* + * Fetch CSD from card. + */ + err = mmc_send_csd(card, card->raw_csd); + if (err != MMC_ERR_NONE) + goto free_card; + + mmc_decode_csd(card); + mmc_decode_cid(card); + + /* + * Fetch SCR from card. + */ + err = mmc_select_card(card); + if (err != MMC_ERR_NONE) + goto free_card; + + err = mmc_app_send_scr(card, card->raw_scr); + if (err != MMC_ERR_NONE) + goto free_card; + + mmc_decode_scr(card); + + /* + * Check if card can be switched into high-speed mode. + */ + err = mmc_switch_hs(card); + if (err != MMC_ERR_NONE) + goto free_card; + + /* + * Compute bus speed. 
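+ * Clamp the bus clock to the card's limit: 50 MHz if the high-speed
+ * switch above succeeded, otherwise the rate advertised in the CSD.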
+ */ + max_dtr = (unsigned int)-1; + + if (mmc_card_highspeed(card)) { + if (max_dtr > card->sw_caps.hs_max_dtr) + max_dtr = card->sw_caps.hs_max_dtr; + } else if (max_dtr > card->csd.max_dtr) { + max_dtr = card->csd.max_dtr; + } + + mmc_set_clock(host, max_dtr); + + /* + * Switch to wider bus (if supported). + */ + if ((host->caps && MMC_CAP_4_BIT_DATA) && + (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { + err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4); + if (err != MMC_ERR_NONE) + goto free_card; + + mmc_set_bus_width(host, MMC_BUS_WIDTH_4); + } + + host->card = card; + + mmc_release_host(host); + + err = mmc_register_card(card); + if (err) + goto reclaim_host; + + return 0; + +reclaim_host: + mmc_claim_host(host); +free_card: + mmc_remove_card(card); + host->card = NULL; +err: + mmc_detach_bus(host); + mmc_release_host(host); + + return 0; +} + diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 43bf6a5c398d..efae87b5c4e7 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -131,6 +131,8 @@ struct mmc_host { unsigned int max_blk_count; /* maximum number of blocks in one req */ /* private data */ + spinlock_t lock; /* lock for claim and bus ops */ + struct mmc_ios ios; /* current io bus settings */ u32 ocr; /* the current OCR setting */ @@ -141,7 +143,6 @@ struct mmc_host { struct mmc_card *card; /* device attached to this host */ wait_queue_head_t wq; - spinlock_t lock; /* claimed lock */ unsigned int claimed:1; /* host exclusively claimed */ struct delayed_work detect; @@ -149,6 +150,10 @@ struct mmc_host { unsigned int removed:1; /* host is being removed */ #endif + const struct mmc_bus_ops *bus_ops; /* current bus driver */ + unsigned int bus_refs; /* reference counter */ + unsigned int bus_dead:1; /* bus has been released */ + unsigned long private[0] ____cacheline_aligned; }; -- cgit v1.2.3 From 4be34c99a2f3aa90fa42e62c0918f07afb8a645b Mon Sep 17 00:00:00 2001 From: Philip Langdale Date: Sun, 11 Mar 2007 17:15:15 -0700 Subject: MMC: Consolidate voltage definitions Consolidate the list of available voltages. Up until now, a separate set of defines has been used for host->vdd than that used for the OCR voltage mask values. Having two sets of defines allows them to get out of sync and the current sets are already inconsistent with one claiming to describe ranges and the other specific voltages. Only the SDHCI driver uses the host->vdd defines and it is easily fixed to use the OCR defines. 
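As a rough sketch (not part of this patch; the helper name below is made up for illustration), a host driver that used to switch on the old index values now shifts ios.vdd into a bit mask and compares it against the MMC_VDD_xx_yy OCR range defines, mirroring what the sdhci_set_power() hunk below does with SDHCI_POWER_180/300/330:

    static void example_set_power(unsigned short vdd)
    {
            /* vdd now holds the bit number of the selected OCR voltage
             * range, so (1 << vdd) can be tested directly against the
             * MMC_VDD_xx_yy masks.
             */
            switch (1 << vdd) {
            case MMC_VDD_17_18:
            case MMC_VDD_18_19:
                    /* program a nominal 1.8V supply */
                    break;
            case MMC_VDD_29_30:
            case MMC_VDD_30_31:
                    /* program a nominal 3.0V supply */
                    break;
            case MMC_VDD_32_33:
            case MMC_VDD_33_34:
                    /* program a nominal 3.3V supply */
                    break;
            default:
                    /* range not handled in this sketch */
                    break;
            }
    }

The point of the consolidation is that the same MMC_VDD_* bits now describe both the OCR mask and the selected ios voltage, so the two can no longer drift apart.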
Signed-off-by: Philip Langdale Signed-off-by: Pierre Ossman --- drivers/mmc/host/sdhci.c | 17 +++++++---------- include/linux/mmc/host.h | 25 +------------------------ 2 files changed, 8 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 579142a7904b..a57f6a3d48d3 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -658,20 +658,17 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) pwr = SDHCI_POWER_ON; - switch (power) { - case MMC_VDD_170: - case MMC_VDD_180: - case MMC_VDD_190: + switch (1 << power) { + case MMC_VDD_17_18: + case MMC_VDD_18_19: pwr |= SDHCI_POWER_180; break; - case MMC_VDD_290: - case MMC_VDD_300: - case MMC_VDD_310: + case MMC_VDD_29_30: + case MMC_VDD_30_31: pwr |= SDHCI_POWER_300; break; - case MMC_VDD_320: - case MMC_VDD_330: - case MMC_VDD_340: + case MMC_VDD_32_33: + case MMC_VDD_33_34: pwr |= SDHCI_POWER_330; break; default: diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index efae87b5c4e7..5a66d8a2bf17 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -16,30 +16,7 @@ struct mmc_ios { unsigned int clock; /* clock rate */ unsigned short vdd; -#define MMC_VDD_150 0 -#define MMC_VDD_155 1 -#define MMC_VDD_160 2 -#define MMC_VDD_165 3 -#define MMC_VDD_170 4 -#define MMC_VDD_180 5 -#define MMC_VDD_190 6 -#define MMC_VDD_200 7 -#define MMC_VDD_210 8 -#define MMC_VDD_220 9 -#define MMC_VDD_230 10 -#define MMC_VDD_240 11 -#define MMC_VDD_250 12 -#define MMC_VDD_260 13 -#define MMC_VDD_270 14 -#define MMC_VDD_280 15 -#define MMC_VDD_290 16 -#define MMC_VDD_300 17 -#define MMC_VDD_310 18 -#define MMC_VDD_320 19 -#define MMC_VDD_330 20 -#define MMC_VDD_340 21 -#define MMC_VDD_350 22 -#define MMC_VDD_360 23 +/* vdd stores the bit number of the selected voltage range from below. */ unsigned char bus_mode; /* command output mode */ -- cgit v1.2.3 From d4b2bab4f26345ea1803feb23ea92fbe3f6b77bc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Feb 2007 16:50:52 +0900 Subject: libata: add deadline support to prereset and reset methods Add @deadline to prereset and reset methods and make them honor it. ata_wait_ready() which directly takes @deadline is implemented to be used as the wait function. This patch is in preparation for EH timing improvements. * ata_wait_ready() never does busy sleep. It's only used from EH and no wait in EH is that urgent. This function also prints 'be patient' message automatically after 5 secs of waiting if more than 3 secs is remaining till deadline. * ata_bus_post_reset() now fails with error code if any of its wait fails. This is important because earlier reset tries will have shorter timeout than the spec requires. If a device fails to respond before the short timeout, reset should be retried with longer timeout rather than silently ignoring the device. There are three behavior differences. 1. Timeout is applied to both devices at once, not separately. This is more consistent with what the spec says. 2. When a device passes devchk but fails to become ready before deadline. Previouly, post_reset would just succeed and let device classification remove the device. New code fails the reset thus causing reset retry. After a few times, EH will give up disabling the port. 3. When slave device passes devchk but fails to become accessible (TF-wise) after reset. Original code disables dev1 after 30s timeout and continues as if the device doesn't exist, while the patched code fails reset. 
When this happens, new code fails reset on whole port rather than proceeding with only the primary device. If the failing device is suffering transient problems, new code retries reset which is a better behavior. If the failing device is actually broken, the net effect is identical to it, but not to the other device sharing the channel. In the previous code, reset would have succeeded after 30s thus detecting the working one. In the new code, reset fails and whole port gets disabled. IMO, it's a pathological case anyway (broken device sharing bus with working one) and doesn't really matter. * ata_bus_softreset() is changed to return error code from ata_bus_post_reset(). It used to return 0 unconditionally. * Spin up waiting is to be removed and not converted to honor deadline. * To be on the safe side, deadline is set to 40s for the time being. Signed-off-by: Tejun Heo Signed-off-by: Jeff Garzik --- drivers/ata/ahci.c | 18 +++-- drivers/ata/ata_piix.c | 6 +- drivers/ata/libata-core.c | 161 +++++++++++++++++++++++++++++------------ drivers/ata/libata-eh.c | 10 +-- drivers/ata/pata_amd.c | 11 +-- drivers/ata/pata_artop.c | 8 +- drivers/ata/pata_atiixp.c | 4 +- drivers/ata/pata_cs5535.c | 1 + drivers/ata/pata_efar.c | 5 +- drivers/ata/pata_hpt366.c | 4 +- drivers/ata/pata_hpt37x.c | 9 ++- drivers/ata/pata_hpt3x2n.c | 5 +- drivers/ata/pata_it8213.c | 6 +- drivers/ata/pata_jmicron.c | 7 +- drivers/ata/pata_marvell.c | 4 +- drivers/ata/pata_mpiix.c | 5 +- drivers/ata/pata_ns87410.c | 6 +- drivers/ata/pata_oldpiix.c | 6 +- drivers/ata/pata_opti.c | 6 +- drivers/ata/pata_optidma.c | 5 +- drivers/ata/pata_pdc2027x.c | 5 +- drivers/ata/pata_serverworks.c | 4 +- drivers/ata/pata_sil680.c | 6 +- drivers/ata/pata_sis.c | 8 +- drivers/ata/pata_sl82c105.c | 5 +- drivers/ata/pata_triflex.c | 6 +- drivers/ata/pata_via.c | 5 +- drivers/ata/sata_inic162x.c | 14 ++-- drivers/ata/sata_nv.c | 5 +- drivers/ata/sata_sil24.c | 10 ++- drivers/ata/sata_via.c | 5 +- include/linux/libata.h | 24 +++--- 32 files changed, 249 insertions(+), 135 deletions(-) (limited to 'include/linux') diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 34c5534ed64c..0319f10d42d5 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -874,7 +874,8 @@ static int ahci_clo(struct ata_port *ap) return 0; } -static int ahci_softreset(struct ata_port *ap, unsigned int *class) +static int ahci_softreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) { struct ahci_port_priv *pp = ap->private_data; void __iomem *port_mmio = ahci_port_base(ap); @@ -961,8 +962,8 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class) *class = ATA_DEV_NONE; if (ata_port_online(ap)) { - if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { - rc = -EIO; + rc = ata_wait_ready(ap, deadline); + if (rc && rc != -ENODEV) { reason = "device not ready"; goto fail; } @@ -979,7 +980,8 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class) return rc; } -static int ahci_hardreset(struct ata_port *ap, unsigned int *class) +static int ahci_hardreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) { struct ahci_port_priv *pp = ap->private_data; u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; @@ -995,7 +997,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class) tf.command = 0x80; ata_tf_to_fis(&tf, d2h_fis, 0); - rc = sata_std_hardreset(ap, class); + rc = sata_std_hardreset(ap, class, deadline); ahci_start_engine(ap); @@ -1008,7 +1010,8 @@ static int ahci_hardreset(struct 
ata_port *ap, unsigned int *class) return rc; } -static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class) +static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) { int rc; @@ -1016,7 +1019,8 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class) ahci_stop_engine(ap); - rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context)); + rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context), + deadline); /* vt8251 needs SError cleared for the port to operate */ ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR)); diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 55d306a3e538..4a795fdb6a02 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -625,17 +625,18 @@ static int ich_pata_cable_detect(struct ata_port *ap) /** * piix_pata_prereset - prereset for PATA host controller * @ap: Target port + * @deadline: deadline jiffies for the operation * * LOCKING: * None (inherited from caller). */ -static int piix_pata_prereset(struct ata_port *ap) +static int piix_pata_prereset(struct ata_port *ap, unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } static void piix_pata_error_handler(struct ata_port *ap) @@ -644,7 +645,6 @@ static void piix_pata_error_handler(struct ata_port *ap) ata_std_postreset); } - static void piix_sata_error_handler(struct ata_port *ap) { ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL, diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ca67484af1eb..84e9448c12d4 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2979,23 +2979,68 @@ int ata_busy_sleep(struct ata_port *ap, return 0; } -static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) +/** + * ata_wait_ready - sleep until BSY clears, or timeout + * @ap: port containing status register to be polled + * @deadline: deadline jiffies for the operation + * + * Sleep until ATA Status register bit BSY clears, or timeout + * occurs. + * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. 
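+ * (-ENODEV if the status register reads 0xff, -EBUSY if @deadline
+ * passes while the device is still busy.)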
+ */ +int ata_wait_ready(struct ata_port *ap, unsigned long deadline) +{ + unsigned long start = jiffies; + int warned = 0; + + while (1) { + u8 status = ata_chk_status(ap); + unsigned long now = jiffies; + + if (!(status & ATA_BUSY)) + return 0; + if (status == 0xff) + return -ENODEV; + if (time_after(now, deadline)) + return -EBUSY; + + if (!warned && time_after(now, start + 5 * HZ) && + (deadline - now > 3 * HZ)) { + ata_port_printk(ap, KERN_WARNING, + "port is slow to respond, please be patient " + "(Status 0x%x)\n", status); + warned = 1; + } + + msleep(50); + } +} + +static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask, + unsigned long deadline) { struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int dev0 = devmask & (1 << 0); unsigned int dev1 = devmask & (1 << 1); - unsigned long timeout; + int rc; /* if device 0 was found in ata_devchk, wait for its * BSY bit to clear */ - if (dev0) - ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + if (dev0) { + rc = ata_wait_ready(ap, deadline); + if (rc && rc != -ENODEV) + return rc; + } /* if device 1 was found in ata_devchk, wait for * register access, then wait for BSY to clear */ - timeout = jiffies + ATA_TMOUT_BOOT; while (dev1) { u8 nsect, lbal; @@ -3004,14 +3049,15 @@ static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) lbal = ioread8(ioaddr->lbal_addr); if ((nsect == 1) && (lbal == 1)) break; - if (time_after(jiffies, timeout)) { - dev1 = 0; - break; - } + if (time_after(jiffies, deadline)) + return -EBUSY; msleep(50); /* give drive a breather */ } - if (dev1) - ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + if (dev1) { + rc = ata_wait_ready(ap, deadline); + if (rc && rc != -ENODEV) + return rc; + } /* is all this really necessary? */ ap->ops->dev_select(ap, 0); @@ -3019,10 +3065,12 @@ static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) ap->ops->dev_select(ap, 1); if (dev0) ap->ops->dev_select(ap, 0); + + return 0; } -static unsigned int ata_bus_softreset(struct ata_port *ap, - unsigned int devmask) +static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, + unsigned long deadline) { struct ata_ioports *ioaddr = &ap->ioaddr; @@ -3054,9 +3102,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap, if (ata_check_status(ap) == 0xFF) return 0; - ata_bus_post_reset(ap, devmask); - - return 0; + return ata_bus_post_reset(ap, devmask, deadline); } /** @@ -3107,7 +3153,7 @@ void ata_bus_reset(struct ata_port *ap) /* issue bus reset */ if (ap->flags & ATA_FLAG_SRST) - if (ata_bus_softreset(ap, devmask)) + if (ata_bus_softreset(ap, devmask, jiffies + 40 * HZ)) goto err_out; /* @@ -3150,29 +3196,37 @@ err_out: * sata_phy_debounce - debounce SATA phy status * @ap: ATA port to debounce SATA phy status for * @params: timing parameters { interval, duratinon, timeout } in msec + * @deadline: deadline jiffies for the operation * * Make sure SStatus of @ap reaches stable state, determined by * holding the same value where DET is not 1 for @duration polled * every @interval, before @timeout. Timeout constraints the - * beginning of the stable state. Because, after hot unplugging, - * DET gets stuck at 1 on some controllers, this functions waits + * beginning of the stable state. Because DET gets stuck at 1 on + * some controllers after hot unplugging, this functions waits * until timeout then returns 0 if DET is stable at 1. * + * @timeout is further limited by @deadline. The sooner of the + * two is used. 
+ * * LOCKING: * Kernel thread context (may sleep) * * RETURNS: * 0 on success, -errno on failure. */ -int sata_phy_debounce(struct ata_port *ap, const unsigned long *params) +int sata_phy_debounce(struct ata_port *ap, const unsigned long *params, + unsigned long deadline) { unsigned long interval_msec = params[0]; - unsigned long duration = params[1] * HZ / 1000; - unsigned long timeout = jiffies + params[2] * HZ / 1000; - unsigned long last_jiffies; + unsigned long duration = msecs_to_jiffies(params[1]); + unsigned long last_jiffies, t; u32 last, cur; int rc; + t = jiffies + msecs_to_jiffies(params[2]); + if (time_before(t, deadline)) + deadline = t; + if ((rc = sata_scr_read(ap, SCR_STATUS, &cur))) return rc; cur &= 0xf; @@ -3188,7 +3242,7 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params) /* DET stable? */ if (cur == last) { - if (cur == 1 && time_before(jiffies, timeout)) + if (cur == 1 && time_before(jiffies, deadline)) continue; if (time_after(jiffies, last_jiffies + duration)) return 0; @@ -3199,8 +3253,8 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params) last = cur; last_jiffies = jiffies; - /* check timeout */ - if (time_after(jiffies, timeout)) + /* check deadline */ + if (time_after(jiffies, deadline)) return -EBUSY; } } @@ -3209,6 +3263,7 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params) * sata_phy_resume - resume SATA phy * @ap: ATA port to resume SATA phy for * @params: timing parameters { interval, duratinon, timeout } in msec + * @deadline: deadline jiffies for the operation * * Resume SATA phy of @ap and debounce it. * @@ -3218,7 +3273,8 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params) * RETURNS: * 0 on success, -errno on failure. */ -int sata_phy_resume(struct ata_port *ap, const unsigned long *params) +int sata_phy_resume(struct ata_port *ap, const unsigned long *params, + unsigned long deadline) { u32 scontrol; int rc; @@ -3236,10 +3292,10 @@ int sata_phy_resume(struct ata_port *ap, const unsigned long *params) */ msleep(200); - return sata_phy_debounce(ap, params); + return sata_phy_debounce(ap, params, deadline); } -static void ata_wait_spinup(struct ata_port *ap) +static void ata_wait_spinup(struct ata_port *ap, unsigned long deadline) { struct ata_eh_context *ehc = &ap->eh_context; unsigned long end, secs; @@ -3247,7 +3303,7 @@ static void ata_wait_spinup(struct ata_port *ap) /* first, debounce phy if SATA */ if (ap->cbl == ATA_CBL_SATA) { - rc = sata_phy_debounce(ap, sata_deb_timing_hotplug); + rc = sata_phy_debounce(ap, sata_deb_timing_hotplug, deadline); /* if debounced successfully and offline, no need to wait */ if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap)) @@ -3271,6 +3327,7 @@ static void ata_wait_spinup(struct ata_port *ap) /** * ata_std_prereset - prepare for reset * @ap: ATA port to be reset + * @deadline: deadline jiffies for the operation * * @ap is about to be reset. Initialize it. * @@ -3280,7 +3337,7 @@ static void ata_wait_spinup(struct ata_port *ap) * RETURNS: * 0 on success, -errno otherwise. 
*/ -int ata_std_prereset(struct ata_port *ap) +int ata_std_prereset(struct ata_port *ap, unsigned long deadline) { struct ata_eh_context *ehc = &ap->eh_context; const unsigned long *timing = sata_ehc_deb_timing(ehc); @@ -3293,7 +3350,7 @@ int ata_std_prereset(struct ata_port *ap) if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) && (ap->flags & ATA_FLAG_SKIP_D2H_BSY)) - ata_wait_spinup(ap); + ata_wait_spinup(ap, deadline); /* if we're about to do hardreset, nothing more to do */ if (ehc->i.action & ATA_EH_HARDRESET) @@ -3301,7 +3358,7 @@ int ata_std_prereset(struct ata_port *ap) /* if SATA, resume phy */ if (ap->cbl == ATA_CBL_SATA) { - rc = sata_phy_resume(ap, timing); + rc = sata_phy_resume(ap, timing, deadline); if (rc && rc != -EOPNOTSUPP) { /* phy resume failed */ ata_port_printk(ap, KERN_WARNING, "failed to resume " @@ -3314,7 +3371,7 @@ int ata_std_prereset(struct ata_port *ap) * Reg FIS and we don't know that no device is attached. */ if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) - ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + ata_wait_ready(ap, deadline); return 0; } @@ -3323,6 +3380,7 @@ int ata_std_prereset(struct ata_port *ap) * ata_std_softreset - reset host port via ATA SRST * @ap: port to reset * @classes: resulting classes of attached devices + * @deadline: deadline jiffies for the operation * * Reset host port using ATA SRST. * @@ -3332,10 +3390,12 @@ int ata_std_prereset(struct ata_port *ap) * RETURNS: * 0 on success, -errno otherwise. */ -int ata_std_softreset(struct ata_port *ap, unsigned int *classes) +int ata_std_softreset(struct ata_port *ap, unsigned int *classes, + unsigned long deadline) { unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; - unsigned int devmask = 0, err_mask; + unsigned int devmask = 0; + int rc; u8 err; DPRINTK("ENTER\n"); @@ -3356,11 +3416,10 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes) /* issue bus reset */ DPRINTK("about to softreset, devmask=%x\n", devmask); - err_mask = ata_bus_softreset(ap, devmask); - if (err_mask) { - ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n", - err_mask); - return -EIO; + rc = ata_bus_softreset(ap, devmask, deadline); + if (rc) { + ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc); + return rc; } /* determine by signature whether we have ATA or ATAPI devices */ @@ -3377,6 +3436,7 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes) * sata_port_hardreset - reset port via SATA phy reset * @ap: port to reset * @timing: timing parameters { interval, duratinon, timeout } in msec + * @deadline: deadline jiffies for the operation * * SATA phy-reset host port using DET bits of SControl register. * @@ -3386,7 +3446,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes) * RETURNS: * 0 on success, -errno otherwise. 
*/ -int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing) +int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing, + unsigned long deadline) { u32 scontrol; int rc; @@ -3425,7 +3486,7 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing) msleep(1); /* bring phy back */ - rc = sata_phy_resume(ap, timing); + rc = sata_phy_resume(ap, timing, deadline); out: DPRINTK("EXIT, rc=%d\n", rc); return rc; @@ -3435,6 +3496,7 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing) * sata_std_hardreset - reset host port via SATA phy reset * @ap: port to reset * @class: resulting class of attached device + * @deadline: deadline jiffies for the operation * * SATA phy-reset host port using DET bits of SControl register, * wait for !BSY and classify the attached device. @@ -3445,7 +3507,8 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing) * RETURNS: * 0 on success, -errno otherwise. */ -int sata_std_hardreset(struct ata_port *ap, unsigned int *class) +int sata_std_hardreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) { const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context); int rc; @@ -3453,7 +3516,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class) DPRINTK("ENTER\n"); /* do hardreset */ - rc = sata_port_hardreset(ap, timing); + rc = sata_port_hardreset(ap, timing, deadline); if (rc) { ata_port_printk(ap, KERN_ERR, "COMRESET failed (errno=%d)\n", rc); @@ -3470,10 +3533,11 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class) /* wait a while before checking status, see SRST for more info */ msleep(150); - if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { + rc = ata_wait_ready(ap, deadline); + if (rc && rc != -ENODEV) { ata_port_printk(ap, KERN_ERR, - "COMRESET failed (device not ready)\n"); - return -EIO; + "COMRESET failed (errno=%d)\n", rc); + return rc; } ap->ops->dev_select(ap, 0); /* probably unnecessary */ @@ -6793,6 +6857,7 @@ EXPORT_SYMBOL_GPL(ata_port_disable); EXPORT_SYMBOL_GPL(ata_ratelimit); EXPORT_SYMBOL_GPL(ata_wait_register); EXPORT_SYMBOL_GPL(ata_busy_sleep); +EXPORT_SYMBOL_GPL(ata_wait_ready); EXPORT_SYMBOL_GPL(ata_port_queue_task); EXPORT_SYMBOL_GPL(ata_scsi_ioctl); EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 2bff9adcacf1..b3f7d3c8ae60 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1558,14 +1558,14 @@ static void ata_eh_report(struct ata_port *ap) } static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, - unsigned int *classes) + unsigned int *classes, unsigned long deadline) { int i, rc; for (i = 0; i < ATA_MAX_DEVICES; i++) classes[i] = ATA_DEV_UNKNOWN; - rc = reset(ap, classes); + rc = reset(ap, classes, deadline); if (rc) return rc; @@ -1624,7 +1624,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, ehc->i.action |= ATA_EH_HARDRESET; if (prereset) { - rc = prereset(ap); + rc = prereset(ap, jiffies + 40 * HZ); if (rc) { if (rc == -ENOENT) { ata_port_printk(ap, KERN_DEBUG, @@ -1676,7 +1676,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, else ehc->i.flags |= ATA_EHI_DID_SOFTRESET; - rc = ata_do_reset(ap, reset, classes); + rc = ata_do_reset(ap, reset, classes, jiffies + 40 * HZ); did_followup_srst = 0; if (reset == hardreset && @@ -1693,7 +1693,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, } ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); - rc = 
ata_do_reset(ap, reset, classes); + rc = ata_do_reset(ap, reset, classes, jiffies + 40 * HZ); if (rc == 0 && classify && classes[0] == ATA_DEV_UNKNOWN) { diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index 536ee892ab72..67c7e87dec04 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -121,12 +121,13 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse /** * amd_probe_init - perform reset handling * @ap: ATA port + * @deadline: deadline jiffies for the operation * * Reset sequence checking enable bits to see which ports are * active. */ -static int amd_pre_reset(struct ata_port *ap) +static int amd_pre_reset(struct ata_port *ap, unsigned long deadline) { static const struct pci_bits amd_enable_bits[] = { { 0x40, 1, 0x02, 0x02 }, @@ -138,8 +139,7 @@ static int amd_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); - + return ata_std_prereset(ap, deadline); } static void amd_error_handler(struct ata_port *ap) @@ -227,7 +227,8 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev) * space for us. */ -static int nv_pre_reset(struct ata_port *ap) { +static int nv_pre_reset(struct ata_port *ap, unsigned long deadline) +{ static const struct pci_bits nv_enable_bits[] = { { 0x50, 1, 0x02, 0x02 }, { 0x50, 1, 0x01, 0x01 } @@ -238,7 +239,7 @@ static int nv_pre_reset(struct ata_port *ap) { if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } static void nv_error_handler(struct ata_port *ap) diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index 00e9ec342db0..d472894a983b 100644 --- a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c @@ -39,7 +39,7 @@ static int clock = 0; -static int artop6210_pre_reset(struct ata_port *ap) +static int artop6210_pre_reset(struct ata_port *ap, unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); const struct pci_bits artop_enable_bits[] = { @@ -49,7 +49,8 @@ static int artop6210_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + + return ata_std_prereset(ap, deadline); } /** @@ -70,12 +71,13 @@ static void artop6210_error_handler(struct ata_port *ap) /** * artop6260_pre_reset - check for 40/80 pin * @ap: Port + * @deadline: deadline jiffies for the operation * * The ARTOP hardware reports the cable detect bits in register 0x49. * Nothing complicated needed here. 
*/ -static int artop6260_pre_reset(struct ata_port *ap) +static int artop6260_pre_reset(struct ata_port *ap, unsigned long deadline) { static const struct pci_bits artop_enable_bits[] = { { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 39c871a3ddac..21515381b5b3 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -33,7 +33,7 @@ enum { ATIIXP_IDE_UDMA_MODE = 0x56 }; -static int atiixp_pre_reset(struct ata_port *ap) +static int atiixp_pre_reset(struct ata_port *ap, unsigned long deadline) { static const struct pci_bits atiixp_enable_bits[] = { { 0x48, 1, 0x01, 0x00 }, @@ -44,7 +44,7 @@ static int atiixp_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } static void atiixp_error_handler(struct ata_port *ap) diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index 08cccc9c659b..22006ae71941 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c @@ -72,6 +72,7 @@ /** * cs5535_cable_detect - detect cable type * @ap: Port to detect on + * @deadline: deadline jiffies for the operation * * Perform cable detection for ATA66 capable cable. Return a libata * cable type. diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c index a3216850bba1..d0f52e034906 100644 --- a/drivers/ata/pata_efar.c +++ b/drivers/ata/pata_efar.c @@ -27,12 +27,13 @@ /** * efar_pre_reset - Enable bits * @ap: Port + * @deadline: deadline jiffies for the operation * * Perform cable detection for the EFAR ATA interface. This is * different to the PIIX arrangement */ -static int efar_pre_reset(struct ata_port *ap) +static int efar_pre_reset(struct ata_port *ap, unsigned long deadline) { static const struct pci_bits efar_enable_bits[] = { { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ @@ -43,7 +44,7 @@ static int efar_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index 93cfa6d300a5..e64e05e5c7fe 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -220,7 +220,7 @@ static int hpt36x_cable_detect(struct ata_port *ap) return ATA_CBL_PATA80; } -static int hpt36x_pre_reset(struct ata_port *ap) +static int hpt36x_pre_reset(struct ata_port *ap, unsigned long deadline) { static const struct pci_bits hpt36x_enable_bits[] = { { 0x50, 1, 0x04, 0x04 }, @@ -231,7 +231,7 @@ static int hpt36x_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &hpt36x_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 41d831296347..1614e8c822a4 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -307,11 +307,12 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) /** * hpt37x_pre_reset - reset the hpt37x bus * @ap: ATA port to reset + * @deadline: deadline jiffies for the operation * * Perform the initial reset handling for the 370/372 and 374 func 0 */ -static int hpt37x_pre_reset(struct ata_port *ap) +static int hpt37x_pre_reset(struct ata_port *ap, unsigned long deadline) { u8 scr2, ata66; struct pci_dev *pdev = to_pci_dev(ap->host->dev); @@ -338,7 +339,7 @@ static int 
hpt37x_pre_reset(struct ata_port *ap) pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(100); - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } /** @@ -353,7 +354,7 @@ static void hpt37x_error_handler(struct ata_port *ap) ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); } -static int hpt374_pre_reset(struct ata_port *ap) +static int hpt374_pre_reset(struct ata_port *ap, unsigned long deadline) { static const struct pci_bits hpt37x_enable_bits[] = { { 0x50, 1, 0x04, 0x04 }, @@ -388,7 +389,7 @@ static int hpt374_pre_reset(struct ata_port *ap) pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(100); - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index 6a34521b9e01..ea1037d67860 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c @@ -148,13 +148,14 @@ static int hpt3x2n_cable_detect(struct ata_port *ap) * Reset the hardware and state machine, */ -static int hpt3xn_pre_reset(struct ata_port *ap) +static int hpt3xn_pre_reset(struct ata_port *ap, unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); /* Reset the state machine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(100); - return ata_std_prereset(ap); + + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c index 011306ef8334..17bf9f3ed013 100644 --- a/drivers/ata/pata_it8213.c +++ b/drivers/ata/pata_it8213.c @@ -24,12 +24,13 @@ /** * it8213_pre_reset - check for 40/80 pin * @ap: Port + * @deadline: deadline jiffies for the operation * * Filter out ports by the enable bits before doing the normal reset * and probe. */ -static int it8213_pre_reset(struct ata_port *ap) +static int it8213_pre_reset(struct ata_port *ap, unsigned long deadline) { static const struct pci_bits it8213_enable_bits[] = { { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ @@ -37,7 +38,8 @@ static int it8213_pre_reset(struct ata_port *ap) struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index 43763c99ea02..1daf78ac6efb 100644 --- a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c @@ -30,16 +30,17 @@ typedef enum { /** * jmicron_pre_reset - check for 40/80 pin * @ap: Port + * @deadline: deadline jiffies for the operation * * Perform the PATA port setup we need. - + * * On the Jmicron 361/363 there is a single PATA port that can be mapped * either as primary or secondary (or neither). We don't do any policy * and setup here. We assume that has been done by init_one and the * BIOS. 
*/ -static int jmicron_pre_reset(struct ata_port *ap) +static int jmicron_pre_reset(struct ata_port *ap, unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 control; @@ -102,7 +103,7 @@ static int jmicron_pre_reset(struct ata_port *ap) ap->cbl = ATA_CBL_SATA; break; } - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index d9b94a1b6954..8ab236c54823 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c @@ -25,11 +25,12 @@ /** * marvell_pre_reset - check for 40/80 pin * @ap: Port + * @deadline: deadline jiffies for the operation * * Perform the PATA port setup we need. */ -static int marvell_pre_reset(struct ata_port *ap) +static int marvell_pre_reset(struct ata_port *ap, unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 devices; @@ -67,6 +68,7 @@ static int marvell_cable_detect(struct ata_port *ap) case 1: /* Legacy SATA port */ return ATA_CBL_SATA; } + BUG(); return 0; /* Our BUG macro needs the right markup */ } diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c index 987c5fafab08..3bfbd495f643 100644 --- a/drivers/ata/pata_mpiix.c +++ b/drivers/ata/pata_mpiix.c @@ -46,14 +46,15 @@ enum { SECONDARY = (1 << 14) }; -static int mpiix_pre_reset(struct ata_port *ap) +static int mpiix_pre_reset(struct ata_port *ap, unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 }; if (!pci_test_config_bits(pdev, &mpiix_enable_bits)) return -ENOENT; - return ata_std_prereset(ap); + + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c index 078aeda9cf8d..ebc58a907d26 100644 --- a/drivers/ata/pata_ns87410.c +++ b/drivers/ata/pata_ns87410.c @@ -33,11 +33,12 @@ /** * ns87410_pre_reset - probe begin * @ap: ATA port + * @deadline: deadline jiffies for the operation * * Check enabled ports */ -static int ns87410_pre_reset(struct ata_port *ap) +static int ns87410_pre_reset(struct ata_port *ap, unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits ns87410_enable_bits[] = { @@ -47,7 +48,8 @@ static int ns87410_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c index dea4690340d1..4d75d32e5826 100644 --- a/drivers/ata/pata_oldpiix.c +++ b/drivers/ata/pata_oldpiix.c @@ -30,11 +30,12 @@ /** * oldpiix_pre_reset - probe begin * @ap: ATA port + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int oldpiix_pre_reset(struct ata_port *ap) +static int oldpiix_pre_reset(struct ata_port *ap, unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits oldpiix_enable_bits[] = { @@ -44,7 +45,8 @@ static int oldpiix_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c index 13b63e21838d..0af8a2c77cc9 100644 --- a/drivers/ata/pata_opti.c +++ b/drivers/ata/pata_opti.c @@ -47,11 +47,12 @@ enum { /** * opti_pre_reset - probe begin * 
@ap: ATA port + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int opti_pre_reset(struct ata_port *ap) +static int opti_pre_reset(struct ata_port *ap, unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits opti_enable_bits[] = { @@ -61,7 +62,8 @@ static int opti_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index b70e04c144df..2843e480f216 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c @@ -48,11 +48,12 @@ static int pci_clock; /* 0 = 33 1 = 25 */ /** * optidma_pre_reset - probe begin * @ap: ATA port + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int optidma_pre_reset(struct ata_port *ap) +static int optidma_pre_reset(struct ata_port *ap, unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits optidma_enable_bits = { @@ -62,7 +63,7 @@ static int optidma_pre_reset(struct ata_port *ap) if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) return -ENOENT; - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index a61cbc110688..0d2cc49fde4b 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c @@ -301,6 +301,7 @@ static inline int pdc2027x_port_enabled(struct ata_port *ap) /** * pdc2027x_prereset - prereset for PATA host controller * @ap: Target port + * @deadline: deadline jiffies for the operation * * Probeinit including cable detection. * @@ -308,12 +309,12 @@ static inline int pdc2027x_port_enabled(struct ata_port *ap) * None (inherited from caller). 
*/ -static int pdc2027x_prereset(struct ata_port *ap) +static int pdc2027x_prereset(struct ata_port *ap, unsigned long deadline) { /* Check whether port enabled */ if (!pdc2027x_port_enabled(ap)) return -ENOENT; - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } /** diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 3956ef26936d..b6e020383dd9 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -139,12 +139,14 @@ static struct sv_cable_table cable_detect[] = { /** * serverworks_cable_detect - cable detection * @ap: ATA port + * @deadline: deadline jiffies for the operation * * Perform cable detection according to the device and subvendor * identifications */ -static int serverworks_cable_detect(struct ata_port *ap) { +static int serverworks_cable_detect(struct ata_port *ap) +{ struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct sv_cable_table *cb = cable_detect; diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 6770820cfca9..a5886f061c0b 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c @@ -94,11 +94,13 @@ static int sil680_cable_detect(struct ata_port *ap) { /** * sil680_bus_reset - reset the SIL680 bus * @ap: ATA port to reset + * @deadline: deadline jiffies for the operation * * Perform the SIL680 housekeeping when doing an ATA bus reset */ -static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes) +static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes, + unsigned long deadline) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); unsigned long addr = sil680_selreg(ap, 0); @@ -108,7 +110,7 @@ static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes) pci_write_config_byte(pdev, addr, reset | 0x03); udelay(25); pci_write_config_byte(pdev, addr, reset); - return ata_std_softreset(ap, classes); + return ata_std_softreset(ap, classes, deadline); } static void sil680_error_handler(struct ata_port *ap) diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index a3fbcee6fb33..7c6b58223c79 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c @@ -88,6 +88,7 @@ static int sis_port_base(struct ata_device *adev) /** * sis_133_cable_detect - check for 40/80 pin * @ap: Port + * @deadline: deadline jiffies for the operation * * Perform cable detection for the later UDMA133 capable * SiS chipset. @@ -108,6 +109,7 @@ static int sis_133_cable_detect(struct ata_port *ap) /** * sis_66_cable_detect - check for 40/80 pin * @ap: Port + * @deadline: deadline jiffies for the operation * * Perform cable detection on the UDMA66, UDMA100 and early UDMA133 * SiS IDE controllers. 
@@ -130,11 +132,12 @@ static int sis_66_cable_detect(struct ata_port *ap) /** * sis_pre_reset - probe begin * @ap: ATA port + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int sis_pre_reset(struct ata_port *ap) +static int sis_old_pre_reset(struct ata_port *ap, unsigned long deadline) { static const struct pci_bits sis_enable_bits[] = { { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */ @@ -145,7 +148,8 @@ static int sis_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + + return ata_std_prereset(ap, deadline); } diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index da9e22b25753..9aeffdbe2829 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c @@ -44,11 +44,12 @@ enum { /** * sl82c105_pre_reset - probe begin * @ap: ATA port + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int sl82c105_pre_reset(struct ata_port *ap) +static int sl82c105_pre_reset(struct ata_port *ap, unsigned long deadline) { static const struct pci_bits sl82c105_enable_bits[] = { { 0x40, 1, 0x01, 0x01 }, @@ -58,7 +59,7 @@ static int sl82c105_pre_reset(struct ata_port *ap) if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + return ata_std_prereset(ap, deadline); } diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index e618ffd6e944..349887bf5b93 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c @@ -48,11 +48,12 @@ /** * triflex_prereset - probe begin * @ap: ATA port + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int triflex_prereset(struct ata_port *ap) +static int triflex_prereset(struct ata_port *ap, unsigned long deadline) { static const struct pci_bits triflex_enable_bits[] = { { 0x80, 1, 0x01, 0x01 }, @@ -63,7 +64,8 @@ static int triflex_prereset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) return -ENOENT; - return ata_std_prereset(ap); + + return ata_std_prereset(ap, deadline); } diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 96b71791d2f4..362beb2f489c 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -154,7 +154,7 @@ static int via_cable_detect(struct ata_port *ap) { return ATA_CBL_PATA40; } -static int via_pre_reset(struct ata_port *ap) +static int via_pre_reset(struct ata_port *ap, unsigned long deadline) { const struct via_isa_bridge *config = ap->host->private_data; @@ -167,7 +167,8 @@ static int via_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no])) return -ENOENT; } - return ata_std_prereset(ap); + + return ata_std_prereset(ap, deadline); } diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index f099a1d83a00..25b747e26133 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -420,7 +420,8 @@ static void inic_thaw(struct ata_port *ap) * SRST and SControl hardreset don't give valid signature on this * controller. Only controller specific hardreset mechanism works. 
*/ -static int inic_hardreset(struct ata_port *ap, unsigned int *class) +static int inic_hardreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) { void __iomem *port_base = inic_port_base(ap); void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; @@ -437,7 +438,7 @@ static int inic_hardreset(struct ata_port *ap, unsigned int *class) msleep(1); writew(val & ~IDMA_CTL_RST_ATA, idma_ctl); - rc = sata_phy_resume(ap, timing); + rc = sata_phy_resume(ap, timing, deadline); if (rc) { ata_port_printk(ap, KERN_WARNING, "failed to resume " "link after reset (errno=%d)\n", rc); @@ -451,10 +452,11 @@ static int inic_hardreset(struct ata_port *ap, unsigned int *class) /* wait a while before checking status */ msleep(150); - if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { - ata_port_printk(ap, KERN_WARNING, - "device busy after hardreset\n"); - return -EIO; + rc = ata_wait_ready(ap, deadline); + if (rc && rc != -ENODEV) { + ata_port_printk(ap, KERN_WARNING, "device not ready " + "after hardreset (errno=%d)\n", rc); + return rc; } ata_tf_read(ap, &tf); diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 02169740ed24..e2e795e58236 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -1405,7 +1405,8 @@ static void nv_ck804_thaw(struct ata_port *ap) writeb(mask, mmio_base + NV_INT_ENABLE_CK804); } -static int nv_hardreset(struct ata_port *ap, unsigned int *class) +static int nv_hardreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) { unsigned int dummy; @@ -1413,7 +1414,7 @@ static int nv_hardreset(struct ata_port *ap, unsigned int *class) * some controllers. Don't classify on hardreset. For more * info, see http://bugme.osdl.org/show_bug.cgi?id=3352 */ - return sata_std_hardreset(ap, &dummy); + return sata_std_hardreset(ap, &dummy, deadline); } static void nv_error_handler(struct ata_port *ap) diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index e6223ba667da..b97ee9f31aec 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -534,7 +534,8 @@ static int sil24_init_port(struct ata_port *ap) return 0; } -static int sil24_softreset(struct ata_port *ap, unsigned int *class) +static int sil24_softreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) { void __iomem *port = ap->ioaddr.cmd_addr; struct sil24_port_priv *pp = ap->private_data; @@ -566,7 +567,7 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class) mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT; irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0, - 100, ATA_TMOUT_BOOT / HZ * 1000); + 100, jiffies_to_msecs(deadline - jiffies)); writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */ irq_stat >>= PORT_IRQ_RAW_SHIFT; @@ -594,7 +595,8 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class) return -EIO; } -static int sil24_hardreset(struct ata_port *ap, unsigned int *class) +static int sil24_hardreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) { void __iomem *port = ap->ioaddr.cmd_addr; const char *reason; @@ -615,7 +617,7 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class) /* SStatus oscillates between zero and valid status after * DEV_RST, debounce it. 
*/ - rc = sata_phy_debounce(ap, sata_deb_timing_long); + rc = sata_phy_debounce(ap, sata_deb_timing_long, deadline); if (rc) { reason = "PHY debouncing failed"; goto err; diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 1d855f55f5f7..305ab7c68ca5 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -268,6 +268,7 @@ static void svia_noop_freeze(struct ata_port *ap) /** * vt6420_prereset - prereset for vt6420 * @ap: target ATA port + * @deadline: deadline jiffies for the operation * * SCR registers on vt6420 are pieces of shit and may hang the * whole machine completely if accessed with the wrong timing. @@ -284,7 +285,7 @@ static void svia_noop_freeze(struct ata_port *ap) * RETURNS: * 0 on success, -errno otherwise. */ -static int vt6420_prereset(struct ata_port *ap) +static int vt6420_prereset(struct ata_port *ap, unsigned long deadline) { struct ata_eh_context *ehc = &ap->eh_context; unsigned long timeout = jiffies + (HZ * 5); @@ -329,7 +330,7 @@ static int vt6420_prereset(struct ata_port *ap) skip_scr: /* wait for !BSY */ - ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + ata_wait_ready(ap, deadline); return 0; } diff --git a/include/linux/libata.h b/include/linux/libata.h index d8cfc72ea9c1..69fc1b8a9215 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -348,8 +348,9 @@ struct ata_queued_cmd; /* typedefs */ typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); -typedef int (*ata_prereset_fn_t)(struct ata_port *ap); -typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes); +typedef int (*ata_prereset_fn_t)(struct ata_port *ap, unsigned long deadline); +typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes, + unsigned long deadline); typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes); struct ata_ioports { @@ -688,13 +689,17 @@ extern void __sata_phy_reset(struct ata_port *ap); extern void sata_phy_reset(struct ata_port *ap); extern void ata_bus_reset(struct ata_port *ap); extern int sata_set_spd(struct ata_port *ap); -extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param); -extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param); -extern int ata_std_prereset(struct ata_port *ap); -extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes); -extern int sata_port_hardreset(struct ata_port *ap, - const unsigned long *timing); -extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class); +extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param, + unsigned long deadline); +extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param, + unsigned long deadline); +extern int ata_std_prereset(struct ata_port *ap, unsigned long deadline); +extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes, + unsigned long deadline); +extern int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing, + unsigned long deadline); +extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline); extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes); extern void ata_port_disable(struct ata_port *); extern void ata_std_ports(struct ata_ioports *ioaddr); @@ -750,6 +755,7 @@ extern void ata_host_resume(struct ata_host *host); extern int ata_ratelimit(void); extern int ata_busy_sleep(struct ata_port *ap, unsigned long timeout_pat, unsigned long timeout); +extern int ata_wait_ready(struct ata_port *ap, 
unsigned long deadline); extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, unsigned long delay); extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, -- cgit v1.2.3 From 31daabda16063b64a99a526242add727601e43c3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Feb 2007 16:50:52 +0900 Subject: libata: reimplement reset sequencing

libata previously depended upon waits in prereset to get resets after hotplug right for both spin up and device ready wait. This was necessary both for reliability and speed, as reset was likely to fail if initiated too early and each try usually took more than 30 secs to fail. Previous patches fixed the reliability part by fixing status and SCR handling in resets. This patch remedies the speed part by improving reset sequencing.

The prereset waiting timeout is adjusted to 10s because the spinup wait is replaced by reset sequencing and the !BSY wait is not as important as before. During boot or module loading, where the drive is already fully spun up, the !BSY wait succeeds immediately, so 10s should be enough in most cases. It matters after hotplugging or other error conditions, but in those cases the !BSY wait in prereset simply can't be relied upon due to the varied and weird behaviors ATA controllers and devices show.

Reset is now driven by the ata_eh_reset_timeouts[] table, which contains a timeout for each reset try. The first reset can be a softreset but the following ones are always hardreset if available. Each timeout defines the deadline for that reset try. If a reset try fails, reset is retried with the next timeout until the end of the timeout table is reached. If a reset try fails with an error before its timeout, libata waits until the deadline of the failed try before retrying. IOW, the timeout table defines a timetable of reset tries such that the n'th try always begins only after the sum of all previous timeouts has passed. The current timetable defines 4 tries and takes around 1 minute.

@0  : First try. This should succeed most of the time during boot.
@10 : 10s is enough to spin up most consumer harddrives. Give it another shot.
@20 : 20s should spin up > 99% of working drives. This has a 30s timeout for retarded devices needing long idleness post reset.
@55 : Final try with a 5s timeout, just in case.

The above timetable is a trade-off between not annoying the device too much with frequent resets and taking a reasonable amount of time in most cases. Some controllers may do better with shorter timeouts while others may fare better with longer ones, but we just can't rely upon LLD writers to test each controller with a wide variety of devices under various scenarios. We need a default behavior which reasonably fits most cases.

I've tested the above timetable on a dozen SATA controllers and a few PATA controllers, with about a dozen different drives from all major vendors and 4 different ODDs from three different vendors, for both boot and hotplug (if available) cases. Boot probing is not affected unless the device is broken, in which case the new code gives up on the port after a minute rather than five or nine minutes. When hotplugging, most devices get detected on the first or second try. Multi-platter drives with long spin-up times, which sometimes took > 40 secs with the original code, now usually come up during the second try and at least right after the third try @20.
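In rough pseudo-C, the sequencing described above works like the sketch below (condensed and hypothetical: do_one_reset() stands in for the prereset/softreset/hardreset plumbing in libata-eh.c, and classification and error handling are omitted):

static const unsigned long reset_timeouts[] = {
	10 * HZ,	/* most drives spin up by 10 secs */
	10 * HZ,	/* > 99% of working drives spin up before 20 secs */
	35 * HZ,	/* allow > 30 secs of idleness for slow devices */
	 5 * HZ,	/* one last short try, then give up */
};

int try = 0, rc;

do {
	unsigned long deadline = jiffies + reset_timeouts[try++];

	rc = do_one_reset(ap, deadline);	/* softreset first, hardreset on retries */
	if (rc == 0)
		break;

	/* a failed try never retries early: sleep out the rest of its slot */
	if (time_before(jiffies, deadline))
		schedule_timeout_uninterruptible(deadline - jiffies);
} while (try < ARRAY_SIZE(reset_timeouts));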
Signed-off-by: Tejun Heo Signed-off-by: Jeff Garzik --- drivers/ata/libata-core.c | 35 +--------------------------- drivers/ata/libata-eh.c | 58 +++++++++++++++++++++++++++++++++-------------- include/linux/libata.h | 16 ------------- 3 files changed, 42 insertions(+), 67 deletions(-) (limited to 'include/linux') diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 12717fa5e888..a7950885d18e 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3304,35 +3304,6 @@ int sata_phy_resume(struct ata_port *ap, const unsigned long *params, return sata_phy_debounce(ap, params, deadline); } -static void ata_wait_spinup(struct ata_port *ap, unsigned long deadline) -{ - struct ata_eh_context *ehc = &ap->eh_context; - unsigned long end, secs; - int rc; - - /* first, debounce phy if SATA */ - if (ap->cbl == ATA_CBL_SATA) { - rc = sata_phy_debounce(ap, sata_deb_timing_hotplug, deadline); - - /* if debounced successfully and offline, no need to wait */ - if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap)) - return; - } - - /* okay, let's give the drive time to spin up */ - end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000; - secs = ((end - jiffies) + HZ - 1) / HZ; - - if (time_after(jiffies, end)) - return; - - if (secs > 5) - ata_port_printk(ap, KERN_INFO, "waiting for device to spin up " - "(%lu secs)\n", secs); - - schedule_timeout_uninterruptible(end - jiffies); -} - /** * ata_std_prereset - prepare for reset * @ap: ATA port to be reset @@ -3356,15 +3327,11 @@ int ata_std_prereset(struct ata_port *ap, unsigned long deadline) const unsigned long *timing = sata_ehc_deb_timing(ehc); int rc; - /* handle link resume & hotplug spinup */ + /* handle link resume */ if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && (ap->flags & ATA_FLAG_HRST_TO_RESUME)) ehc->i.action |= ATA_EH_HARDRESET; - if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) && - (ap->flags & ATA_FLAG_SKIP_D2H_BSY)) - ata_wait_spinup(ap, deadline); - /* if we're about to do hardreset, nothing more to do */ if (ehc->i.action & ATA_EH_HARDRESET) return 0; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index b3f7d3c8ae60..8256655ce7d9 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -50,6 +50,28 @@ enum { ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), }; +/* Waiting in ->prereset can never be reliable. It's sometimes nice + * to wait there but it can't be depended upon; otherwise, we wouldn't + * be resetting. Just give it enough time for most drives to spin up. + */ +enum { + ATA_EH_PRERESET_TIMEOUT = 10 * HZ, +}; + +/* The following table determines how we sequence resets. Each entry + * represents timeout for that try. The first try can be soft or + * hardreset. All others are hardreset if available. In most cases + * the first reset w/ 10sec timeout should succeed. Following entries + * are mostly for error handling, hotplug and retarded devices. 
+ */ +static const unsigned long ata_eh_reset_timeouts[] = { + 10 * HZ, /* most drives spin up by 10sec */ + 10 * HZ, /* > 99% working drives spin up before 20sec */ + 35 * HZ, /* give > 30 secs of idleness for retarded devices */ + 5 * HZ, /* and sweet one last chance */ + /* > 1 min has elapsed, give up */ +}; + static void __ata_port_freeze(struct ata_port *ap); static void ata_eh_finish(struct ata_port *ap); #ifdef CONFIG_PM @@ -1603,8 +1625,9 @@ static int ata_eh_reset(struct ata_port *ap, int classify, { struct ata_eh_context *ehc = &ap->eh_context; unsigned int *classes = ehc->classes; - int tries = ATA_EH_RESET_TRIES; int verbose = !(ehc->i.flags & ATA_EHI_QUIET); + int try = 0; + unsigned long deadline; unsigned int action; ata_reset_fn_t reset; int i, did_followup_srst, rc; @@ -1624,7 +1647,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, ehc->i.action |= ATA_EH_HARDRESET; if (prereset) { - rc = prereset(ap, jiffies + 40 * HZ); + rc = prereset(ap, jiffies + ATA_EH_PRERESET_TIMEOUT); if (rc) { if (rc == -ENOENT) { ata_port_printk(ap, KERN_DEBUG, @@ -1665,6 +1688,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify, } retry: + deadline = jiffies + ata_eh_reset_timeouts[try++]; + /* shut up during boot probing */ if (verbose) ata_port_printk(ap, KERN_INFO, "%s resetting port\n", @@ -1676,7 +1701,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, else ehc->i.flags |= ATA_EHI_DID_SOFTRESET; - rc = ata_do_reset(ap, reset, classes, jiffies + 40 * HZ); + rc = ata_do_reset(ap, reset, classes, deadline); did_followup_srst = 0; if (reset == hardreset && @@ -1693,7 +1718,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, } ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); - rc = ata_do_reset(ap, reset, classes, jiffies + 40 * HZ); + rc = ata_do_reset(ap, reset, classes, deadline); if (rc == 0 && classify && classes[0] == ATA_DEV_UNKNOWN) { @@ -1703,22 +1728,21 @@ static int ata_eh_reset(struct ata_port *ap, int classify, } } - if (rc && --tries) { - const char *type; + if (rc && try < ARRAY_SIZE(ata_eh_reset_timeouts)) { + unsigned long now = jiffies; - if (reset == softreset) { - if (did_followup_srst) - type = "follow-up soft"; - else - type = "soft"; - } else - type = "hard"; + if (time_before(now, deadline)) { + unsigned long delta = deadline - jiffies; - ata_port_printk(ap, KERN_WARNING, - "%sreset failed, retrying in 5 secs\n", type); - ssleep(5); + ata_port_printk(ap, KERN_WARNING, "reset failed " + "(errno=%d), retrying in %u secs\n", + rc, (jiffies_to_msecs(delta) + 999) / 1000); + + schedule_timeout_uninterruptible(delta); + } - if (reset == hardreset) + if (reset == hardreset && + try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1) sata_down_spd_limit(ap); if (hardreset) reset = hardreset; diff --git a/include/linux/libata.h b/include/linux/libata.h index 69fc1b8a9215..7906d750aa77 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -296,18 +296,8 @@ enum { /* how hard are we gonna try to probe/recover devices */ ATA_PROBE_MAX_TRIES = 3, - ATA_EH_RESET_TRIES = 3, ATA_EH_DEV_TRIES = 3, - /* Drive spinup time (time from power-on to the first D2H FIS) - * in msecs - 8s currently. Failing to get ready in this time - * isn't critical. It will result in reset failure for - * controllers which can't wait for the first D2H FIS. libata - * will retry, so it just has to be long enough to spin up - * most devices. - */ - ATA_SPINUP_WAIT = 8000, - /* Horkage types. 
May be set by libata or controller on drives (some horkage may be drive/controller pair dependant */ @@ -495,7 +485,6 @@ struct ata_eh_info { unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */ unsigned int flags; /* ATA_EHI_* flags */ - unsigned long hotplug_timestamp; unsigned int probe_mask; char desc[ATA_EH_DESC_LEN]; @@ -925,12 +914,7 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi) { - if (ehi->flags & ATA_EHI_HOTPLUGGED) - return; - ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK; - ehi->hotplug_timestamp = jiffies; - ehi->action |= ATA_EH_SOFTRESET; ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; } -- cgit v1.2.3 From 55556da01284af8c2174b786b3eca8e11301b656 Mon Sep 17 00:00:00 2001 From: Philip Langdale Date: Fri, 16 Mar 2007 19:39:00 -0700 Subject: MMC: Fix handling of low-voltage cards Fix handling of low voltage MMC cards. The latest MMC and SD specs both agree that support for low-voltage operations is indicated by bit 7 in the OCR. The MMC spec states that the low voltage range is 1.65-1.95V while the SD spec leaves the actual voltage range undefined - meaning that there is still no such thing as a low voltage SD card. However, an old Sandisk spec implied that bits 7:0 represented voltages below 2.0V in 1V or 0.5V increments, and the code was accordingly written with that expectation. This confusion meant that host drivers attempting to support the typical low voltage (1.8V) would set the wrong bits in the host OCR mask (usually bits 5 and/or 6), resulting in the low voltage mode never being used. This change corrects the low voltage range and adds sanity checks on the reserved bits (0-6) and for SD cards that claim to support low-voltage operations. Signed-off-by: Philip Langdale Signed-off-by: Pierre Ossman --- drivers/mmc/core/mmc.c | 11 +++++++++++ drivers/mmc/core/sd.c | 18 ++++++++++++++++++ drivers/mmc/host/sdhci.c | 5 ++--- include/linux/mmc/host.h | 9 +-------- 4 files changed, 32 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c528017d6128..c2e120b11bef 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -323,6 +323,17 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr) mmc_attach_bus(host, &mmc_ops); + /* + * Sanity check the voltages that the card claims to + * support. + */ + if (ocr & 0x7F) { + printk(KERN_WARNING "%s: card claims to support voltages " + "below the defined range. These will be ignored.\n", + mmc_hostname(host)); + ocr &= ~0x7F; + } + host->ocr = mmc_select_voltage(host, ocr); /* diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 6c6beb48f3a8..fb18b301502e 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -297,6 +297,24 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr) mmc_attach_bus(host, &mmc_sd_ops); + /* + * Sanity check the voltages that the card claims to + * support. + */ + if (ocr & 0x7F) { + printk(KERN_WARNING "%s: card claims to support voltages " + "below the defined range. These will be ignored.\n", + mmc_hostname(host)); + ocr &= ~0x7F; + } + + if (ocr & MMC_VDD_165_195) { + printk(KERN_WARNING "%s: SD card claims to support the " + "incompletely defined 'low voltage range'. 
This " + "will be ignored.\n", mmc_hostname(host)); + ocr &= ~MMC_VDD_165_195; + } + host->ocr = mmc_select_voltage(host, ocr); /* diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index a57f6a3d48d3..ff5bf73cdd25 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -659,8 +659,7 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) pwr = SDHCI_POWER_ON; switch (1 << power) { - case MMC_VDD_17_18: - case MMC_VDD_18_19: + case MMC_VDD_165_195: pwr |= SDHCI_POWER_180; break; case MMC_VDD_29_30: @@ -1280,7 +1279,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) if (caps & SDHCI_CAN_VDD_300) mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; if (caps & SDHCI_CAN_VDD_180) - mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; + mmc->ocr_avail |= MMC_VDD_165_195; if (mmc->ocr_avail == 0) { printk(KERN_ERR "%s: Hardware doesn't report any " diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 5a66d8a2bf17..b1350dfd3e91 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -65,14 +65,7 @@ struct mmc_host { unsigned int f_max; u32 ocr_avail; -#define MMC_VDD_145_150 0x00000001 /* VDD voltage 1.45 - 1.50 */ -#define MMC_VDD_150_155 0x00000002 /* VDD voltage 1.50 - 1.55 */ -#define MMC_VDD_155_160 0x00000004 /* VDD voltage 1.55 - 1.60 */ -#define MMC_VDD_160_165 0x00000008 /* VDD voltage 1.60 - 1.65 */ -#define MMC_VDD_165_170 0x00000010 /* VDD voltage 1.65 - 1.70 */ -#define MMC_VDD_17_18 0x00000020 /* VDD voltage 1.7 - 1.8 */ -#define MMC_VDD_18_19 0x00000040 /* VDD voltage 1.8 - 1.9 */ -#define MMC_VDD_19_20 0x00000080 /* VDD voltage 1.9 - 2.0 */ +#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ #define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ #define MMC_VDD_21_22 0x00000200 /* VDD voltage 2.1 ~ 2.2 */ #define MMC_VDD_22_23 0x00000400 /* VDD voltage 2.2 ~ 2.3 */ -- cgit v1.2.3 From bd766312618d2ecc85bce663f95faec601447ecb Mon Sep 17 00:00:00 2001 From: Pierre Ossman Date: Tue, 1 May 2007 16:11:57 +0200 Subject: mmc: remove old card states Remove card states that no longer make any sense. Signed-off-by: Pierre Ossman --- drivers/mmc/core/mmc.c | 22 ++++++++++++++-------- drivers/mmc/core/sd.c | 23 +++++++++++++++-------- drivers/mmc/core/sysfs.c | 5 ++--- include/linux/mmc/card.h | 12 +++--------- 4 files changed, 34 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 619581e49163..42cc2867ed7d 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -56,7 +56,7 @@ static const unsigned int tacc_mant[] = { /* * Given the decoded CSD structure, decode the raw CID to our CID structure. */ -static void mmc_decode_cid(struct mmc_card *card) +static int mmc_decode_cid(struct mmc_card *card) { u32 *resp = card->raw_cid; @@ -101,15 +101,16 @@ static void mmc_decode_cid(struct mmc_card *card) default: printk("%s: card has unknown MMCA version %d\n", mmc_hostname(card->host), card->csd.mmca_vsn); - mmc_card_set_bad(card); - break; + return -EINVAL; } + + return 0; } /* * Given a 128-bit response, decode to our card CSD structure. 
*/ -static void mmc_decode_csd(struct mmc_card *card) +static int mmc_decode_csd(struct mmc_card *card) { struct mmc_csd *csd = &card->csd; unsigned int e, m, csd_struct; @@ -123,8 +124,7 @@ static void mmc_decode_csd(struct mmc_card *card) if (csd_struct != 1 && csd_struct != 2) { printk("%s: unrecognised CSD structure version %d\n", mmc_hostname(card->host), csd_struct); - mmc_card_set_bad(card); - return; + return -EINVAL; } csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); @@ -149,6 +149,8 @@ static void mmc_decode_csd(struct mmc_card *card) csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); csd->write_partial = UNSTUFF_BITS(resp, 21, 1); + + return 0; } /* @@ -300,8 +302,12 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (err != MMC_ERR_NONE) goto free_card; - mmc_decode_csd(card); - mmc_decode_cid(card); + err = mmc_decode_csd(card); + if (err < 0) + goto free_card; + err = mmc_decode_cid(card); + if (err < 0) + goto free_card; } /* diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 6597e778f70e..c1dfd03d559a 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -88,7 +88,7 @@ static void mmc_decode_cid(struct mmc_card *card) /* * Given a 128-bit response, decode to our card CSD structure. */ -static void mmc_decode_csd(struct mmc_card *card) +static int mmc_decode_csd(struct mmc_card *card) { struct mmc_csd *csd = &card->csd; unsigned int e, m, csd_struct; @@ -151,15 +151,16 @@ static void mmc_decode_csd(struct mmc_card *card) default: printk("%s: unrecognised CSD structure version %d\n", mmc_hostname(card->host), csd_struct); - mmc_card_set_bad(card); - return; + return -EINVAL; } + + return 0; } /* * Given a 64-bit response, decode to our card SCR structure. */ -static void mmc_decode_scr(struct mmc_card *card) +static int mmc_decode_scr(struct mmc_card *card) { struct sd_scr *scr = &card->scr; unsigned int scr_struct; @@ -174,12 +175,13 @@ static void mmc_decode_scr(struct mmc_card *card) if (scr_struct != 0) { printk("%s: unrecognised SCR structure version %d\n", mmc_hostname(card->host), scr_struct); - mmc_card_set_bad(card); - return; + return -EINVAL; } scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); + + return 0; } /* @@ -342,7 +344,10 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (err != MMC_ERR_NONE) goto free_card; - mmc_decode_csd(card); + err = mmc_decode_csd(card); + if (err < 0) + goto free_card; + mmc_decode_cid(card); } @@ -361,7 +366,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (err != MMC_ERR_NONE) goto free_card; - mmc_decode_scr(card); + err = mmc_decode_scr(card); + if (err < 0) + goto free_card; /* * Fetch switch information from card. diff --git a/drivers/mmc/core/sysfs.c b/drivers/mmc/core/sysfs.c index 5c9ce02e7e55..843b1fbba557 100644 --- a/drivers/mmc/core/sysfs.c +++ b/drivers/mmc/core/sysfs.c @@ -72,12 +72,11 @@ static void mmc_release_card(struct device *dev) /* * This currently matches any MMC driver to any MMC card - drivers * themselves make the decision whether to drive this card in their - * probe method. However, we force "bad" cards to fail. + * probe method. 
*/ static int mmc_bus_match(struct device *dev, struct device_driver *drv) { - struct mmc_card *card = dev_to_mmc_card(dev); - return !mmc_card_bad(card); + return 1; } static int diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 800425e05165..badf702fcff4 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -69,11 +69,9 @@ struct mmc_card { #define MMC_TYPE_SD 1 /* SD card */ unsigned int state; /* (our) card state */ #define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ -#define MMC_STATE_DEAD (1<<1) /* device no longer in stack */ -#define MMC_STATE_BAD (1<<2) /* unrecognised device */ -#define MMC_STATE_READONLY (1<<3) /* card is read-only */ -#define MMC_STATE_HIGHSPEED (1<<4) /* card is in high speed mode */ -#define MMC_STATE_BLOCKADDR (1<<5) /* card uses block-addressing */ +#define MMC_STATE_READONLY (1<<1) /* card is read-only */ +#define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */ +#define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */ u32 raw_cid[4]; /* raw card CID */ u32 raw_csd[4]; /* raw card CSD */ u32 raw_scr[2]; /* raw card SCR */ @@ -88,15 +86,11 @@ struct mmc_card { #define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) #define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) -#define mmc_card_dead(c) ((c)->state & MMC_STATE_DEAD) -#define mmc_card_bad(c) ((c)->state & MMC_STATE_BAD) #define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) #define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) -#define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD) -#define mmc_card_set_bad(c) ((c)->state |= MMC_STATE_BAD) #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) -- cgit v1.2.3 From fccb56e4d82132ac15359efc9e419371e4533437 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 1 May 2007 23:26:27 +0200 Subject: i2c: Kill i2c_adapter.class_dev Kill i2c_adapter.class_dev. Instead, set the class of i2c_adapter.dev to i2c_adapter_class, so that a symlink will be created for every i2c_adapter in /sys/class/i2c-adapter. The same change must be mirrored to i2c-isa as it duplicates some of the i2c-core functionalities. User-space tools and libraries might need some adjustments. In particular, libsensors from lm_sensors 2.10.3 or later is required for proper discovery of i2c adapter names after this change. Signed-off-by: Jean Delvare --- Documentation/feature-removal-schedule.txt | 14 +++--------- drivers/i2c/busses/i2c-isa.c | 18 +--------------- drivers/i2c/i2c-core.c | 34 +----------------------------- include/linux/i2c.h | 3 --- 4 files changed, 5 insertions(+), 64 deletions(-) (limited to 'include/linux') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 5c88ba1ea262..c4b3bdad15d8 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -190,18 +190,10 @@ Who: Jean Delvare --------------------------- -What: i2c_adapter.dev - i2c_adapter.list +What: i2c_adapter.list When: July 2007 -Why: Superfluous, given i2c_adapter.class_dev: - * The "dev" was a stand-in for the physical device node that legacy - drivers would not have; but now it's almost always present. 
Any - remaining legacy drivers must upgrade (they now trigger warnings). - * The "list" duplicates class device children. - The delay in removing this is so upgraded lm_sensors and libsensors - can get deployed. (Removal causes minor changes in the sysfs layout, - notably the location of the adapter type name and parenting the i2c - client hardware directly from their controller.) +Why: Superfluous, this list duplicates the one maintained by the driver + core. Who: Jean Delvare , David Brownell diff --git a/drivers/i2c/busses/i2c-isa.c b/drivers/i2c/busses/i2c-isa.c index 5f33bc9c1e02..a2e75bc9178b 100644 --- a/drivers/i2c/busses/i2c-isa.c +++ b/drivers/i2c/busses/i2c-isa.c @@ -141,6 +141,7 @@ static int __init i2c_isa_init(void) sprintf(isa_adapter.dev.bus_id, "i2c-%d", isa_adapter.nr); isa_adapter.dev.driver = &i2c_adapter_driver; isa_adapter.dev.release = &i2c_adapter_dev_release; + isa_adapter.dev.class = &i2c_adapter_class; err = device_register(&isa_adapter.dev); if (err) { printk(KERN_ERR "i2c-isa: Failed to register device\n"); @@ -152,24 +153,10 @@ static int __init i2c_isa_init(void) goto exit_unregister; } - /* Add this adapter to the i2c_adapter class */ - memset(&isa_adapter.class_dev, 0x00, sizeof(struct class_device)); - isa_adapter.class_dev.dev = &isa_adapter.dev; - isa_adapter.class_dev.class = &i2c_adapter_class; - strlcpy(isa_adapter.class_dev.class_id, isa_adapter.dev.bus_id, - BUS_ID_SIZE); - err = class_device_register(&isa_adapter.class_dev); - if (err) { - printk(KERN_ERR "i2c-isa: Failed to register class device\n"); - goto exit_remove_name; - } - dev_dbg(&isa_adapter.dev, "%s registered\n", isa_adapter.name); return 0; -exit_remove_name: - device_remove_file(&isa_adapter.dev, &dev_attr_name); exit_unregister: init_completion(&isa_adapter.dev_released); /* Needed? 
*/ device_unregister(&isa_adapter.dev); @@ -201,15 +188,12 @@ static void __exit i2c_isa_exit(void) /* Clean up the sysfs representation */ dev_dbg(&isa_adapter.dev, "Unregistering from sysfs\n"); init_completion(&isa_adapter.dev_released); - init_completion(&isa_adapter.class_dev_released); - class_device_unregister(&isa_adapter.class_dev); device_remove_file(&isa_adapter.dev, &dev_attr_name); device_unregister(&isa_adapter.dev); /* Wait for sysfs to drop all references */ dev_dbg(&isa_adapter.dev, "Waiting for sysfs completion\n"); wait_for_completion(&isa_adapter.dev_released); - wait_for_completion(&isa_adapter.class_dev_released); dev_dbg(&isa_adapter.dev, "%s unregistered\n", isa_adapter.name); } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 21fe1406c8b4..40a744d73e8c 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -123,28 +123,9 @@ struct device_driver i2c_adapter_driver = { /* I2C bus adapters -- one roots each I2C or SMBUS segment */ -static void i2c_adapter_class_dev_release(struct class_device *dev) -{ - struct i2c_adapter *adap = class_dev_to_i2c_adapter(dev); - complete(&adap->class_dev_released); -} - -static ssize_t i2c_adapter_show_name(struct class_device *cdev, char *buf) -{ - struct i2c_adapter *adap = class_dev_to_i2c_adapter(cdev); - return sprintf(buf, "%s\n", adap->name); -} - -static struct class_device_attribute i2c_adapter_attrs[] = { - __ATTR(name, S_IRUGO, i2c_adapter_show_name, NULL), - { }, -}; - struct class i2c_adapter_class = { .owner = THIS_MODULE, .name = "i2c-adapter", - .class_dev_attrs = i2c_adapter_attrs, - .release = &i2c_adapter_class_dev_release, }; static ssize_t show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf) @@ -223,6 +204,7 @@ int i2c_add_adapter(struct i2c_adapter *adap) sprintf(adap->dev.bus_id, "i2c-%d", adap->nr); adap->dev.driver = &i2c_adapter_driver; adap->dev.release = &i2c_adapter_dev_release; + adap->dev.class = &i2c_adapter_class; res = device_register(&adap->dev); if (res) goto out_list; @@ -230,15 +212,6 @@ int i2c_add_adapter(struct i2c_adapter *adap) if (res) goto out_unregister; - /* Add this adapter to the i2c_adapter class */ - memset(&adap->class_dev, 0x00, sizeof(struct class_device)); - adap->class_dev.dev = &adap->dev; - adap->class_dev.class = &i2c_adapter_class; - strlcpy(adap->class_dev.class_id, adap->dev.bus_id, BUS_ID_SIZE); - res = class_device_register(&adap->class_dev); - if (res) - goto out_remove_name; - dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); /* inform drivers of new adapters */ @@ -253,8 +226,6 @@ out_unlock: mutex_unlock(&core_lists); return res; -out_remove_name: - device_remove_file(&adap->dev, &dev_attr_name); out_unregister: init_completion(&adap->dev_released); /* Needed? 
*/ device_unregister(&adap->dev); @@ -314,15 +285,12 @@ int i2c_del_adapter(struct i2c_adapter *adap) /* clean up the sysfs representation */ init_completion(&adap->dev_released); - init_completion(&adap->class_dev_released); - class_device_unregister(&adap->class_dev); device_remove_file(&adap->dev, &dev_attr_name); device_unregister(&adap->dev); list_del(&adap->list); /* wait for sysfs to drop all references */ wait_for_completion(&adap->dev_released); - wait_for_completion(&adap->class_dev_released); /* free dynamically allocated bus id */ idr_remove(&i2c_adapter_idr, adap->nr); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 9428092017e3..7a59dc656657 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -228,17 +228,14 @@ struct i2c_adapter { int timeout; int retries; struct device dev; /* the adapter device */ - struct class_device class_dev; /* the class device */ int nr; struct list_head clients; struct list_head list; char name[I2C_NAME_SIZE]; struct completion dev_released; - struct completion class_dev_released; }; #define dev_to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) -#define class_dev_to_i2c_adapter(d) container_of(d, struct i2c_adapter, class_dev) static inline void *i2c_get_adapdata (struct i2c_adapter *dev) { -- cgit v1.2.3 From b31366f4394f7b1e8e1726ba049f294934db4495 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 1 May 2007 23:26:28 +0200 Subject: i2c: i2c_adapter devices need no driver Kill i2c_adapter_driver as it doesn't make sense and it prevents further i2c-core cleanups. i2c_adapter devices are virtual devices (ex-class devices) and as such they don't need a driver. Signed-off-by: Jean Delvare --- drivers/i2c/busses/i2c-isa.c | 1 - drivers/i2c/i2c-core.c | 14 +------------- include/linux/i2c.h | 1 - 3 files changed, 1 insertion(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/busses/i2c-isa.c b/drivers/i2c/busses/i2c-isa.c index a2e75bc9178b..745830bb8e2b 100644 --- a/drivers/i2c/busses/i2c-isa.c +++ b/drivers/i2c/busses/i2c-isa.c @@ -139,7 +139,6 @@ static int __init i2c_isa_init(void) isa_adapter.nr = ANY_I2C_ISA_BUS; isa_adapter.dev.parent = &platform_bus; sprintf(isa_adapter.dev.bus_id, "i2c-%d", isa_adapter.nr); - isa_adapter.dev.driver = &i2c_adapter_driver; isa_adapter.dev.release = &i2c_adapter_dev_release; isa_adapter.dev.class = &i2c_adapter_class; err = device_register(&isa_adapter.dev); diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 40a744d73e8c..8dca4848ad92 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -113,12 +113,6 @@ void i2c_adapter_dev_release(struct device *dev) complete(&adap->dev_released); } -struct device_driver i2c_adapter_driver = { - .owner = THIS_MODULE, - .name = "i2c_adapter", - .bus = &i2c_bus_type, -}; - /* ------------------------------------------------------------------------- */ /* I2C bus adapters -- one roots each I2C or SMBUS segment */ @@ -202,7 +196,6 @@ int i2c_add_adapter(struct i2c_adapter *adap) "physical device\n", adap->name); } sprintf(adap->dev.bus_id, "i2c-%d", adap->nr); - adap->dev.driver = &i2c_adapter_driver; adap->dev.release = &i2c_adapter_dev_release; adap->dev.class = &i2c_adapter_class; res = device_register(&adap->dev); @@ -575,9 +568,6 @@ static int __init i2c_init(void) int retval; retval = bus_register(&i2c_bus_type); - if (retval) - return retval; - retval = driver_register(&i2c_adapter_driver); if (retval) return retval; return class_register(&i2c_adapter_class); @@ -586,7 +576,6 @@ static 
int __init i2c_init(void) static void __exit i2c_exit(void) { class_unregister(&i2c_adapter_class); - driver_unregister(&i2c_adapter_driver); bus_unregister(&i2c_bus_type); } @@ -1174,9 +1163,8 @@ s32 i2c_smbus_xfer(struct i2c_adapter * adapter, u16 addr, unsigned short flags, } -/* Next four are needed by i2c-isa */ +/* Next three are needed by i2c-isa */ EXPORT_SYMBOL_GPL(i2c_adapter_dev_release); -EXPORT_SYMBOL_GPL(i2c_adapter_driver); EXPORT_SYMBOL_GPL(i2c_adapter_class); EXPORT_SYMBOL_GPL(i2c_bus_type); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 7a59dc656657..47c2a1907372 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -38,7 +38,6 @@ /* --- For i2c-isa ---------------------------------------------------- */ extern void i2c_adapter_dev_release(struct device *dev); -extern struct device_driver i2c_adapter_driver; extern struct class i2c_adapter_class; extern struct bus_type i2c_bus_type; -- cgit v1.2.3 From 2096b956d24c4b5950b808fc23b218425d79ebb1 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 1 May 2007 23:26:28 +0200 Subject: i2c: Shrink struct i2c_client This shrinks the size of "struct i2c_client" by 40 bytes: - Substantially shrinks the string used to identify the chip type - The "flags" don't need to be so big - Removes some internal padding It also adds kerneldoc for that struct, explaining how "name" is really a chip type identifier; it's otherwise potentially confusing. Because the I2C_NAME_SIZE symbol was abused for both i2c_client.name and for i2c_adapter.name, this needed to affect i2c_adapter too. The adapters which used that symbol now use the more-obviously-correct idiom of taking the size of that field. JD: Shorten i2c_adapter.name from 50 to 48 bytes while we're here, to avoid wasting space in padding. 
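The "take the size of the field" idiom mentioned above looks roughly like this in an adapter driver (foo_adapter and foo_smba are hypothetical names, shown only to illustrate the pattern the diffs below apply everywhere):

	/* sized to the field itself, so it stays correct if the field changes again */
	snprintf(foo_adapter.name, sizeof(foo_adapter.name),
		 "SMBus FOO adapter at %04x", foo_smba);

This is why none of the converted drivers below need to know the new length of i2c_adapter.name.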
Signed-off-by: David Brownell Signed-off-by: Jean Delvare --- drivers/i2c/busses/i2c-ali1535.c | 2 +- drivers/i2c/busses/i2c-ali15x3.c | 2 +- drivers/i2c/busses/i2c-amd8111.c | 2 +- drivers/i2c/busses/i2c-i801.c | 2 +- drivers/i2c/busses/i2c-ixp2000.c | 2 +- drivers/i2c/busses/i2c-ixp4xx.c | 2 +- drivers/i2c/busses/i2c-mv64xxx.c | 2 +- drivers/i2c/busses/i2c-nforce2.c | 2 +- drivers/i2c/busses/i2c-pasemi.c | 2 +- drivers/i2c/busses/i2c-piix4.c | 2 +- drivers/i2c/busses/i2c-sis96x.c | 2 +- drivers/i2c/busses/i2c-viapro.c | 2 +- drivers/i2c/busses/scx200_acb.c | 2 +- drivers/media/dvb/b2c2/flexcop-i2c.c | 3 ++- drivers/media/dvb/dvb-usb/dvb-usb-i2c.c | 2 +- drivers/media/dvb/frontends/dibx000_common.c | 4 ++-- drivers/video/intelfb/intelfb_i2c.c | 3 ++- drivers/video/matrox/i2c-matroxfb.c | 2 +- include/linux/i2c.h | 23 ++++++++++++++--------- 19 files changed, 35 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c index 1e277ba5a9f3..f14372ac2fc5 100644 --- a/drivers/i2c/busses/i2c-ali1535.c +++ b/drivers/i2c/busses/i2c-ali1535.c @@ -497,7 +497,7 @@ static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_ /* set up the sysfs linkage to our parent device */ ali1535_adapter.dev.parent = &dev->dev; - snprintf(ali1535_adapter.name, I2C_NAME_SIZE, + snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name), "SMBus ALI1535 adapter at %04x", ali1535_smba); return i2c_add_adapter(&ali1535_adapter); } diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c index e47fe01bf42a..93bf87d70961 100644 --- a/drivers/i2c/busses/i2c-ali15x3.c +++ b/drivers/i2c/busses/i2c-ali15x3.c @@ -492,7 +492,7 @@ static int __devinit ali15x3_probe(struct pci_dev *dev, const struct pci_device_ /* set up the sysfs linkage to our parent device */ ali15x3_adapter.dev.parent = &dev->dev; - snprintf(ali15x3_adapter.name, I2C_NAME_SIZE, + snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name), "SMBus ALI15X3 adapter at %04x", ali15x3_smba); return i2c_add_adapter(&ali15x3_adapter); } diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c index 0c70f8293341..c9fca7b49267 100644 --- a/drivers/i2c/busses/i2c-amd8111.c +++ b/drivers/i2c/busses/i2c-amd8111.c @@ -365,7 +365,7 @@ static int __devinit amd8111_probe(struct pci_dev *dev, } smbus->adapter.owner = THIS_MODULE; - snprintf(smbus->adapter.name, I2C_NAME_SIZE, + snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), "SMBus2 AMD8111 adapter at %04x", smbus->base); smbus->adapter.id = I2C_HW_SMBUS_AMD8111; smbus->adapter.class = I2C_CLASS_HWMON; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index a320e7d82c1f..611b57192c96 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -527,7 +527,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id /* set up the sysfs linkage to our parent device */ i801_adapter.dev.parent = &dev->dev; - snprintf(i801_adapter.name, I2C_NAME_SIZE, + snprintf(i801_adapter.name, sizeof(i801_adapter.name), "SMBus I801 adapter at %04lx", i801_smba); err = i2c_add_adapter(&i801_adapter); if (err) { diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c index efa3ecc5522a..6352121a2827 100644 --- a/drivers/i2c/busses/i2c-ixp2000.c +++ b/drivers/i2c/busses/i2c-ixp2000.c @@ -118,7 +118,7 @@ static int ixp2000_i2c_probe(struct platform_device *plat_dev) drv_data->adapter.id = 
I2C_HW_B_IXP2000, strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name, - I2C_NAME_SIZE); + sizeof(drv_data->adapter.name)); drv_data->adapter.algo_data = &drv_data->algo_data, drv_data->adapter.dev.parent = &plat_dev->dev; diff --git a/drivers/i2c/busses/i2c-ixp4xx.c b/drivers/i2c/busses/i2c-ixp4xx.c index 08e89b83984a..069ed7f3b395 100644 --- a/drivers/i2c/busses/i2c-ixp4xx.c +++ b/drivers/i2c/busses/i2c-ixp4xx.c @@ -127,7 +127,7 @@ static int ixp4xx_i2c_probe(struct platform_device *plat_dev) drv_data->adapter.id = I2C_HW_B_IXP4XX; drv_data->adapter.class = I2C_CLASS_HWMON; strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name, - I2C_NAME_SIZE); + sizeof(drv_data->adapter.name)); drv_data->adapter.algo_data = &drv_data->algo_data; drv_data->adapter.dev.parent = &plat_dev->dev; diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index a3283b907eb8..a55b3335d1be 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -508,7 +508,7 @@ mv64xxx_i2c_probe(struct platform_device *pd) } strlcpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter", - I2C_NAME_SIZE); + sizeof(drv_data->adapter.name)); init_waitqueue_head(&drv_data->waitq); spin_lock_init(&drv_data->lock); diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c index 1514ec5b77f8..cdc67dc914c3 100644 --- a/drivers/i2c/busses/i2c-nforce2.c +++ b/drivers/i2c/busses/i2c-nforce2.c @@ -240,7 +240,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar, smbus->adapter.algo = &smbus_algorithm; smbus->adapter.algo_data = smbus; smbus->adapter.dev.parent = &dev->dev; - snprintf(smbus->adapter.name, I2C_NAME_SIZE, + snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), "SMBus nForce2 adapter at %04x", smbus->base); error = i2c_add_adapter(&smbus->adapter); diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c index bf89eeef74e9..58e32714afb5 100644 --- a/drivers/i2c/busses/i2c-pasemi.c +++ b/drivers/i2c/busses/i2c-pasemi.c @@ -358,7 +358,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev, } smbus->adapter.owner = THIS_MODULE; - snprintf(smbus->adapter.name, I2C_NAME_SIZE, + snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), "PA Semi SMBus adapter at 0x%lx", smbus->base); smbus->adapter.class = I2C_CLASS_HWMON; smbus->adapter.algo = &smbus_algorithm; diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 21b180904085..5a52bf5e3fb0 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -428,7 +428,7 @@ static int __devinit piix4_probe(struct pci_dev *dev, /* set up the sysfs linkage to our parent device */ piix4_adapter.dev.parent = &dev->dev; - snprintf(piix4_adapter.name, I2C_NAME_SIZE, + snprintf(piix4_adapter.name, sizeof(piix4_adapter.name), "SMBus PIIX4 adapter at %04x", piix4_smba); if ((retval = i2c_add_adapter(&piix4_adapter))) { diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c index 4157b0cd604c..dc235bb8e24d 100644 --- a/drivers/i2c/busses/i2c-sis96x.c +++ b/drivers/i2c/busses/i2c-sis96x.c @@ -300,7 +300,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev, /* set up the sysfs linkage to our parent device */ sis96x_adapter.dev.parent = &dev->dev; - snprintf(sis96x_adapter.name, I2C_NAME_SIZE, + snprintf(sis96x_adapter.name, sizeof(sis96x_adapter.name), "SiS96x SMBus adapter at 0x%04x", sis96x_smbus_base); if ((retval = i2c_add_adapter(&sis96x_adapter))) { diff --git 
a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index 03c5fc868548..7a2bc06304fc 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c @@ -404,7 +404,7 @@ found: } vt596_adapter.dev.parent = &pdev->dev; - snprintf(vt596_adapter.name, I2C_NAME_SIZE, + snprintf(vt596_adapter.name, sizeof(vt596_adapter.name), "SMBus Via Pro adapter at %04x", vt596_smba); vt596_pdev = pci_dev_get(pdev); diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index 0b082c5a0195..d816ab089fe2 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -441,7 +441,7 @@ static __init struct scx200_acb_iface *scx200_create_iface(const char *text, adapter = &iface->adapter; i2c_set_adapdata(adapter, iface); - snprintf(adapter->name, I2C_NAME_SIZE, "%s ACB%d", text, index); + snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index); adapter->owner = THIS_MODULE; adapter->id = I2C_HW_SMBUS_SCX200; adapter->algo = &scx200_acb_algorithm; diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c index 5347a406fff7..02a0ea6e1c17 100644 --- a/drivers/media/dvb/b2c2/flexcop-i2c.c +++ b/drivers/media/dvb/b2c2/flexcop-i2c.c @@ -183,7 +183,8 @@ int flexcop_i2c_init(struct flexcop_device *fc) mutex_init(&fc->i2c_mutex); memset(&fc->i2c_adap, 0, sizeof(struct i2c_adapter)); - strncpy(fc->i2c_adap.name, "B2C2 FlexCop device",I2C_NAME_SIZE); + strncpy(fc->i2c_adap.name, "B2C2 FlexCop device", + sizeof(fc->i2c_adap.name)); i2c_set_adapdata(&fc->i2c_adap,fc); diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c index 70df31b0a8a9..088b6dee3a7f 100644 --- a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c +++ b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c @@ -19,7 +19,7 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d) return -EINVAL; } - strncpy(d->i2c_adap.name,d->desc->name,I2C_NAME_SIZE); + strncpy(d->i2c_adap.name, d->desc->name, sizeof(d->i2c_adap.name)); #ifdef I2C_ADAP_CLASS_TV_DIGITAL d->i2c_adap.class = I2C_ADAP_CLASS_TV_DIGITAL, #else diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c index a18c8f45a2ee..315e09e95b0c 100644 --- a/drivers/media/dvb/frontends/dibx000_common.c +++ b/drivers/media/dvb/frontends/dibx000_common.c @@ -105,9 +105,9 @@ struct i2c_adapter * dibx000_get_i2c_adapter(struct dibx000_i2c_master *mst, enu } EXPORT_SYMBOL(dibx000_get_i2c_adapter); -static int i2c_adapter_init(struct i2c_adapter *i2c_adap, struct i2c_algorithm *algo, const char name[I2C_NAME_SIZE], struct dibx000_i2c_master *mst) +static int i2c_adapter_init(struct i2c_adapter *i2c_adap, struct i2c_algorithm *algo, const char *name, struct dibx000_i2c_master *mst) { - strncpy(i2c_adap->name, name, I2C_NAME_SIZE); + strncpy(i2c_adap->name, name, sizeof(i2c_adap->name)); i2c_adap->class = I2C_CLASS_TV_DIGITAL, i2c_adap->algo = algo; i2c_adap->algo_data = NULL; diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c index f4ede5f6b588..61e4c8759b23 100644 --- a/drivers/video/intelfb/intelfb_i2c.c +++ b/drivers/video/intelfb/intelfb_i2c.c @@ -104,7 +104,8 @@ static int intelfb_setup_i2c_bus(struct intelfb_info *dinfo, chan->dinfo = dinfo; chan->reg = reg; - snprintf(chan->adapter.name, I2C_NAME_SIZE, "intelfb %s", name); + snprintf(chan->adapter.name, sizeof(chan->adapter.name), + "intelfb %s", name); chan->adapter.owner = THIS_MODULE; chan->adapter.id = I2C_HW_B_INTELFB; 
chan->adapter.algo_data = &chan->algo; diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/matrox/i2c-matroxfb.c index 5ec718a5fe22..4baab7be58de 100644 --- a/drivers/video/matrox/i2c-matroxfb.c +++ b/drivers/video/matrox/i2c-matroxfb.c @@ -111,7 +111,7 @@ static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo, b->mask.data = data; b->mask.clock = clock; b->adapter = matrox_i2c_adapter_template; - snprintf(b->adapter.name, I2C_NAME_SIZE, name, + snprintf(b->adapter.name, sizeof(b->adapter.name), name, minfo->fbcon.node); i2c_set_adapdata(&b->adapter, b); b->adapter.algo_data = &b->bac; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 47c2a1907372..953e71fb07b4 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -140,25 +140,30 @@ struct i2c_driver { }; #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) -#define I2C_NAME_SIZE 50 - -/* - * i2c_client identifies a single device (i.e. chip) that is connected to an - * i2c bus. The behaviour is defined by the routines of the driver. This - * function is mainly used for lookup & other admin. functions. +#define I2C_NAME_SIZE 20 + +/** + * struct i2c_client - represent an I2C slave device + * @addr: Address used on the I2C bus connected to the parent adapter. + * @name: Indicates the type of the device, usually a chip name that's + * generic enough to hide second-sourcing and compatible revisions. + * @dev: Driver model device node for the slave. + * + * An i2c_client identifies a single device (i.e. chip) connected to an + * i2c bus. The behaviour is defined by the routines of the driver. */ struct i2c_client { - unsigned int flags; /* div., see below */ + unsigned short flags; /* div., see below */ unsigned short addr; /* chip address - NOTE: 7bit */ /* addresses are stored in the */ /* _LOWER_ 7 bits */ + char name[I2C_NAME_SIZE]; struct i2c_adapter *adapter; /* the adapter we sit on */ struct i2c_driver *driver; /* and our access routines */ int usage_count; /* How many accesses currently */ /* to the client */ struct device dev; /* the device structure */ struct list_head list; - char name[I2C_NAME_SIZE]; struct completion released; }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) @@ -231,7 +236,7 @@ struct i2c_adapter { int nr; struct list_head clients; struct list_head list; - char name[I2C_NAME_SIZE]; + char name[48]; struct completion dev_released; }; #define dev_to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) -- cgit v1.2.3 From ef2c8321f5a27ff9ecdae1ee587430cafa495586 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 1 May 2007 23:26:28 +0200 Subject: i2c: Rename dev_to_i2c_adapter() Rename dev_to_i2c_adapter() as to_i2c_adapter(), since the previous syntax was a surprising and needless difference from normal naming conventions in Linux. 
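For reference, the renamed macro is the standard container_of() pattern: given a pointer to the embedded struct device, it recovers the enclosing i2c_adapter. A minimal, illustrative use in a driver-model callback, mirroring what i2c_adapter_dev_release() in the diff below does:

static void foo_adapter_dev_release(struct device *dev)
{
	/* dev is &adap->dev, so container_of() walks back to the adapter */
	struct i2c_adapter *adap = to_i2c_adapter(dev);

	complete(&adap->dev_released);
}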
Signed-off-by: David Brownell Signed-off-by: Jean Delvare --- drivers/i2c/i2c-core.c | 4 ++-- include/linux/i2c.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 1e3e1a5b4d86..ae07b874a5d7 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -111,14 +111,14 @@ struct bus_type i2c_bus_type = { void i2c_adapter_dev_release(struct device *dev) { - struct i2c_adapter *adap = dev_to_i2c_adapter(dev); + struct i2c_adapter *adap = to_i2c_adapter(dev); complete(&adap->dev_released); } static ssize_t show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf) { - struct i2c_adapter *adap = dev_to_i2c_adapter(dev); + struct i2c_adapter *adap = to_i2c_adapter(dev); return sprintf(buf, "%s\n", adap->name); } diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 953e71fb07b4..568dd1007b53 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -239,7 +239,7 @@ struct i2c_adapter { char name[48]; struct completion dev_released; }; -#define dev_to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) +#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) static inline void *i2c_get_adapdata (struct i2c_adapter *dev) { -- cgit v1.2.3 From 209d27c3b1676c0497108f0642c51a08b98a7856 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 1 May 2007 23:26:29 +0200 Subject: i2c: Emulate SMBus block read over I2C Let the I2C bus drivers emulate the SMBus Block Read and Block Process Call transactions if they wish. This requires to define a new message flag, which i2c-core will use to let the underlying I2C bus driver know that the first received byte will specify the length of the read message. Signed-off-by: Jean Delvare --- drivers/i2c/i2c-core.c | 34 ++++++++++++++++++++++++++-------- include/linux/i2c.h | 1 + 2 files changed, 27 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index ae07b874a5d7..fd921ce0b75b 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -590,8 +590,9 @@ int i2c_transfer(struct i2c_adapter * adap, struct i2c_msg *msgs, int num) #ifdef DEBUG for (ret = 0; ret < num; ret++) { dev_dbg(&adap->dev, "master_xfer[%d] %c, addr=0x%02x, " - "len=%d\n", ret, msgs[ret].flags & I2C_M_RD ? - 'R' : 'W', msgs[ret].addr, msgs[ret].len); + "len=%d%s\n", ret, (msgs[ret].flags & I2C_M_RD) + ? 'R' : 'W', msgs[ret].addr, msgs[ret].len, + (msgs[ret].flags & I2C_M_RECV_LEN) ? 
"+" : ""); } #endif @@ -1050,9 +1051,9 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr, break; case I2C_SMBUS_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { - dev_err(&adapter->dev, "Block read not supported " - "under I2C emulation!\n"); - return -1; + msg[1].flags |= I2C_M_RECV_LEN; + msg[1].len = 1; /* block length will be added by + the underlying bus driver */ } else { msg[0].len = data->block[0] + 2; if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 2) { @@ -1066,9 +1067,21 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr, } break; case I2C_SMBUS_BLOCK_PROC_CALL: - dev_dbg(&adapter->dev, "Block process call not supported " - "under I2C emulation!\n"); - return -1; + num = 2; /* Another special case */ + read_write = I2C_SMBUS_READ; + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { + dev_err(&adapter->dev, "%s called with invalid " + "block proc call size (%d)\n", __FUNCTION__, + data->block[0]); + return -1; + } + msg[0].len = data->block[0] + 2; + for (i = 1; i < msg[0].len; i++) + msgbuf0[i] = data->block[i-1]; + msg[1].flags |= I2C_M_RECV_LEN; + msg[1].len = 1; /* block length will be added by + the underlying bus driver */ + break; case I2C_SMBUS_I2C_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { msg[1].len = I2C_SMBUS_BLOCK_MAX; @@ -1132,6 +1145,11 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr, for (i = 0; i < I2C_SMBUS_BLOCK_MAX; i++) data->block[i+1] = msgbuf1[i]; break; + case I2C_SMBUS_BLOCK_DATA: + case I2C_SMBUS_BLOCK_PROC_CALL: + for (i = 0; i < msgbuf1[0] + 1; i++) + data->block[i] = msgbuf1[i]; + break; } return 0; } diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 568dd1007b53..563c9651dd37 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -366,6 +366,7 @@ struct i2c_msg { #define I2C_M_REV_DIR_ADDR 0x2000 #define I2C_M_IGNORE_NAK 0x1000 #define I2C_M_NO_RD_ACK 0x0800 +#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */ __u16 len; /* msg length */ __u8 *buf; /* pointer to msg data */ }; -- cgit v1.2.3 From f75803de6ae9aaebaf096d4590b40503c896eca7 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 1 May 2007 23:26:29 +0200 Subject: i2c-nforce2: Add support for the MCP61 and MCP65 Signed-off-by: Jean Delvare Cc: Hans-Frieder Vogt --- Documentation/i2c/busses/i2c-nforce2 | 2 ++ drivers/i2c/busses/i2c-nforce2.c | 4 ++++ include/linux/pci_ids.h | 2 ++ 3 files changed, 8 insertions(+) (limited to 'include/linux') diff --git a/Documentation/i2c/busses/i2c-nforce2 b/Documentation/i2c/busses/i2c-nforce2 index 7f61fbc03f7f..fae3495bcbaf 100644 --- a/Documentation/i2c/busses/i2c-nforce2 +++ b/Documentation/i2c/busses/i2c-nforce2 @@ -9,6 +9,8 @@ Supported adapters: * nForce4 MCP-04 10de:0034 * nForce4 MCP51 10de:0264 * nForce4 MCP55 10de:0368 + * nForce4 MCP61 10de:03EB + * nForce4 MCP65 10de:0446 Datasheet: not publicly available, but seems to be similar to the AMD-8111 SMBus 2.0 adapter. diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c index cdc67dc914c3..3cd0d63e7b50 100644 --- a/drivers/i2c/busses/i2c-nforce2.c +++ b/drivers/i2c/busses/i2c-nforce2.c @@ -33,6 +33,8 @@ nForce4 MCP-04 0034 nForce4 MCP51 0264 nForce4 MCP55 0368 + nForce MCP61 03EB + nForce MCP65 0446 This driver supports the 2 SMBuses that are included in the MCP of the nForce2/3/4/5xx chipsets. 
@@ -200,6 +202,8 @@ static struct pci_device_id nforce2_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS) }, { 0 } }; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1b0ddbb8a804..5a48e963d06b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1213,11 +1213,13 @@ #define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5 #define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC #define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE #define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS 0x0446 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448 #define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 #define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 -- cgit v1.2.3 From 7b4fbc50fabb810523be522fe7ec5cc40f85c7a1 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 1 May 2007 23:26:30 +0200 Subject: i2c: i2c stack can probe() One of a series of I2C infrastructure updates to support enumeration using the standard Linux driver model. This patch updates probe() and associated hotplug/coldplug support, but not remove(). Nothing yet _uses_ it to create I2C devices, so those hotplug/coldplug mechanisms will be the only externally visible change. This patch will be an overall NOP since the I2C stack doesn't yet create clients/devices except as part of binding them to legacy drivers. Some code is moved earlier in the source code, helping group more of the per-device infrastructure in one place and simplifying handling per-device attributes. Terminology being adopted: "legacy drivers" create devices (i2c_client) themselves, while "new style" ones follow the driver model (the i2c_client is handed to the probe routine). It's an either/or thing; the two models don't mix, and drivers that try mixing them won't even be registered. Signed-off-by: David Brownell Signed-off-by: Jean Delvare --- drivers/i2c/i2c-core.c | 141 ++++++++++++++++++++++++++++++++++--------------- include/linux/i2c.h | 13 ++++- 2 files changed, 110 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index fd921ce0b75b..a2ad83ad0f53 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -44,15 +44,58 @@ static DEFINE_IDR(i2c_adapter_idr); /* ------------------------------------------------------------------------- */ -/* match always succeeds, as we want the probe() to tell if we really accept this match */ static int i2c_device_match(struct device *dev, struct device_driver *drv) { - return 1; + struct i2c_client *client = to_i2c_client(dev); + struct i2c_driver *driver = to_i2c_driver(drv); + + /* make legacy i2c drivers bypass driver model probing entirely; + * such drivers scan each i2c adapter/bus themselves. + */ + if (!driver->probe) + return 0; + + /* new style drivers use the same kind of driver matching policy + * as platform devices or SPI: compare device and driver IDs. 
+ */ + return strcmp(client->driver_name, drv->name) == 0; } +#ifdef CONFIG_HOTPLUG + +/* uevent helps with hotplug: modprobe -q $(MODALIAS) */ +static int i2c_device_uevent(struct device *dev, char **envp, int num_envp, + char *buffer, int buffer_size) +{ + struct i2c_client *client = to_i2c_client(dev); + int i = 0, length = 0; + + /* by definition, legacy drivers can't hotplug */ + if (dev->driver || !client->driver_name) + return 0; + + if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, + "MODALIAS=%s", client->driver_name)) + return -ENOMEM; + envp[i] = NULL; + dev_dbg(dev, "uevent\n"); + return 0; +} + +#else +#define i2c_device_uevent NULL +#endif /* CONFIG_HOTPLUG */ + static int i2c_device_probe(struct device *dev) { - return -ENODEV; + struct i2c_client *client = to_i2c_client(dev); + struct i2c_driver *driver = to_i2c_driver(dev->driver); + + if (!driver->probe) + return -ENODEV; + client->driver = driver; + dev_dbg(dev, "probe\n"); + return driver->probe(client); } static int i2c_device_remove(struct device *dev) @@ -95,9 +138,38 @@ static int i2c_device_resume(struct device * dev) return driver->resume(to_i2c_client(dev)); } +static void i2c_client_release(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + complete(&client->released); +} + +static ssize_t show_client_name(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + return sprintf(buf, "%s\n", client->name); +} + +static ssize_t show_modalias(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + return client->driver_name + ? sprintf(buf, "%s\n", client->driver_name) + : 0; +} + +static struct device_attribute i2c_dev_attrs[] = { + __ATTR(name, S_IRUGO, show_client_name, NULL), + /* modalias helps coldplug: modprobe $(cat .../modalias) */ + __ATTR(modalias, S_IRUGO, show_modalias, NULL), + { }, +}; + struct bus_type i2c_bus_type = { .name = "i2c", + .dev_attrs = i2c_dev_attrs, .match = i2c_device_match, + .uevent = i2c_device_uevent, .probe = i2c_device_probe, .remove = i2c_device_remove, .shutdown = i2c_device_shutdown, @@ -134,31 +206,6 @@ struct class i2c_adapter_class = { }; -static void i2c_client_release(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - complete(&client->released); -} - -static ssize_t show_client_name(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct i2c_client *client = to_i2c_client(dev); - return sprintf(buf, "%s\n", client->name); -} - -/* - * We can't use the DEVICE_ATTR() macro here, as we used the same name for - * an i2c adapter attribute (above). - */ -static struct device_attribute dev_attr_client_name = - __ATTR(name, S_IRUGO, &show_client_name, NULL); - - -/* --------------------------------------------------- - * registering functions - * --------------------------------------------------- - */ - /* ----- * i2c_add_adapter is called from within the algorithm layer, * when a new hw adapter registers. 
A new device is register to be @@ -208,7 +255,7 @@ int i2c_add_adapter(struct i2c_adapter *adap) dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); - /* inform drivers of new adapters */ + /* let legacy drivers scan this bus for matching devices */ list_for_each(item,&drivers) { driver = list_entry(item, struct i2c_driver, list); if (driver->attach_adapter) @@ -292,16 +339,32 @@ int i2c_del_adapter(struct i2c_adapter *adap) } -/* ----- - * What follows is the "upwards" interface: commands for talking to clients, - * which implement the functions to access the physical information of the - * chips. +/* ------------------------------------------------------------------------- */ + +/* + * An i2c_driver is used with one or more i2c_client (device) nodes to access + * i2c slave chips, on a bus instance associated with some i2c_adapter. There + * are two models for binding the driver to its device: "new style" drivers + * follow the standard Linux driver model and just respond to probe() calls + * issued if the driver core sees they match(); "legacy" drivers create device + * nodes themselves. */ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) { int res; + /* new style driver methods can't mix with legacy ones */ + if (driver->probe) { + if (driver->attach_adapter || driver->detach_adapter + || driver->detach_client) { + printk(KERN_WARNING + "i2c-core: driver [%s] is confused\n", + driver->driver.name); + return -EINVAL; + } + } + /* add the driver to the list of i2c drivers in the driver core */ driver->driver.owner = owner; driver->driver.bus = &i2c_bus_type; @@ -315,7 +378,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) list_add_tail(&driver->list,&drivers); pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); - /* now look for instances of driver on our adapters */ + /* legacy drivers scan i2c busses directly */ if (driver->attach_adapter) { struct i2c_adapter *adapter; @@ -380,6 +443,8 @@ int i2c_del_driver(struct i2c_driver *driver) return 0; } +/* ------------------------------------------------------------------------- */ + static int __i2c_check_addr(struct i2c_adapter *adapter, unsigned int addr) { struct list_head *item; @@ -430,9 +495,6 @@ int i2c_attach_client(struct i2c_client *client) res = device_register(&client->dev); if (res) goto out_list; - res = device_create_file(&client->dev, &dev_attr_client_name); - if (res) - goto out_unregister; mutex_unlock(&adapter->clist_lock); if (adapter->client_register) { @@ -445,10 +507,6 @@ int i2c_attach_client(struct i2c_client *client) return 0; -out_unregister: - init_completion(&client->released); /* Needed? */ - device_unregister(&client->dev); - wait_for_completion(&client->released); out_list: list_del(&client->list); dev_err(&adapter->dev, "Failed to attach i2c client %s at 0x%02x " @@ -483,7 +541,6 @@ int i2c_detach_client(struct i2c_client *client) mutex_lock(&adapter->clist_lock); list_del(&client->list); init_completion(&client->released); - device_remove_file(&client->dev, &dev_attr_client_name); device_unregister(&client->dev); mutex_unlock(&adapter->clist_lock); wait_for_completion(&client->released); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 563c9651dd37..8dcccc0f4822 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -113,7 +113,7 @@ struct i2c_driver { * can be used by the driver to test if the bus meets its conditions * & seek for the presence of the chip(s) it supports. 
If found, it * registers the client(s) that are on the bus to the i2c admin. via - * i2c_attach_client. + * i2c_attach_client. (LEGACY I2C DRIVERS ONLY) */ int (*attach_adapter)(struct i2c_adapter *); int (*detach_adapter)(struct i2c_adapter *); @@ -121,10 +121,16 @@ struct i2c_driver { /* tells the driver that a client is about to be deleted & gives it * the chance to remove its private data. Also, if the client struct * has been dynamically allocated by the driver in the function above, - * it must be freed here. + * it must be freed here. (LEGACY I2C DRIVERS ONLY) */ int (*detach_client)(struct i2c_client *); + /* Standard driver model interfaces, for "new style" i2c drivers. + * With the driver model, device enumeration is NEVER done by drivers; + * it's done by infrastructure. (NEW STYLE DRIVERS ONLY) + */ + int (*probe)(struct i2c_client *); + /* driver model interfaces that don't relate to enumeration */ void (*shutdown)(struct i2c_client *); int (*suspend)(struct i2c_client *, pm_message_t mesg); @@ -148,6 +154,8 @@ struct i2c_driver { * @name: Indicates the type of the device, usually a chip name that's * generic enough to hide second-sourcing and compatible revisions. * @dev: Driver model device node for the slave. + * @driver_name: Identifies new-style driver used with this device; also + * used as the module name for hotplug/coldplug modprobe support. * * An i2c_client identifies a single device (i.e. chip) connected to an * i2c bus. The behaviour is defined by the routines of the driver. @@ -163,6 +171,7 @@ struct i2c_client { int usage_count; /* How many accesses currently */ /* to the client */ struct device dev; /* the device structure */ + char driver_name[KOBJ_NAME_LEN]; struct list_head list; struct completion released; }; -- cgit v1.2.3 From a1d9e6e49f4b473a6945a6b553f5070e8c793e0a Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 1 May 2007 23:26:30 +0200 Subject: i2c: i2c stack can remove() More update for new style driver support: add a remove() method, and use it in the relevant code paths. Again, nothing will use this yet since there's nothing to create devices feeding this infrastructure. Signed-off-by: David Brownell Signed-off-by: Jean Delvare --- drivers/i2c/i2c-core.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++---- include/linux/i2c.h | 1 + 2 files changed, 62 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index a2ad83ad0f53..3aac1b5c7cbd 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -41,6 +41,7 @@ static LIST_HEAD(drivers); static DEFINE_MUTEX(core_lists); static DEFINE_IDR(i2c_adapter_idr); +#define is_newstyle_driver(d) ((d)->probe || (d)->remove) /* ------------------------------------------------------------------------- */ @@ -52,7 +53,7 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv) /* make legacy i2c drivers bypass driver model probing entirely; * such drivers scan each i2c adapter/bus themselves. 
*/ - if (!driver->probe) + if (!is_newstyle_driver(driver)) return 0; /* new style drivers use the same kind of driver matching policy @@ -100,7 +101,24 @@ static int i2c_device_probe(struct device *dev) static int i2c_device_remove(struct device *dev) { - return 0; + struct i2c_client *client = to_i2c_client(dev); + struct i2c_driver *driver; + int status; + + if (!dev->driver) + return 0; + + driver = to_i2c_driver(dev->driver); + if (driver->remove) { + dev_dbg(dev, "remove\n"); + status = driver->remove(client); + } else { + dev->driver = NULL; + status = 0; + } + if (status == 0) + client->driver = NULL; + return status; } static void i2c_device_shutdown(struct device *dev) @@ -177,6 +195,26 @@ struct bus_type i2c_bus_type = { .resume = i2c_device_resume, }; +static void i2c_unregister_device(struct i2c_client *client) +{ + struct i2c_adapter *adapter = client->adapter; + struct i2c_driver *driver = client->driver; + + if (driver && !is_newstyle_driver(driver)) { + dev_err(&client->dev, "can't unregister devices " + "with legacy drivers\n"); + WARN_ON(1); + return; + } + + mutex_lock(&adapter->clist_lock); + list_del(&client->list); + mutex_unlock(&adapter->clist_lock); + + device_unregister(&client->dev); +} + + /* ------------------------------------------------------------------------- */ /* I2C bus adapters -- one roots each I2C or SMBUS segment */ @@ -310,9 +348,19 @@ int i2c_del_adapter(struct i2c_adapter *adap) /* detach any active clients. This must be done first, because * it can fail; in which case we give up. */ list_for_each_safe(item, _n, &adap->clients) { + struct i2c_driver *driver; + client = list_entry(item, struct i2c_client, list); + driver = client->driver; - if ((res=client->driver->detach_client(client))) { + /* new style, follow standard driver model */ + if (!driver || is_newstyle_driver(driver)) { + i2c_unregister_device(client); + continue; + } + + /* legacy drivers create and remove clients themselves */ + if ((res = driver->detach_client(client))) { dev_err(&adap->dev, "detach_client failed for client " "[%s] at address 0x%02x\n", client->name, client->addr); @@ -355,7 +403,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) int res; /* new style driver methods can't mix with legacy ones */ - if (driver->probe) { + if (is_newstyle_driver(driver)) { if (driver->attach_adapter || driver->detach_adapter || driver->detach_client) { printk(KERN_WARNING @@ -392,6 +440,10 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) } EXPORT_SYMBOL(i2c_register_driver); +/** + * i2c_del_driver - unregister I2C driver + * @driver: the driver being unregistered + */ int i2c_del_driver(struct i2c_driver *driver) { struct list_head *item1, *item2, *_n; @@ -402,6 +454,10 @@ int i2c_del_driver(struct i2c_driver *driver) mutex_lock(&core_lists); + /* new-style driver? */ + if (is_newstyle_driver(driver)) + goto unregister; + /* Have a look at each adapter, if clients of this driver are still * attached. If so, detach them to be able to kill the driver * afterwards. @@ -434,6 +490,7 @@ int i2c_del_driver(struct i2c_driver *driver) } } + unregister: driver_unregister(&driver->driver); list_del(&driver->list); pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 8dcccc0f4822..6802c3a0a3a3 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -130,6 +130,7 @@ struct i2c_driver { * it's done by infrastructure. 
(NEW STYLE DRIVERS ONLY) */ int (*probe)(struct i2c_client *); + int (*remove)(struct i2c_client *); /* driver model interfaces that don't relate to enumeration */ void (*shutdown)(struct i2c_client *); -- cgit v1.2.3 From 9c1600eda42e52796f49b36cf15b9debcfd09bea Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 1 May 2007 23:26:31 +0200 Subject: i2c: Add i2c_board_info and i2c_new_device() This provides partial support for new-style I2C driver binding. It builds on "struct i2c_board_info" declarations that identify I2C devices on a given board. This is needed on systems with I2C devices that can't be fully probed and/or autoconfigured, such as many embedded Linux configurations where the way a given I2C device is wired may affect how it must be used. There are two models for declaring such devices: * LATE -- using a public function i2c_new_device(). This lets modules declare I2C devices found *AFTER* a given I2C adapter becomes available. For example, a PCI card could create adapters giving access to utility chips on that card, and this would be used to associate those chips with those adapters. * EARLY -- from arch_initcall() level code, using a non-exported function i2c_register_board_info(). This copies the declarations *BEFORE* such an i2c_adapter becomes available, arranging that i2c_new_device() will be called later when i2c-core registers the relevant i2c_adapter. For example, arch/.../.../board-*.c files would declare the I2C devices along with their platform data, and I2C devices would behave much like PNPACPI devices. (That is, both enumerate from board-specific tables.) To match the exported i2c_new_device(), the previously-private function i2c_unregister_device() is now exported. Pending later patches using these new APIs, this is effectively a NOP. Signed-off-by: David Brownell Signed-off-by: Jean Delvare --- drivers/Makefile | 2 +- drivers/i2c/Kconfig | 5 +++ drivers/i2c/Makefile | 1 + drivers/i2c/i2c-boardinfo.c | 90 +++++++++++++++++++++++++++++++++++++++++++++ drivers/i2c/i2c-core.c | 90 +++++++++++++++++++++++++++++++++++++++++++-- drivers/i2c/i2c-core.h | 31 ++++++++++++++++ include/linux/i2c.h | 62 +++++++++++++++++++++++++++++++ 7 files changed, 276 insertions(+), 5 deletions(-) create mode 100644 drivers/i2c/i2c-boardinfo.c create mode 100644 drivers/i2c/i2c-core.h (limited to 'include/linux') diff --git a/drivers/Makefile b/drivers/Makefile index 920c975bb6d4..26ca9031ea49 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -58,7 +58,7 @@ obj-$(CONFIG_GAMEPORT) += input/gameport/ obj-$(CONFIG_INPUT) += input/ obj-$(CONFIG_I2O) += message/ obj-$(CONFIG_RTC_LIB) += rtc/ -obj-$(CONFIG_I2C) += i2c/ +obj-y += i2c/ obj-$(CONFIG_W1) += w1/ obj-$(CONFIG_HWMON) += hwmon/ obj-$(CONFIG_PHONE) += telephony/ diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 11935f66fcd8..74c8518b69e7 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -22,6 +22,11 @@ config I2C This I2C support can also be built as a module. If so, the module will be called i2c-core. +config I2C_BOARDINFO + boolean + depends on I2C + default y + config I2C_CHARDEV tristate "I2C device interface" depends on I2C diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile index 71c5a854ac5d..ba26e6cbe74e 100644 --- a/drivers/i2c/Makefile +++ b/drivers/i2c/Makefile @@ -2,6 +2,7 @@ # Makefile for the i2c core. 
# +obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o obj-$(CONFIG_I2C) += i2c-core.o obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o obj-y += busses/ chips/ algos/ diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c new file mode 100644 index 000000000000..ffb35f09df03 --- /dev/null +++ b/drivers/i2c/i2c-boardinfo.c @@ -0,0 +1,90 @@ +/* + * i2c-boardinfo.h - collect pre-declarations of I2C devices + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include + +#include "i2c-core.h" + + +/* These symbols are exported ONLY FOR the i2c core. + * No other users will be supported. + */ +DEFINE_MUTEX(__i2c_board_lock); +EXPORT_SYMBOL_GPL(__i2c_board_lock); + +LIST_HEAD(__i2c_board_list); +EXPORT_SYMBOL_GPL(__i2c_board_list); + +int __i2c_first_dynamic_bus_num; +EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num); + + +/** + * i2c_register_board_info - statically declare I2C devices + * @busnum: identifies the bus to which these devices belong + * @info: vector of i2c device descriptors + * @len: how many descriptors in the vector; may be zero to reserve + * the specified bus number. + * + * Systems using the Linux I2C driver stack can declare tables of board info + * while they initialize. This should be done in board-specific init code + * near arch_initcall() time, or equivalent, before any I2C adapter driver is + * registered. For example, mainboard init code could define several devices, + * as could the init code for each daughtercard in a board stack. + * + * The I2C devices will be created later, after the adapter for the relevant + * bus has been registered. After that moment, standard driver model tools + * are used to bind "new style" I2C drivers to the devices. The bus number + * for any device declared using this routine is not available for dynamic + * allocation. + * + * The board info passed can safely be __initdata, but be careful of embedded + * pointers (for platform_data, functions, etc) since that won't be copied. 
+ */ +int __init +i2c_register_board_info(int busnum, + struct i2c_board_info const *info, unsigned len) +{ + int status; + + mutex_lock(&__i2c_board_lock); + + /* dynamic bus numbers will be assigned after the last static one */ + if (busnum >= __i2c_first_dynamic_bus_num) + __i2c_first_dynamic_bus_num = busnum + 1; + + for (status = 0; len; len--, info++) { + struct i2c_devinfo *devinfo; + + devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL); + if (!devinfo) { + pr_debug("i2c-core: can't register boardinfo!\n"); + status = -ENOMEM; + break; + } + + devinfo->busnum = busnum; + devinfo->board_info = *info; + list_add_tail(&devinfo->list, &__i2c_board_list); + } + + mutex_unlock(&__i2c_board_lock); + + return status; +} diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 3aac1b5c7cbd..a7dcdc69b9e7 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -35,6 +35,8 @@ #include #include +#include "i2c-core.h" + static LIST_HEAD(adapters); static LIST_HEAD(drivers); @@ -162,6 +164,11 @@ static void i2c_client_release(struct device *dev) complete(&client->released); } +static void i2c_client_dev_release(struct device *dev) +{ + kfree(to_i2c_client(dev)); +} + static ssize_t show_client_name(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); @@ -195,7 +202,60 @@ struct bus_type i2c_bus_type = { .resume = i2c_device_resume, }; -static void i2c_unregister_device(struct i2c_client *client) +/** + * i2c_new_device - instantiate an i2c device for use with a new style driver + * @adap: the adapter managing the device + * @info: describes one I2C device; bus_num is ignored + * + * Create a device to work with a new style i2c driver, where binding is + * handled through driver model probe()/remove() methods. This call is not + * appropriate for use by mainboad initialization logic, which usually runs + * during an arch_initcall() long before any i2c_adapter could exist. + * + * This returns the new i2c client, which may be saved for later use with + * i2c_unregister_device(); or NULL to indicate an error. + */ +struct i2c_client * +i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) +{ + struct i2c_client *client; + int status; + + client = kzalloc(sizeof *client, GFP_KERNEL); + if (!client) + return NULL; + + client->adapter = adap; + + client->dev.platform_data = info->platform_data; + client->flags = info->flags; + client->addr = info->addr; + client->irq = info->irq; + + strlcpy(client->driver_name, info->driver_name, + sizeof(client->driver_name)); + strlcpy(client->name, info->type, sizeof(client->name)); + + /* a new style driver may be bound to this device when we + * return from this function, or any later moment (e.g. maybe + * hotplugging will load the driver module). and the device + * refcount model is the standard driver model one. 
+ */ + status = i2c_attach_client(client); + if (status < 0) { + kfree(client); + client = NULL; + } + return client; +} +EXPORT_SYMBOL_GPL(i2c_new_device); + + +/** + * i2c_unregister_device - reverse effect of i2c_new_device() + * @client: value returned from i2c_new_device() + */ +void i2c_unregister_device(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct i2c_driver *driver = client->driver; @@ -213,6 +273,7 @@ static void i2c_unregister_device(struct i2c_client *client) device_unregister(&client->dev); } +EXPORT_SYMBOL_GPL(i2c_unregister_device); /* ------------------------------------------------------------------------- */ @@ -243,6 +304,22 @@ struct class i2c_adapter_class = { .dev_attrs = i2c_adapter_attrs, }; +static void i2c_scan_static_board_info(struct i2c_adapter *adapter) +{ + struct i2c_devinfo *devinfo; + + mutex_lock(&__i2c_board_lock); + list_for_each_entry(devinfo, &__i2c_board_list, list) { + if (devinfo->busnum == adapter->nr + && !i2c_new_device(adapter, + &devinfo->board_info)) + printk(KERN_ERR "i2c-core: can't create i2c%d-%04x\n", + i2c_adapter_id(adapter), + devinfo->board_info.addr); + } + mutex_unlock(&__i2c_board_lock); +} + /* ----- * i2c_add_adapter is called from within the algorithm layer, @@ -311,7 +388,6 @@ out_list: goto out_unlock; } - int i2c_del_adapter(struct i2c_adapter *adap) { struct list_head *item, *_n; @@ -541,9 +617,15 @@ int i2c_attach_client(struct i2c_client *client) client->usage_count = 0; client->dev.parent = &client->adapter->dev; - client->dev.driver = &client->driver->driver; client->dev.bus = &i2c_bus_type; - client->dev.release = &i2c_client_release; + + if (client->driver) + client->dev.driver = &client->driver->driver; + + if (client->driver && !is_newstyle_driver(client->driver)) + client->dev.release = i2c_client_release; + else + client->dev.release = i2c_client_dev_release; snprintf(&client->dev.bus_id[0], sizeof(client->dev.bus_id), "%d-%04x", i2c_adapter_id(adapter), client->addr); diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h new file mode 100644 index 000000000000..cd5bff874855 --- /dev/null +++ b/drivers/i2c/i2c-core.h @@ -0,0 +1,31 @@ +/* + * i2c-core.h - interfaces internal to the I2C framework + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +struct i2c_devinfo { + struct list_head list; + int busnum; + struct i2c_board_info board_info; +}; + +/* board_lock protects board_list and first_dynamic_bus_num. + * only i2c core components are allowed to use these symbols. 
+ */ +extern struct mutex __i2c_board_lock; +extern struct list_head __i2c_board_list; +extern int __i2c_first_dynamic_bus_num; + diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 6802c3a0a3a3..382a43bf3ad5 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -172,6 +172,7 @@ struct i2c_client { int usage_count; /* How many accesses currently */ /* to the client */ struct device dev; /* the device structure */ + int irq; /* irq issued by device (or -1) */ char driver_name[KOBJ_NAME_LEN]; struct list_head list; struct completion released; @@ -193,6 +194,67 @@ static inline void i2c_set_clientdata (struct i2c_client *dev, void *data) dev_set_drvdata (&dev->dev, data); } +/** + * struct i2c_board_info - template for device creation + * @driver_name: identifies the driver to be bound to the device + * @type: optional chip type information, to initialize i2c_client.name + * @flags: to initialize i2c_client.flags + * @addr: stored in i2c_client.addr + * @platform_data: stored in i2c_client.dev.platform_data + * @irq: stored in i2c_client.irq + + * I2C doesn't actually support hardware probing, although controllers and + * devices may be able to use I2C_SMBUS_QUICK to tell whether or not there's + * a device at a given address. Drivers commonly need more information than + * that, such as chip type, configuration, associated IRQ, and so on. + * + * i2c_board_info is used to build tables of information listing I2C devices + * that are present. This information is used to grow the driver model tree + * for "new style" I2C drivers. For mainboards this is done statically using + * i2c_register_board_info(), where @bus_num represents an adapter that isn't + * yet available. For add-on boards, i2c_new_device() does this dynamically + * with the adapter already known. + */ +struct i2c_board_info { + char driver_name[KOBJ_NAME_LEN]; + char type[I2C_NAME_SIZE]; + unsigned short flags; + unsigned short addr; + void *platform_data; + int irq; +}; + +/** + * I2C_BOARD_INFO - macro used to list an i2c device and its driver + * @driver: identifies the driver to use with the device + * @dev_addr: the device's address on the bus. + * + * This macro initializes essential fields of a struct i2c_board_info, + * declaring what has been provided on a particular board. Optional + * fields (such as the chip type, its associated irq, or device-specific + * platform_data) are provided using conventional syntax. + */ +#define I2C_BOARD_INFO(driver,dev_addr) \ + .driver_name = (driver), .addr = (dev_addr) + + +/* Add-on boards should register/unregister their devices; e.g. a board + * with integrated I2C, a config eeprom, sensors, and a codec that's + * used in conjunction with the primary hardware. + */ +extern struct i2c_client * +i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); + +extern void i2c_unregister_device(struct i2c_client *); + +/* Mainboard arch_initcall() code should register all its I2C devices. + * This is done at arch_initcall time, before declaring any i2c adapters. + * Modules for add-on boards must use other calls. 
+ */ +extern int +i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned n); + + /* * The following structs are for those who like to implement new bus drivers: * i2c_algorithm is the interface to a class of hardware solutions which can -- cgit v1.2.3 From 6e13e641841833cc2aa5baefe89bb04bc388801b Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 1 May 2007 23:26:31 +0200 Subject: i2c: Add i2c_add_numbered_adapter() This adds a call, i2c_add_numbered_adapter(), registering an I2C adapter with a specific bus number and then creating I2C device nodes for any pre-declared devices on that bus. It builds on previous patches adding I2C probe() and remove() support, and that pre-declaration of devices. This completes the core support for "new style" I2C device drivers. Those follow the standard driver model for binding devices to drivers (using probe and remove methods) rather than a legacy model (where the driver tries to autoconfigure each bus, and registers devices itself). Signed-off-by: David Brownell Signed-off-by: Jean Delvare --- drivers/i2c/i2c-core.c | 126 +++++++++++++++++++++++++++++++++++++++---------- include/linux/i2c.h | 1 + 2 files changed, 101 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index a7dcdc69b9e7..c50229bd9f85 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -320,38 +320,19 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter) mutex_unlock(&__i2c_board_lock); } - -/* ----- - * i2c_add_adapter is called from within the algorithm layer, - * when a new hw adapter registers. A new device is register to be - * available for clients. - */ -int i2c_add_adapter(struct i2c_adapter *adap) +static int i2c_register_adapter(struct i2c_adapter *adap) { - int id, res = 0; + int res = 0; struct list_head *item; struct i2c_driver *driver; - mutex_lock(&core_lists); - - if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0) { - res = -ENOMEM; - goto out_unlock; - } - - res = idr_get_new(&i2c_adapter_idr, adap, &id); - if (res < 0) { - if (res == -EAGAIN) - res = -ENOMEM; - goto out_unlock; - } - - adap->nr = id & MAX_ID_MASK; mutex_init(&adap->bus_lock); mutex_init(&adap->clist_lock); - list_add_tail(&adap->list,&adapters); INIT_LIST_HEAD(&adap->clients); + mutex_lock(&core_lists); + list_add_tail(&adap->list, &adapters); + /* Add the adapter to the driver core. * If the parent pointer is not set up, * we add this adapter to the host bus. @@ -370,6 +351,10 @@ int i2c_add_adapter(struct i2c_adapter *adap) dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); + /* create pre-declared device nodes for new-style drivers */ + if (adap->nr < __i2c_first_dynamic_bus_num) + i2c_scan_static_board_info(adap); + /* let legacy drivers scan this bus for matching devices */ list_for_each(item,&drivers) { driver = list_entry(item, struct i2c_driver, list); @@ -388,6 +373,93 @@ out_list: goto out_unlock; } +/** + * i2c_add_adapter - declare i2c adapter, use dynamic bus number + * @adapter: the adapter to add + * + * This routine is used to declare an I2C adapter when its bus number + * doesn't matter. Examples: for I2C adapters dynamically added by + * USB links or PCI plugin cards. + * + * When this returns zero, a new bus number was allocated and stored + * in adap->nr, and the specified adapter became available for clients. + * Otherwise, a negative errno value is returned. 
+ */ +int i2c_add_adapter(struct i2c_adapter *adapter) +{ + int id, res = 0; + +retry: + if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0) + return -ENOMEM; + + mutex_lock(&core_lists); + /* "above" here means "above or equal to", sigh */ + res = idr_get_new_above(&i2c_adapter_idr, adapter, + __i2c_first_dynamic_bus_num, &id); + mutex_unlock(&core_lists); + + if (res < 0) { + if (res == -EAGAIN) + goto retry; + return res; + } + + adapter->nr = id; + return i2c_register_adapter(adapter); +} +EXPORT_SYMBOL(i2c_add_adapter); + +/** + * i2c_add_numbered_adapter - declare i2c adapter, use static bus number + * @adap: the adapter to register (with adap->nr initialized) + * + * This routine is used to declare an I2C adapter when its bus number + * matters. Example: for I2C adapters from system-on-chip CPUs, or + * otherwise built in to the system's mainboard, and where i2c_board_info + * is used to properly configure I2C devices. + * + * If no devices have pre-been declared for this bus, then be sure to + * register the adapter before any dynamically allocated ones. Otherwise + * the required bus ID may not be available. + * + * When this returns zero, the specified adapter became available for + * clients using the bus number provided in adap->nr. Also, the table + * of I2C devices pre-declared using i2c_register_board_info() is scanned, + * and the appropriate driver model device nodes are created. Otherwise, a + * negative errno value is returned. + */ +int i2c_add_numbered_adapter(struct i2c_adapter *adap) +{ + int id; + int status; + + if (adap->nr & ~MAX_ID_MASK) + return -EINVAL; + +retry: + if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0) + return -ENOMEM; + + mutex_lock(&core_lists); + /* "above" here means "above or equal to", sigh; + * we need the "equal to" result to force the result + */ + status = idr_get_new_above(&i2c_adapter_idr, adap, adap->nr, &id); + if (status == 0 && id != adap->nr) { + status = -EBUSY; + idr_remove(&i2c_adapter_idr, id); + } + mutex_unlock(&core_lists); + if (status == -EAGAIN) + goto retry; + + if (status == 0) + status = i2c_register_adapter(adap); + return status; +} +EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter); + int i2c_del_adapter(struct i2c_adapter *adap) { struct list_head *item, *_n; @@ -452,7 +524,7 @@ int i2c_del_adapter(struct i2c_adapter *adap) /* wait for sysfs to drop all references */ wait_for_completion(&adap->dev_released); - /* free dynamically allocated bus id */ + /* free bus id */ idr_remove(&i2c_adapter_idr, adap->nr); dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); @@ -493,6 +565,9 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) driver->driver.owner = owner; driver->driver.bus = &i2c_bus_type; + /* for new style drivers, when registration returns the driver core + * will have called probe() for all matching-but-unbound devices. 
+ */ res = driver_register(&driver->driver); if (res) return res; @@ -1377,7 +1452,6 @@ EXPORT_SYMBOL_GPL(i2c_adapter_dev_release); EXPORT_SYMBOL_GPL(i2c_adapter_class); EXPORT_SYMBOL_GPL(i2c_bus_type); -EXPORT_SYMBOL(i2c_add_adapter); EXPORT_SYMBOL(i2c_del_adapter); EXPORT_SYMBOL(i2c_del_driver); EXPORT_SYMBOL(i2c_attach_client); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 382a43bf3ad5..36d6814a6df4 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -363,6 +363,7 @@ struct i2c_client_address_data { */ extern int i2c_add_adapter(struct i2c_adapter *); extern int i2c_del_adapter(struct i2c_adapter *); +extern int i2c_add_numbered_adapter(struct i2c_adapter *); extern int i2c_register_driver(struct module *, struct i2c_driver *); extern int i2c_del_driver(struct i2c_driver *); -- cgit v1.2.3 From 0f3b48385213355a2d4408bec1b481ffcf0e8638 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 1 May 2007 23:26:31 +0200 Subject: i2c-algo-bit: Add i2c_bit_add_numbered_bus Add i2c_bit_add_numbered_bus(), which is equivalent to i2c_bit_add_bus except that it calls i2c_add_numbered_adapter() at the end instead of i2c_add_adapter(). Signed-off-by: Jean Delvare --- drivers/i2c/algos/i2c-algo-bit.c | 25 ++++++++++++++++++++++++- include/linux/i2c-algo-bit.h | 1 + 2 files changed, 25 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index fcef6ff3d287..fc16f9d268a3 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -540,7 +540,7 @@ static const struct i2c_algorithm i2c_bit_algo = { /* * registering functions to load algorithms at runtime */ -int i2c_bit_add_bus(struct i2c_adapter *adap) +static int i2c_bit_prepare_bus(struct i2c_adapter *adap) { struct i2c_algo_bit_data *bit_adap = adap->algo_data; @@ -558,10 +558,33 @@ int i2c_bit_add_bus(struct i2c_adapter *adap) adap->timeout = 100; /* default values, should */ adap->retries = 3; /* be replaced by defines */ + return 0; +} + +int i2c_bit_add_bus(struct i2c_adapter *adap) +{ + int err; + + err = i2c_bit_prepare_bus(adap); + if (err) + return err; + return i2c_add_adapter(adap); } EXPORT_SYMBOL(i2c_bit_add_bus); +int i2c_bit_add_numbered_bus(struct i2c_adapter *adap) +{ + int err; + + err = i2c_bit_prepare_bus(adap); + if (err) + return err; + + return i2c_add_numbered_adapter(adap); +} +EXPORT_SYMBOL(i2c_bit_add_numbered_bus); + MODULE_AUTHOR("Simon G. Vogl "); MODULE_DESCRIPTION("I2C-Bus bit-banging algorithm"); MODULE_LICENSE("GPL"); diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h index 937da70cb4c4..d91dab886357 100644 --- a/include/linux/i2c-algo-bit.h +++ b/include/linux/i2c-algo-bit.h @@ -44,5 +44,6 @@ struct i2c_algo_bit_data { }; int i2c_bit_add_bus(struct i2c_adapter *); +int i2c_bit_add_numbered_bus(struct i2c_adapter *); #endif /* _LINUX_I2C_ALGO_BIT_H */ -- cgit v1.2.3 From 12b5053ac58709c7d475888bc18d1f61958afc4e Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 1 May 2007 23:26:31 +0200 Subject: i2c: Add i2c_new_probed_device() Add a new helper function to instantiate an i2c device. It is meant as a replacement for i2c_new_device() when you don't know for sure at which address your I2C/SMBus device lives. This happens frequently on TV adapters for example, you know there is a tuner chip on the bus, but depending on the exact board model and revision, it can live at different addresses. 
So, the new i2c_new_probed_device() function will probe the bus according to a list of addresses, and as soon as one of these addresses responds, it will call i2c_new_device() on that one address. This function will make it possible to port the old i2c drivers to the new model quickly. Signed-off-by: Jean Delvare --- drivers/i2c/i2c-core.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/i2c.h | 9 ++++++++ 2 files changed, 72 insertions(+) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 18124398b464..0fd4acbffb78 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1092,6 +1092,69 @@ int i2c_probe(struct i2c_adapter *adapter, } EXPORT_SYMBOL(i2c_probe); +struct i2c_client * +i2c_new_probed_device(struct i2c_adapter *adap, + struct i2c_board_info *info, + unsigned short const *addr_list) +{ + int i; + + /* Stop here if the bus doesn't support probing */ + if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE)) { + dev_err(&adap->dev, "Probing not supported\n"); + return NULL; + } + + mutex_lock(&adap->clist_lock); + for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) { + /* Check address validity */ + if (addr_list[i] < 0x03 || addr_list[i] > 0x77) { + dev_warn(&adap->dev, "Invalid 7-bit address " + "0x%02x\n", addr_list[i]); + continue; + } + + /* Check address availability */ + if (__i2c_check_addr(adap, addr_list[i])) { + dev_dbg(&adap->dev, "Address 0x%02x already in " + "use, not probing\n", addr_list[i]); + continue; + } + + /* Test address responsiveness + The default probe method is a quick write, but it is known + to corrupt the 24RF08 EEPROMs due to a state machine bug, + and could also irreversibly write-protect some EEPROMs, so + for address ranges 0x30-0x37 and 0x50-0x5f, we use a byte + read instead. Also, some bus drivers don't implement + quick write, so we fallback to a byte read it that case + too. */ + if ((addr_list[i] & ~0x07) == 0x30 + || (addr_list[i] & ~0x0f) == 0x50 + || !i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK)) { + if (i2c_smbus_xfer(adap, addr_list[i], 0, + I2C_SMBUS_READ, 0, + I2C_SMBUS_BYTE, NULL) >= 0) + break; + } else { + if (i2c_smbus_xfer(adap, addr_list[i], 0, + I2C_SMBUS_WRITE, 0, + I2C_SMBUS_QUICK, NULL) >= 0) + break; + } + } + mutex_unlock(&adap->clist_lock); + + if (addr_list[i] == I2C_CLIENT_END) { + dev_dbg(&adap->dev, "Probing failed, no device found\n"); + return NULL; + } + + info->addr = addr_list[i]; + return i2c_new_device(adap, info); +} +EXPORT_SYMBOL_GPL(i2c_new_probed_device); + struct i2c_adapter* i2c_get_adapter(int id) { struct i2c_adapter *adapter; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 36d6814a6df4..da95ce79d075 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -245,6 +245,15 @@ struct i2c_board_info { extern struct i2c_client * i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); +/* If you don't know the exact address of an I2C device, use this variant + * instead, which can probe for device presence in a list of possible + * addresses. + */ +extern struct i2c_client * +i2c_new_probed_device(struct i2c_adapter *adap, + struct i2c_board_info *info, + unsigned short const *addr_list); + extern void i2c_unregister_device(struct i2c_client *); /* Mainboard arch_initcall() code should register all its I2C devices. 
-- cgit v1.2.3 From a97f1ed090fc01a5876a7caf2cbdf93470436201 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 1 May 2007 23:26:32 +0200 Subject: i2c: Move i2c-isa-only exported symbol declarations Move the declaration of i2c-isa-only exported symbols to i2c-isa itself, that's the best way to ensure nobody will attempt to use them. Hopefully we'll get rid of the exports themselves soon anyway. Signed-off-by: Jean Delvare --- drivers/i2c/busses/i2c-isa.c | 4 ++++ include/linux/i2c.h | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/busses/i2c-isa.c b/drivers/i2c/busses/i2c-isa.c index 819e8991f276..b0e1370075de 100644 --- a/drivers/i2c/busses/i2c-isa.c +++ b/drivers/i2c/busses/i2c-isa.c @@ -41,6 +41,10 @@ #include #include +/* Exported by i2c-core for i2c-isa only */ +extern void i2c_adapter_dev_release(struct device *dev); +extern struct class i2c_adapter_class; + static u32 isa_func(struct i2c_adapter *adapter); /* This is the actual algorithm we define */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index da95ce79d075..3af7111c6680 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -35,10 +35,6 @@ #include /* for completion */ #include -/* --- For i2c-isa ---------------------------------------------------- */ - -extern void i2c_adapter_dev_release(struct device *dev); -extern struct class i2c_adapter_class; extern struct bus_type i2c_bus_type; /* --- General options ------------------------------------------------ */ -- cgit v1.2.3 From b3e820968ad47219f7d559117a30e85cf96b4e4e Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 1 May 2007 23:26:32 +0200 Subject: i2c: Make i2c_del_driver a void function Make i2c_del_driver a void function, like all other driver removal functions. It always returned 0 even when errors occured, and nobody ever actually checked the return value anyway. And we cannot fail a module removal anyway. Signed-off-by: Jean Delvare --- Documentation/i2c/writing-clients | 5 +---- drivers/i2c/i2c-core.c | 12 +++--------- drivers/macintosh/therm_windtunnel.c | 3 ++- include/linux/i2c.h | 2 +- 4 files changed, 7 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients index e62fbfa1282d..f7e04ec849b1 100644 --- a/Documentation/i2c/writing-clients +++ b/Documentation/i2c/writing-clients @@ -586,10 +586,7 @@ the driver module is usually enough. void foo_cleanup(void) { if (foo_initialized == 1) { - if ((res = i2c_del_driver(&foo_driver))) { - printk("foo: Driver registration failed, module not removed.\n"); - return; - } + i2c_del_driver(&foo_driver); foo_initialized --; } } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 0fd4acbffb78..a58f89a6418a 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -599,14 +599,12 @@ EXPORT_SYMBOL(i2c_register_driver); * i2c_del_driver - unregister I2C driver * @driver: the driver being unregistered */ -int i2c_del_driver(struct i2c_driver *driver) +void i2c_del_driver(struct i2c_driver *driver) { struct list_head *item1, *item2, *_n; struct i2c_client *client; struct i2c_adapter *adap; - int res = 0; - mutex_lock(&core_lists); /* new-style driver? 
*/ @@ -620,11 +618,10 @@ int i2c_del_driver(struct i2c_driver *driver) list_for_each(item1,&adapters) { adap = list_entry(item1, struct i2c_adapter, list); if (driver->detach_adapter) { - if ((res = driver->detach_adapter(adap))) { + if (driver->detach_adapter(adap)) { dev_err(&adap->dev, "detach_adapter failed " "for driver [%s]\n", driver->driver.name); - goto out_unlock; } } else { list_for_each_safe(item2, _n, &adap->clients) { @@ -634,12 +631,11 @@ int i2c_del_driver(struct i2c_driver *driver) dev_dbg(&adap->dev, "detaching client [%s] " "at 0x%02x\n", client->name, client->addr); - if ((res = driver->detach_client(client))) { + if (driver->detach_client(client)) { dev_err(&adap->dev, "detach_client " "failed for client [%s] at " "0x%02x\n", client->name, client->addr); - goto out_unlock; } } } @@ -650,9 +646,7 @@ int i2c_del_driver(struct i2c_driver *driver) list_del(&driver->list); pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name); - out_unlock: mutex_unlock(&core_lists); - return 0; } EXPORT_SYMBOL(i2c_del_driver); diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index 35233de460ad..3d0354e96a97 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -459,7 +459,8 @@ therm_of_probe( struct of_device *dev, const struct of_device_id *match ) static int therm_of_remove( struct of_device *dev ) { - return i2c_del_driver( &g4fan_driver ); + i2c_del_driver( &g4fan_driver ); + return 0; } static struct of_device_id therm_of_match[] = {{ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 3af7111c6680..3fe2ad37da30 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -371,7 +371,7 @@ extern int i2c_del_adapter(struct i2c_adapter *); extern int i2c_add_numbered_adapter(struct i2c_adapter *); extern int i2c_register_driver(struct module *, struct i2c_driver *); -extern int i2c_del_driver(struct i2c_driver *); +extern void i2c_del_driver(struct i2c_driver *); static inline int i2c_add_driver(struct i2c_driver *driver) { -- cgit v1.2.3 From d24ecfcc3953f9c3b833508cd839be614a3f3c64 Mon Sep 17 00:00:00 2001 From: Bryan Wu Date: Tue, 1 May 2007 23:26:32 +0200 Subject: i2c: Blackfin Two Wire Interface driver The i2c linux driver for blackfin architecture which supports blackfin on-chip TWI controller i2c operation. Signed-off-by: Bryan Wu Reviewed-by: Alexey Dobriyan Cc: David Brownell Signed-off-by: Andrew Morton Signed-off-by: Jean Delvare --- MAINTAINERS | 7 + drivers/i2c/busses/Kconfig | 16 + drivers/i2c/busses/Makefile | 1 + drivers/i2c/busses/i2c-bfin-twi.c | 644 ++++++++++++++++++++++++++++++++++++++ include/linux/i2c-id.h | 3 +- 5 files changed, 670 insertions(+), 1 deletion(-) create mode 100644 drivers/i2c/busses/i2c-bfin-twi.c (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index af1c7926c153..2b378ab99fc3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -733,6 +733,13 @@ M: tigran@aivazian.fsnet.co.uk L: linux-kernel@vger.kernel.org S: Maintained +BLACKFIN I2C TWI DRIVER +P: Sonic Zhang +M: sonic.zhang@analog.com +L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only) +W: http://blackfin.uclinux.org/ +S: Supported + BLOCK LAYER P: Jens Axboe M: axboe@kernel.dk diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index d7e6cbb9cbef..62b9363dd40e 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -91,6 +91,22 @@ config I2C_AU1550 This driver can also be built as a module. 
If so, the module will be called i2c-au1550. +config I2C_BLACKFIN_TWI + tristate "Blackfin TWI I2C support" + depends on I2C && (BF534 || BF536 || BF537) + help + This is the TWI I2C device driver for Blackfin 534/536/537. + This driver can also be built as a module. If so, the module + will be called i2c-bfin-twi. + +config I2C_BLACKFIN_TWI_CLK_KHZ + int "Blackfin TWI I2C clock (kHz)" + depends on I2C_BLACKFIN_TWI + range 10 400 + default 50 + help + The unit of the TWI clock is kHz. + config I2C_ELEKTOR tristate "Elektor ISA card" depends on I2C && ISA && BROKEN_ON_SMP diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 290b54018354..e51493cfbee2 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_I2C_AMD756_S4882) += i2c-amd756-s4882.o obj-$(CONFIG_I2C_AMD8111) += i2c-amd8111.o obj-$(CONFIG_I2C_AT91) += i2c-at91.o obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o +obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o obj-$(CONFIG_I2C_I801) += i2c-i801.o diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c new file mode 100644 index 000000000000..6311039dfe60 --- /dev/null +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -0,0 +1,644 @@ +/* + * drivers/i2c/busses/i2c-bfin-twi.c + * + * Description: Driver for Blackfin Two Wire Interface + * + * Author: sonicz + * + * Copyright (c) 2005-2007 Analog Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define POLL_TIMEOUT (2 * HZ) + +/* SMBus mode*/ +#define TWI_I2C_MODE_STANDARD 0x01 +#define TWI_I2C_MODE_STANDARDSUB 0x02 +#define TWI_I2C_MODE_COMBINED 0x04 + +struct bfin_twi_iface { + struct mutex twi_lock; + int irq; + spinlock_t lock; + char read_write; + u8 command; + u8 *transPtr; + int readNum; + int writeNum; + int cur_mode; + int manual_stop; + int result; + int timeout_count; + struct timer_list timeout_timer; + struct i2c_adapter adap; + struct completion complete; +}; + +static struct bfin_twi_iface twi_iface; + +static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface) +{ + unsigned short twi_int_status = bfin_read_TWI_INT_STAT(); + unsigned short mast_stat = bfin_read_TWI_MASTER_STAT(); + + if (twi_int_status & XMTSERV) { + /* Transmit next data */ + if (iface->writeNum > 0) { + bfin_write_TWI_XMT_DATA8(*(iface->transPtr++)); + iface->writeNum--; + } + /* start receive immediately after complete sending in + * combine mode. 
+ */ + else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { + bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() + | MDIR | RSTART); + } else if (iface->manual_stop) + bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() + | STOP); + SSYNC(); + /* Clear status */ + bfin_write_TWI_INT_STAT(XMTSERV); + SSYNC(); + } + if (twi_int_status & RCVSERV) { + if (iface->readNum > 0) { + /* Receive next data */ + *(iface->transPtr) = bfin_read_TWI_RCV_DATA8(); + if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { + /* Change combine mode into sub mode after + * read first data. + */ + iface->cur_mode = TWI_I2C_MODE_STANDARDSUB; + /* Get read number from first byte in block + * combine mode. + */ + if (iface->readNum == 1 && iface->manual_stop) + iface->readNum = *iface->transPtr + 1; + } + iface->transPtr++; + iface->readNum--; + } else if (iface->manual_stop) { + bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() + | STOP); + SSYNC(); + } + /* Clear interrupt source */ + bfin_write_TWI_INT_STAT(RCVSERV); + SSYNC(); + } + if (twi_int_status & MERR) { + bfin_write_TWI_INT_STAT(MERR); + bfin_write_TWI_INT_MASK(0); + bfin_write_TWI_MASTER_STAT(0x3e); + bfin_write_TWI_MASTER_CTL(0); + SSYNC(); + iface->result = -1; + /* if both err and complete int stats are set, return proper + * results. + */ + if (twi_int_status & MCOMP) { + bfin_write_TWI_INT_STAT(MCOMP); + bfin_write_TWI_INT_MASK(0); + bfin_write_TWI_MASTER_CTL(0); + SSYNC(); + /* If it is a quick transfer, only address bug no data, + * not an err, return 1. + */ + if (iface->writeNum == 0 && (mast_stat & BUFRDERR)) + iface->result = 1; + /* If address not acknowledged return -1, + * else return 0. + */ + else if (!(mast_stat & ANAK)) + iface->result = 0; + } + complete(&iface->complete); + return; + } + if (twi_int_status & MCOMP) { + bfin_write_TWI_INT_STAT(MCOMP); + SSYNC(); + if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { + if (iface->readNum == 0) { + /* set the read number to 1 and ask for manual + * stop in block combine mode + */ + iface->readNum = 1; + iface->manual_stop = 1; + bfin_write_TWI_MASTER_CTL( + bfin_read_TWI_MASTER_CTL() + | (0xff << 6)); + } else { + /* set the readd number in other + * combine mode. 
+ */ + bfin_write_TWI_MASTER_CTL( + (bfin_read_TWI_MASTER_CTL() & + (~(0xff << 6))) | + ( iface->readNum << 6)); + } + /* remove restart bit and enable master receive */ + bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() & + ~RSTART); + bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | + MEN | MDIR); + SSYNC(); + } else { + iface->result = 1; + bfin_write_TWI_INT_MASK(0); + bfin_write_TWI_MASTER_CTL(0); + SSYNC(); + complete(&iface->complete); + } + } +} + +/* Interrupt handler */ +static irqreturn_t bfin_twi_interrupt_entry(int irq, void *dev_id) +{ + struct bfin_twi_iface *iface = dev_id; + unsigned long flags; + + spin_lock_irqsave(&iface->lock, flags); + del_timer(&iface->timeout_timer); + bfin_twi_handle_interrupt(iface); + spin_unlock_irqrestore(&iface->lock, flags); + return IRQ_HANDLED; +} + +static void bfin_twi_timeout(unsigned long data) +{ + struct bfin_twi_iface *iface = (struct bfin_twi_iface *)data; + unsigned long flags; + + spin_lock_irqsave(&iface->lock, flags); + bfin_twi_handle_interrupt(iface); + if (iface->result == 0) { + iface->timeout_count--; + if (iface->timeout_count > 0) { + iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; + add_timer(&iface->timeout_timer); + } else { + iface->result = -1; + complete(&iface->complete); + } + } + spin_unlock_irqrestore(&iface->lock, flags); +} + +/* + * Generic i2c master transfer entrypoint + */ +static int bfin_twi_master_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct bfin_twi_iface *iface = adap->algo_data; + struct i2c_msg *pmsg; + int i, ret; + int rc = 0; + + if (!(bfin_read_TWI_CONTROL() & TWI_ENA)) + return -ENXIO; + + mutex_lock(&iface->twi_lock); + + while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) { + mutex_unlock(&iface->twi_lock); + yield(); + mutex_lock(&iface->twi_lock); + } + + ret = 0; + for (i = 0; rc >= 0 && i < num; i++) { + pmsg = &msgs[i]; + if (pmsg->flags & I2C_M_TEN) { + dev_err(&(adap->dev), "i2c-bfin-twi: 10 bits addr " + "not supported !\n"); + rc = -EINVAL; + break; + } + + iface->cur_mode = TWI_I2C_MODE_STANDARD; + iface->manual_stop = 0; + iface->transPtr = pmsg->buf; + iface->writeNum = iface->readNum = pmsg->len; + iface->result = 0; + iface->timeout_count = 10; + /* Set Transmit device address */ + bfin_write_TWI_MASTER_ADDR(pmsg->addr); + + /* FIFO Initiation. Data in FIFO should be + * discarded before start a new operation. + */ + bfin_write_TWI_FIFO_CTL(0x3); + SSYNC(); + bfin_write_TWI_FIFO_CTL(0); + SSYNC(); + + if (pmsg->flags & I2C_M_RD) + iface->read_write = I2C_SMBUS_READ; + else { + iface->read_write = I2C_SMBUS_WRITE; + /* Transmit first data */ + if (iface->writeNum > 0) { + bfin_write_TWI_XMT_DATA8(*(iface->transPtr++)); + iface->writeNum--; + SSYNC(); + } + } + + /* clear int stat */ + bfin_write_TWI_INT_STAT(MERR|MCOMP|XMTSERV|RCVSERV); + + /* Interrupt mask . Enable XMT, RCV interrupt */ + bfin_write_TWI_INT_MASK(MCOMP | MERR | + ((iface->read_write == I2C_SMBUS_READ)? + RCVSERV : XMTSERV)); + SSYNC(); + + if (pmsg->len > 0 && pmsg->len <= 255) + bfin_write_TWI_MASTER_CTL(pmsg->len << 6); + else if (pmsg->len > 255) { + bfin_write_TWI_MASTER_CTL(0xff << 6); + iface->manual_stop = 1; + } else + break; + + iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; + add_timer(&iface->timeout_timer); + + /* Master enable */ + bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | + ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) | + ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? 
FAST : 0)); + SSYNC(); + + wait_for_completion(&iface->complete); + + rc = iface->result; + if (rc == 1) + ret++; + else if (rc == -1) + break; + } + + /* Release mutex */ + mutex_unlock(&iface->twi_lock); + + return ret; +} + +/* + * SMBus type transfer entrypoint + */ + +int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr, + unsigned short flags, char read_write, + u8 command, int size, union i2c_smbus_data *data) +{ + struct bfin_twi_iface *iface = adap->algo_data; + int rc = 0; + + if (!(bfin_read_TWI_CONTROL() & TWI_ENA)) + return -ENXIO; + + mutex_lock(&iface->twi_lock); + + while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) { + mutex_unlock(&iface->twi_lock); + yield(); + mutex_lock(&iface->twi_lock); + } + + iface->writeNum = 0; + iface->readNum = 0; + + /* Prepare datas & select mode */ + switch (size) { + case I2C_SMBUS_QUICK: + iface->transPtr = NULL; + iface->cur_mode = TWI_I2C_MODE_STANDARD; + break; + case I2C_SMBUS_BYTE: + if (data == NULL) + iface->transPtr = NULL; + else { + if (read_write == I2C_SMBUS_READ) + iface->readNum = 1; + else + iface->writeNum = 1; + iface->transPtr = &data->byte; + } + iface->cur_mode = TWI_I2C_MODE_STANDARD; + break; + case I2C_SMBUS_BYTE_DATA: + if (read_write == I2C_SMBUS_READ) { + iface->readNum = 1; + iface->cur_mode = TWI_I2C_MODE_COMBINED; + } else { + iface->writeNum = 1; + iface->cur_mode = TWI_I2C_MODE_STANDARDSUB; + } + iface->transPtr = &data->byte; + break; + case I2C_SMBUS_WORD_DATA: + if (read_write == I2C_SMBUS_READ) { + iface->readNum = 2; + iface->cur_mode = TWI_I2C_MODE_COMBINED; + } else { + iface->writeNum = 2; + iface->cur_mode = TWI_I2C_MODE_STANDARDSUB; + } + iface->transPtr = (u8 *)&data->word; + break; + case I2C_SMBUS_PROC_CALL: + iface->writeNum = 2; + iface->readNum = 2; + iface->cur_mode = TWI_I2C_MODE_COMBINED; + iface->transPtr = (u8 *)&data->word; + break; + case I2C_SMBUS_BLOCK_DATA: + if (read_write == I2C_SMBUS_READ) { + iface->readNum = 0; + iface->cur_mode = TWI_I2C_MODE_COMBINED; + } else { + iface->writeNum = data->block[0] + 1; + iface->cur_mode = TWI_I2C_MODE_STANDARDSUB; + } + iface->transPtr = data->block; + break; + default: + return -1; + } + + iface->result = 0; + iface->manual_stop = 0; + iface->read_write = read_write; + iface->command = command; + iface->timeout_count = 10; + + /* FIFO Initiation. Data in FIFO should be discarded before + * start a new operation. + */ + bfin_write_TWI_FIFO_CTL(0x3); + SSYNC(); + bfin_write_TWI_FIFO_CTL(0); + + /* clear int stat */ + bfin_write_TWI_INT_STAT(MERR|MCOMP|XMTSERV|RCVSERV); + + /* Set Transmit device address */ + bfin_write_TWI_MASTER_ADDR(addr); + SSYNC(); + + iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; + add_timer(&iface->timeout_timer); + + switch (iface->cur_mode) { + case TWI_I2C_MODE_STANDARDSUB: + bfin_write_TWI_XMT_DATA8(iface->command); + bfin_write_TWI_INT_MASK(MCOMP | MERR | + ((iface->read_write == I2C_SMBUS_READ) ? + RCVSERV : XMTSERV)); + SSYNC(); + + if (iface->writeNum + 1 <= 255) + bfin_write_TWI_MASTER_CTL((iface->writeNum + 1) << 6); + else { + bfin_write_TWI_MASTER_CTL(0xff << 6); + iface->manual_stop = 1; + } + /* Master enable */ + bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | + ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? 
FAST : 0)); + break; + case TWI_I2C_MODE_COMBINED: + bfin_write_TWI_XMT_DATA8(iface->command); + bfin_write_TWI_INT_MASK(MCOMP | MERR | RCVSERV | XMTSERV); + SSYNC(); + + if (iface->writeNum > 0) + bfin_write_TWI_MASTER_CTL((iface->writeNum + 1) << 6); + else + bfin_write_TWI_MASTER_CTL(0x1 << 6); + /* Master enable */ + bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | + ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0)); + break; + default: + bfin_write_TWI_MASTER_CTL(0); + if (size != I2C_SMBUS_QUICK) { + /* Don't access xmit data register when this is a + * read operation. + */ + if (iface->read_write != I2C_SMBUS_READ) { + if (iface->writeNum > 0) { + bfin_write_TWI_XMT_DATA8(*(iface->transPtr++)); + if (iface->writeNum <= 255) + bfin_write_TWI_MASTER_CTL(iface->writeNum << 6); + else { + bfin_write_TWI_MASTER_CTL(0xff << 6); + iface->manual_stop = 1; + } + iface->writeNum--; + } else { + bfin_write_TWI_XMT_DATA8(iface->command); + bfin_write_TWI_MASTER_CTL(1 << 6); + } + } else { + if (iface->readNum > 0 && iface->readNum <= 255) + bfin_write_TWI_MASTER_CTL(iface->readNum << 6); + else if (iface->readNum > 255) { + bfin_write_TWI_MASTER_CTL(0xff << 6); + iface->manual_stop = 1; + } else { + del_timer(&iface->timeout_timer); + break; + } + } + } + bfin_write_TWI_INT_MASK(MCOMP | MERR | + ((iface->read_write == I2C_SMBUS_READ) ? + RCVSERV : XMTSERV)); + SSYNC(); + + /* Master enable */ + bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | + ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) | + ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0)); + break; + } + SSYNC(); + + wait_for_completion(&iface->complete); + + rc = (iface->result >= 0) ? 0 : -1; + + /* Release mutex */ + mutex_unlock(&iface->twi_lock); + + return rc; +} + +/* + * Return what the adapter supports + */ +static u32 bfin_twi_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | + I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL | + I2C_FUNC_I2C; +} + + +static struct i2c_algorithm bfin_twi_algorithm = { + .master_xfer = bfin_twi_master_xfer, + .smbus_xfer = bfin_twi_smbus_xfer, + .functionality = bfin_twi_functionality, +}; + + +static int i2c_bfin_twi_suspend(struct platform_device *dev, pm_message_t state) +{ +/* struct bfin_twi_iface *iface = platform_get_drvdata(dev);*/ + + /* Disable TWI */ + bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() & ~TWI_ENA); + SSYNC(); + + return 0; +} + +static int i2c_bfin_twi_resume(struct platform_device *dev) +{ +/* struct bfin_twi_iface *iface = platform_get_drvdata(dev);*/ + + /* Enable TWI */ + bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA); + SSYNC(); + + return 0; +} + +static int i2c_bfin_twi_probe(struct platform_device *dev) +{ + struct bfin_twi_iface *iface = &twi_iface; + struct i2c_adapter *p_adap; + int rc; + + mutex_init(&(iface->twi_lock)); + spin_lock_init(&(iface->lock)); + init_completion(&(iface->complete)); + iface->irq = IRQ_TWI; + + init_timer(&(iface->timeout_timer)); + iface->timeout_timer.function = bfin_twi_timeout; + iface->timeout_timer.data = (unsigned long)iface; + + p_adap = &iface->adap; + p_adap->id = I2C_HW_BLACKFIN; + strlcpy(p_adap->name, dev->name, sizeof(p_adap->name)); + p_adap->algo = &bfin_twi_algorithm; + p_adap->algo_data = iface; + p_adap->class = I2C_CLASS_ALL; + p_adap->dev.parent = &dev->dev; + + rc = request_irq(iface->irq, bfin_twi_interrupt_entry, + IRQF_DISABLED, dev->name, iface); + if (rc) { + 
dev_err(&(p_adap->dev), "i2c-bfin-twi: can't get IRQ %d !\n", + iface->irq); + return -ENODEV; + } + + /* Set TWI internal clock as 10MHz */ + bfin_write_TWI_CONTROL(((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F); + + /* Set Twi interface clock as specified */ + bfin_write_TWI_CLKDIV((( 5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ ) + << 8) | (( 5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ ) + & 0xFF)); + + /* Enable TWI */ + bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA); + SSYNC(); + + rc = i2c_add_adapter(p_adap); + if (rc < 0) + free_irq(iface->irq, iface); + else + platform_set_drvdata(dev, iface); + + return rc; +} + +static int i2c_bfin_twi_remove(struct platform_device *pdev) +{ + struct bfin_twi_iface *iface = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + i2c_del_adapter(&(iface->adap)); + free_irq(iface->irq, iface); + + return 0; +} + +static struct platform_driver i2c_bfin_twi_driver = { + .probe = i2c_bfin_twi_probe, + .remove = i2c_bfin_twi_remove, + .suspend = i2c_bfin_twi_suspend, + .resume = i2c_bfin_twi_resume, + .driver = { + .name = "i2c-bfin-twi", + .owner = THIS_MODULE, + }, +}; + +static int __init i2c_bfin_twi_init(void) +{ + pr_info("I2C: Blackfin I2C TWI driver\n"); + + return platform_driver_register(&i2c_bfin_twi_driver); +} + +static void __exit i2c_bfin_twi_exit(void) +{ + platform_driver_unregister(&i2c_bfin_twi_driver); +} + +MODULE_AUTHOR("Sonic Zhang "); +MODULE_DESCRIPTION("I2C-Bus adapter routines for Blackfin TWI"); +MODULE_LICENSE("GPL"); + +module_init(i2c_bfin_twi_init); +module_exit(i2c_bfin_twi_exit); diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 9c21dc793d7b..0e8da684ce68 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -258,8 +258,9 @@ /* --- MCP107 adapter */ #define I2C_HW_MPC107 0x0d0000 -/* --- Marvell mv64xxx i2c adapter */ +/* --- Embedded adapters */ #define I2C_HW_MV64XXX 0x190000 +#define I2C_HW_BLACKFIN 0x190001 /* ADI Blackfin I2C TWI driver */ /* --- Miscellaneous adapters */ #define I2C_HW_SAA7146 0x060000 /* SAA7146 video decoder bus */ -- cgit v1.2.3 From 424ed67c7dae37e8115e1bebc3261e86a624dff2 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 1 May 2007 23:26:33 +0200 Subject: i2c-algo-bit: Implement a 50/50 SCL duty cycle The original i2c-algo-bit implementation uses a 33/66 SCL duty cycle when bits are being written on the bus. While the I2C specification doesn't forbid it, this prevents us from driving the I2C bus to its max speed, limiting us to 66 kbps max on standard I2C busses. Implementing a 50/50 duty cycle instead lets us max out the bandwidth up to the theoretical max of 100 kbps on standard I2C busses. This is particularly important when large amounts of data need to be transfered over the bus, as is the case with some TV adapters when the firmware is being uploaded. In fact this change even allows, at least in theory, fast-mode I2C support at 125, 166 and 250 kbps. There's no way to reach the theoretical max of 400 kbps with this implementation. But I don't think we want to put efforts in that direction anyway: software-driven I2C is very CPU-intensive and bad for latency. Other timing changes: * Don't set SDA high explicitly on error, we're going to issue a stop condition before we leave anyway. * If an error occurs when sending the slave address, yield the CPU before retrying, and remove the additional delay after the new start condition. 
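To put the bandwidth figures above in concrete terms, a back-of-the-envelope
check (my own arithmetic, not part of the patch, assuming the standard-mode
minimum of udelay = 5 us, where udelay is the half-clock-cycle time):

    old write timing (33/66): SCL low ~ 2 * udelay, SCL high ~ udelay
        bit time ~ 3 * 5 us = 15 us  ->  ~66 kbit/s
    new write timing (50/50): SCL low ~ udelay, SCL high ~ udelay
        bit time ~ 2 * 5 us = 10 us  ->  100 kbit/s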
Signed-off-by: Jean Delvare --- drivers/i2c/algos/i2c-algo-bit.c | 42 +++++++++++++++++++++------------------- include/linux/i2c-algo-bit.h | 6 ++++-- 2 files changed, 26 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index fc16f9d268a3..d9d0ec49e60d 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -57,19 +57,19 @@ static int bit_test; /* see if the line-setting functions work */ static inline void sdalo(struct i2c_algo_bit_data *adap) { setsda(adap,0); - udelay(adap->udelay); + udelay((adap->udelay + 1) / 2); } static inline void sdahi(struct i2c_algo_bit_data *adap) { setsda(adap,1); - udelay(adap->udelay); + udelay((adap->udelay + 1) / 2); } static inline void scllo(struct i2c_algo_bit_data *adap) { setscl(adap,0); - udelay(adap->udelay); + udelay(adap->udelay / 2); } /* @@ -111,18 +111,19 @@ static void i2c_start(struct i2c_algo_bit_data *adap) { /* assert: scl, sda are high */ DEBPROTO(printk("S ")); - sdalo(adap); + setsda(adap, 0); + udelay(adap->udelay); scllo(adap); } static void i2c_repstart(struct i2c_algo_bit_data *adap) { - /* scl, sda may not be high */ + /* assert: scl is low */ DEBPROTO(printk(" Sr ")); - setsda(adap,1); + sdahi(adap); sclhi(adap); - - sdalo(adap); + setsda(adap, 0); + udelay(adap->udelay); scllo(adap); } @@ -133,7 +134,8 @@ static void i2c_stop(struct i2c_algo_bit_data *adap) /* assert: scl is low */ sdalo(adap); sclhi(adap); - sdahi(adap); + setsda(adap, 1); + udelay(adap->udelay); } @@ -156,18 +158,16 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, char c) for ( i=7 ; i>=0 ; i-- ) { sb = c & ( 1 << i ); setsda(adap,sb); - udelay(adap->udelay); + udelay((adap->udelay + 1) / 2); DEBPROTO(printk(KERN_DEBUG "%d",sb!=0)); if (sclhi(adap)<0) { /* timed out */ - sdahi(adap); /* we don't want to block the net */ DEB2(printk(KERN_DEBUG " i2c_outb: 0x%02x, timeout at bit #%d\n", c&0xff, i)); return -ETIMEDOUT; }; /* do arbitration here: * if ( sb && ! getsda(adap) ) -> ouch! Get out of here. */ - setscl(adap, 0 ); - udelay(adap->udelay); + scllo(adap); } sdahi(adap); if (sclhi(adap)<0){ /* timeout */ @@ -204,7 +204,8 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) indata *= 2; if ( getsda(adap) ) indata |= 0x01; - scllo(adap); + setscl(adap, 0); + udelay(i == 7 ? adap->udelay / 2 : adap->udelay); } /* assert: scl is low */ DEB2(printk(KERN_DEBUG "i2c_inb: 0x%02x\n", indata & 0xff)); @@ -315,9 +316,9 @@ static int try_address(struct i2c_adapter *i2c_adap, if (ret == 1 || i == retries) break; i2c_stop(adap); - udelay(5/*adap->udelay*/); - i2c_start(adap); udelay(adap->udelay); + yield(); + i2c_start(adap); } DEB2(if (i) printk(KERN_DEBUG "i2c-algo-bit.o: Used %d tries to %s client at 0x%02x : %s\n", @@ -377,20 +378,21 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) if (msg->flags & I2C_M_NO_RD_ACK) continue; + /* assert: sda is high */ if ( count > 0 ) { /* send ack */ - sdalo(adap); + setsda(adap, 0); + udelay((adap->udelay + 1) / 2); DEBPROTO(printk(" Am ")); } else { - sdahi(adap); /* neg. ack on last byte */ + /* neg. ack on last byte */ + udelay((adap->udelay + 1) / 2); DEBPROTO(printk(" NAm ")); } if (sclhi(adap)<0) { /* timeout */ - sdahi(adap); printk(KERN_ERR "i2c-algo-bit.o: readbytes: Timeout at ack\n"); return -ETIMEDOUT; }; scllo(adap); - sdahi(adap); /* Some SMBus transactions require that we receive the transaction length as the first read byte. 
*/ diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h index d91dab886357..9ee0f800592f 100644 --- a/include/linux/i2c-algo-bit.h +++ b/include/linux/i2c-algo-bit.h @@ -38,8 +38,10 @@ struct i2c_algo_bit_data { int (*getscl) (void *data); /* local settings */ - int udelay; /* half-clock-cycle time in microsecs */ - /* i.e. clock is (500 / udelay) KHz */ + int udelay; /* half clock cycle time in us, + minimum 2 us for fast-mode I2C, + minimum 5 us for standard-mode I2C and SMBus, + maximum 50 us for SMBus */ int timeout; /* in jiffies */ }; -- cgit v1.2.3 From b86a1bc8e39641d0c4676943b77a3486ee296db8 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 1 May 2007 23:26:34 +0200 Subject: i2c: Restore i2c_smbus_read_block_data Add back the i2c_smbus_read_block_data helper function, it is needed by the upcoming lm93 hardware monitoring driver and possibly others. Signed-off-by: Jean Delvare --- drivers/i2c/i2c-core.c | 16 ++++++++++++++++ include/linux/i2c.h | 3 +++ 2 files changed, 19 insertions(+) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index a58f89a6418a..64f8e56d300e 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1298,6 +1298,22 @@ s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value) } EXPORT_SYMBOL(i2c_smbus_write_word_data); +/* Returns the number of read bytes */ +s32 i2c_smbus_read_block_data(struct i2c_client *client, u8 command, + u8 *values) +{ + union i2c_smbus_data data; + + if (i2c_smbus_xfer(client->adapter, client->addr, client->flags, + I2C_SMBUS_READ, command, + I2C_SMBUS_BLOCK_DATA, &data)) + return -1; + + memcpy(values, &data.block[1], data.block[0]); + return data.block[0]; +} +EXPORT_SYMBOL(i2c_smbus_read_block_data); + s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command, u8 length, const u8 *values) { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 3fe2ad37da30..cae7d618030c 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -82,6 +82,9 @@ extern s32 i2c_smbus_write_byte_data(struct i2c_client * client, extern s32 i2c_smbus_read_word_data(struct i2c_client * client, u8 command); extern s32 i2c_smbus_write_word_data(struct i2c_client * client, u8 command, u16 value); +/* Returns the number of read bytes */ +extern s32 i2c_smbus_read_block_data(struct i2c_client *client, + u8 command, u8 *values); extern s32 i2c_smbus_write_block_data(struct i2c_client * client, u8 command, u8 length, const u8 *values); -- cgit v1.2.3 From 1c23af90dc44d05bbb6a3b5246ab664b1f943943 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Tue, 1 May 2007 23:26:34 +0200 Subject: i2c: Bitbanging I2C bus driver using the GPIO API This is a very simple bitbanging I2C bus driver utilizing the new arch-neutral GPIO API. Useful for chips that don't have a built-in I2C controller, additional I2C busses, or testing purposes. To use, include something similar to the following in the board-specific setup code: #include static struct i2c_gpio_platform_data i2c_gpio_data = { .sda_pin = GPIO_PIN_FOO, .scl_pin = GPIO_PIN_BAR, }; static struct platform_device i2c_gpio_device = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = &i2c_gpio_data, }, }; Register this platform_device, set up the I2C pins as GPIO if required and you're ready to go. This will use default values for udelay and timeout, and will work with GPIO hardware that does not support open drain mode, but allows sensing of the SDA and SCL lines even when they are being driven. 
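A hypothetical variant of the example above (not part of this patch): a board
whose GPIO controller drives both lines as true open drain and wants explicit
timing instead of the defaults could fill in the optional fields as well:

static struct i2c_gpio_platform_data i2c_gpio_data = {
	.sda_pin		= GPIO_PIN_FOO,
	.scl_pin		= GPIO_PIN_BAR,
	.udelay			= 5,		/* SCL ~100 kHz */
	.timeout		= HZ / 10,	/* 100 ms clock stretching timeout */
	.sda_is_open_drain	= 1,
	.scl_is_open_drain	= 1,
};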
Signed-off-by: Haavard Skinnemoen Signed-off-by: Jean Delvare --- MAINTAINERS | 5 + drivers/i2c/busses/Kconfig | 8 ++ drivers/i2c/busses/Makefile | 1 + drivers/i2c/busses/i2c-gpio.c | 215 ++++++++++++++++++++++++++++++++++++++++++ include/linux/i2c-gpio.h | 38 ++++++++ 5 files changed, 267 insertions(+) create mode 100644 drivers/i2c/busses/i2c-gpio.c create mode 100644 include/linux/i2c-gpio.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index 2b378ab99fc3..4da678505788 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1466,6 +1466,11 @@ L: linux-scsi@vger.kernel.org W: http://www.icp-vortex.com/ S: Supported +GENERIC GPIO I2C DRIVER +P: Haavard Skinnemoen +M: hskinnemoen@atmel.com +S: Supported + GENERIC HDLC DRIVER, N2, C101, PCI200SYN and WANXL DRIVERS P: Krzysztof Halasa M: khc@pm.waw.pl diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 8ccf6e4e91d0..33a8a67f161b 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -117,6 +117,14 @@ config I2C_ELEKTOR This support is also available as a module. If so, the module will be called i2c-elektor. +config I2C_GPIO + tristate "GPIO-based bitbanging I2C" + depends on GENERIC_GPIO + select I2C_ALGOBIT + help + This is a very simple bitbanging I2C driver utilizing the + arch-neutral GPIO API to control the SCL and SDA lines. + config I2C_HYDRA tristate "CHRP Apple Hydra Mac I/O I2C interface" depends on PCI && PPC_CHRP && EXPERIMENTAL diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index e51493cfbee2..5689165f6d6a 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_I2C_AT91) += i2c-at91.o obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o +obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o obj-$(CONFIG_I2C_I801) += i2c-i801.o obj-$(CONFIG_I2C_I810) += i2c-i810.o diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c new file mode 100644 index 000000000000..a7dd54654a9a --- /dev/null +++ b/drivers/i2c/busses/i2c-gpio.c @@ -0,0 +1,215 @@ +/* + * Bitbanging I2C bus driver using the GPIO API + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include + +#include + +/* Toggle SDA by changing the direction of the pin */ +static void i2c_gpio_setsda_dir(void *data, int state) +{ + struct i2c_gpio_platform_data *pdata = data; + + if (state) + gpio_direction_input(pdata->sda_pin); + else + gpio_direction_output(pdata->sda_pin, 0); +} + +/* + * Toggle SDA by changing the output value of the pin. This is only + * valid for pins configured as open drain (i.e. setting the value + * high effectively turns off the output driver.) + */ +static void i2c_gpio_setsda_val(void *data, int state) +{ + struct i2c_gpio_platform_data *pdata = data; + + gpio_set_value(pdata->sda_pin, state); +} + +/* Toggle SCL by changing the direction of the pin. */ +static void i2c_gpio_setscl_dir(void *data, int state) +{ + struct i2c_gpio_platform_data *pdata = data; + + if (state) + gpio_direction_input(pdata->scl_pin); + else + gpio_direction_output(pdata->scl_pin, 0); +} + +/* + * Toggle SCL by changing the output value of the pin. 
This is used + * for pins that are configured as open drain and for output-only + * pins. The latter case will break the i2c protocol, but it will + * often work in practice. + */ +static void i2c_gpio_setscl_val(void *data, int state) +{ + struct i2c_gpio_platform_data *pdata = data; + + gpio_set_value(pdata->scl_pin, state); +} + +int i2c_gpio_getsda(void *data) +{ + struct i2c_gpio_platform_data *pdata = data; + + return gpio_get_value(pdata->sda_pin); +} + +int i2c_gpio_getscl(void *data) +{ + struct i2c_gpio_platform_data *pdata = data; + + return gpio_get_value(pdata->scl_pin); +} + +static int __init i2c_gpio_probe(struct platform_device *pdev) +{ + struct i2c_gpio_platform_data *pdata; + struct i2c_algo_bit_data *bit_data; + struct i2c_adapter *adap; + int ret; + + pdata = pdev->dev.platform_data; + if (!pdata) + return -ENXIO; + + ret = -ENOMEM; + adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL); + if (!adap) + goto err_alloc_adap; + bit_data = kzalloc(sizeof(struct i2c_algo_bit_data), GFP_KERNEL); + if (!bit_data) + goto err_alloc_bit_data; + + ret = gpio_request(pdata->sda_pin, "sda"); + if (ret) + goto err_request_sda; + ret = gpio_request(pdata->scl_pin, "scl"); + if (ret) + goto err_request_scl; + + if (pdata->sda_is_open_drain) { + gpio_direction_output(pdata->sda_pin, 1); + bit_data->setsda = i2c_gpio_setsda_val; + } else { + gpio_direction_input(pdata->sda_pin); + bit_data->setsda = i2c_gpio_setsda_dir; + } + + if (pdata->scl_is_open_drain || pdata->scl_is_output_only) { + gpio_direction_output(pdata->scl_pin, 1); + bit_data->setscl = i2c_gpio_setscl_val; + } else { + gpio_direction_input(pdata->scl_pin); + bit_data->setscl = i2c_gpio_setscl_dir; + } + + if (!pdata->scl_is_output_only) + bit_data->getscl = i2c_gpio_getscl; + bit_data->getsda = i2c_gpio_getsda; + + if (pdata->udelay) + bit_data->udelay = pdata->udelay; + else if (pdata->scl_is_output_only) + bit_data->udelay = 50; /* 10 kHz */ + else + bit_data->udelay = 5; /* 100 kHz */ + + if (pdata->timeout) + bit_data->timeout = pdata->timeout; + else + bit_data->timeout = HZ / 10; /* 100 ms */ + + bit_data->data = pdata; + + adap->owner = THIS_MODULE; + snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id); + adap->algo_data = bit_data; + adap->dev.parent = &pdev->dev; + + ret = i2c_bit_add_bus(adap); + if (ret) + goto err_add_bus; + + platform_set_drvdata(pdev, adap); + + dev_info(&pdev->dev, "using pins %u (SDA) and %u (SCL%s)\n", + pdata->sda_pin, pdata->scl_pin, + pdata->scl_is_output_only + ? 
", no clock stretching" : ""); + + return 0; + +err_add_bus: + gpio_free(pdata->scl_pin); +err_request_scl: + gpio_free(pdata->sda_pin); +err_request_sda: + kfree(bit_data); +err_alloc_bit_data: + kfree(adap); +err_alloc_adap: + return ret; +} + +static int __exit i2c_gpio_remove(struct platform_device *pdev) +{ + struct i2c_gpio_platform_data *pdata; + struct i2c_adapter *adap; + + adap = platform_get_drvdata(pdev); + pdata = pdev->dev.platform_data; + + i2c_del_adapter(adap); + gpio_free(pdata->scl_pin); + gpio_free(pdata->sda_pin); + kfree(adap->algo_data); + kfree(adap); + + return 0; +} + +static struct platform_driver i2c_gpio_driver = { + .driver = { + .name = "i2c-gpio", + .owner = THIS_MODULE, + }, + .remove = __exit_p(i2c_gpio_remove), +}; + +static int __init i2c_gpio_init(void) +{ + int ret; + + ret = platform_driver_probe(&i2c_gpio_driver, i2c_gpio_probe); + if (ret) + printk(KERN_ERR "i2c-gpio: probe failed: %d\n", ret); + + return ret; +} +module_init(i2c_gpio_init); + +static void __exit i2c_gpio_exit(void) +{ + platform_driver_unregister(&i2c_gpio_driver); +} +module_exit(i2c_gpio_exit); + +MODULE_AUTHOR("Haavard Skinnemoen "); +MODULE_DESCRIPTION("Platform-independent bitbanging I2C driver"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/i2c-gpio.h b/include/linux/i2c-gpio.h new file mode 100644 index 000000000000..c1bcb1f1d73b --- /dev/null +++ b/include/linux/i2c-gpio.h @@ -0,0 +1,38 @@ +/* + * i2c-gpio interface to platform code + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_I2C_GPIO_H +#define _LINUX_I2C_GPIO_H + +/** + * struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio + * @sda_pin: GPIO pin ID to use for SDA + * @scl_pin: GPIO pin ID to use for SCL + * @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz + * @timeout: clock stretching timeout in jiffies. If the slave keeps + * SCL low for longer than this, the transfer will time out. + * @sda_is_open_drain: SDA is configured as open drain, i.e. the pin + * isn't actively driven high when setting the output value high. + * gpio_get_value() must return the actual pin state even if the + * pin is configured as an output. + * @scl_is_open_drain: SCL is set up as open drain. Same requirements + * as for sda_is_open_drain apply. + * @scl_is_output_only: SCL output drivers cannot be turned off. + */ +struct i2c_gpio_platform_data { + unsigned int sda_pin; + unsigned int scl_pin; + int udelay; + int timeout; + unsigned int sda_is_open_drain:1; + unsigned int scl_is_open_drain:1; + unsigned int scl_is_output_only:1; +}; + +#endif /* _LINUX_I2C_GPIO_H */ -- cgit v1.2.3 From 32e3983fe590ac4cd70c7728eb330d43cef031a7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 24 Mar 2007 14:35:34 +1100 Subject: [CRYPTO] api: Add async block cipher interface This patch adds the frontend interface for asynchronous block ciphers. In addition to the usual block cipher parameters, there is a callback function pointer and a data pointer. The callback will be invoked only if the encrypt/decrypt handlers return -EINPROGRESS. In other words, if the return value of zero the completion handler (or the equivalent code) needs to be invoked by the caller. The request structure is allocated and freed by the caller. Its size is determined by calling crypto_ablkcipher_reqsize(). 
The helpers ablkcipher_request_alloc/ablkcipher_request_free can be used to manage the memory for a request. Signed-off-by: Herbert Xu --- crypto/blkcipher.c | 70 +++++++++++++++-- include/linux/crypto.h | 199 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 260 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index bf459179efe3..8edf40c835a7 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -349,13 +349,48 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, return cipher->setkey(tfm, key, keylen); } +static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + return setkey(crypto_ablkcipher_tfm(tfm), key, keylen); +} + +static int async_encrypt(struct ablkcipher_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + struct blkcipher_desc desc = { + .tfm = __crypto_blkcipher_cast(tfm), + .info = req->info, + .flags = req->base.flags, + }; + + + return alg->encrypt(&desc, req->dst, req->src, req->nbytes); +} + +static int async_decrypt(struct ablkcipher_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + struct blkcipher_desc desc = { + .tfm = __crypto_blkcipher_cast(tfm), + .info = req->info, + .flags = req->base.flags, + }; + + return alg->decrypt(&desc, req->dst, req->src, req->nbytes); +} + static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) { struct blkcipher_alg *cipher = &alg->cra_blkcipher; unsigned int len = alg->cra_ctxsize; - if (cipher->ivsize) { + type ^= CRYPTO_ALG_ASYNC; + mask &= CRYPTO_ALG_ASYNC; + if ((type & mask) && cipher->ivsize) { len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); len += cipher->ivsize; } @@ -363,16 +398,26 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type, return len; } -static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm) +{ + struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher; + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + + crt->setkey = async_setkey; + crt->encrypt = async_encrypt; + crt->decrypt = async_decrypt; + crt->ivsize = alg->ivsize; + + return 0; +} + +static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm) { struct blkcipher_tfm *crt = &tfm->crt_blkcipher; struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1; unsigned long addr; - if (alg->ivsize > PAGE_SIZE / 8) - return -EINVAL; - crt->setkey = setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; @@ -385,6 +430,21 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) return 0; } +static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +{ + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + + if (alg->ivsize > PAGE_SIZE / 8) + return -EINVAL; + + type ^= CRYPTO_ALG_ASYNC; + mask &= CRYPTO_ALG_ASYNC; + if (type & mask) + return crypto_init_blkcipher_ops_sync(tfm); + else + return crypto_init_blkcipher_ops_async(tfm); +} + static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) __attribute__ ((unused)); static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 
779aa78ee643..d4d05313280c 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -56,6 +56,7 @@ #define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 +#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 #define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 #define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000 @@ -88,11 +89,37 @@ #endif struct scatterlist; +struct crypto_ablkcipher; +struct crypto_async_request; struct crypto_blkcipher; struct crypto_hash; struct crypto_tfm; struct crypto_type; +typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + + u32 flags; +}; + +struct ablkcipher_request { + struct crypto_async_request base; + + unsigned int nbytes; + + void *info; + + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + struct blkcipher_desc { struct crypto_blkcipher *tfm; void *info; @@ -232,6 +259,15 @@ static inline int crypto_has_alg(const char *name, u32 type, u32 mask) * crypto_free_*(), as well as the various helpers below. */ +struct ablkcipher_tfm { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + unsigned int ivsize; + unsigned int reqsize; +}; + struct blkcipher_tfm { void *iv; int (*setkey)(struct crypto_tfm *tfm, const u8 *key, @@ -290,6 +326,7 @@ struct compress_tfm { u8 *dst, unsigned int *dlen); }; +#define crt_ablkcipher crt_u.ablkcipher #define crt_blkcipher crt_u.blkcipher #define crt_cipher crt_u.cipher #define crt_hash crt_u.hash @@ -300,6 +337,7 @@ struct crypto_tfm { u32 crt_flags; union { + struct ablkcipher_tfm ablkcipher; struct blkcipher_tfm blkcipher; struct cipher_tfm cipher; struct hash_tfm hash; @@ -311,6 +349,10 @@ struct crypto_tfm { void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; }; +struct crypto_ablkcipher { + struct crypto_tfm base; +}; + struct crypto_blkcipher { struct crypto_tfm base; }; @@ -411,6 +453,155 @@ static inline unsigned int crypto_tfm_ctx_alignment(void) /* * API wrappers. 
*/ +static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( + struct crypto_tfm *tfm) +{ + return (struct crypto_ablkcipher *)tfm; +} + +static inline struct crypto_ablkcipher *crypto_alloc_ablkcipher( + const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_ablkcipher_cast( + crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_ablkcipher_tfm( + struct crypto_ablkcipher *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) +{ + crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); +} + +static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, + u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( + struct crypto_ablkcipher *tfm) +{ + return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; +} + +static inline unsigned int crypto_ablkcipher_ivsize( + struct crypto_ablkcipher *tfm) +{ + return crypto_ablkcipher_crt(tfm)->ivsize; +} + +static inline unsigned int crypto_ablkcipher_blocksize( + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm)); +} + +static inline unsigned int crypto_ablkcipher_alignmask( + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm)); +} + +static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm)); +} + +static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags); +} + +static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); +} + +static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_ablkcipher_crt(tfm)->setkey(tfm, key, keylen); +} + +static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( + struct ablkcipher_request *req) +{ + return __crypto_ablkcipher_cast(req->base.tfm); +} + +static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + return crt->encrypt(req); +} + +static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + return crt->decrypt(req); +} + +static inline int crypto_ablkcipher_reqsize(struct crypto_ablkcipher *tfm) +{ + return crypto_ablkcipher_crt(tfm)->reqsize; +} + +static inline struct ablkcipher_request *ablkcipher_request_alloc( + struct crypto_ablkcipher *tfm, gfp_t gfp) +{ + struct ablkcipher_request *req; + + req = kmalloc(sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(tfm), gfp); + + if (likely(req)) + req->base.tfm = crypto_ablkcipher_tfm(tfm); + + return req; +} + +static inline void ablkcipher_request_free(struct ablkcipher_request *req) +{ + kfree(req); +} + +static inline void ablkcipher_request_set_callback( + struct ablkcipher_request *req, + u32 flags, crypto_completion_t complete, void *data) +{ + req->base.complete = complete; + req->base.data = data; + req->base.flags = 
flags; +} + +static inline void ablkcipher_request_set_crypt( + struct ablkcipher_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int nbytes, void *iv) +{ + req->src = src; + req->dst = dst; + req->nbytes = nbytes; + req->info = iv; +} + static inline struct crypto_blkcipher *__crypto_blkcipher_cast( struct crypto_tfm *tfm) { @@ -427,9 +618,9 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast( static inline struct crypto_blkcipher *crypto_alloc_blkcipher( const char *alg_name, u32 type, u32 mask) { - type &= ~CRYPTO_ALG_TYPE_MASK; + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; + mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC; return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); } @@ -447,9 +638,9 @@ static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) { - type &= ~CRYPTO_ALG_TYPE_MASK; + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; + mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC; return crypto_has_alg(alg_name, type, mask); } -- cgit v1.2.3 From ebc610e5bc76df073221e64e86c3f7533a09ea40 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 1 Jan 2007 18:37:02 +1100 Subject: [CRYPTO] templates: Pass type/mask when creating instances This patch passes the type/mask along when constructing instances of templates. This is in preparation for templates that may support multiple types of instances depending on what is requested. For example, the planned software async crypto driver will use this construct. For the moment this allows us to check whether the instance constructed is of the correct type and avoid returning success if the type does not match. 
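For orientation, a condensed sketch (my own, mirroring the cbc/ecb/pcbc
changes below) of the pattern a template's ->alloc() now follows; the names
are placeholders:

static struct crypto_instance *example_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	/* Refuse to build an instance whose type does not match what
	 * the caller asked for. */
	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	/* Fetch the underlying algorithm from the attribute list instead
	 * of parsing a raw parameter blob. */
	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_PTR(PTR_ERR(alg));

	inst = crypto_alloc_instance("example", alg);
	crypto_mod_put(alg);
	return inst;
}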
Signed-off-by: Herbert Xu --- crypto/algapi.c | 42 ++++++++++++++++++++++++++++++++++++------ crypto/cbc.c | 11 ++++++++--- crypto/cryptomgr.c | 28 ++++++++++++++++++---------- crypto/ecb.c | 11 ++++++++--- crypto/hmac.c | 11 ++++++++--- crypto/lrw.c | 11 ++++++++--- crypto/pcbc.c | 11 ++++++++--- crypto/xcbc.c | 12 +++++++++--- include/crypto/algapi.h | 8 +++++--- include/linux/crypto.h | 9 +++++++++ 10 files changed, 117 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/crypto/algapi.c b/crypto/algapi.c index f7d2185b2c8f..491205e11cbe 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -425,15 +425,45 @@ int crypto_unregister_notifier(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(crypto_unregister_notifier); -struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len, - u32 type, u32 mask) +struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) { - struct rtattr *rta = param; + struct rtattr *rta = tb[CRYPTOA_TYPE - 1]; + struct crypto_attr_type *algt; + + if (!rta) + return ERR_PTR(-ENOENT); + if (RTA_PAYLOAD(rta) < sizeof(*algt)) + return ERR_PTR(-EINVAL); + + algt = RTA_DATA(rta); + + return algt; +} +EXPORT_SYMBOL_GPL(crypto_get_attr_type); + +int crypto_check_attr_type(struct rtattr **tb, u32 type) +{ + struct crypto_attr_type *algt; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ type) & algt->mask) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_check_attr_type); + +struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask) +{ + struct rtattr *rta = tb[CRYPTOA_ALG - 1]; struct crypto_attr_alg *alga; - if (!RTA_OK(rta, len)) - return ERR_PTR(-EBADR); - if (rta->rta_type != CRYPTOA_ALG || RTA_PAYLOAD(rta) < sizeof(*alga)) + if (!rta) + return ERR_PTR(-ENOENT); + if (RTA_PAYLOAD(rta) < sizeof(*alga)) return ERR_PTR(-EINVAL); alga = RTA_DATA(rta); diff --git a/crypto/cbc.c b/crypto/cbc.c index 136fea7e7000..1f2649e13b42 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -275,13 +275,18 @@ static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_cbc_alloc(void *param, unsigned int len) +static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c index 2ebffb84f1d9..7a3df9b41218 100644 --- a/crypto/cryptomgr.c +++ b/crypto/cryptomgr.c @@ -26,14 +26,19 @@ struct cryptomgr_param { struct work_struct work; + struct rtattr *tb[CRYPTOA_MAX]; + + struct { + struct rtattr attr; + struct crypto_attr_type data; + } type; + struct { struct rtattr attr; struct crypto_attr_alg data; } alg; struct { - u32 type; - u32 mask; char name[CRYPTO_MAX_ALG_NAME]; } larval; @@ -53,7 +58,7 @@ static void cryptomgr_probe(struct work_struct *work) goto err; do { - inst = tmpl->alloc(¶m->alg, sizeof(param->alg)); + inst = tmpl->alloc(param->tb); if (IS_ERR(inst)) err = PTR_ERR(inst); else if ((err = crypto_register_instance(tmpl, inst))) @@ -70,8 +75,8 @@ out: return; err: - crypto_larval_error(param->larval.name, param->larval.type, - param->larval.mask); + 
crypto_larval_error(param->larval.name, param->type.data.type, + param->type.data.mask); goto out; } @@ -82,7 +87,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) const char *p; unsigned int len; - param = kmalloc(sizeof(*param), GFP_KERNEL); + param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) goto err; @@ -94,7 +99,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) goto err_free_param; memcpy(param->template, name, len); - param->template[len] = 0; name = p + 1; for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++) @@ -104,14 +108,18 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) if (!len || *p != ')' || p[1]) goto err_free_param; + param->type.attr.rta_len = sizeof(param->type); + param->type.attr.rta_type = CRYPTOA_TYPE; + param->type.data.type = larval->alg.cra_flags; + param->type.data.mask = larval->mask; + param->tb[CRYPTOA_TYPE - 1] = ¶m->type.attr; + param->alg.attr.rta_len = sizeof(param->alg); param->alg.attr.rta_type = CRYPTOA_ALG; memcpy(param->alg.data.name, name, len); - param->alg.data.name[len] = 0; + param->tb[CRYPTOA_ALG - 1] = ¶m->alg.attr; memcpy(param->larval.name, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); - param->larval.type = larval->alg.cra_flags; - param->larval.mask = larval->mask; INIT_WORK(¶m->work, cryptomgr_probe); schedule_work(¶m->work); diff --git a/crypto/ecb.c b/crypto/ecb.c index 839a0aed8c22..6310387a872c 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -115,13 +115,18 @@ static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_ecb_alloc(void *param, unsigned int len) +static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/hmac.c b/crypto/hmac.c index 44187c5ee593..8802fb6dd5a6 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -197,13 +197,18 @@ static void hmac_free(struct crypto_instance *inst) kfree(inst); } -static struct crypto_instance *hmac_alloc(void *param, unsigned int len) +static struct crypto_instance *hmac_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, + CRYPTO_ALG_TYPE_HASH_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/lrw.c b/crypto/lrw.c index b4105080ac7a..621095db28b3 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -228,13 +228,18 @@ static void exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *alloc(void *param, unsigned int len) +static struct crypto_instance *alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = 
crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/pcbc.c b/crypto/pcbc.c index 5174d7fdad6e..c3ed8a1c9f46 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -279,13 +279,18 @@ static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_pcbc_alloc(void *param, unsigned int len) +static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 53e8ccbf0f5f..9f502b86e0ea 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -288,12 +288,18 @@ static void xcbc_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *xcbc_alloc(void *param, unsigned int len) +static struct crypto_instance *xcbc_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC); + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); + if (err) + return ERR_PTR(err); + + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 4e05e93ff681..d0c190b4d02f 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -15,6 +15,7 @@ #include struct module; +struct rtattr; struct seq_file; struct crypto_type { @@ -38,7 +39,7 @@ struct crypto_template { struct hlist_head instances; struct module *module; - struct crypto_instance *(*alloc)(void *param, unsigned int len); + struct crypto_instance *(*alloc)(struct rtattr **tb); void (*free)(struct crypto_instance *inst); char name[CRYPTO_MAX_ALG_NAME]; @@ -96,8 +97,9 @@ void crypto_drop_spawn(struct crypto_spawn *spawn); struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, u32 mask); -struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len, - u32 type, u32 mask); +struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); +int crypto_check_attr_type(struct rtattr **tb, u32 type); +struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask); struct crypto_instance *crypto_alloc_instance(const char *name, struct crypto_alg *alg); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index d4d05313280c..67830e7c2c31 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -372,12 +372,21 @@ struct crypto_hash { enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, + CRYPTOA_TYPE, + __CRYPTOA_MAX, }; +#define CRYPTOA_MAX (__CRYPTOA_MAX - 1) + struct crypto_attr_alg { char name[CRYPTO_MAX_ALG_NAME]; }; +struct crypto_attr_type { + u32 type; + u32 mask; +}; + /* * Transform user interface. */ -- cgit v1.2.3 From b5b7f08869340aa8cfa23303f7d195f161479592 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 16 Apr 2007 20:48:54 +1000 Subject: [CRYPTO] api: Add async blkcipher type This patch adds the mid-level interface for asynchronous block ciphers. 
It also includes a generic queueing mechanism that can be used by other asynchronous crypto operations in future. Signed-off-by: Herbert Xu --- crypto/Kconfig | 4 +++ crypto/Makefile | 1 + crypto/ablkcipher.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++ crypto/algapi.c | 62 ++++++++++++++++++++++++++++++++++++ include/crypto/algapi.h | 58 ++++++++++++++++++++++++++++++++++ include/linux/crypto.h | 22 +++++++++++++ 6 files changed, 230 insertions(+) create mode 100644 crypto/ablkcipher.c (limited to 'include/linux') diff --git a/crypto/Kconfig b/crypto/Kconfig index 086fcec44720..a20a3f14d324 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -16,6 +16,10 @@ config CRYPTO_ALGAPI help This option provides the API for cryptographic algorithms. +config CRYPTO_ABLKCIPHER + tristate + select CRYPTO_BLKCIPHER + config CRYPTO_BLKCIPHER tristate select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index 12f93f578171..3820d4c5ccc2 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -8,6 +8,7 @@ crypto_algapi-$(CONFIG_PROC_FS) += proc.o crypto_algapi-objs := algapi.o $(crypto_algapi-y) obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o +obj-$(CONFIG_CRYPTO_ABLKCIPHER) += ablkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o crypto_hash-objs := hash.o diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c new file mode 100644 index 000000000000..9348ddd84a56 --- /dev/null +++ b/crypto/ablkcipher.c @@ -0,0 +1,83 @@ +/* + * Asynchronous block chaining cipher operations. + * + * This is the asynchronous version of blkcipher.c indicating completion + * via a callback. + * + * Copyright (c) 2006 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#include +#include +#include +#include +#include + +static int setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm); + + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return cipher->setkey(tfm, key, keylen); +} + +static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type, + u32 mask) +{ + return alg->cra_ctxsize; +} + +static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, + u32 mask) +{ + struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; + struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher; + + if (alg->ivsize > PAGE_SIZE / 8) + return -EINVAL; + + crt->setkey = setkey; + crt->encrypt = alg->encrypt; + crt->decrypt = alg->decrypt; + crt->ivsize = alg->ivsize; + + return 0; +} + +static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher; + + seq_printf(m, "type : ablkcipher\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); + seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); + seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); + seq_printf(m, "qlen : %u\n", ablkcipher->queue->qlen); + seq_printf(m, "max qlen : %u\n", ablkcipher->queue->max_qlen); +} + +const struct crypto_type crypto_ablkcipher_type = { + .ctxsize = crypto_ablkcipher_ctxsize, + .init = crypto_init_ablkcipher_ops, +#ifdef CONFIG_PROC_FS + .show = crypto_ablkcipher_show, +#endif +}; +EXPORT_SYMBOL_GPL(crypto_ablkcipher_type); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Asynchronous block chaining cipher type"); diff --git a/crypto/algapi.c b/crypto/algapi.c index 491205e11cbe..1c2185b5b005 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -507,6 +507,68 @@ err_free_inst: } EXPORT_SYMBOL_GPL(crypto_alloc_instance); +void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) +{ + INIT_LIST_HEAD(&queue->list); + queue->backlog = &queue->list; + queue->qlen = 0; + queue->max_qlen = max_qlen; +} +EXPORT_SYMBOL_GPL(crypto_init_queue); + +int crypto_enqueue_request(struct crypto_queue *queue, + struct crypto_async_request *request) +{ + int err = -EINPROGRESS; + + if (unlikely(queue->qlen >= queue->max_qlen)) { + err = -EBUSY; + if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + goto out; + if (queue->backlog == &queue->list) + queue->backlog = &request->list; + } + + queue->qlen++; + list_add_tail(&request->list, &queue->list); + +out: + return err; +} +EXPORT_SYMBOL_GPL(crypto_enqueue_request); + +struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) +{ + struct list_head *request; + + if (unlikely(!queue->qlen)) + return NULL; + + queue->qlen--; + + if (queue->backlog != &queue->list) + queue->backlog = queue->backlog->next; + + request = queue->list.next; + list_del(request); + + return list_entry(request, struct crypto_async_request, list); +} +EXPORT_SYMBOL_GPL(crypto_dequeue_request); + +int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm) +{ + struct crypto_async_request *req; + + list_for_each_entry(req, &queue->list, list) { + if (req->tfm == tfm) + return 1; + } + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_tfm_in_queue); 
+ static int __init crypto_algapi_init(void) { crypto_init_proc(); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index d0c190b4d02f..469f511315c5 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -13,6 +13,8 @@ #define _CRYPTO_ALGAPI_H #include +#include +#include struct module; struct rtattr; @@ -51,6 +53,14 @@ struct crypto_spawn { struct crypto_instance *inst; }; +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + + unsigned int qlen; + unsigned int max_qlen; +}; + struct scatter_walk { struct scatterlist *sg; unsigned int offset; @@ -82,6 +92,7 @@ struct blkcipher_walk { int flags; }; +extern const struct crypto_type crypto_ablkcipher_type; extern const struct crypto_type crypto_blkcipher_type; extern const struct crypto_type crypto_hash_type; @@ -103,6 +114,12 @@ struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask); struct crypto_instance *crypto_alloc_instance(const char *name, struct crypto_alg *alg); +void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); +int crypto_enqueue_request(struct crypto_queue *queue, + struct crypto_async_request *request); +struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); +int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); + int blkcipher_walk_done(struct blkcipher_desc *desc, struct blkcipher_walk *walk, int err); int blkcipher_walk_virt(struct blkcipher_desc *desc, @@ -125,6 +142,17 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst) return inst->__ctx; } +static inline struct ablkcipher_alg *crypto_ablkcipher_alg( + struct crypto_ablkcipher *tfm) +{ + return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher; +} + +static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm) { return crypto_tfm_ctx(&tfm->base); @@ -172,5 +200,35 @@ static inline void blkcipher_walk_init(struct blkcipher_walk *walk, walk->total = nbytes; } +static inline struct crypto_async_request *crypto_get_backlog( + struct crypto_queue *queue) +{ + return queue->backlog == &queue->list ? NULL : + container_of(queue->backlog, struct crypto_async_request, list); +} + +static inline int ablkcipher_enqueue_request(struct ablkcipher_alg *alg, + struct ablkcipher_request *request) +{ + return crypto_enqueue_request(alg->queue, &request->base); +} + +static inline struct ablkcipher_request *ablkcipher_dequeue_request( + struct ablkcipher_alg *alg) +{ + return ablkcipher_request_cast(crypto_dequeue_request(alg->queue)); +} + +static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) +{ + return req->__ctx; +} + +static inline int ablkcipher_tfm_in_queue(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_in_queue(crypto_ablkcipher_alg(tfm)->queue, + crypto_ablkcipher_tfm(tfm)); +} + #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 67830e7c2c31..0ec2467891db 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -93,6 +93,7 @@ struct crypto_ablkcipher; struct crypto_async_request; struct crypto_blkcipher; struct crypto_hash; +struct crypto_queue; struct crypto_tfm; struct crypto_type; @@ -143,6 +144,19 @@ struct hash_desc { * Algorithms: modular crypto algorithm implementations, managed * via crypto_register_alg() and crypto_unregister_alg(). 
*/ +struct ablkcipher_alg { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + + struct crypto_queue *queue; + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + struct blkcipher_alg { int (*setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); @@ -197,6 +211,7 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; +#define cra_ablkcipher cra_u.ablkcipher #define cra_blkcipher cra_u.blkcipher #define cra_cipher cra_u.cipher #define cra_digest cra_u.digest @@ -221,6 +236,7 @@ struct crypto_alg { const struct crypto_type *cra_type; union { + struct ablkcipher_alg ablkcipher; struct blkcipher_alg blkcipher; struct cipher_alg cipher; struct digest_alg digest; @@ -572,6 +588,12 @@ static inline int crypto_ablkcipher_reqsize(struct crypto_ablkcipher *tfm) return crypto_ablkcipher_crt(tfm)->reqsize; } +static inline struct ablkcipher_request *ablkcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct ablkcipher_request, base); +} + static inline struct ablkcipher_request *ablkcipher_request_alloc( struct crypto_ablkcipher *tfm, gfp_t gfp) { -- cgit v1.2.3 From e196d6259141eda47aeafd88514aae652bfbfc7f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 14 Apr 2007 16:09:14 +1000 Subject: [CRYPTO] api: Add ablkcipher_request_set_tfm This patch adds ablkcipher_request_set_tfm for those users that need to manage the memory for ablkcipher requests directly. Signed-off-by: Herbert Xu --- include/linux/crypto.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 0ec2467891db..0de7e2ace822 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -588,6 +588,12 @@ static inline int crypto_ablkcipher_reqsize(struct crypto_ablkcipher *tfm) return crypto_ablkcipher_crt(tfm)->reqsize; } +static inline void ablkcipher_request_set_tfm( + struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) +{ + req->base.tfm = crypto_ablkcipher_tfm(tfm); +} + static inline struct ablkcipher_request *ablkcipher_request_cast( struct crypto_async_request *req) { @@ -603,7 +609,7 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc( crypto_ablkcipher_reqsize(tfm), gfp); if (likely(req)) - req->base.tfm = crypto_ablkcipher_tfm(tfm); + ablkcipher_request_set_tfm(req, tfm); return req; } -- cgit v1.2.3 From d7e5a5462f68270ed66efff22b1981be57a28c19 Mon Sep 17 00:00:00 2001 From: Segher Boessenkool Date: Wed, 2 May 2007 12:18:41 +0200 Subject: [RSLIB] Support non-canonical GF representations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For the CAFÉ NAND controller, we need to support non-canonical representations of the Galois field. Allow the caller to provide its own function for generating the field, and CAFÉ can use rslib instead of its own implementation. 
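A minimal sketch of the new calling convention follows. The function body, symbol width and polynomial are made-up examples (as written they happen to generate an ordinary field) and only illustrate the contract a caller-supplied generator must satisfy: return the multiplicative identity when passed 0, otherwise the next field element in the caller's own encoding.

#include <linux/rslib.h>

/* Hypothetical example generator -- not taken from the CAFE driver. */
static int example_gffunc(int x)
{
	if (x == 0)
		return 1;		/* multiplicative identity */
	x <<= 1;			/* step to the next element ... */
	if (x & (1 << 10))
		x ^= 0x409;		/* ... reducing by an example GF(2^10) polynomial */
	return x;
}

static struct rs_control *example_init(void)
{
	/* 10-bit symbols, caller-generated field, fcr 0, prim 1, 8 roots */
	return init_rs_non_canonical(10, example_gffunc, 0, 1, 8);
}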
Signed-off-by: Segher Boessenkool Signed-off-by: David Woodhouse --- include/linux/rslib.h | 4 ++ lib/reed_solomon/reed_solomon.c | 84 +++++++++++++++++++++++++++++++++-------- 2 files changed, 73 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rslib.h b/include/linux/rslib.h index ace25acfdc97..746580c1939c 100644 --- a/include/linux/rslib.h +++ b/include/linux/rslib.h @@ -34,6 +34,7 @@ * @prim: Primitive element, index form * @iprim: prim-th root of 1, index form * @gfpoly: The primitive generator polynominal + * @gffunc: Function to generate the field, if non-canonical representation * @users: Users of this structure * @list: List entry for the rs control list */ @@ -48,6 +49,7 @@ struct rs_control { int prim; int iprim; int gfpoly; + int (*gffunc)(int); int users; struct list_head list; }; @@ -77,6 +79,8 @@ int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len, /* Create or get a matching rs control structure */ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, int nroots); +struct rs_control *init_rs_non_canonical(int symsize, int (*func)(int), + int fcr, int prim, int nroots); /* Release a rs control structure */ void free_rs(struct rs_control *rs); diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c index a4b730a2180c..5b0d8522b7ca 100644 --- a/lib/reed_solomon/reed_solomon.c +++ b/lib/reed_solomon/reed_solomon.c @@ -56,6 +56,7 @@ static DEFINE_MUTEX(rslistlock); * rs_init - Initialize a Reed-Solomon codec * @symsize: symbol size, bits (1-8) * @gfpoly: Field generator polynomial coefficients + * @gffunc: Field generator function * @fcr: first root of RS code generator polynomial, index form * @prim: primitive element to generate polynomial roots * @nroots: RS code generator polynomial degree (number of roots) @@ -63,8 +64,8 @@ static DEFINE_MUTEX(rslistlock); * Allocate a control structure and the polynom arrays for faster * en/decoding. Fill the arrays according to the given parameters. 
*/ -static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, - int prim, int nroots) +static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int), + int fcr, int prim, int nroots) { struct rs_control *rs; int i, j, sr, root, iprim; @@ -82,6 +83,7 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, rs->prim = prim; rs->nroots = nroots; rs->gfpoly = gfpoly; + rs->gffunc = gffunc; /* Allocate the arrays */ rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL); @@ -99,17 +101,26 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, /* Generate Galois field lookup tables */ rs->index_of[0] = rs->nn; /* log(zero) = -inf */ rs->alpha_to[rs->nn] = 0; /* alpha**-inf = 0 */ - sr = 1; - for (i = 0; i < rs->nn; i++) { - rs->index_of[sr] = i; - rs->alpha_to[i] = sr; - sr <<= 1; - if (sr & (1 << symsize)) - sr ^= gfpoly; - sr &= rs->nn; + if (gfpoly) { + sr = 1; + for (i = 0; i < rs->nn; i++) { + rs->index_of[sr] = i; + rs->alpha_to[i] = sr; + sr <<= 1; + if (sr & (1 << symsize)) + sr ^= gfpoly; + sr &= rs->nn; + } + } else { + sr = gffunc(0); + for (i = 0; i < rs->nn; i++) { + rs->index_of[sr] = i; + rs->alpha_to[i] = sr; + sr = gffunc(sr); + } } /* If it's not primitive, exit */ - if(sr != 1) + if(sr != rs->alpha_to[0]) goto errpol; /* Find prim-th root of 1, used in decoding */ @@ -173,18 +184,22 @@ void free_rs(struct rs_control *rs) } /** - * init_rs - Find a matching or allocate a new rs control structure + * init_rs_internal - Find a matching or allocate a new rs control structure * @symsize: the symbol size (number of bits) * @gfpoly: the extended Galois field generator polynomial coefficients, * with the 0th coefficient in the low order bit. The polynomial * must be primitive; + * @gffunc: pointer to function to generate the next field element, + * or the multiplicative identity element if given 0. Used + * instead of gfpoly if gfpoly is 0 * @fcr: the first consecutive root of the rs code generator polynomial * in index form * @prim: primitive element to generate polynomial roots * @nroots: RS code generator polynomial degree (number of roots) */ -struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, - int nroots) +static struct rs_control *init_rs_internal(int symsize, int gfpoly, + int (*gffunc)(int), int fcr, + int prim, int nroots) { struct list_head *tmp; struct rs_control *rs; @@ -208,6 +223,8 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, continue; if (gfpoly != rs->gfpoly) continue; + if (gffunc != rs->gffunc) + continue; if (fcr != rs->fcr) continue; if (prim != rs->prim) @@ -220,7 +237,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, } /* Create a new one */ - rs = rs_init(symsize, gfpoly, fcr, prim, nroots); + rs = rs_init(symsize, gfpoly, gffunc, fcr, prim, nroots); if (rs) { rs->users = 1; list_add(&rs->list, &rslist); @@ -230,6 +247,42 @@ out: return rs; } +/** + * init_rs - Find a matching or allocate a new rs control structure + * @symsize: the symbol size (number of bits) + * @gfpoly: the extended Galois field generator polynomial coefficients, + * with the 0th coefficient in the low order bit. 
The polynomial + * must be primitive; + * @fcr: the first consecutive root of the rs code generator polynomial + * in index form + * @prim: primitive element to generate polynomial roots + * @nroots: RS code generator polynomial degree (number of roots) + */ +struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, + int nroots) +{ + return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots); +} + +/** + * init_rs_non_canonical - Find a matching or allocate a new rs control + * structure, for fields with non-canonical + * representation + * @symsize: the symbol size (number of bits) + * @gffunc: pointer to function to generate the next field element, + * or the multiplicative identity element if given 0. Used + * instead of gfpoly if gfpoly is 0 + * @fcr: the first consecutive root of the rs code generator polynomial + * in index form + * @prim: primitive element to generate polynomial roots + * @nroots: RS code generator polynomial degree (number of roots) + */ +struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int), + int fcr, int prim, int nroots) +{ + return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots); +} + #ifdef CONFIG_REED_SOLOMON_ENC8 /** * encode_rs8 - Calculate the parity for data values (8bit data width) @@ -321,6 +374,7 @@ EXPORT_SYMBOL_GPL(decode_rs16); #endif EXPORT_SYMBOL_GPL(init_rs); +EXPORT_SYMBOL_GPL(init_rs_non_canonical); EXPORT_SYMBOL_GPL(free_rs); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 86c0baf123e474b6eb404798926ecf62b426bf3a Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Wed, 2 May 2007 19:27:05 +0200 Subject: [PATCH] i386: Change sysenter_setup to __cpuinit & improve __INIT, __INITDATA Change sysenter_setup to __cpuinit. Change __INIT & __INITDATA to be cpu hotplug aware. 
Resolve MODPOST warnings similar to: WARNING: vmlinux - Section mismatch: reference to .init.text:sysenter_setup from .text between 'identify_cpu' (at offset 0xc040a380) and 'detect_ht' and WARNING: vmlinux - Section mismatch: reference to .init.data:vsyscall_int80_end from .text between 'sysenter_setup' (at offset 0xc041a269) and 'enable_sep_cpu' WARNING: vmlinux - Section mismatch: reference to .init.data:vsyscall_int80_start from .text between 'sysenter_setup' (at offset 0xc041a26e) and 'enable_sep_cpu' WARNING: vmlinux - Section mismatch: reference to .init.data:vsyscall_sysenter_end from .text between 'sysenter_setup' (at offset 0xc041a275) and 'enable_sep_cpu' WARNING: vmlinux - Section mismatch: reference to .init.data:vsyscall_sysenter_start from .text between 'sysenter_setup' (at offset 0xc041a27a) and 'enable_sep_cpu' Signed-off-by: Prarit Bhargava Signed-off-by: Andi Kleen --- arch/i386/kernel/sysenter.c | 2 +- include/linux/init.h | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c index 13ca54a85a1c..168f8147d3b4 100644 --- a/arch/i386/kernel/sysenter.c +++ b/arch/i386/kernel/sysenter.c @@ -72,7 +72,7 @@ extern const char vsyscall_int80_start, vsyscall_int80_end; extern const char vsyscall_sysenter_start, vsyscall_sysenter_end; static struct page *syscall_pages[1]; -int __init sysenter_setup(void) +int __cpuinit sysenter_setup(void) { void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC); syscall_pages[0] = virt_to_page(syscall_page); diff --git a/include/linux/init.h b/include/linux/init.h index e290a010e3f2..9abf120ec9f8 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -52,9 +52,14 @@ #endif /* For assembly routines */ +#ifdef CONFIG_HOTPLUG_CPU +#define __INIT .section ".text","ax" +#define __INITDATA .section ".data","aw" +#else #define __INIT .section ".init.text","ax" -#define __FINIT .previous #define __INITDATA .section ".init.data","aw" +#endif +#define __FINIT .previous #ifndef __ASSEMBLY__ /* -- cgit v1.2.3 From 973efae21beb2feda138f152ed06d4204774d93c Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 2 May 2007 19:27:06 +0200 Subject: [PATCH] i386: clean up mach_reboot_fixups The reboot_fixups stuff seems to be a bit of a mess, specifically the header is in linux/ when its a purely i386-specific piece of code. I'm not sure why it has its config option; its only currently needed for "geode-gx1/cs5530a", so perhaps whatever config option controls that hardware should enable this? 
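The mechanism used below is a weak default: generic code supplies an empty mach_reboot_fixups(), and a platform file that needs real work provides an ordinary, strong definition which the linker prefers, with no #ifdef at the call site. A hedged one-file illustration of that pattern, using hypothetical names:

/* Generic code: weak no-op default, used when nothing overrides it. */
void __attribute__((weak)) example_reboot_fixup(void)
{
}

/* A hardware-specific object file (compiled only when that hardware is
 * selected) would contain a plain definition of the same symbol; at link
 * time it silently replaces the weak one above.  Shown as a comment here
 * because two definitions cannot live in one translation unit:
 *
 *	void example_reboot_fixup(void)
 *	{
 *		... poke the quirky chipset back into a sane state ...
 *	}
 */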
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Andi Kleen --- arch/i386/kernel/reboot.c | 6 +++++- arch/i386/kernel/reboot_fixups.c | 2 +- include/asm-i386/reboot_fixups.h | 6 ++++++ include/linux/reboot_fixups.h | 10 ---------- 4 files changed, 12 insertions(+), 12 deletions(-) create mode 100644 include/asm-i386/reboot_fixups.h delete mode 100644 include/linux/reboot_fixups.h (limited to 'include/linux') diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c index 3514b4153f7f..8b5ff6e15412 100644 --- a/arch/i386/kernel/reboot.c +++ b/arch/i386/kernel/reboot.c @@ -17,7 +17,7 @@ #include #include #include "mach_reboot.h" -#include +#include /* * Power off function, if any @@ -316,6 +316,10 @@ void machine_shutdown(void) #endif } +void __attribute__((weak)) mach_reboot_fixups(void) +{ +} + void machine_emergency_restart(void) { if (!reboot_thru_bios) { diff --git a/arch/i386/kernel/reboot_fixups.c b/arch/i386/kernel/reboot_fixups.c index 99aab41a05b0..2d78d918340f 100644 --- a/arch/i386/kernel/reboot_fixups.c +++ b/arch/i386/kernel/reboot_fixups.c @@ -10,7 +10,7 @@ #include #include -#include +#include static void cs5530a_warm_reset(struct pci_dev *dev) { diff --git a/include/asm-i386/reboot_fixups.h b/include/asm-i386/reboot_fixups.h new file mode 100644 index 000000000000..0cb7d87c2b68 --- /dev/null +++ b/include/asm-i386/reboot_fixups.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_REBOOT_FIXUPS_H +#define _LINUX_REBOOT_FIXUPS_H + +extern void mach_reboot_fixups(void); + +#endif /* _LINUX_REBOOT_FIXUPS_H */ diff --git a/include/linux/reboot_fixups.h b/include/linux/reboot_fixups.h deleted file mode 100644 index 480ea2d489d8..000000000000 --- a/include/linux/reboot_fixups.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _LINUX_REBOOT_FIXUPS_H -#define _LINUX_REBOOT_FIXUPS_H - -#ifdef CONFIG_X86_REBOOTFIXUPS -extern void mach_reboot_fixups(void); -#else -#define mach_reboot_fixups() ((void)(0)) -#endif - -#endif /* _LINUX_REBOOT_FIXUPS_H */ -- cgit v1.2.3 From 184c44d2049c4db7ef6ec65794546954da2c6a0e Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 2 May 2007 19:27:08 +0200 Subject: [PATCH] x86-64: fix x86_64-mm-sched-clock-share Fix for the following patch. Provide dummy cpufreq functions when CPUFREQ is not compiled in. Cc: Andi Kleen Cc: Dave Jones Signed-off-by: Andrew Morton Signed-off-by: Andi Kleen --- include/linux/cpufreq.h | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 0899e2cdcdd1..cb9b2ec8849c 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -32,7 +32,15 @@ * CPUFREQ NOTIFIER INTERFACE * *********************************************************************/ +#ifdef CONFIG_CPU_FREQ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); +#else +static inline int cpufreq_register_notifier(struct notifier_block *nb, + unsigned int list) +{ + return 0; +} +#endif int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); #define CPUFREQ_TRANSITION_NOTIFIER (0) @@ -261,17 +269,22 @@ int cpufreq_set_policy(struct cpufreq_policy *policy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); -/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ -unsigned int cpufreq_get(unsigned int cpu); -/* query the last known CPU freq (in kHz). 
If zero, cpufreq couldn't detect it */ +/* + * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it + */ #ifdef CONFIG_CPU_FREQ unsigned int cpufreq_quick_get(unsigned int cpu); +unsigned int cpufreq_get(unsigned int cpu); #else static inline unsigned int cpufreq_quick_get(unsigned int cpu) { return 0; } +static inline unsigned int cpufreq_get(unsigned int cpu) +{ + return 0; +} #endif -- cgit v1.2.3 From e073ae1b34d5600ffc550407625dcb2d4cf46c6e Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Wed, 2 May 2007 19:27:08 +0200 Subject: [PATCH] x86-64: Set HASHDIST_DEFAULT to 1 for x86_64 NUMA Enable system hashtable memory to be distributed among nodes on x86_64 NUMA Forcing the kernel to use node interleaved vmalloc instead of bootmem for the system hashtable memory (alloc_large_system_hash) reduces the memory imbalance on node 0 by around 40MB on a 8 node x86_64 NUMA box: Before the following patch, on bootup of a 8 node box: Node 0 MemTotal: 3407488 kB Node 0 MemFree: 3206296 kB Node 0 MemUsed: 201192 kB Node 0 Active: 7012 kB Node 0 Inactive: 512 kB Node 0 Dirty: 0 kB Node 0 Writeback: 0 kB Node 0 FilePages: 1912 kB Node 0 Mapped: 420 kB Node 0 AnonPages: 5612 kB Node 0 PageTables: 468 kB Node 0 NFS_Unstable: 0 kB Node 0 Bounce: 0 kB Node 0 Slab: 5408 kB Node 0 SReclaimable: 644 kB Node 0 SUnreclaim: 4764 kB After the patch (or using hashdist=1 on the kernel command line): Node 0 MemTotal: 3407488 kB Node 0 MemFree: 3247608 kB Node 0 MemUsed: 159880 kB Node 0 Active: 3012 kB Node 0 Inactive: 616 kB Node 0 Dirty: 0 kB Node 0 Writeback: 0 kB Node 0 FilePages: 2424 kB Node 0 Mapped: 380 kB Node 0 AnonPages: 1200 kB Node 0 PageTables: 396 kB Node 0 NFS_Unstable: 0 kB Node 0 Bounce: 0 kB Node 0 Slab: 6304 kB Node 0 SReclaimable: 1596 kB Node 0 SUnreclaim: 4708 kB I guess it is a good idea to keep HASHDIST_DEFAULT "on" for x86_64 NUMA since x86_64 has no dearth of vmalloc space? Or maybe enable hash distribution for all 64bit NUMA arches? The following patch does it only for x86_64. I ran a HPC MPI benchmark -- 'Ansys wingsolid', which takes up quite a bit of memory and uses up tlb entries. This was on a 4 way, 2 socket Tyan AMD box (non vsmp), with 8G total memory (4G pernode). The results with and without hash distribution are: 1. Vanilla - runtime of 1188.000s 2. With hashdist=1 runtime of 1154.000s Oprofile output for the duration of run is: 1. Vanilla: PU: AMD64 processors, speed 2411.16 MHz (estimated) Counted L1_AND_L2_DTLB_MISSES events (L1 and L2 DTLB misses) with a unit mask of 0x00 (No unit mask) count 500 samples % app name symbol name 163054 6.5513 libansys1.so MultiFront::decompose(int, int, Elemset *, int *, int, int, int) 162061 6.5114 libansys3.so blockSaxpy6L_fd 162042 6.5107 libansys3.so blockInnerProduct6L_fd 156286 6.2794 libansys3.so maxb33_ 87879 3.5309 libansys1.so elmatrixmultpcg_ 84857 3.4095 libansys4.so saxpy_pcg 58637 2.3560 libansys4.so .st4560 46612 1.8728 libansys4.so .st4282 43043 1.7294 vmlinux-t copy_user_generic_string 41326 1.6604 libansys3.so blockSaxpyBackSolve6L_fd 41288 1.6589 libansys3.so blockInnerProductBackSolve6L_fd 2. 
With hashdist=1 CPU: AMD64 processors, speed 2411.13 MHz (estimated) Counted L1_AND_L2_DTLB_MISSES events (L1 and L2 DTLB misses) with a unit mask of 0x00 (No unit mask) count 500 samples % app name symbol name 162993 6.9814 libansys1.so MultiFront::decompose(int, int, Elemset *, int *, int, int, int) 160799 6.8874 libansys3.so blockInnerProduct6L_fd 160459 6.8729 libansys3.so blockSaxpy6L_fd 156018 6.6826 libansys3.so maxb33_ 84700 3.6279 libansys4.so saxpy_pcg 83434 3.5737 libansys1.so elmatrixmultpcg_ 58074 2.4875 libansys4.so .st4560 46000 1.9703 libansys4.so .st4282 41166 1.7632 libansys3.so blockSaxpyBackSolve6L_fd 41033 1.7575 libansys3.so blockInnerProductBackSolve6L_fd 35762 1.5318 libansys1.so inner_product_sub 35591 1.5245 libansys1.so inner_product_sub2 28259 1.2104 libansys4.so addVectors Signed-off-by: Pravin B. Shelar Signed-off-by: Ravikiran Thirumalai Signed-off-by: Shai Fultheim Signed-off-by: Andi Kleen Acked-by: Christoph Lameter Cc: Andi Kleen Signed-off-by: Andrew Morton --- include/linux/bootmem.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 81c07cd18643..0365ec9fc0c9 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -122,9 +122,9 @@ extern void *alloc_large_system_hash(const char *tablename, #define HASH_EARLY 0x00000001 /* Allocating during early boot? */ /* Only NUMA needs hash distribution. - * IA64 is known to have sufficient vmalloc space. + * IA64 and x86_64 have sufficient vmalloc space. */ -#if defined(CONFIG_NUMA) && defined(CONFIG_IA64) +#if defined(CONFIG_NUMA) && (defined(CONFIG_IA64) || defined(CONFIG_X86_64)) #define HASHDIST_DEFAULT 1 #else #define HASHDIST_DEFAULT 0 -- cgit v1.2.3 From 79e030114a8d97a1dcd593ab84fb986f8c91c536 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 2 May 2007 19:27:09 +0200 Subject: [PATCH] i386: Allow i386 crash kernels to handle x86_64 dumps The specific case I am encountering is kdump under Xen with a 64 bit hypervisor and 32 bit kernel/userspace. The dump created is 64 bit due to the hypervisor but the dump kernel is 32 bit for maximum compatibility. It's possibly less likely to be useful in a purely native scenario but I see no reason to disallow it. [akpm@linux-foundation.org: build fix] Signed-off-by: Ian Campbell Signed-off-by: Andi Kleen Acked-by: Vivek Goyal Cc: Horms Cc: Magnus Damm Cc: "Eric W. Biederman" Cc: Andi Kleen Signed-off-by: Andrew Morton --- fs/proc/vmcore.c | 2 +- include/asm-i386/kexec.h | 3 +++ include/linux/crash_dump.h | 8 ++++++++ 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index d96050728c43..523e1098ae88 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -514,7 +514,7 @@ static int __init parse_crash_elf64_headers(void) /* Do some basic Verification. */ if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 || (ehdr.e_type != ET_CORE) || - !elf_check_arch(&ehdr) || + !vmcore_elf_check_arch(&ehdr) || ehdr.e_ident[EI_CLASS] != ELFCLASS64 || ehdr.e_ident[EI_VERSION] != EV_CURRENT || ehdr.e_version != EV_CURRENT || diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h index c5b4ab95bdc2..bcb5b21de2d2 100644 --- a/include/asm-i386/kexec.h +++ b/include/asm-i386/kexec.h @@ -42,6 +42,9 @@ /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_386 +/* We can also handle crash dumps from 64 bit kernel. 
*/ +#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64) + #define MAX_NOTE_BYTES 1024 /* CPU does not save ss and esp on stack if execution is already diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 32503657f14f..22c7ac5cd80c 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -14,5 +14,13 @@ extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, extern const struct file_operations proc_vmcore_operations; extern struct proc_dir_entry *proc_vmcore; +/* Architecture code defines this if there are other possible ELF + * machine types, e.g. on bi-arch capable hardware. */ +#ifndef vmcore_elf_check_arch_cross +#define vmcore_elf_check_arch_cross(x) 0 +#endif + +#define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) + #endif /* CONFIG_CRASH_DUMP */ #endif /* LINUX_CRASHDUMP_H */ -- cgit v1.2.3 From 6fb14755a676282a4e6caa05a08c92db8e45cfff Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 2 May 2007 19:27:10 +0200 Subject: [PATCH] x86: tighten kernel image page access rights On x86-64, kernel memory freed after init can be entirely unmapped instead of just getting 'poisoned' by overwriting with a debug pattern. On i386 and x86-64 (under CONFIG_DEBUG_RODATA), kernel text and bug table can also be write-protected. Compared to the first version, this one prevents re-creating deleted mappings in the kernel image range on x86-64, if those got removed previously. This, together with the original changes, prevents temporarily having inconsistent mappings when cacheability attributes are being changed on such pages (e.g. from AGP code). While on i386 such duplicate mappings don't exist, the same change is done there, too, both for consistency and because checking pte_present() before using various other pte_XXX functions is a requirement anyway. At once, i386 code gets adjusted to use pte_huge() instead of open coding this. AK: split out cpa() changes Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen --- arch/i386/kernel/vmlinux.lds.S | 4 ++-- arch/i386/mm/init.c | 25 +++++++++++++++++++------ arch/x86_64/kernel/head.S | 1 - arch/x86_64/kernel/vmlinux.lds.S | 5 +++-- arch/x86_64/mm/init.c | 25 ++++++++++++++++--------- include/linux/poison.h | 3 --- 6 files changed, 40 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S index 6f38f818380b..f4ec72231835 100644 --- a/arch/i386/kernel/vmlinux.lds.S +++ b/arch/i386/kernel/vmlinux.lds.S @@ -61,8 +61,6 @@ SECTIONS __stop___ex_table = .; } - RODATA - BUG_TABLE . = ALIGN(4); @@ -72,6 +70,8 @@ SECTIONS __tracedata_end = .; } + RODATA + /* writeable */ . = ALIGN(4096); .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index 23be1b0aafa4..bd5ef3718504 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -751,13 +752,25 @@ static int noinline do_test_wp_bit(void) void mark_rodata_ro(void) { - unsigned long addr = (unsigned long)__start_rodata; + unsigned long start = PFN_ALIGN(_text); + unsigned long size = PFN_ALIGN(_etext) - start; - for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE) - change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO); +#ifdef CONFIG_HOTPLUG_CPU + /* It must still be possible to apply SMP alternatives. 
*/ + if (num_possible_cpus() <= 1) +#endif + { + change_page_attr(virt_to_page(start), + size >> PAGE_SHIFT, PAGE_KERNEL_RX); + printk("Write protecting the kernel text: %luk\n", size >> 10); + } - printk("Write protecting the kernel read-only data: %uk\n", - (__end_rodata - __start_rodata) >> 10); + start += size; + size = (unsigned long)__end_rodata - start; + change_page_attr(virt_to_page(start), + size >> PAGE_SHIFT, PAGE_KERNEL_RO); + printk("Write protecting the kernel read-only data: %luk\n", + size >> 10); /* * change_page_attr() requires a global_flush_tlb() call after it. @@ -781,7 +794,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) __free_page(page); totalram_pages++; } - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); + printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); } void free_initmem(void) diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S index 36aa98a6d15c..fd9fdfdd143e 100644 --- a/arch/x86_64/kernel/head.S +++ b/arch/x86_64/kernel/head.S @@ -280,7 +280,6 @@ early_idt_ripmsg: .balign PAGE_SIZE ENTRY(stext) -ENTRY(_stext) #define NEXT_PAGE(name) \ .balign PAGE_SIZE; \ diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S index 5176ecf006ee..3bdeb88d28f4 100644 --- a/arch/x86_64/kernel/vmlinux.lds.S +++ b/arch/x86_64/kernel/vmlinux.lds.S @@ -29,6 +29,7 @@ SECTIONS .text : AT(ADDR(.text) - LOAD_OFFSET) { /* First the code that has to be first for bootstrapping */ *(.bootstrap.text) + _stext = .; /* Then all the functions that are "hot" in profiles, to group them onto the same hugetlb entry */ #include "functionlist" @@ -50,10 +51,10 @@ SECTIONS __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) } __stop___ex_table = .; - RODATA - BUG_TABLE + RODATA + . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */ /* Data */ .data : AT(ADDR(.data) - LOAD_OFFSET) { diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index 69e22d3c9238..e3134bc9a4fc 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -563,21 +564,23 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) if (begin >= end) return; - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); + printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); for (addr = begin; addr < end; addr += PAGE_SIZE) { struct page *page = pfn_to_page(addr >> PAGE_SHIFT); ClearPageReserved(page); init_page_count(page); memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE); + if (addr >= __START_KERNEL_map) + change_page_attr_addr(addr, 1, __pgprot(0)); __free_page(page); totalram_pages++; } + if (addr > __START_KERNEL_map) + global_flush_tlb(); } void free_initmem(void) { - memset(__initdata_begin, POISON_FREE_INITDATA, - __initdata_end - __initdata_begin); free_init_pages("unused kernel memory", __pa_symbol(&__init_begin), __pa_symbol(&__init_end)); @@ -587,14 +590,18 @@ void free_initmem(void) void mark_rodata_ro(void) { - unsigned long addr = (unsigned long)__va(__pa_symbol(&__start_rodata)); - unsigned long end = (unsigned long)__va(__pa_symbol(&__end_rodata)); + unsigned long start = PFN_ALIGN(__va(__pa_symbol(&_stext))), size; - for (; addr < end; addr += PAGE_SIZE) - change_page_attr_addr(addr, 1, PAGE_KERNEL_RO); +#ifdef CONFIG_HOTPLUG_CPU + /* It must still be possible to apply SMP alternatives. 
*/ + if (num_possible_cpus() > 1) + start = PFN_ALIGN(__va(__pa_symbol(&_etext))); +#endif + size = (unsigned long)__va(__pa_symbol(&__end_rodata)) - start; + change_page_attr_addr(start, size >> PAGE_SHIFT, PAGE_KERNEL_RO); - printk ("Write protecting the kernel read-only data: %luk\n", - (__end_rodata - __start_rodata) >> 10); + printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", + size >> 10); /* * change_page_attr_addr() requires a global_flush_tlb() call after it. diff --git a/include/linux/poison.h b/include/linux/poison.h index 3e628f990fdf..89580b764959 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -26,9 +26,6 @@ /********** arch/$ARCH/mm/init.c **********/ #define POISON_FREE_INITMEM 0xcc -/********** arch/x86_64/mm/init.c **********/ -#define POISON_FREE_INITDATA 0xba - /********** arch/ia64/hp/common/sba_iommu.c **********/ /* * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a -- cgit v1.2.3 From b00742d399513a4100c24cc2accefdc1bb1e0b15 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 2 May 2007 19:27:11 +0200 Subject: [PATCH] x86-64: Account for module percpu space separately from kernel percpu Rather than using a single constant PERCPU_ENOUGH_ROOM, compute it as the sum of kernel_percpu + PERCPU_MODULE_RESERVE. This is now common to all architectures; if an architecture wants to set PERCPU_ENOUGH_ROOM to something special, then it may do so (ia64 is the only one which does). Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Andi Kleen Cc: Rusty Russell Cc: Eric W. Biederman Cc: Andi Kleen --- include/asm-alpha/percpu.h | 14 -------------- include/asm-sparc64/percpu.h | 10 ---------- include/asm-x86_64/percpu.h | 10 ---------- include/linux/percpu.h | 9 ++++++++- kernel/module.c | 2 +- 5 files changed, 9 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/include/asm-alpha/percpu.h b/include/asm-alpha/percpu.h index 651ebb141b24..48348fe34c19 100644 --- a/include/asm-alpha/percpu.h +++ b/include/asm-alpha/percpu.h @@ -1,20 +1,6 @@ #ifndef __ALPHA_PERCPU_H #define __ALPHA_PERCPU_H -/* - * Increase the per cpu area for Alpha so that - * modules using percpu area can load. 
- */ -#ifdef CONFIG_MODULES -# define PERCPU_MODULE_RESERVE 8192 -#else -# define PERCPU_MODULE_RESERVE 0 -#endif - -#define PERCPU_ENOUGH_ROOM \ - (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ - PERCPU_MODULE_RESERVE) - #include #endif /* __ALPHA_PERCPU_H */ diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h index 0d3df76aa47f..ced8cbde046d 100644 --- a/include/asm-sparc64/percpu.h +++ b/include/asm-sparc64/percpu.h @@ -5,16 +5,6 @@ #ifdef CONFIG_SMP -#ifdef CONFIG_MODULES -# define PERCPU_MODULE_RESERVE 8192 -#else -# define PERCPU_MODULE_RESERVE 0 -#endif - -#define PERCPU_ENOUGH_ROOM \ - (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ - PERCPU_MODULE_RESERVE) - extern void setup_per_cpu_areas(void); extern unsigned long __per_cpu_base; diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h index 5ed0ef340842..c6fbb67eac90 100644 --- a/include/asm-x86_64/percpu.h +++ b/include/asm-x86_64/percpu.h @@ -11,16 +11,6 @@ #include -#ifdef CONFIG_MODULES -# define PERCPU_MODULE_RESERVE 8192 -#else -# define PERCPU_MODULE_RESERVE 0 -#endif - -#define PERCPU_ENOUGH_ROOM \ - (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ - PERCPU_MODULE_RESERVE) - #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) #define __my_cpu_offset() read_pda(data_offset) diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 600e3d387ffc..b72be2f79e6a 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -11,9 +11,16 @@ /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ #ifndef PERCPU_ENOUGH_ROOM -#define PERCPU_ENOUGH_ROOM 32768 +#ifdef CONFIG_MODULES +#define PERCPU_MODULE_RESERVE 8192 +#else +#define PERCPU_MODULE_RESERVE 0 #endif +#define PERCPU_ENOUGH_ROOM \ + (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE) +#endif /* PERCPU_ENOUGH_ROOM */ + /* * Must be an lvalue. Since @var must be a simple identifier, * we force a syntax error here if it isn't. diff --git a/kernel/module.c b/kernel/module.c index 9da5af668a20..cf49ca25fcce 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -430,7 +430,7 @@ static int percpu_modinit(void) pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated, GFP_KERNEL); /* Static in-kernel percpu data (used). */ - pcpu_size[0] = -ALIGN(__per_cpu_end-__per_cpu_start, SMP_CACHE_BYTES); + pcpu_size[0] = -(__per_cpu_end-__per_cpu_start); /* Free room. */ pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0]; if (pcpu_size[1] < 0) { -- cgit v1.2.3 From d4f7a2c18e59e0304a1c733589ce14fc02fec1bd Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 2 May 2007 19:27:12 +0200 Subject: [PATCH] i386: Relocate VDSO ELF headers to match mapped location with COMPAT_VDSO Some versions of libc can't deal with a VDSO which doesn't have its ELF headers matching its mapped address. COMPAT_VDSO maps the VDSO at a specific system-wide fixed address. Previously this was all done at build time, on the grounds that the fixed VDSO address is always at the top of the address space. However, a hypervisor may reserve some of that address space, pushing the fixmap address down. This patch does the adjustment dynamically at runtime, depending on the runtime location of the VDSO fixmap. [ Patch has been through several hands: Jan Beulich wrote the orignal version; Zach reworked it, and Jeremy converted it to relocate phdrs as well as sections. ] Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Andi Kleen Cc: Zachary Amsden Cc: "Jan Beulich" Cc: Eric W. 
Biederman Cc: Andi Kleen Cc: Ingo Molnar Cc: Roland McGrath --- arch/i386/kernel/entry.S | 4 -- arch/i386/kernel/sysenter.c | 158 +++++++++++++++++++++++++++++++++++++++++--- arch/i386/mm/pgtable.c | 6 -- include/asm-i386/elf.h | 28 +++----- include/asm-i386/fixmap.h | 8 +-- include/linux/elf.h | 17 +++++ 6 files changed, 178 insertions(+), 43 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index c61c6b67e856..e901952dff37 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S @@ -305,16 +305,12 @@ sysenter_past_esp: pushl $(__USER_CS) CFI_ADJUST_CFA_OFFSET 4 /*CFI_REL_OFFSET cs, 0*/ -#ifndef CONFIG_COMPAT_VDSO /* * Push current_thread_info()->sysenter_return to the stack. * A tiny bit of offset fixup is necessary - 4*4 means the 4 words * pushed above; +8 corresponds to copy_thread's esp0 setting. */ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) -#else - pushl $SYSENTER_RETURN -#endif CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET eip, 0 diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c index 13ca54a85a1c..e5a958379ac9 100644 --- a/arch/i386/kernel/sysenter.c +++ b/arch/i386/kernel/sysenter.c @@ -22,6 +22,7 @@ #include #include #include +#include /* * Should the kernel map a VDSO page into processes and pass its @@ -46,6 +47,129 @@ __setup("vdso=", vdso_setup); extern asmlinkage void sysenter_entry(void); +#ifdef CONFIG_COMPAT_VDSO +static __init void reloc_symtab(Elf32_Ehdr *ehdr, + unsigned offset, unsigned size) +{ + Elf32_Sym *sym = (void *)ehdr + offset; + unsigned nsym = size / sizeof(*sym); + unsigned i; + + for(i = 0; i < nsym; i++, sym++) { + if (sym->st_shndx == SHN_UNDEF || + sym->st_shndx == SHN_ABS) + continue; /* skip */ + + if (sym->st_shndx > SHN_LORESERVE) { + printk(KERN_INFO "VDSO: unexpected st_shndx %x\n", + sym->st_shndx); + continue; + } + + switch(ELF_ST_TYPE(sym->st_info)) { + case STT_OBJECT: + case STT_FUNC: + case STT_SECTION: + case STT_FILE: + sym->st_value += VDSO_HIGH_BASE; + } + } +} + +static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset) +{ + Elf32_Dyn *dyn = (void *)ehdr + offset; + + for(; dyn->d_tag != DT_NULL; dyn++) + switch(dyn->d_tag) { + case DT_PLTGOT: + case DT_HASH: + case DT_STRTAB: + case DT_SYMTAB: + case DT_RELA: + case DT_INIT: + case DT_FINI: + case DT_REL: + case DT_DEBUG: + case DT_JMPREL: + case DT_VERSYM: + case DT_VERDEF: + case DT_VERNEED: + case DT_ADDRRNGLO ... DT_ADDRRNGHI: + /* definitely pointers needing relocation */ + dyn->d_un.d_ptr += VDSO_HIGH_BASE; + break; + + case DT_ENCODING ... OLD_DT_LOOS-1: + case DT_LOOS ... DT_HIOS-1: + /* Tags above DT_ENCODING are pointers if + they're even */ + if (dyn->d_tag >= DT_ENCODING && + (dyn->d_tag & 1) == 0) + dyn->d_un.d_ptr += VDSO_HIGH_BASE; + break; + + case DT_VERDEFNUM: + case DT_VERNEEDNUM: + case DT_FLAGS_1: + case DT_RELACOUNT: + case DT_RELCOUNT: + case DT_VALRNGLO ... DT_VALRNGHI: + /* definitely not pointers */ + break; + + case OLD_DT_LOOS ... DT_LOOS-1: + case DT_HIOS ... 
DT_VALRNGLO-1: + default: + if (dyn->d_tag > DT_ENCODING) + printk(KERN_INFO "VDSO: unexpected DT_tag %x\n", + dyn->d_tag); + break; + } +} + +static __init void relocate_vdso(Elf32_Ehdr *ehdr) +{ + Elf32_Phdr *phdr; + Elf32_Shdr *shdr; + int i; + + BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 || + !elf_check_arch(ehdr) || + ehdr->e_type != ET_DYN); + + ehdr->e_entry += VDSO_HIGH_BASE; + + /* rebase phdrs */ + phdr = (void *)ehdr + ehdr->e_phoff; + for (i = 0; i < ehdr->e_phnum; i++) { + phdr[i].p_vaddr += VDSO_HIGH_BASE; + + /* relocate dynamic stuff */ + if (phdr[i].p_type == PT_DYNAMIC) + reloc_dyn(ehdr, phdr[i].p_offset); + } + + /* rebase sections */ + shdr = (void *)ehdr + ehdr->e_shoff; + for(i = 0; i < ehdr->e_shnum; i++) { + if (!(shdr[i].sh_flags & SHF_ALLOC)) + continue; + + shdr[i].sh_addr += VDSO_HIGH_BASE; + + if (shdr[i].sh_type == SHT_SYMTAB || + shdr[i].sh_type == SHT_DYNSYM) + reloc_symtab(ehdr, shdr[i].sh_offset, + shdr[i].sh_size); + } +} +#else +static inline void relocate_vdso(Elf32_Ehdr *ehdr) +{ +} +#endif /* COMPAT_VDSO */ + void enable_sep_cpu(void) { int cpu = get_cpu(); @@ -75,6 +199,9 @@ static struct page *syscall_pages[1]; int __init sysenter_setup(void) { void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC); + const void *vsyscall; + size_t vsyscall_len; + syscall_pages[0] = virt_to_page(syscall_page); #ifdef CONFIG_COMPAT_VDSO @@ -83,23 +210,23 @@ int __init sysenter_setup(void) #endif if (!boot_cpu_has(X86_FEATURE_SEP)) { - memcpy(syscall_page, - &vsyscall_int80_start, - &vsyscall_int80_end - &vsyscall_int80_start); - return 0; + vsyscall = &vsyscall_int80_start; + vsyscall_len = &vsyscall_int80_end - &vsyscall_int80_start; + } else { + vsyscall = &vsyscall_sysenter_start; + vsyscall_len = &vsyscall_sysenter_end - &vsyscall_sysenter_start; } - memcpy(syscall_page, - &vsyscall_sysenter_start, - &vsyscall_sysenter_end - &vsyscall_sysenter_start); + memcpy(syscall_page, vsyscall, vsyscall_len); + relocate_vdso(syscall_page); return 0; } -#ifndef CONFIG_COMPAT_VDSO /* Defined in vsyscall-sysenter.S */ extern void SYSENTER_RETURN; +#ifdef __HAVE_ARCH_GATE_AREA /* Setup a VMA at program startup for the vsyscall page */ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) { @@ -159,4 +286,17 @@ int in_gate_area_no_task(unsigned long addr) { return 0; } -#endif +#else /* !__HAVE_ARCH_GATE_AREA */ +int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) +{ + /* + * If not creating userspace VMA, simply set vdso to point to + * fixmap page. 
+ */ + current->mm->context.vdso = (void *)VDSO_HIGH_BASE; + current_thread_info()->sysenter_return = + (void *)VDSO_SYM(&SYSENTER_RETURN); + + return 0; +} +#endif /* __HAVE_ARCH_GATE_AREA */ diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c index fa0cfbd551e1..99c09edc3dbb 100644 --- a/arch/i386/mm/pgtable.c +++ b/arch/i386/mm/pgtable.c @@ -144,10 +144,8 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) } static int fixmaps; -#ifndef CONFIG_COMPAT_VDSO unsigned long __FIXADDR_TOP = 0xfffff000; EXPORT_SYMBOL(__FIXADDR_TOP); -#endif void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags) { @@ -173,12 +171,8 @@ void reserve_top_address(unsigned long reserve) BUG_ON(fixmaps > 0); printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", (int)-reserve); -#ifdef CONFIG_COMPAT_VDSO - BUG_ON(reserve != 0); -#else __FIXADDR_TOP = -reserve - PAGE_SIZE; __VMALLOC_RESERVE += reserve; -#endif } pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h index 952b3ee3c9bb..d304ab4161ff 100644 --- a/include/asm-i386/elf.h +++ b/include/asm-i386/elf.h @@ -133,39 +133,31 @@ extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) #define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) -#define VDSO_BASE ((unsigned long)current->mm->context.vdso) - -#ifdef CONFIG_COMPAT_VDSO -# define VDSO_COMPAT_BASE VDSO_HIGH_BASE -# define VDSO_PRELINK VDSO_HIGH_BASE -#else -# define VDSO_COMPAT_BASE VDSO_BASE -# define VDSO_PRELINK 0 -#endif +#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) +#define VDSO_PRELINK 0 #define VDSO_SYM(x) \ - (VDSO_COMPAT_BASE + (unsigned long)(x) - VDSO_PRELINK) + (VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK) #define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE) -#define VDSO_EHDR ((const struct elfhdr *) VDSO_COMPAT_BASE) +#define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE) extern void __kernel_vsyscall; #define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall) -#ifndef CONFIG_COMPAT_VDSO -#define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack); -#endif extern unsigned int vdso_enabled; -#define ARCH_DLINFO \ -do if (vdso_enabled) { \ - NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ - NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \ +#define ARCH_DLINFO \ +do if (vdso_enabled) { \ + NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ } while (0) #endif diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h index 3e9f610c35df..e5651b2a585a 100644 --- a/include/asm-i386/fixmap.h +++ b/include/asm-i386/fixmap.h @@ -19,13 +19,9 @@ * Leave one empty page between vmalloc'ed areas and * the start of the fixmap. 
*/ -#ifndef CONFIG_COMPAT_VDSO extern unsigned long __FIXADDR_TOP; -#else -#define __FIXADDR_TOP 0xfffff000 -#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO) -#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1) -#endif +#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO) +#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1) #ifndef __ASSEMBLY__ #include diff --git a/include/linux/elf.h b/include/linux/elf.h index 60713e6ea297..8b17ffe222c4 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -83,6 +83,23 @@ typedef __s64 Elf64_Sxword; #define DT_DEBUG 21 #define DT_TEXTREL 22 #define DT_JMPREL 23 +#define DT_ENCODING 32 +#define OLD_DT_LOOS 0x60000000 +#define DT_LOOS 0x6000000d +#define DT_HIOS 0x6ffff000 +#define DT_VALRNGLO 0x6ffffd00 +#define DT_VALRNGHI 0x6ffffdff +#define DT_ADDRRNGLO 0x6ffffe00 +#define DT_ADDRRNGHI 0x6ffffeff +#define DT_VERSYM 0x6ffffff0 +#define DT_RELACOUNT 0x6ffffff9 +#define DT_RELCOUNT 0x6ffffffa +#define DT_FLAGS_1 0x6ffffffb +#define DT_VERDEF 0x6ffffffc +#define DT_VERDEFNUM 0x6ffffffd +#define DT_VERNEED 0x6ffffffe +#define DT_VERNEEDNUM 0x6fffffff +#define OLD_DT_HIOS 0x6fffffff #define DT_LOPROC 0x70000000 #define DT_HIPROC 0x7fffffff -- cgit v1.2.3 From ce6234b5298902aaec831a67d5f8d9bd2ef5a488 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 2 May 2007 19:27:15 +0200 Subject: [PATCH] i386: PARAVIRT: add kmap_atomic_pte for mapping highpte pages Xen and VMI both have special requirements when mapping a highmem pte page into the kernel address space. These can be dealt with by adding a new kmap_atomic_pte() function for mapping highptes, and hooking it into the paravirt_ops infrastructure. Xen specifically wants to map the pte page RO, so this patch exposes a helper function, kmap_atomic_prot, which maps the page with the specified page protections. This also adds a kmap_flush_unused() function to clear out the cached kmap mappings. Xen needs this to clear out any potential stray RW mappings of pages which will become part of a pagetable. [ Zach - vmi.c will need some attention after this patch. It wasn't immediately obvious to me what needs to be done. ] Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Andi Kleen Cc: Zachary Amsden --- arch/i386/kernel/paravirt.c | 5 +++++ arch/i386/mm/highmem.c | 9 +++++++-- include/asm-i386/highmem.h | 6 ++++++ include/asm-i386/paravirt.h | 15 +++++++++++++++ include/asm-i386/pgtable.h | 4 ++-- include/linux/highmem.h | 6 ++++++ mm/highmem.c | 9 +++++++++ 7 files changed, 50 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c index 13f41b5c887a..596f382c641c 100644 --- a/arch/i386/kernel/paravirt.c +++ b/arch/i386/kernel/paravirt.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -316,6 +317,10 @@ struct paravirt_ops paravirt_ops = { .ptep_get_and_clear = native_ptep_get_and_clear, +#ifdef CONFIG_HIGHPTE + .kmap_atomic_pte = kmap_atomic, +#endif + #ifdef CONFIG_X86_PAE .set_pte_atomic = native_set_pte_atomic, .set_pte_present = native_set_pte_present, diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c index ac70d09df7ee..a1a21abf742e 100644 --- a/arch/i386/mm/highmem.c +++ b/arch/i386/mm/highmem.c @@ -26,7 +26,7 @@ void kunmap(struct page *page) * However when holding an atomic kmap is is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. 
*/ -void *kmap_atomic(struct page *page, enum km_type type) +void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) { enum fixed_addresses idx; unsigned long vaddr; @@ -41,12 +41,17 @@ void *kmap_atomic(struct page *page, enum km_type type) return page_address(page); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - set_pte(kmap_pte-idx, mk_pte(page, kmap_prot)); + set_pte(kmap_pte-idx, mk_pte(page, prot)); arch_flush_lazy_mmu_mode(); return (void*) vaddr; } +void *kmap_atomic(struct page *page, enum km_type type) +{ + return kmap_atomic_prot(page, type, kmap_prot); +} + void kunmap_atomic(void *kvaddr, enum km_type type) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h index e9a34ebc25d5..13cdcd66fff2 100644 --- a/include/asm-i386/highmem.h +++ b/include/asm-i386/highmem.h @@ -24,6 +24,7 @@ #include #include #include +#include /* declarations for highmem.c */ extern unsigned long highstart_pfn, highend_pfn; @@ -67,11 +68,16 @@ extern void FASTCALL(kunmap_high(struct page *page)); void *kmap(struct page *page); void kunmap(struct page *page); +void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); void *kmap_atomic(struct page *page, enum km_type type); void kunmap_atomic(void *kvaddr, enum km_type type); void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); struct page *kmap_atomic_to_page(void *ptr); +#ifndef CONFIG_PARAVIRT +#define kmap_atomic_pte(page, type) kmap_atomic(page, type) +#endif + #define flush_cache_kmaps() do { } while (0) #endif /* __KERNEL__ */ diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h index 10f44af76b49..5048b41428fa 100644 --- a/include/asm-i386/paravirt.h +++ b/include/asm-i386/paravirt.h @@ -16,7 +16,9 @@ #ifndef __ASSEMBLY__ #include #include +#include +struct page; struct thread_struct; struct Xgt_desc_struct; struct tss_struct; @@ -187,6 +189,10 @@ struct paravirt_ops pte_t (*ptep_get_and_clear)(pte_t *ptep); +#ifdef CONFIG_HIGHPTE + void *(*kmap_atomic_pte)(struct page *page, enum km_type type); +#endif + #ifdef CONFIG_X86_PAE void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); @@ -884,6 +890,15 @@ static inline void paravirt_release_pd(unsigned pfn) PVOP_VCALL1(release_pd, pfn); } +#ifdef CONFIG_HIGHPTE +static inline void *kmap_atomic_pte(struct page *page, enum km_type type) +{ + unsigned long ret; + ret = PVOP_CALL2(unsigned long, kmap_atomic_pte, page, type); + return (void *)ret; +} +#endif + static inline void pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index 6599f2aa91b7..befc697821e5 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -476,9 +476,9 @@ extern pte_t *lookup_address(unsigned long address); #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ - ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) + ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) #define pte_offset_map_nested(dir, address) \ - ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) + ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) #else diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 
645d440807c2..bca8e2dfa355 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -27,6 +27,8 @@ static inline void flush_kernel_dcache_page(struct page *page) unsigned int nr_free_highpages(void); extern unsigned long totalhigh_pages; +void kmap_flush_unused(void); + #else /* CONFIG_HIGHMEM */ static inline unsigned int nr_free_highpages(void) { return 0; } @@ -44,9 +46,13 @@ static inline void *kmap(struct page *page) #define kmap_atomic(page, idx) \ ({ pagefault_disable(); page_address(page); }) +#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx) + #define kunmap_atomic(addr, idx) do { pagefault_enable(); } while (0) #define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) + +#define kmap_flush_unused() do {} while(0) #endif #endif /* CONFIG_HIGHMEM */ diff --git a/mm/highmem.c b/mm/highmem.c index 51e1c1995fec..be8f8d36a8b9 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -99,6 +99,15 @@ static void flush_all_zero_pkmaps(void) flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP)); } +/* Flush all unused kmap mappings in order to remove stray + mappings. */ +void kmap_flush_unused(void) +{ + spin_lock(&kmap_lock); + flush_all_zero_pkmaps(); + spin_unlock(&kmap_lock); +} + static inline unsigned long map_new_virtual(struct page *page) { unsigned long vaddr; -- cgit v1.2.3 From 03df4f6ee997589a84d5f9492c6419183724c710 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 2 May 2007 19:27:17 +0200 Subject: [PATCH] i386: Clean up ELF note generation Three cleanups: 1: ELF notes are never mapped, so there's no need to have any access flags in their phdr. 2: When generating them from asm, tell the assembler to use a SHT_NOTE section type. There doesn't seem to be a way to do this from C. 3: Use ANSI rather than traditional cpp behaviour to stringify the macro argument. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Andi Kleen Cc: Eric W. 
Biederman --- arch/i386/kernel/vmlinux.lds.S | 2 +- include/asm-generic/vmlinux.lds.h | 2 +- include/linux/elfnote.h | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S index 568caca87715..23e8614edeee 100644 --- a/arch/i386/kernel/vmlinux.lds.S +++ b/arch/i386/kernel/vmlinux.lds.S @@ -30,7 +30,7 @@ jiffies = jiffies_64; PHDRS { text PT_LOAD FLAGS(5); /* R_E */ data PT_LOAD FLAGS(7); /* RWE */ - note PT_NOTE FLAGS(4); /* R__ */ + note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS { diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 9fcc8d9fbb14..f3806a74c478 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -208,7 +208,7 @@ } #define NOTES \ - .notes : { *(.note.*) } :note + .notes : { *(.note.*) } :note #define INITCALLS \ *(.initcall0.init) \ diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h index 67396db141e8..9a1e0674e56c 100644 --- a/include/linux/elfnote.h +++ b/include/linux/elfnote.h @@ -39,12 +39,12 @@ * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef) */ #define ELFNOTE(name, type, desctype, descdata) \ -.pushsection .note.name ; \ +.pushsection .note.name, "",@note ; \ .align 4 ; \ .long 2f - 1f /* namesz */ ; \ .long 4f - 3f /* descsz */ ; \ .long type ; \ -1:.asciz "name" ; \ +1:.asciz #name ; \ 2:.align 4 ; \ 3:desctype descdata ; \ 4:.align 4 ; \ -- cgit v1.2.3 From dc24f0e708c8a6a27b5b967a2599c04973054398 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Fri, 9 Mar 2007 19:59:06 +0100 Subject: kbuild: remove dependency on input.h from file2alias Almost all definitions used by file2alias was already present in mod_devicetable.h. Added the last definition and killed the input.h usage. 
The erroneous include was pointed out by: Jan Engelhardt Signed-off-by: Sam Ravnborg Cc: Jan Engelhardt Cc: Deepak Saxena --- include/linux/input.h | 4 ++++ include/linux/mod_devicetable.h | 1 + scripts/mod/file2alias.c | 21 +++++++++++---------- 3 files changed, 16 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/input.h b/include/linux/input.h index bde65c8a3519..13d510c3a5aa 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1010,6 +1010,10 @@ struct input_dev { #error "EV_MAX and INPUT_DEVICE_ID_EV_MAX do not match" #endif +#if KEY_MIN_INTERESTING != INPUT_DEVICE_ID_KEY_MIN_INTERESTING +#error "KEY_MIN_INTERESTING and INPUT_DEVICE_ID_KEY_MIN_INTERESTING do not match" +#endif + #if KEY_MAX != INPUT_DEVICE_ID_KEY_MAX #error "KEY_MAX and INPUT_DEVICE_ID_KEY_MAX do not match" #endif diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index e96b2dee10bb..af04a555b52c 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -262,6 +262,7 @@ struct i2c_device_id { /* Input */ #define INPUT_DEVICE_ID_EV_MAX 0x1f +#define INPUT_DEVICE_ID_KEY_MIN_INTERESTING 0x71 #define INPUT_DEVICE_ID_KEY_MAX 0x1ff #define INPUT_DEVICE_ID_REL_MAX 0x0f #define INPUT_DEVICE_ID_ABS_MAX 0x3f diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index b2f73ffb40bd..ed1244dd58d0 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -37,7 +37,6 @@ typedef unsigned char __u8; * even potentially has different endianness and word sizes, since * we handle those differences explicitly below */ #include "../../include/linux/mod_devicetable.h" -#include "../../include/linux/input.h" #define ADD(str, sep, cond, field) \ do { \ @@ -416,31 +415,33 @@ static int do_input_entry(const char *filename, struct input_device_id *id, sprintf(alias + strlen(alias), "-e*"); if (id->flags & INPUT_DEVICE_ID_MATCH_EVBIT) - do_input(alias, id->evbit, 0, EV_MAX); + do_input(alias, id->evbit, 0, INPUT_DEVICE_ID_EV_MAX); sprintf(alias + strlen(alias), "k*"); if (id->flags & INPUT_DEVICE_ID_MATCH_KEYBIT) - do_input(alias, id->keybit, KEY_MIN_INTERESTING, KEY_MAX); + do_input(alias, id->keybit, + INPUT_DEVICE_ID_KEY_MIN_INTERESTING, + INPUT_DEVICE_ID_KEY_MAX); sprintf(alias + strlen(alias), "r*"); if (id->flags & INPUT_DEVICE_ID_MATCH_RELBIT) - do_input(alias, id->relbit, 0, REL_MAX); + do_input(alias, id->relbit, 0, INPUT_DEVICE_ID_REL_MAX); sprintf(alias + strlen(alias), "a*"); if (id->flags & INPUT_DEVICE_ID_MATCH_ABSBIT) - do_input(alias, id->absbit, 0, ABS_MAX); + do_input(alias, id->absbit, 0, INPUT_DEVICE_ID_ABS_MAX); sprintf(alias + strlen(alias), "m*"); if (id->flags & INPUT_DEVICE_ID_MATCH_MSCIT) - do_input(alias, id->mscbit, 0, MSC_MAX); + do_input(alias, id->mscbit, 0, INPUT_DEVICE_ID_MSC_MAX); sprintf(alias + strlen(alias), "l*"); if (id->flags & INPUT_DEVICE_ID_MATCH_LEDBIT) - do_input(alias, id->ledbit, 0, LED_MAX); + do_input(alias, id->ledbit, 0, INPUT_DEVICE_ID_LED_MAX); sprintf(alias + strlen(alias), "s*"); if (id->flags & INPUT_DEVICE_ID_MATCH_SNDBIT) - do_input(alias, id->sndbit, 0, SND_MAX); + do_input(alias, id->sndbit, 0, INPUT_DEVICE_ID_SND_MAX); sprintf(alias + strlen(alias), "f*"); if (id->flags & INPUT_DEVICE_ID_MATCH_FFBIT) - do_input(alias, id->ffbit, 0, FF_MAX); + do_input(alias, id->ffbit, 0, INPUT_DEVICE_ID_FF_MAX); sprintf(alias + strlen(alias), "w*"); if (id->flags & INPUT_DEVICE_ID_MATCH_SWBIT) - do_input(alias, id->swbit, 0, SW_MAX); + do_input(alias, id->swbit, 0,
INPUT_DEVICE_ID_SW_MAX); return 1; } -- cgit v1.2.3 From 823bccfc4002296ba88c3ad0f049e1abd8108d30 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 13 Apr 2007 13:15:19 -0700 Subject: remove "struct subsystem" as it is no longer needed We need to work on cleaning up the relationship between kobjects, ksets and ktypes. The removal of 'struct subsystem' is the first step of this, especially as it is not really needed at all. Thanks to Kay for fixing the bugs in this patch. Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-omap1/pm.c | 6 +-- arch/powerpc/kernel/vio.c | 4 +- arch/powerpc/platforms/pseries/power.c | 8 +-- arch/s390/kernel/ipl.c | 32 ++++++------ block/genhd.c | 12 ++--- drivers/base/base.h | 2 + drivers/base/bus.c | 16 +++--- drivers/base/class.c | 18 +++---- drivers/base/core.c | 22 ++++---- drivers/base/firmware.c | 6 +-- drivers/base/power/shutdown.c | 4 +- drivers/base/sys.c | 14 ++--- drivers/firmware/efivars.c | 12 ++--- drivers/input/evdev.c | 4 +- drivers/input/joydev.c | 4 +- drivers/input/mousedev.c | 4 +- drivers/input/tsdev.c | 4 +- drivers/parisc/pdc_stable.c | 94 +++++++++++++++++----------------- drivers/pci/hotplug/acpiphp_ibm.c | 4 +- drivers/pci/hotplug/pci_hotplug_core.c | 4 +- fs/configfs/mount.c | 2 +- fs/debugfs/inode.c | 2 +- fs/dlm/lockspace.c | 2 +- fs/ecryptfs/main.c | 12 ++--- fs/fuse/inode.c | 4 +- fs/gfs2/locking/dlm/sysfs.c | 2 +- fs/gfs2/sys.c | 2 +- fs/ocfs2/cluster/masklog.c | 4 +- fs/ocfs2/cluster/masklog.h | 2 +- fs/ocfs2/cluster/sys.c | 7 ++- fs/partitions/check.c | 6 +-- fs/sysfs/file.c | 11 ++-- include/acpi/acpi_bus.h | 2 +- include/linux/device.h | 8 +-- include/linux/fs.h | 2 +- include/linux/kobject.h | 58 +++++++++------------ include/linux/module.h | 2 +- include/linux/pci_hotplug.h | 2 +- kernel/ksysfs.c | 12 ++--- kernel/module.c | 8 +-- kernel/params.c | 2 + kernel/power/disk.c | 14 ++--- kernel/power/main.c | 10 ++-- kernel/power/power.h | 2 +- lib/kobject.c | 69 +++---------------------- security/inode.c | 2 +- 46 files changed, 231 insertions(+), 292 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c index 0383ab334270..6f4ea4bda5e0 100644 --- a/arch/arm/mach-omap1/pm.c +++ b/arch/arm/mach-omap1/pm.c @@ -72,12 +72,12 @@ static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE]; static unsigned short enable_dyn_sleep = 1; -static ssize_t omap_pm_sleep_while_idle_show(struct subsystem * subsys, char *buf) +static ssize_t omap_pm_sleep_while_idle_show(struct kset *kset, char *buf) { return sprintf(buf, "%hu\n", enable_dyn_sleep); } -static ssize_t omap_pm_sleep_while_idle_store(struct subsystem * subsys, +static ssize_t omap_pm_sleep_while_idle_store(struct kset *kset, const char * buf, size_t n) { @@ -100,7 +100,7 @@ static struct subsys_attribute sleep_while_idle_attr = { .store = omap_pm_sleep_while_idle_store, }; -extern struct subsystem power_subsys; +extern struct kset power_subsys; static void (*omap_sram_idle)(void) = NULL; static void (*omap_sram_suspend)(unsigned long r0, unsigned long r1) = NULL; diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 9eaefac5053f..b2c1b67a10a7 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -37,7 +37,7 @@ #include #include -extern struct subsystem devices_subsys; /* needed for vio_find_name() */ +extern struct kset devices_subsys; /* needed for vio_find_name() */ static struct vio_dev vio_bus_device = { /* fake "parent" device */ .name = vio_bus_device.dev.bus_id, @@ 
-427,7 +427,7 @@ static struct vio_dev *vio_find_name(const char *kobj_name) { struct kobject *found; - found = kset_find_obj(&devices_subsys.kset, kobj_name); + found = kset_find_obj(&devices_subsys, kobj_name); if (!found) return NULL; diff --git a/arch/powerpc/platforms/pseries/power.c b/arch/powerpc/platforms/pseries/power.c index 2624b71df73d..73e69023d90a 100644 --- a/arch/powerpc/platforms/pseries/power.c +++ b/arch/powerpc/platforms/pseries/power.c @@ -28,13 +28,13 @@ unsigned long rtas_poweron_auto; /* default and normal state is 0 */ -static ssize_t auto_poweron_show(struct subsystem *subsys, char *buf) +static ssize_t auto_poweron_show(struct kset *kset, char *buf) { return sprintf(buf, "%lu\n", rtas_poweron_auto); } static ssize_t -auto_poweron_store(struct subsystem *subsys, const char *buf, size_t n) +auto_poweron_store(struct kset *kset, const char *buf, size_t n) { int ret; unsigned long ups_restart; @@ -72,12 +72,12 @@ static int __init pm_init(void) { int error = subsystem_register(&power_subsys); if (!error) - error = sysfs_create_group(&power_subsys.kset.kobj,&attr_group); + error = sysfs_create_group(&power_subsys.kobj, &attr_group); return error; } core_initcall(pm_init); #else -extern struct subsystem power_subsys; +extern struct kset power_subsys; static int __init apo_pm_init(void) { diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 06833ac2b115..0ea048d350d8 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(diag308); /* SYSFS */ #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ -static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \ +static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset, \ char *page) \ { \ return sprintf(page, _format, _value); \ @@ -173,13 +173,13 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \ __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL); #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \ -static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \ +static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset, \ char *page) \ { \ return sprintf(page, _fmt_out, \ (unsigned long long) _value); \ } \ -static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\ +static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset, \ const char *buf, size_t len) \ { \ unsigned long long value; \ @@ -194,12 +194,12 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \ sys_##_prefix##_##_name##_store); #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\ -static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \ +static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset, \ char *page) \ { \ return sprintf(page, _fmt_out, _value); \ } \ -static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\ +static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset, \ const char *buf, size_t len) \ { \ if (sscanf(buf, _fmt_in, _value) != 1) \ @@ -272,14 +272,14 @@ void __init setup_ipl_info(void) struct ipl_info ipl_info; EXPORT_SYMBOL_GPL(ipl_info); -static ssize_t ipl_type_show(struct subsystem *subsys, char *page) +static ssize_t ipl_type_show(struct kset *kset, char *page) { return sprintf(page, "%s\n", ipl_type_str(ipl_info.type)); } static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); -static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page) +static 
ssize_t sys_ipl_device_show(struct kset *kset, char *page) { struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; @@ -371,7 +371,7 @@ static struct attribute_group ipl_fcp_attr_group = { /* CCW ipl device attributes */ -static ssize_t ipl_ccw_loadparm_show(struct subsystem *subsys, char *page) +static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page) { char loadparm[LOADPARM_LEN + 1] = {}; @@ -469,7 +469,7 @@ static void reipl_get_ascii_loadparm(char *loadparm) strstrip(loadparm); } -static ssize_t reipl_ccw_loadparm_show(struct subsystem *subsys, char *page) +static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page) { char buf[LOADPARM_LEN + 1]; @@ -477,7 +477,7 @@ static ssize_t reipl_ccw_loadparm_show(struct subsystem *subsys, char *page) return sprintf(page, "%s\n", buf); } -static ssize_t reipl_ccw_loadparm_store(struct subsystem *subsys, +static ssize_t reipl_ccw_loadparm_store(struct kset *kset, const char *buf, size_t len) { int i, lp_len; @@ -572,12 +572,12 @@ static int reipl_set_type(enum ipl_type type) return 0; } -static ssize_t reipl_type_show(struct subsystem *subsys, char *page) +static ssize_t reipl_type_show(struct kset *kset, char *page) { return sprintf(page, "%s\n", ipl_type_str(reipl_type)); } -static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf, +static ssize_t reipl_type_store(struct kset *kset, const char *buf, size_t len) { int rc = -EINVAL; @@ -665,12 +665,12 @@ static int dump_set_type(enum dump_type type) return 0; } -static ssize_t dump_type_show(struct subsystem *subsys, char *page) +static ssize_t dump_type_show(struct kset *kset, char *page) { return sprintf(page, "%s\n", dump_type_str(dump_type)); } -static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, +static ssize_t dump_type_store(struct kset *kset, const char *buf, size_t len) { int rc = -EINVAL; @@ -697,12 +697,12 @@ static decl_subsys(shutdown_actions, NULL, NULL); /* on panic */ -static ssize_t on_panic_show(struct subsystem *subsys, char *page) +static ssize_t on_panic_show(struct kset *kset, char *page) { return sprintf(page, "%s\n", shutdown_action_str(on_panic_action)); } -static ssize_t on_panic_store(struct subsystem *subsys, const char *buf, +static ssize_t on_panic_store(struct kset *kset, const char *buf, size_t len) { if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0) diff --git a/block/genhd.c b/block/genhd.c index 441432a142f2..b5664440896c 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -17,7 +17,7 @@ #include #include -struct subsystem block_subsys; +struct kset block_subsys; static DEFINE_MUTEX(block_subsys_lock); /* @@ -221,7 +221,7 @@ static void *part_start(struct seq_file *part, loff_t *pos) loff_t l = *pos; mutex_lock(&block_subsys_lock); - list_for_each(p, &block_subsys.kset.list) + list_for_each(p, &block_subsys.list) if (!l--) return list_entry(p, struct gendisk, kobj.entry); return NULL; @@ -231,7 +231,7 @@ static void *part_next(struct seq_file *part, void *v, loff_t *pos) { struct list_head *p = ((struct gendisk *)v)->kobj.entry.next; ++*pos; - return p==&block_subsys.kset.list ? NULL : + return p==&block_subsys.list ? 
NULL : list_entry(p, struct gendisk, kobj.entry); } @@ -246,7 +246,7 @@ static int show_partition(struct seq_file *part, void *v) int n; char buf[BDEVNAME_SIZE]; - if (&sgp->kobj.entry == block_subsys.kset.list.next) + if (&sgp->kobj.entry == block_subsys.list.next) seq_puts(part, "major minor #blocks name\n\n"); /* Don't show non-partitionable removeable devices or empty devices */ @@ -565,7 +565,7 @@ static void *diskstats_start(struct seq_file *part, loff_t *pos) struct list_head *p; mutex_lock(&block_subsys_lock); - list_for_each(p, &block_subsys.kset.list) + list_for_each(p, &block_subsys.list) if (!k--) return list_entry(p, struct gendisk, kobj.entry); return NULL; @@ -575,7 +575,7 @@ static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos) { struct list_head *p = ((struct gendisk *)v)->kobj.entry.next; ++*pos; - return p==&block_subsys.kset.list ? NULL : + return p==&block_subsys.list ? NULL : list_entry(p, struct gendisk, kobj.entry); } diff --git a/drivers/base/base.h b/drivers/base/base.h index d597f2659b23..5512d84452f2 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -45,3 +45,5 @@ struct class_device_attribute *to_class_dev_attr(struct attribute *_attr) extern char *make_class_name(const char *name, struct kobject *kobj); extern void devres_release_all(struct device *dev); + +extern struct kset devices_subsys; diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 1d76e2349654..dca734819e50 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -17,7 +17,7 @@ #include "power/power.h" #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr) -#define to_bus(obj) container_of(obj, struct bus_type, subsys.kset.kobj) +#define to_bus(obj) container_of(obj, struct bus_type, subsys.kobj) /* * sysfs bindings for drivers @@ -123,7 +123,7 @@ int bus_create_file(struct bus_type * bus, struct bus_attribute * attr) { int error; if (get_bus(bus)) { - error = sysfs_create_file(&bus->subsys.kset.kobj, &attr->attr); + error = sysfs_create_file(&bus->subsys.kobj, &attr->attr); put_bus(bus); } else error = -EINVAL; @@ -133,7 +133,7 @@ int bus_create_file(struct bus_type * bus, struct bus_attribute * attr) void bus_remove_file(struct bus_type * bus, struct bus_attribute * attr) { if (get_bus(bus)) { - sysfs_remove_file(&bus->subsys.kset.kobj, &attr->attr); + sysfs_remove_file(&bus->subsys.kobj, &attr->attr); put_bus(bus); } } @@ -397,7 +397,7 @@ static void device_remove_attrs(struct bus_type * bus, struct device * dev) static int make_deprecated_bus_links(struct device *dev) { return sysfs_create_link(&dev->kobj, - &dev->bus->subsys.kset.kobj, "bus"); + &dev->bus->subsys.kobj, "bus"); } static void remove_deprecated_bus_links(struct device *dev) @@ -431,7 +431,7 @@ int bus_add_device(struct device * dev) if (error) goto out_id; error = sysfs_create_link(&dev->kobj, - &dev->bus->subsys.kset.kobj, "subsystem"); + &dev->bus->subsys.kobj, "subsystem"); if (error) goto out_subsys; error = make_deprecated_bus_links(dev); @@ -810,7 +810,7 @@ int bus_register(struct bus_type * bus) BLOCKING_INIT_NOTIFIER_HEAD(&bus->bus_notifier); - retval = kobject_set_name(&bus->subsys.kset.kobj, "%s", bus->name); + retval = kobject_set_name(&bus->subsys.kobj, "%s", bus->name); if (retval) goto out; @@ -820,13 +820,13 @@ int bus_register(struct bus_type * bus) goto out; kobject_set_name(&bus->devices.kobj, "devices"); - bus->devices.subsys = &bus->subsys; + bus->devices.kobj.parent = &bus->subsys.kobj; retval = kset_register(&bus->devices); if (retval) goto 
bus_devices_fail; kobject_set_name(&bus->drivers.kobj, "drivers"); - bus->drivers.subsys = &bus->subsys; + bus->drivers.kobj.parent = &bus->subsys.kobj; bus->drivers.ktype = &ktype_driver; retval = kset_register(&bus->drivers); if (retval) diff --git a/drivers/base/class.c b/drivers/base/class.c index 80bbb2074636..20c4ea6eb50d 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -19,10 +19,8 @@ #include #include "base.h" -extern struct subsystem devices_subsys; - #define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr) -#define to_class(obj) container_of(obj, struct class, subsys.kset.kobj) +#define to_class(obj) container_of(obj, struct class, subsys.kobj) static ssize_t class_attr_show(struct kobject * kobj, struct attribute * attr, char * buf) @@ -80,7 +78,7 @@ int class_create_file(struct class * cls, const struct class_attribute * attr) { int error; if (cls) { - error = sysfs_create_file(&cls->subsys.kset.kobj, &attr->attr); + error = sysfs_create_file(&cls->subsys.kobj, &attr->attr); } else error = -EINVAL; return error; @@ -89,7 +87,7 @@ int class_create_file(struct class * cls, const struct class_attribute * attr) void class_remove_file(struct class * cls, const struct class_attribute * attr) { if (cls) - sysfs_remove_file(&cls->subsys.kset.kobj, &attr->attr); + sysfs_remove_file(&cls->subsys.kobj, &attr->attr); } static struct class *class_get(struct class *cls) @@ -147,7 +145,7 @@ int class_register(struct class * cls) INIT_LIST_HEAD(&cls->interfaces); kset_init(&cls->class_dirs); init_MUTEX(&cls->sem); - error = kobject_set_name(&cls->subsys.kset.kobj, "%s", cls->name); + error = kobject_set_name(&cls->subsys.kobj, "%s", cls->name); if (error) return error; @@ -611,7 +609,7 @@ int class_device_add(struct class_device *class_dev) if (parent_class_dev) class_dev->kobj.parent = &parent_class_dev->kobj; else - class_dev->kobj.parent = &parent_class->subsys.kset.kobj; + class_dev->kobj.parent = &parent_class->subsys.kobj; error = kobject_add(&class_dev->kobj); if (error) @@ -619,7 +617,7 @@ int class_device_add(struct class_device *class_dev) /* add the needed attributes to this device */ error = sysfs_create_link(&class_dev->kobj, - &parent_class->subsys.kset.kobj, "subsystem"); + &parent_class->subsys.kobj, "subsystem"); if (error) goto out3; class_dev->uevent_attr.attr.name = "uevent"; @@ -917,8 +915,8 @@ int __init classes_init(void) /* ick, this is ugly, the things we go through to keep from showing up * in sysfs... 
*/ subsystem_init(&class_obj_subsys); - if (!class_obj_subsys.kset.subsys) - class_obj_subsys.kset.subsys = &class_obj_subsys; + if (!class_obj_subsys.kobj.parent) + class_obj_subsys.kobj.parent = &class_obj_subsys.kobj; return 0; } diff --git a/drivers/base/core.c b/drivers/base/core.c index 59d9816c332e..b78fc1e68264 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -565,7 +565,7 @@ static struct kobject * get_device_parent(struct device *dev, /* Set the parent to the class, not the parent device */ /* this keeps sysfs from having a symlink to make old udevs happy */ if (dev->class) - return &dev->class->subsys.kset.kobj; + return &dev->class->subsys.kobj; else if (parent) return &parent->kobj; @@ -577,7 +577,7 @@ static struct kobject *virtual_device_parent(struct device *dev) static struct kobject *virtual_dir = NULL; if (!virtual_dir) - virtual_dir = kobject_add_dir(&devices_subsys.kset.kobj, "virtual"); + virtual_dir = kobject_add_dir(&devices_subsys.kobj, "virtual"); return virtual_dir; } @@ -711,12 +711,12 @@ int device_add(struct device *dev) } if (dev->class) { - sysfs_create_link(&dev->kobj, &dev->class->subsys.kset.kobj, + sysfs_create_link(&dev->kobj, &dev->class->subsys.kobj, "subsystem"); /* If this is not a "fake" compatible device, then create the * symlink from the class to the device. */ - if (dev->kobj.parent != &dev->class->subsys.kset.kobj) - sysfs_create_link(&dev->class->subsys.kset.kobj, + if (dev->kobj.parent != &dev->class->subsys.kobj) + sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj, dev->bus_id); if (parent) { sysfs_create_link(&dev->kobj, &dev->parent->kobj, @@ -774,8 +774,8 @@ int device_add(struct device *dev) sysfs_remove_link(&dev->kobj, "subsystem"); /* If this is not a "fake" compatible device, remove the * symlink from the class to the device. */ - if (dev->kobj.parent != &dev->class->subsys.kset.kobj) - sysfs_remove_link(&dev->class->subsys.kset.kobj, + if (dev->kobj.parent != &dev->class->subsys.kobj) + sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id); if (parent) { #ifdef CONFIG_SYSFS_DEPRECATED @@ -875,8 +875,8 @@ void device_del(struct device * dev) sysfs_remove_link(&dev->kobj, "subsystem"); /* If this is not a "fake" compatible device, remove the * symlink from the class to the device. 
*/ - if (dev->kobj.parent != &dev->class->subsys.kset.kobj) - sysfs_remove_link(&dev->class->subsys.kset.kobj, + if (dev->kobj.parent != &dev->class->subsys.kobj) + sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id); if (parent) { #ifdef CONFIG_SYSFS_DEPRECATED @@ -1192,9 +1192,9 @@ int device_rename(struct device *dev, char *new_name) #endif if (dev->class) { - sysfs_remove_link(&dev->class->subsys.kset.kobj, + sysfs_remove_link(&dev->class->subsys.kobj, old_symlink_name); - sysfs_create_link(&dev->class->subsys.kset.kobj, &dev->kobj, + sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj, dev->bus_id); } put_device(dev); diff --git a/drivers/base/firmware.c b/drivers/base/firmware.c index cb1b98ae0d58..90c862932169 100644 --- a/drivers/base/firmware.c +++ b/drivers/base/firmware.c @@ -17,13 +17,13 @@ static decl_subsys(firmware, NULL, NULL); -int firmware_register(struct subsystem * s) +int firmware_register(struct kset *s) { - kset_set_kset_s(s, firmware_subsys); + kobj_set_kset_s(s, firmware_subsys); return subsystem_register(s); } -void firmware_unregister(struct subsystem * s) +void firmware_unregister(struct kset *s) { subsystem_unregister(s); } diff --git a/drivers/base/power/shutdown.c b/drivers/base/power/shutdown.c index 58b6f77a1b34..a47ee1b70d20 100644 --- a/drivers/base/power/shutdown.c +++ b/drivers/base/power/shutdown.c @@ -16,8 +16,6 @@ #define to_dev(node) container_of(node, struct device, kobj.entry) -extern struct subsystem devices_subsys; - /** * We handle system devices differently - we suspend and shut them @@ -36,7 +34,7 @@ void device_shutdown(void) { struct device * dev, *devn; - list_for_each_entry_safe_reverse(dev, devn, &devices_subsys.kset.list, + list_for_each_entry_safe_reverse(dev, devn, &devices_subsys.list, kobj.entry) { if (dev->bus && dev->bus->shutdown) { dev_dbg(dev, "shutdown\n"); diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 04e5db445c74..29f1291966c1 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c @@ -25,7 +25,7 @@ #include "base.h" -extern struct subsystem devices_subsys; +extern struct kset devices_subsys; #define to_sysdev(k) container_of(k, struct sys_device, kobj) #define to_sysdev_attr(a) container_of(a, struct sysdev_attribute, attr) @@ -138,7 +138,7 @@ int sysdev_class_register(struct sysdev_class * cls) pr_debug("Registering sysdev class '%s'\n", kobject_name(&cls->kset.kobj)); INIT_LIST_HEAD(&cls->drivers); - cls->kset.subsys = &system_subsys; + cls->kset.kobj.parent = &system_subsys.kobj; kset_set_kset_s(cls, system_subsys); return kset_register(&cls->kset); } @@ -309,7 +309,7 @@ void sysdev_shutdown(void) pr_debug("Shutting Down System Devices\n"); down(&sysdev_drivers_lock); - list_for_each_entry_reverse(cls, &system_subsys.kset.list, + list_for_each_entry_reverse(cls, &system_subsys.list, kset.kobj.entry) { struct sys_device * sysdev; @@ -384,7 +384,7 @@ int sysdev_suspend(pm_message_t state) pr_debug("Suspending System Devices\n"); - list_for_each_entry_reverse(cls, &system_subsys.kset.list, + list_for_each_entry_reverse(cls, &system_subsys.list, kset.kobj.entry) { pr_debug("Suspending type '%s':\n", @@ -457,7 +457,7 @@ gbl_driver: } /* resume other classes */ - list_for_each_entry_continue(cls, &system_subsys.kset.list, + list_for_each_entry_continue(cls, &system_subsys.list, kset.kobj.entry) { list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { pr_debug(" %s\n", kobject_name(&err_dev->kobj)); @@ -483,7 +483,7 @@ int sysdev_resume(void) pr_debug("Resuming System Devices\n"); - 
list_for_each_entry(cls, &system_subsys.kset.list, kset.kobj.entry) { + list_for_each_entry(cls, &system_subsys.list, kset.kobj.entry) { struct sys_device * sysdev; pr_debug("Resuming type '%s':\n", @@ -501,7 +501,7 @@ int sysdev_resume(void) int __init system_bus_init(void) { - system_subsys.kset.kobj.parent = &devices_subsys.kset.kobj; + system_subsys.kobj.parent = &devices_subsys.kobj; return subsystem_register(&system_subsys); } diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index c6281ccd4fe7..1324984a4c35 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -409,7 +409,7 @@ static struct kobj_type ktype_efivar = { }; static ssize_t -dummy(struct subsystem *sub, char *buf) +dummy(struct kset *kset, char *buf) { return -ENODEV; } @@ -422,7 +422,7 @@ efivar_unregister(struct efivar_entry *var) static ssize_t -efivar_create(struct subsystem *sub, const char *buf, size_t count) +efivar_create(struct kset *kset, const char *buf, size_t count) { struct efi_variable *new_var = (struct efi_variable *)buf; struct efivar_entry *search_efivar, *n; @@ -480,7 +480,7 @@ efivar_create(struct subsystem *sub, const char *buf, size_t count) } static ssize_t -efivar_delete(struct subsystem *sub, const char *buf, size_t count) +efivar_delete(struct kset *kset, const char *buf, size_t count) { struct efi_variable *del_var = (struct efi_variable *)buf; struct efivar_entry *search_efivar, *n; @@ -551,11 +551,11 @@ static struct subsys_attribute *var_subsys_attrs[] = { * the efivars driver */ static ssize_t -systab_read(struct subsystem *entry, char *buf) +systab_read(struct kset *kset, char *buf) { char *str = buf; - if (!entry || !buf) + if (!kset || !buf) return -EINVAL; if (efi.mps != EFI_INVALID_TABLE_ADDR) @@ -687,7 +687,7 @@ efivars_init(void) goto out_free; } - kset_set_kset_s(&vars_subsys, efi_subsys); + kobj_set_kset_s(&vars_subsys, efi_subsys); error = subsystem_register(&vars_subsys); diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 6439f378f6cc..6e55b2c5874e 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -650,7 +650,7 @@ static struct input_handle *evdev_connect(struct input_handler *handler, struct dev->cdev.dev, evdev->name); /* temporary symlink to keep userspace happy */ - sysfs_create_link(&input_class.subsys.kset.kobj, &cdev->kobj, + sysfs_create_link(&input_class.subsys.kobj, &cdev->kobj, evdev->name); return &evdev->handle; @@ -661,7 +661,7 @@ static void evdev_disconnect(struct input_handle *handle) struct evdev *evdev = handle->private; struct evdev_list *list; - sysfs_remove_link(&input_class.subsys.kset.kobj, evdev->name); + sysfs_remove_link(&input_class.subsys.kobj, evdev->name); class_device_destroy(&input_class, MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + evdev->minor)); evdev->exist = 0; diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 9f3529ad3fda..d2482e4f71be 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -539,7 +539,7 @@ static struct input_handle *joydev_connect(struct input_handler *handler, struct dev->cdev.dev, joydev->name); /* temporary symlink to keep userspace happy */ - sysfs_create_link(&input_class.subsys.kset.kobj, &cdev->kobj, + sysfs_create_link(&input_class.subsys.kobj, &cdev->kobj, joydev->name); return &joydev->handle; @@ -550,7 +550,7 @@ static void joydev_disconnect(struct input_handle *handle) struct joydev *joydev = handle->private; struct joydev_list *list; - sysfs_remove_link(&input_class.subsys.kset.kobj, joydev->name); + 
sysfs_remove_link(&input_class.subsys.kobj, joydev->name); class_device_destroy(&input_class, MKDEV(INPUT_MAJOR, JOYDEV_MINOR_BASE + joydev->minor)); joydev->exist = 0; diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 664bcc8116fc..074fee429d1b 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -661,7 +661,7 @@ static struct input_handle *mousedev_connect(struct input_handler *handler, stru dev->cdev.dev, mousedev->name); /* temporary symlink to keep userspace happy */ - sysfs_create_link(&input_class.subsys.kset.kobj, &cdev->kobj, + sysfs_create_link(&input_class.subsys.kobj, &cdev->kobj, mousedev->name); return &mousedev->handle; @@ -672,7 +672,7 @@ static void mousedev_disconnect(struct input_handle *handle) struct mousedev *mousedev = handle->private; struct mousedev_list *list; - sysfs_remove_link(&input_class.subsys.kset.kobj, mousedev->name); + sysfs_remove_link(&input_class.subsys.kobj, mousedev->name); class_device_destroy(&input_class, MKDEV(INPUT_MAJOR, MOUSEDEV_MINOR_BASE + mousedev->minor)); mousedev->exist = 0; diff --git a/drivers/input/tsdev.c b/drivers/input/tsdev.c index 0300dca8591d..05dfc10bc079 100644 --- a/drivers/input/tsdev.c +++ b/drivers/input/tsdev.c @@ -420,7 +420,7 @@ static struct input_handle *tsdev_connect(struct input_handler *handler, dev->cdev.dev, tsdev->name); /* temporary symlink to keep userspace happy */ - sysfs_create_link(&input_class.subsys.kset.kobj, &cdev->kobj, + sysfs_create_link(&input_class.subsys.kobj, &cdev->kobj, tsdev->name); return &tsdev->handle; @@ -431,7 +431,7 @@ static void tsdev_disconnect(struct input_handle *handle) struct tsdev *tsdev = handle->private; struct tsdev_list *list; - sysfs_remove_link(&input_class.subsys.kset.kobj, tsdev->name); + sysfs_remove_link(&input_class.subsys.kobj, tsdev->name); class_device_destroy(&input_class, MKDEV(INPUT_MAJOR, TSDEV_MINOR_BASE + tsdev->minor)); tsdev->exist = 0; diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index ea1b7a63598e..815e445c3125 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -520,17 +520,17 @@ static struct pdcspath_entry *pdcspath_entries[] = { /** * pdcs_size_read - Stable Storage size output. - * @entry: An allocated and populated subsytem struct. We don't use it tho. + * @kset: An allocated and populated struct kset. We don't use it tho. * @buf: The output buffer to write to. */ static ssize_t -pdcs_size_read(struct subsystem *entry, char *buf) +pdcs_size_read(struct kset *kset, char *buf) { char *out = buf; - - if (!entry || !buf) + + if (!kset || !buf) return -EINVAL; - + /* show the size of the stable storage */ out += sprintf(out, "%ld\n", pdcs_size); @@ -539,17 +539,17 @@ pdcs_size_read(struct subsystem *entry, char *buf) /** * pdcs_auto_read - Stable Storage autoboot/search flag output. - * @entry: An allocated and populated subsytem struct. We don't use it tho. + * @kset: An allocated and populated struct kset. We don't use it tho. * @buf: The output buffer to write to. * @knob: The PF_AUTOBOOT or PF_AUTOSEARCH flag */ static ssize_t -pdcs_auto_read(struct subsystem *entry, char *buf, int knob) +pdcs_auto_read(struct kset *kset, char *buf, int knob) { char *out = buf; struct pdcspath_entry *pathentry; - if (!entry || !buf) + if (!kset || !buf) return -EINVAL; /* Current flags are stored in primary boot path entry */ @@ -565,40 +565,40 @@ pdcs_auto_read(struct subsystem *entry, char *buf, int knob) /** * pdcs_autoboot_read - Stable Storage autoboot flag output. 
- * @entry: An allocated and populated subsytem struct. We don't use it tho. + * @kset: An allocated and populated struct kset. We don't use it tho. * @buf: The output buffer to write to. */ static inline ssize_t -pdcs_autoboot_read(struct subsystem *entry, char *buf) +pdcs_autoboot_read(struct kset *kset, char *buf) { - return pdcs_auto_read(entry, buf, PF_AUTOBOOT); + return pdcs_auto_read(kset, buf, PF_AUTOBOOT); } /** * pdcs_autosearch_read - Stable Storage autoboot flag output. - * @entry: An allocated and populated subsytem struct. We don't use it tho. + * @kset: An allocated and populated struct kset. We don't use it tho. * @buf: The output buffer to write to. */ static inline ssize_t -pdcs_autosearch_read(struct subsystem *entry, char *buf) +pdcs_autosearch_read(struct kset *kset, char *buf) { - return pdcs_auto_read(entry, buf, PF_AUTOSEARCH); + return pdcs_auto_read(kset, buf, PF_AUTOSEARCH); } /** * pdcs_timer_read - Stable Storage timer count output (in seconds). - * @entry: An allocated and populated subsytem struct. We don't use it tho. + * @kset: An allocated and populated struct kset. We don't use it tho. * @buf: The output buffer to write to. * * The value of the timer field correponds to a number of seconds in powers of 2. */ static ssize_t -pdcs_timer_read(struct subsystem *entry, char *buf) +pdcs_timer_read(struct kset *kset, char *buf) { char *out = buf; struct pdcspath_entry *pathentry; - if (!entry || !buf) + if (!kset || !buf) return -EINVAL; /* Current flags are stored in primary boot path entry */ @@ -615,15 +615,15 @@ pdcs_timer_read(struct subsystem *entry, char *buf) /** * pdcs_osid_read - Stable Storage OS ID register output. - * @entry: An allocated and populated subsytem struct. We don't use it tho. + * @kset: An allocated and populated struct kset. We don't use it tho. * @buf: The output buffer to write to. */ static ssize_t -pdcs_osid_read(struct subsystem *entry, char *buf) +pdcs_osid_read(struct kset *kset, char *buf) { char *out = buf; - if (!entry || !buf) + if (!kset || !buf) return -EINVAL; out += sprintf(out, "%s dependent data (0x%.4x)\n", @@ -634,18 +634,18 @@ pdcs_osid_read(struct subsystem *entry, char *buf) /** * pdcs_osdep1_read - Stable Storage OS-Dependent data area 1 output. - * @entry: An allocated and populated subsytem struct. We don't use it tho. + * @kset: An allocated and populated struct kset. We don't use it tho. * @buf: The output buffer to write to. * * This can hold 16 bytes of OS-Dependent data. */ static ssize_t -pdcs_osdep1_read(struct subsystem *entry, char *buf) +pdcs_osdep1_read(struct kset *kset, char *buf) { char *out = buf; u32 result[4]; - if (!entry || !buf) + if (!kset || !buf) return -EINVAL; if (pdc_stable_read(PDCS_ADDR_OSD1, &result, sizeof(result)) != PDC_OK) @@ -661,18 +661,18 @@ pdcs_osdep1_read(struct subsystem *entry, char *buf) /** * pdcs_diagnostic_read - Stable Storage Diagnostic register output. - * @entry: An allocated and populated subsytem struct. We don't use it tho. + * @kset: An allocated and populated struct kset. We don't use it tho. * @buf: The output buffer to write to. * * I have NFC how to interpret the content of that register ;-). 
*/ static ssize_t -pdcs_diagnostic_read(struct subsystem *entry, char *buf) +pdcs_diagnostic_read(struct kset *kset, char *buf) { char *out = buf; u32 result; - if (!entry || !buf) + if (!kset || !buf) return -EINVAL; /* get diagnostic */ @@ -686,18 +686,18 @@ pdcs_diagnostic_read(struct subsystem *entry, char *buf) /** * pdcs_fastsize_read - Stable Storage FastSize register output. - * @entry: An allocated and populated subsytem struct. We don't use it tho. + * @kset: An allocated and populated struct kset. We don't use it tho. * @buf: The output buffer to write to. * * This register holds the amount of system RAM to be tested during boot sequence. */ static ssize_t -pdcs_fastsize_read(struct subsystem *entry, char *buf) +pdcs_fastsize_read(struct kset *kset, char *buf) { char *out = buf; u32 result; - if (!entry || !buf) + if (!kset || !buf) return -EINVAL; /* get fast-size */ @@ -715,13 +715,13 @@ pdcs_fastsize_read(struct subsystem *entry, char *buf) /** * pdcs_osdep2_read - Stable Storage OS-Dependent data area 2 output. - * @entry: An allocated and populated subsytem struct. We don't use it tho. + * @kset: An allocated and populated struct kset. We don't use it tho. * @buf: The output buffer to write to. * * This can hold pdcs_size - 224 bytes of OS-Dependent data, when available. */ static ssize_t -pdcs_osdep2_read(struct subsystem *entry, char *buf) +pdcs_osdep2_read(struct kset *kset, char *buf) { char *out = buf; unsigned long size; @@ -733,7 +733,7 @@ pdcs_osdep2_read(struct subsystem *entry, char *buf) size = pdcs_size - 224; - if (!entry || !buf) + if (!kset || !buf) return -EINVAL; for (i=0; i #include -int mlog_sys_init(struct subsystem *o2cb_subsys); +int mlog_sys_init(struct kset *o2cb_subsys); void mlog_sys_shutdown(void); #endif /* O2CLUSTER_MASKLOG_H */ diff --git a/fs/ocfs2/cluster/sys.c b/fs/ocfs2/cluster/sys.c index 1d9f6acafa2e..64f6f378fd09 100644 --- a/fs/ocfs2/cluster/sys.c +++ b/fs/ocfs2/cluster/sys.c @@ -42,7 +42,6 @@ struct o2cb_attribute { #define O2CB_ATTR(_name, _mode, _show, _store) \ struct o2cb_attribute o2cb_attr_##_name = __ATTR(_name, _mode, _show, _store) -#define to_o2cb_subsys(k) container_of(to_kset(k), struct subsystem, kset) #define to_o2cb_attr(_attr) container_of(_attr, struct o2cb_attribute, attr) static ssize_t o2cb_interface_revision_show(char *buf) @@ -79,7 +78,7 @@ static ssize_t o2cb_show(struct kobject * kobj, struct attribute * attr, char * buffer) { struct o2cb_attribute *o2cb_attr = to_o2cb_attr(attr); - struct subsystem *sbs = to_o2cb_subsys(kobj); + struct kset *sbs = to_kset(kobj); BUG_ON(sbs != &o2cb_subsys); @@ -93,7 +92,7 @@ o2cb_store(struct kobject * kobj, struct attribute * attr, const char * buffer, size_t count) { struct o2cb_attribute *o2cb_attr = to_o2cb_attr(attr); - struct subsystem *sbs = to_o2cb_subsys(kobj); + struct kset *sbs = to_kset(kobj); BUG_ON(sbs != &o2cb_subsys); @@ -112,7 +111,7 @@ int o2cb_sys_init(void) { int ret; - o2cb_subsys.kset.kobj.ktype = &o2cb_subsys_type; + o2cb_subsys.kobj.ktype = &o2cb_subsys_type; ret = subsystem_register(&o2cb_subsys); if (ret) return ret; diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 8a7d0035ad7a..f01572fca02f 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -312,7 +312,7 @@ static struct attribute * default_attrs[] = { NULL, }; -extern struct subsystem block_subsys; +extern struct kset block_subsys; static void part_release(struct kobject *kobj) { @@ -388,7 +388,7 @@ void add_partition(struct gendisk *disk, int part, sector_t start, 
sector_t len, kobject_add(&p->kobj); if (!disk->part_uevent_suppress) kobject_uevent(&p->kobj, KOBJ_ADD); - sysfs_create_link(&p->kobj, &block_subsys.kset.kobj, "subsystem"); + sysfs_create_link(&p->kobj, &block_subsys.kobj, "subsystem"); if (flags & ADDPART_FLAG_WHOLEDISK) { static struct attribute addpartattr = { .name = "whole_disk", @@ -444,7 +444,7 @@ static int disk_sysfs_symlinks(struct gendisk *disk) goto err_out_dev_link; } - err = sysfs_create_link(&disk->kobj, &block_subsys.kset.kobj, + err = sysfs_create_link(&disk->kobj, &block_subsys.kobj, "subsystem"); if (err) goto err_out_disk_name_lnk; diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index db0413a411d6..0e637adc2b87 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -13,8 +13,7 @@ #include "sysfs.h" -#define to_subsys(k) container_of(k,struct subsystem,kset.kobj) -#define to_sattr(a) container_of(a,struct subsys_attribute,attr) +#define to_sattr(a) container_of(a,struct subsys_attribute, attr) /* * Subsystem file operations. @@ -24,12 +23,12 @@ static ssize_t subsys_attr_show(struct kobject * kobj, struct attribute * attr, char * page) { - struct subsystem * s = to_subsys(kobj); + struct kset *kset = to_kset(kobj); struct subsys_attribute * sattr = to_sattr(attr); ssize_t ret = -EIO; if (sattr->show) - ret = sattr->show(s,page); + ret = sattr->show(kset, page); return ret; } @@ -37,12 +36,12 @@ static ssize_t subsys_attr_store(struct kobject * kobj, struct attribute * attr, const char * page, size_t count) { - struct subsystem * s = to_subsys(kobj); + struct kset *kset = to_kset(kobj); struct subsys_attribute * sattr = to_sattr(attr); ssize_t ret = -EIO; if (sattr->store) - ret = sattr->store(s,page,count); + ret = sattr->store(kset, page, count); return ret; } diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 0d9f984a60a1..16c3c441256e 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -316,7 +316,7 @@ struct acpi_bus_event { u32 data; }; -extern struct subsystem acpi_subsys; +extern struct kset acpi_subsys; /* * External Functions diff --git a/include/linux/device.h b/include/linux/device.h index a0cd2ced31a9..ee292fe87cb2 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -53,7 +53,7 @@ struct bus_type { const char * name; struct module * owner; - struct subsystem subsys; + struct kset subsys; struct kset drivers; struct kset devices; struct klist klist_devices; @@ -179,7 +179,7 @@ struct class { const char * name; struct module * owner; - struct subsystem subsys; + struct kset subsys; struct list_head children; struct list_head devices; struct list_head interfaces; @@ -559,8 +559,8 @@ extern void device_shutdown(void); /* drivers/base/firmware.c */ -extern int __must_check firmware_register(struct subsystem *); -extern void firmware_unregister(struct subsystem *); +extern int __must_check firmware_register(struct kset *); +extern void firmware_unregister(struct kset *); /* debugging and troubleshooting/diagnostic helpers. 
*/ extern const char *dev_driver_string(struct device *dev); diff --git a/include/linux/fs.h b/include/linux/fs.h index 095a9c9a64fb..7c0077f06e24 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1416,7 +1416,7 @@ extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *, extern int vfs_statfs(struct dentry *, struct kstatfs *); /* /sys/fs */ -extern struct subsystem fs_subsys; +extern struct kset fs_subsys; #define FLOCK_VERIFY_READ 1 #define FLOCK_VERIFY_WRITE 2 diff --git a/include/linux/kobject.h b/include/linux/kobject.h index eb0e63ef297f..c288e41ba331 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -124,7 +124,6 @@ struct kset_uevent_ops { }; struct kset { - struct subsystem * subsys; struct kobj_type * ktype; struct list_head list; spinlock_t list_lock; @@ -171,32 +170,23 @@ extern struct kobject * kset_find_obj(struct kset *, const char *); #define set_kset_name(str) .kset = { .kobj = { .name = str } } - -struct subsystem { - struct kset kset; -}; - #define decl_subsys(_name,_type,_uevent_ops) \ -struct subsystem _name##_subsys = { \ - .kset = { \ - .kobj = { .name = __stringify(_name) }, \ - .ktype = _type, \ - .uevent_ops =_uevent_ops, \ - } \ +struct kset _name##_subsys = { \ + .kobj = { .name = __stringify(_name) }, \ + .ktype = _type, \ + .uevent_ops =_uevent_ops, \ } #define decl_subsys_name(_varname,_name,_type,_uevent_ops) \ -struct subsystem _varname##_subsys = { \ - .kset = { \ - .kobj = { .name = __stringify(_name) }, \ - .ktype = _type, \ - .uevent_ops =_uevent_ops, \ - } \ +struct kset _varname##_subsys = { \ + .kobj = { .name = __stringify(_name) }, \ + .ktype = _type, \ + .uevent_ops =_uevent_ops, \ } /* The global /sys/kernel/ subsystem for people to chain off of */ -extern struct subsystem kernel_subsys; +extern struct kset kernel_subsys; /* The global /sys/hypervisor/ subsystem */ -extern struct subsystem hypervisor_subsys; +extern struct kset hypervisor_subsys; /** * Helpers for setting the kset of registered objects. @@ -214,7 +204,7 @@ extern struct subsystem hypervisor_subsys; */ #define kobj_set_kset_s(obj,subsys) \ - (obj)->kobj.kset = &(subsys).kset + (obj)->kobj.kset = &(subsys) /** * kset_set_kset_s(obj,subsys) - set kset for embedded kset. @@ -228,7 +218,7 @@ extern struct subsystem hypervisor_subsys; */ #define kset_set_kset_s(obj,subsys) \ - (obj)->kset.kobj.kset = &(subsys).kset + (obj)->kset.kobj.kset = &(subsys) /** * subsys_set_kset(obj,subsys) - set kset for subsystem @@ -241,29 +231,31 @@ extern struct subsystem hypervisor_subsys; */ #define subsys_set_kset(obj,_subsys) \ - (obj)->subsys.kset.kobj.kset = &(_subsys).kset + (obj)->subsys.kobj.kset = &(_subsys) -extern void subsystem_init(struct subsystem *); -extern int __must_check subsystem_register(struct subsystem *); -extern void subsystem_unregister(struct subsystem *); +extern void subsystem_init(struct kset *); +extern int __must_check subsystem_register(struct kset *); +extern void subsystem_unregister(struct kset *); -static inline struct subsystem * subsys_get(struct subsystem * s) +static inline struct kset *subsys_get(struct kset *s) { - return s ? 
container_of(kset_get(&s->kset),struct subsystem,kset) : NULL; + if (s) + return kset_get(s); + return NULL; } -static inline void subsys_put(struct subsystem * s) +static inline void subsys_put(struct kset *s) { - kset_put(&s->kset); + kset_put(s); } struct subsys_attribute { struct attribute attr; - ssize_t (*show)(struct subsystem *, char *); - ssize_t (*store)(struct subsystem *, const char *, size_t); + ssize_t (*show)(struct kset *, char *); + ssize_t (*store)(struct kset *, const char *, size_t); }; -extern int __must_check subsys_create_file(struct subsystem * , +extern int __must_check subsys_create_file(struct kset *, struct subsys_attribute *); #if defined(CONFIG_HOTPLUG) diff --git a/include/linux/module.h b/include/linux/module.h index 95679eb8571e..f0b0faf42d5d 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -568,7 +568,7 @@ struct device_driver; #ifdef CONFIG_SYSFS struct module; -extern struct subsystem module_subsys; +extern struct kset module_subsys; int mod_sysfs_init(struct module *mod); int mod_sysfs_setup(struct module *mod, diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index a675a05c4091..ab4cb6ecd47c 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -174,7 +174,7 @@ extern int pci_hp_register (struct hotplug_slot *slot); extern int pci_hp_deregister (struct hotplug_slot *slot); extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, struct hotplug_slot_info *info); -extern struct subsystem pci_hotplug_slots_subsys; +extern struct kset pci_hotplug_slots_subsys; /* PCI Setting Record (Type 0) */ struct hpp_type0 { diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index e0ffe4ab0917..559deca5ed15 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -24,18 +24,18 @@ static struct subsys_attribute _name##_attr = \ #if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET) /* current uevent sequence number */ -static ssize_t uevent_seqnum_show(struct subsystem *subsys, char *page) +static ssize_t uevent_seqnum_show(struct kset *kset, char *page) { return sprintf(page, "%llu\n", (unsigned long long)uevent_seqnum); } KERNEL_ATTR_RO(uevent_seqnum); /* uevent helper program, used during early boo */ -static ssize_t uevent_helper_show(struct subsystem *subsys, char *page) +static ssize_t uevent_helper_show(struct kset *kset, char *page) { return sprintf(page, "%s\n", uevent_helper); } -static ssize_t uevent_helper_store(struct subsystem *subsys, const char *page, size_t count) +static ssize_t uevent_helper_store(struct kset *kset, const char *page, size_t count) { if (count+1 > UEVENT_HELPER_PATH_LEN) return -ENOENT; @@ -49,13 +49,13 @@ KERNEL_ATTR_RW(uevent_helper); #endif #ifdef CONFIG_KEXEC -static ssize_t kexec_loaded_show(struct subsystem *subsys, char *page) +static ssize_t kexec_loaded_show(struct kset *kset, char *page) { return sprintf(page, "%d\n", !!kexec_image); } KERNEL_ATTR_RO(kexec_loaded); -static ssize_t kexec_crash_loaded_show(struct subsystem *subsys, char *page) +static ssize_t kexec_crash_loaded_show(struct kset *kset, char *page) { return sprintf(page, "%d\n", !!kexec_crash_image); } @@ -85,7 +85,7 @@ static int __init ksysfs_init(void) { int error = subsystem_register(&kernel_subsys); if (!error) - error = sysfs_create_group(&kernel_subsys.kset.kobj, + error = sysfs_create_group(&kernel_subsys.kobj, &kernel_attr_group); return error; diff --git a/kernel/module.c b/kernel/module.c index 9da5af668a20..ff982ec435fc 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -45,6 
+45,8 @@ #include #include +extern int module_sysfs_initialized; + #if 0 #define DEBUGP printk #else @@ -1117,8 +1119,8 @@ int mod_sysfs_init(struct module *mod) { int err; - if (!module_subsys.kset.subsys) { - printk(KERN_ERR "%s: module_subsys not initialized\n", + if (!module_sysfs_initialized) { + printk(KERN_ERR "%s: module sysfs not initialized\n", mod->name); err = -EINVAL; goto out; @@ -2385,7 +2387,7 @@ void module_add_driver(struct module *mod, struct device_driver *drv) struct kobject *mkobj; /* Lookup built-in module entry in /sys/modules */ - mkobj = kset_find_obj(&module_subsys.kset, drv->mod_name); + mkobj = kset_find_obj(&module_subsys, drv->mod_name); if (mkobj) { mk = container_of(mkobj, struct module_kobject, kobj); /* remember our module structure */ diff --git a/kernel/params.c b/kernel/params.c index 1fc4ac746cd8..312172320b4c 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -691,6 +691,7 @@ static struct kset_uevent_ops module_uevent_ops = { }; decl_subsys(module, &module_ktype, &module_uevent_ops); +int module_sysfs_initialized; static struct kobj_type module_ktype = { .sysfs_ops = &module_sysfs_ops, @@ -709,6 +710,7 @@ static int __init param_sysfs_init(void) __FILE__, __LINE__, ret); return ret; } + module_sysfs_initialized = 1; param_sysfs_builtin(); diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 02e4fb69111a..8df51c23bba4 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -322,13 +322,13 @@ static const char * const pm_disk_modes[] = { * supports it (as determined from pm_ops->pm_disk_mode). */ -static ssize_t disk_show(struct subsystem * subsys, char * buf) +static ssize_t disk_show(struct kset *kset, char *buf) { return sprintf(buf, "%s\n", pm_disk_modes[pm_disk_mode]); } -static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n) +static ssize_t disk_store(struct kset *kset, const char *buf, size_t n) { int error = 0; int i; @@ -373,13 +373,13 @@ static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n) power_attr(disk); -static ssize_t resume_show(struct subsystem * subsys, char *buf) +static ssize_t resume_show(struct kset *kset, char *buf) { return sprintf(buf,"%d:%d\n", MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device)); } -static ssize_t resume_store(struct subsystem *subsys, const char *buf, size_t n) +static ssize_t resume_store(struct kset *kset, const char *buf, size_t n) { unsigned int maj, min; dev_t res; @@ -405,12 +405,12 @@ static ssize_t resume_store(struct subsystem *subsys, const char *buf, size_t n) power_attr(resume); -static ssize_t image_size_show(struct subsystem * subsys, char *buf) +static ssize_t image_size_show(struct kset *kset, char *buf) { return sprintf(buf, "%lu\n", image_size); } -static ssize_t image_size_store(struct subsystem * subsys, const char * buf, size_t n) +static ssize_t image_size_store(struct kset *kset, const char *buf, size_t n) { unsigned long size; @@ -439,7 +439,7 @@ static struct attribute_group attr_group = { static int __init pm_disk_init(void) { - return sysfs_create_group(&power_subsys.kset.kobj,&attr_group); + return sysfs_create_group(&power_subsys.kobj, &attr_group); } core_initcall(pm_disk_init); diff --git a/kernel/power/main.c b/kernel/power/main.c index 72419a3b1beb..b21c2a56f960 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -285,7 +285,7 @@ decl_subsys(power,NULL,NULL); * proper enumerated value, and initiates a suspend transition. 
*/ -static ssize_t state_show(struct subsystem * subsys, char * buf) +static ssize_t state_show(struct kset *kset, char *buf) { int i; char * s = buf; @@ -298,7 +298,7 @@ static ssize_t state_show(struct subsystem * subsys, char * buf) return (s - buf); } -static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n) +static ssize_t state_store(struct kset *kset, const char *buf, size_t n) { suspend_state_t state = PM_SUSPEND_STANDBY; const char * const *s; @@ -325,13 +325,13 @@ power_attr(state); #ifdef CONFIG_PM_TRACE int pm_trace_enabled; -static ssize_t pm_trace_show(struct subsystem * subsys, char * buf) +static ssize_t pm_trace_show(struct kset *kset, char *buf) { return sprintf(buf, "%d\n", pm_trace_enabled); } static ssize_t -pm_trace_store(struct subsystem * subsys, const char * buf, size_t n) +pm_trace_store(struct kset *kset, const char *buf, size_t n) { int val; @@ -365,7 +365,7 @@ static int __init pm_init(void) { int error = subsystem_register(&power_subsys); if (!error) - error = sysfs_create_group(&power_subsys.kset.kobj,&attr_group); + error = sysfs_create_group(&power_subsys.kobj,&attr_group); return error; } diff --git a/kernel/power/power.h b/kernel/power/power.h index eb461b816bf4..5f842c3efc4b 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -35,7 +35,7 @@ static struct subsys_attribute _name##_attr = { \ .store = _name##_store, \ } -extern struct subsystem power_subsys; +extern struct kset power_subsys; /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; diff --git a/lib/kobject.c b/lib/kobject.c index cecf2fbede3e..fc5f3f6e7329 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -582,22 +582,10 @@ void kset_init(struct kset * k) /** * kset_add - add a kset object to the hierarchy. * @k: kset. - * - * Simply, this adds the kset's embedded kobject to the - * hierarchy. - * We also try to make sure that the kset's embedded kobject - * has a parent before it is added. We only care if the embedded - * kobject is not part of a kset itself, since kobject_add() - * assigns a parent in that case. - * If that is the case, and the kset has a controlling subsystem, - * then we set the kset's parent to be said subsystem. */ int kset_add(struct kset * k) { - if (!k->kobj.parent && !k->kobj.kset && k->subsys) - k->kobj.parent = &k->subsys->kset.kobj; - return kobject_add(&k->kobj); } @@ -656,53 +644,28 @@ struct kobject * kset_find_obj(struct kset * kset, const char * name) return ret; } - -void subsystem_init(struct subsystem * s) +void subsystem_init(struct kset *s) { - kset_init(&s->kset); + kset_init(s); } -/** - * subsystem_register - register a subsystem. - * @s: the subsystem we're registering. - * - * Once we register the subsystem, we want to make sure that - * the kset points back to this subsystem. - */ - -int subsystem_register(struct subsystem * s) +int subsystem_register(struct kset *s) { - int error; - - if (!s) - return -EINVAL; - - subsystem_init(s); - pr_debug("subsystem %s: registering\n",s->kset.kobj.name); - - if (!(error = kset_add(&s->kset))) { - if (!s->kset.subsys) - s->kset.subsys = s; - } - return error; + return kset_register(s); } -void subsystem_unregister(struct subsystem * s) +void subsystem_unregister(struct kset *s) { - if (!s) - return; - pr_debug("subsystem %s: unregistering\n",s->kset.kobj.name); - kset_unregister(&s->kset); + kset_unregister(s); } - /** * subsystem_create_file - export sysfs attribute file. * @s: subsystem. * @a: subsystem attribute descriptor. 
*/ -int subsys_create_file(struct subsystem * s, struct subsys_attribute * a) +int subsys_create_file(struct kset *s, struct subsys_attribute *a) { int error = 0; @@ -710,28 +673,12 @@ int subsys_create_file(struct subsystem * s, struct subsys_attribute * a) return -EINVAL; if (subsys_get(s)) { - error = sysfs_create_file(&s->kset.kobj,&a->attr); + error = sysfs_create_file(&s->kobj, &a->attr); subsys_put(s); } return error; } - -/** - * subsystem_remove_file - remove sysfs attribute file. - * @s: subsystem. - * @a: attribute desciptor. - */ -#if 0 -void subsys_remove_file(struct subsystem * s, struct subsys_attribute * a) -{ - if (subsys_get(s)) { - sysfs_remove_file(&s->kset.kobj,&a->attr); - subsys_put(s); - } -} -#endif /* 0 */ - EXPORT_SYMBOL(kobject_init); EXPORT_SYMBOL(kobject_register); EXPORT_SYMBOL(kobject_unregister); diff --git a/security/inode.c b/security/inode.c index d7ecf89fbc74..307211ac7346 100644 --- a/security/inode.c +++ b/security/inode.c @@ -321,7 +321,7 @@ static int __init securityfs_init(void) { int retval; - kset_set_kset_s(&security_subsys, kernel_subsys); + kobj_set_kset_s(&security_subsys, kernel_subsys); retval = subsystem_register(&security_subsys); if (retval) return retval; -- cgit v1.2.3 From f7bdd12d234d9064bd0aa1b5610508959120a9b4 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 6 Apr 2007 16:39:36 -0500 Subject: pci: New PCI-E reset API Adds a new API which can be used to issue various types of PCI-E reset, including PCI-E warm reset and PCI-E hot reset. This is needed for an ipr PCI-E adapter which does not properly implement BIST. Running BIST on this adapter results in PCI-E errors. The only reliable reset mechanism that exists on this hardware is PCI Fundamental reset (warm reset). Since driving this type of reset is architecture unique, this provides the necessary hooks for architectures to add this support. Signed-off-by: Brian King Acked-by: Linas Vepstas Signed-off-by: Greg Kroah-Hartman --- drivers/pci/pci.c | 29 +++++++++++++++++++++++++++++ include/linux/pci.h | 14 ++++++++++++++ 2 files changed, 43 insertions(+) (limited to 'include/linux') diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 2a458279327a..2086cd1b5e0e 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -891,6 +891,34 @@ pci_disable_device(struct pci_dev *dev) pcibios_disable_device(dev); } +/** + * pcibios_set_pcie_reset_state - set reset state for device dev + * @dev: the PCI-E device reset + * @state: Reset state to enter into + * + * + * Sets the PCI-E reset state for the device. This is the default + * implementation. Architecture implementations can override this. + */ +int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, + enum pcie_reset_state state) +{ + return -EINVAL; +} + +/** + * pci_set_pcie_reset_state - set reset state for device dev + * @dev: the PCI-E device reset + * @state: Reset state to enter into + * + * + * Sets the PCI reset state for the device. 
+ */ +int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) +{ + return pcibios_set_pcie_reset_state(dev, state); +} + /** * pci_enable_wake - enable PCI device as wakeup event source * @dev: PCI device affected @@ -1427,4 +1455,5 @@ EXPORT_SYMBOL(pci_set_power_state); EXPORT_SYMBOL(pci_save_state); EXPORT_SYMBOL(pci_restore_state); EXPORT_SYMBOL(pci_enable_wake); +EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); diff --git a/include/linux/pci.h b/include/linux/pci.h index 972491089ac9..3aba02a04792 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -96,6 +96,19 @@ enum pci_channel_state { pci_channel_io_perm_failure = (__force pci_channel_state_t) 3, }; +typedef unsigned int __bitwise pcie_reset_state_t; + +enum pcie_reset_state { + /* Reset is NOT asserted (Use to deassert reset) */ + pcie_deassert_reset = (__force pcie_reset_state_t) 1, + + /* Use #PERST to reset PCI-E device */ + pcie_warm_reset = (__force pcie_reset_state_t) 2, + + /* Use PCI-E Hot Reset to reset device */ + pcie_hot_reset = (__force pcie_reset_state_t) 3 +}; + typedef unsigned short __bitwise pci_bus_flags_t; enum pci_bus_flags { PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, @@ -532,6 +545,7 @@ static inline int pci_is_managed(struct pci_dev *pdev) void pci_disable_device(struct pci_dev *dev); void pci_set_master(struct pci_dev *dev); +int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); #define HAVE_PCI_SET_MWI int __must_check pci_set_mwi(struct pci_dev *dev); void pci_clear_mwi(struct pci_dev *dev); -- cgit v1.2.3 From 0da0ead90122578ef6e4afba9ba4bcd3455fd8e8 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 1 Apr 2007 21:13:58 +0400 Subject: PCI: define pci_request/release_regions() for CONFIG_PCI=n Balance declarations of pci_request_regions() and pci_release_regions() with empty inline definitions for the CONFIG_PCI=n case -- otherwise my patch to drivers/net/3c59x.c in the -mm tree doesn't compile. :-) Signed-off-by: Sergei Shtylyov Signed-off-by: Greg Kroah-Hartman --- include/linux/pci.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci.h b/include/linux/pci.h index 3aba02a04792..99d45751830c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -744,6 +744,9 @@ static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { static inline pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { return PCI_D0; } static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; } +static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } +static inline void pci_release_regions(struct pci_dev *dev) { } + #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) static inline void pci_block_user_cfg_access(struct pci_dev *dev) { } -- cgit v1.2.3 From c9953a73e92df11edd812d863ff741877ea9e58c Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 5 Apr 2007 17:19:08 +1000 Subject: MSI: Add an arch_msi_check_device() Add an arch_check_device(), which gives archs a chance to check the input to pci_enable_msi/x. The arch might be interested in the value of nvec so pass it in. Propagate the error value returned from the arch routine out to the caller. 
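As an illustrative sketch only (not part of this patch), an architecture override of the new weak hook could look like the following; the 8-vector limit and the bridge-vendor quirk are invented for the example:

#include <linux/pci.h>
#include <linux/errno.h>

/* Hypothetical arch override; the vector limit and the quirk are made up. */
int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	/* assume this platform can route at most 8 MSI vectors per device */
	if (nvec > 8)
		return -ENOSPC;

	/* assume MSI-X is known broken behind one particular (made-up) bridge */
	if (type == PCI_CAP_ID_MSIX &&
	    dev->bus->self && dev->bus->self->vendor == 0xdead)
		return -EINVAL;

	return 0;
}
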
Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- drivers/pci/msi.c | 29 +++++++++++++++++++++++++---- include/linux/msi.h | 1 + 2 files changed, 26 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 34087af68833..5902c00f4fce 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -459,15 +459,17 @@ static int msix_capability_init(struct pci_dev *dev, /** * pci_msi_check_device - check whether MSI may be enabled on a device * @dev: pointer to the pci_dev data structure of MSI device function + * @nvec: how many MSIs have been requested ? * @type: are we checking for MSI or MSI-X ? * * Look at global flags, the device itself, and its parent busses * to determine if MSI/-X are supported for the device. If MSI/-X is * supported return 0, else return an error code. **/ -static int pci_msi_check_device(struct pci_dev * dev, int type) +static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) { struct pci_bus *bus; + int ret; /* MSI must be globally enabled and supported by the device */ if (!pci_msi_enable || !dev || dev->no_msi) @@ -483,6 +485,10 @@ static int pci_msi_check_device(struct pci_dev * dev, int type) if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) return -EINVAL; + ret = arch_msi_check_device(dev, nvec, type); + if (ret) + return ret; + if (!pci_find_capability(dev, type)) return -EINVAL; @@ -503,8 +509,9 @@ int pci_enable_msi(struct pci_dev* dev) { int status; - if (pci_msi_check_device(dev, PCI_CAP_ID_MSI)) - return -EINVAL; + status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI); + if (status) + return status; WARN_ON(!!dev->msi_enabled); @@ -601,9 +608,13 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) int i, j; u16 control; - if (!entries || pci_msi_check_device(dev, PCI_CAP_ID_MSIX)) + if (!entries) return -EINVAL; + status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); + if (status) + return status; + pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); pci_read_config_word(dev, msi_control_reg(pos), &control); nr_entries = multi_msix_capable(control); @@ -687,3 +698,13 @@ void pci_no_msi(void) { pci_msi_enable = 0; } + + +/* Arch hooks */ + +int __attribute__ ((weak)) +arch_msi_check_device(struct pci_dev* dev, int nvec, int type) +{ + return 0; +} + diff --git a/include/linux/msi.h b/include/linux/msi.h index e38fe6822cb4..d2a200048b22 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -41,6 +41,7 @@ struct msi_desc { */ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); +extern int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); #endif /* LINUX_MSI_H */ -- cgit v1.2.3 From 65891215e6b822c368fb3f36abf129ed48af8be0 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 5 Apr 2007 17:19:08 +1000 Subject: PCI: Create alloc_pci_dev(), the one true way to create a struct pci_dev There are currently several places in the kernel where we kmalloc() a struct pci_dev and start initialising it. It'd be preferable to have an allocator so we can ensure the pci_dev is correctly initialised in one place. 
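A rough sketch of what a call site looks like with the new helper; the bus and devfn values are stand-ins supplied by the surrounding scan code, not part of this patch:

	struct pci_dev *dev;

	dev = alloc_pci_dev();	/* kzalloc() plus list-head setup in one place */
	if (!dev)
		return NULL;

	/* only the caller-specific fields remain to be filled in */
	dev->bus = bus;
	dev->devfn = devfn;
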
Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- drivers/pci/probe.c | 15 +++++++++++++++ include/linux/pci.h | 2 ++ 2 files changed, 17 insertions(+) (limited to 'include/linux') diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 629edf39a07d..70d37bbf09bb 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -846,6 +846,21 @@ static void pci_release_bus_bridge_dev(struct device *dev) kfree(dev); } +struct pci_dev *alloc_pci_dev(void) +{ + struct pci_dev *dev; + + dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); + if (!dev) + return NULL; + + INIT_LIST_HEAD(&dev->global_list); + INIT_LIST_HEAD(&dev->bus_list); + + return dev; +} +EXPORT_SYMBOL(alloc_pci_dev); + /* * Read the config data for a PCI device, sanity-check it * and fill in the dev structure... diff --git a/include/linux/pci.h b/include/linux/pci.h index 99d45751830c..c02074785d40 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -193,6 +193,8 @@ struct pci_dev { #endif }; +extern struct pci_dev *alloc_pci_dev(void); + #define pci_dev_g(n) list_entry(n, struct pci_dev, global_list) #define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list) #define to_pci_dev(n) container_of(n, struct pci_dev, dev) -- cgit v1.2.3 From 4aa9bc955d61fdf03b5f9cee67db188fe1ffa8b7 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 5 Apr 2007 17:19:10 +1000 Subject: MSI: Use a list instead of the custom link structure The msi descriptors are linked together with what looks a lot like a linked list, but isn't a struct list_head list. Make it one. The only complication is that previously we walked a list of irqs, and got the descriptor for each with get_irq_msi(). Now we have a list of descriptors and need to get the irq out of it, so it needs to be in the actual struct msi_desc. We use 0 to indicate no irq is setup. 
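For illustration (not taken from this patch), walking all of a device's MSI descriptors now becomes a plain list iteration; report_vector() is a hypothetical per-vector hook:

	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (!entry->irq)	/* 0 means no irq has been set up yet */
			continue;
		report_vector(entry->irq);
	}
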
Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- drivers/pci/msi.c | 66 +++++++++++++++++++---------------------------------- drivers/pci/pci.h | 2 ++ drivers/pci/probe.c | 2 ++ include/linux/msi.h | 8 +++---- include/linux/pci.h | 1 + 5 files changed, 33 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 5902c00f4fce..434c7182d926 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -218,7 +218,8 @@ static struct msi_desc* alloc_msi_entry(void) if (!entry) return NULL; - entry->link.tail = entry->link.head = 0; /* single message */ + INIT_LIST_HEAD(&entry->list); + entry->irq = 0; entry->dev = NULL; return entry; @@ -253,7 +254,6 @@ static void __pci_restore_msi_state(struct pci_dev *dev) static void __pci_restore_msix_state(struct pci_dev *dev) { int pos; - int irq, head, tail = 0; struct msi_desc *entry; u16 control; @@ -263,18 +263,14 @@ static void __pci_restore_msix_state(struct pci_dev *dev) /* route the table */ pci_intx(dev, 0); /* disable intx */ msix_set_enable(dev, 0); - irq = head = dev->first_msi_irq; - entry = get_irq_msi(irq); - pos = entry->msi_attrib.pos; - while (head != tail) { - entry = get_irq_msi(irq); - write_msi_msg(irq, &entry->msg); - msi_set_mask_bit(irq, entry->msi_attrib.masked); - tail = entry->link.tail; - irq = tail; + list_for_each_entry(entry, &dev->msi_list, list) { + write_msi_msg(entry->irq, &entry->msg); + msi_set_mask_bit(entry->irq, entry->msi_attrib.masked); } + entry = get_irq_msi(dev->first_msi_irq); + pos = entry->msi_attrib.pos; pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); control &= ~PCI_MSIX_FLAGS_MASKALL; control |= PCI_MSIX_FLAGS_ENABLE; @@ -343,8 +339,8 @@ static int msi_capability_init(struct pci_dev *dev) kfree(entry); return irq; } - entry->link.head = irq; - entry->link.tail = irq; + entry->irq = irq; + list_add(&entry->list, &dev->msi_list); dev->first_msi_irq = irq; set_irq_msi(irq, entry); @@ -370,8 +366,8 @@ static int msi_capability_init(struct pci_dev *dev) static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec) { - struct msi_desc *head = NULL, *tail = NULL, *entry = NULL; - int irq, pos, i, j, nr_entries, temp = 0; + struct msi_desc *entry; + int irq, pos, i, j, nr_entries; unsigned long phys_addr; u32 table_offset; u16 control; @@ -416,19 +412,9 @@ static int msix_capability_init(struct pci_dev *dev, kfree(entry); break; } + entry->irq = irq; entries[i].vector = irq; - if (!head) { - entry->link.head = irq; - entry->link.tail = irq; - head = entry; - } else { - entry->link.head = temp; - entry->link.tail = tail->link.tail; - tail->link.tail = irq; - head->link.head = irq; - } - temp = irq; - tail = entry; + list_add(&entry->list, &dev->msi_list); set_irq_msi(irq, entry); } @@ -557,7 +543,7 @@ EXPORT_SYMBOL(pci_disable_msi); static int msi_free_irq(struct pci_dev* dev, int irq) { struct msi_desc *entry; - int head, entry_nr, type; + int entry_nr, type; void __iomem *base; BUG_ON(irq_has_action(irq)); @@ -568,10 +554,8 @@ static int msi_free_irq(struct pci_dev* dev, int irq) } type = entry->msi_attrib.type; entry_nr = entry->msi_attrib.entry_nr; - head = entry->link.head; base = entry->mask_base; - get_irq_msi(entry->link.head)->link.tail = entry->link.tail; - get_irq_msi(entry->link.tail)->link.head = entry->link.head; + list_del(&entry->list); arch_teardown_msi_irq(irq); kfree(entry); @@ -580,7 +564,7 @@ static int msi_free_irq(struct pci_dev* dev, int irq) writel(1, base + entry_nr * 
PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); - if (head == irq) + if (list_empty(&dev->msi_list)) iounmap(base); } @@ -646,17 +630,10 @@ EXPORT_SYMBOL(pci_enable_msix); static void msix_free_all_irqs(struct pci_dev *dev) { - int irq, head, tail = 0; - - irq = head = dev->first_msi_irq; - while (head != tail) { - tail = get_irq_msi(irq)->link.tail; + struct msi_desc *entry; - if (irq != head) - msi_free_irq(dev, irq); - irq = tail; - } - msi_free_irq(dev, irq); + list_for_each_entry(entry, &dev->msi_list, list) + msi_free_irq(dev, entry->irq); dev->first_msi_irq = 0; } @@ -699,6 +676,11 @@ void pci_no_msi(void) pci_msi_enable = 0; } +void pci_msi_init_pci_dev(struct pci_dev *dev) +{ + INIT_LIST_HEAD(&dev->msi_list); +} + /* Arch hooks */ diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 62ea04c8af64..3fec13d3add7 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -47,8 +47,10 @@ extern unsigned int pci_pm_d3_delay; #ifdef CONFIG_PCI_MSI void pci_no_msi(void); +extern void pci_msi_init_pci_dev(struct pci_dev *dev); #else static inline void pci_no_msi(void) { } +static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } #endif #if defined(CONFIG_PCI_MSI) && defined(CONFIG_PM) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index c659f8ae441a..e48fcf089621 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -857,6 +857,8 @@ struct pci_dev *alloc_pci_dev(void) INIT_LIST_HEAD(&dev->global_list); INIT_LIST_HEAD(&dev->bus_list); + pci_msi_init_pci_dev(dev); + return dev; } EXPORT_SYMBOL(alloc_pci_dev); diff --git a/include/linux/msi.h b/include/linux/msi.h index d2a200048b22..931e013f1db5 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -1,6 +1,8 @@ #ifndef LINUX_MSI_H #define LINUX_MSI_H +#include + struct msi_msg { u32 address_lo; /* low 32 bits of msi message address */ u32 address_hi; /* high 32 bits of msi message address */ @@ -24,10 +26,8 @@ struct msi_desc { unsigned default_irq; /* default pre-assigned irq */ }msi_attrib; - struct { - __u16 head; - __u16 tail; - }link; + unsigned int irq; + struct list_head list; void __iomem *mask_base; struct pci_dev *dev; diff --git a/include/linux/pci.h b/include/linux/pci.h index c02074785d40..d43097dc8672 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -190,6 +190,7 @@ struct pci_dev { struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ #ifdef CONFIG_PCI_MSI unsigned int first_msi_irq; + struct list_head msi_list; #endif }; -- cgit v1.2.3 From 314e77b3eec57001eaff82b82920150175b74e09 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 5 Apr 2007 17:19:12 +1000 Subject: MSI: Remove dev->first_msi_irq Now that we keep a list of msi descriptors, we don't need first_msi_irq in the pci dev. If we somehow have zero MSIs configured list_entry() will give us weird oopes or nice memory corruption bugs. So be paranoid. Add BUG_ONs and also a check in pci_msi_check_device() to make sure nvec > 0. 
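Shown as a standalone sketch, the guarded pattern that replaces dev->first_msi_irq is simply the first element of the list:

	struct msi_desc *entry;

	BUG_ON(list_empty(&dev->msi_list));	/* nvec >= 1 was checked earlier */
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* entry->irq now serves where dev->first_msi_irq used to */
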
Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- drivers/pci/msi.c | 31 ++++++++++++++++++++----------- include/linux/pci.h | 1 - 2 files changed, 20 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 434c7182d926..7a44ba467481 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -269,7 +269,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev) msi_set_mask_bit(entry->irq, entry->msi_attrib.masked); } - entry = get_irq_msi(dev->first_msi_irq); + BUG_ON(list_empty(&dev->msi_list)); + entry = list_entry(dev->msi_list.next, struct msi_desc, list); pos = entry->msi_attrib.pos; pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); control &= ~PCI_MSIX_FLAGS_MASKALL; @@ -341,7 +342,6 @@ static int msi_capability_init(struct pci_dev *dev) } entry->irq = irq; list_add(&entry->list, &dev->msi_list); - dev->first_msi_irq = irq; set_irq_msi(irq, entry); /* Set MSI enabled bits */ @@ -433,7 +433,6 @@ static int msix_capability_init(struct pci_dev *dev, avail = -EBUSY; return avail; } - dev->first_msi_irq = entries[0].vector; /* Set MSI-X enabled bits */ pci_intx(dev, 0); /* disable intx */ msix_set_enable(dev, 1); @@ -461,6 +460,14 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) if (!pci_msi_enable || !dev || dev->no_msi) return -EINVAL; + /* + * You can't ask to have 0 or less MSIs configured. + * a) it's stupid .. + * b) the list manipulation code assumes nvec >= 1. + */ + if (nvec < 1) + return -ERANGE; + /* Any bridge which does NOT route MSI transactions from it's * secondary bus to it's primary bus must set NO_MSI flag on * the secondary pci_bus. @@ -525,18 +532,17 @@ void pci_disable_msi(struct pci_dev* dev) pci_intx(dev, 1); /* enable intx */ dev->msi_enabled = 0; - entry = get_irq_msi(dev->first_msi_irq); - if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { + BUG_ON(list_empty(&dev->msi_list)); + entry = list_entry(dev->msi_list.next, struct msi_desc, list); + if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { return; } default_irq = entry->msi_attrib.default_irq; - msi_free_irq(dev, dev->first_msi_irq); + msi_free_irq(dev, entry->irq); /* Restore dev->irq to its default pin-assertion irq */ dev->irq = default_irq; - - dev->first_msi_irq = 0; } EXPORT_SYMBOL(pci_disable_msi); @@ -634,7 +640,6 @@ static void msix_free_all_irqs(struct pci_dev *dev) list_for_each_entry(entry, &dev->msi_list, list) msi_free_irq(dev, entry->irq); - dev->first_msi_irq = 0; } void pci_disable_msix(struct pci_dev* dev) @@ -664,8 +669,12 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev) if (!pci_msi_enable || !dev) return; - if (dev->msi_enabled) - msi_free_irq(dev, dev->first_msi_irq); + if (dev->msi_enabled) { + struct msi_desc *entry; + BUG_ON(list_empty(&dev->msi_list)); + entry = list_entry(dev->msi_list.next, struct msi_desc, list); + msi_free_irq(dev, entry->irq); + } if (dev->msix_enabled) msix_free_all_irqs(dev); diff --git a/include/linux/pci.h b/include/linux/pci.h index d43097dc8672..a15569bf78b5 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -189,7 +189,6 @@ struct pci_dev { int rom_attr_enabled; /* has display of the rom attribute been enabled? 
*/ struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ #ifdef CONFIG_PCI_MSI - unsigned int first_msi_irq; struct list_head msi_list; #endif }; -- cgit v1.2.3 From 9c8313343c83c0ca731ceb8d2a4ab1e022ed9c94 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 18 Apr 2007 19:39:21 +1000 Subject: MSI: Give archs the option to allocate all MSI/Xs at once. This patch introduces an optional function, arch_setup_msi_irqs(), (note the plural) which gives an arch the opportunity to do per-device setup for MSI/X and then allocate all the requested MSI/Xs at once. If that's not required by the arch, the default version simply calls arch_setup_msi_irq() for each MSI irq required. arch_setup_msi_irqs() is passed a pdev, attached to the pdev is a list of msi_descs with irq == 0, it is up to the arch to connect these up to an irq (via set_irq_msi()) or return an error. For convenience the number of vectors and the type are passed also. All msi_descs with irq != 0 are considered allocated, and the arch teardown routine will be called on them when necessary. The existing semantics of pci_enable_msix() are that if the requested number of irqs can not be allocated, the maximum number that _could_ be allocated is returned. To support that, we define that in case of an error from arch_setup_msi_irqs(), the number of msi_descs with irq != 0 are considered allocated, and are counted toward the "max that could be allocated". Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- drivers/pci/msi.c | 63 +++++++++++++++++++++++++++++++++++++---------------- include/linux/msi.h | 1 + 2 files changed, 45 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 88362f1bd9cf..c71e8e4c7168 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -334,13 +334,15 @@ static int msi_capability_init(struct pci_dev *dev) msi_mask_bits_reg(pos, is_64bit_address(control)), maskbits); } + list_add(&entry->list, &dev->msi_list); + /* Configure MSI capability structure */ - ret = arch_setup_msi_irq(dev, entry); + ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); if (ret) { + list_del(&entry->list); kfree(entry); return ret; } - list_add(&entry->list, &dev->msi_list); /* Set MSI enabled bits */ pci_intx(dev, 0); /* disable intx */ @@ -365,7 +367,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec) { struct msi_desc *entry; - int irq, pos, i, j, nr_entries, ret; + int pos, i, j, nr_entries, ret; unsigned long phys_addr; u32 table_offset; u16 control; @@ -404,30 +406,33 @@ static int msix_capability_init(struct pci_dev *dev, entry->dev = dev; entry->mask_base = base; - /* Configure MSI-X capability structure */ - ret = arch_setup_msi_irq(dev, entry); - if (ret) { - kfree(entry); - break; - } - entries[i].vector = entry->irq; list_add(&entry->list, &dev->msi_list); } - if (i != nvec) { - int avail = i - 1; - i--; - for (; i >= 0; i--) { - irq = (entries + i)->vector; - msi_free_irq(dev, irq); - (entries + i)->vector = 0; + + ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); + if (ret) { + int avail = 0; + list_for_each_entry(entry, &dev->msi_list, list) { + if (entry->irq != 0) { + avail++; + msi_free_irq(dev, entry->irq); + } } + /* If we had some success report the number of irqs * we succeeded in setting up. 
*/ - if (avail <= 0) - avail = -EBUSY; + if (avail == 0) + avail = ret; return avail; } + + i = 0; + list_for_each_entry(entry, &dev->msi_list, list) { + entries[i].vector = entry->irq; + set_irq_msi(entry->irq, entry); + i++; + } /* Set MSI-X enabled bits */ pci_intx(dev, 0); /* disable intx */ msix_set_enable(dev, 1); @@ -694,3 +699,23 @@ arch_msi_check_device(struct pci_dev* dev, int nvec, int type) return 0; } +int __attribute__ ((weak)) +arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) +{ + return 0; +} + +int __attribute__ ((weak)) +arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct msi_desc *entry; + int ret; + + list_for_each_entry(entry, &dev->msi_list, list) { + ret = arch_setup_msi_irq(dev, entry); + if (ret) + return ret; + } + + return 0; +} diff --git a/include/linux/msi.h b/include/linux/msi.h index 931e013f1db5..494627ae021f 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -41,6 +41,7 @@ struct msi_desc { */ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); +extern int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); extern int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); -- cgit v1.2.3 From 032de8e2fe3c0eec5fb0ffe4d38aa602dad397dc Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 18 Apr 2007 19:39:22 +1000 Subject: MSI: Give archs the option to free all MSI/Xs at once. This patch introduces an optional function, arch_teardown_msi_irqs(), which gives an arch the opportunity to do per-device teardown for MSI/X. If that's not required, the default version simply calls arch_teardown_msi_irq() for each msi irq required. arch_teardown_msi_irqs() is simply passed a pdev, attached to the pdev is a list of msi_descs, it is up to the arch to free the irq associated with each of these as appropriate. For archs that _don't_ implement arch_teardown_msi_irqs(), all msi_descs with irq == 0 are considered unallocated, and the arch teardown routine is not called on them. Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- drivers/pci/msi.c | 75 ++++++++++++++++++++++++++++------------------------- include/linux/msi.h | 1 + 2 files changed, 40 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index c71e8e4c7168..9e1321d0d5e6 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -207,7 +207,7 @@ void unmask_msi_irq(unsigned int irq) msix_flush_writes(irq); } -static int msi_free_irq(struct pci_dev* dev, int irq); +static int msi_free_irqs(struct pci_dev* dev); static struct msi_desc* alloc_msi_entry(void) @@ -339,8 +339,7 @@ static int msi_capability_init(struct pci_dev *dev) /* Configure MSI capability structure */ ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); if (ret) { - list_del(&entry->list); - kfree(entry); + msi_free_irqs(dev); return ret; } @@ -415,10 +414,11 @@ static int msix_capability_init(struct pci_dev *dev, list_for_each_entry(entry, &dev->msi_list, list) { if (entry->irq != 0) { avail++; - msi_free_irq(dev, entry->irq); } } + msi_free_irqs(dev); + /* If we had some success report the number of irqs * we succeeded in setting up. 
*/ @@ -539,39 +539,33 @@ void pci_disable_msi(struct pci_dev* dev) } default_irq = entry->msi_attrib.default_irq; - msi_free_irq(dev, entry->irq); + msi_free_irqs(dev); /* Restore dev->irq to its default pin-assertion irq */ dev->irq = default_irq; } EXPORT_SYMBOL(pci_disable_msi); -static int msi_free_irq(struct pci_dev* dev, int irq) +static int msi_free_irqs(struct pci_dev* dev) { - struct msi_desc *entry; - int entry_nr, type; - void __iomem *base; - - BUG_ON(irq_has_action(irq)); + struct msi_desc *entry, *tmp; - entry = get_irq_msi(irq); - if (!entry || entry->dev != dev) { - return -EINVAL; - } - type = entry->msi_attrib.type; - entry_nr = entry->msi_attrib.entry_nr; - base = entry->mask_base; - list_del(&entry->list); + list_for_each_entry(entry, &dev->msi_list, list) + BUG_ON(irq_has_action(entry->irq)); - arch_teardown_msi_irq(irq); - kfree(entry); + arch_teardown_msi_irqs(dev); - if (type == PCI_CAP_ID_MSIX) { - writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); + list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { + if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) { + if (list_is_last(&entry->list, &dev->msi_list)) + iounmap(entry->mask_base); - if (list_empty(&dev->msi_list)) - iounmap(base); + writel(1, entry->mask_base + entry->msi_attrib.entry_nr + * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); + } + list_del(&entry->list); + kfree(entry); } return 0; @@ -636,10 +630,7 @@ EXPORT_SYMBOL(pci_enable_msix); static void msix_free_all_irqs(struct pci_dev *dev) { - struct msi_desc *entry; - - list_for_each_entry(entry, &dev->msi_list, list) - msi_free_irq(dev, entry->irq); + msi_free_irqs(dev); } void pci_disable_msix(struct pci_dev* dev) @@ -669,12 +660,8 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev) if (!pci_msi_enable || !dev) return; - if (dev->msi_enabled) { - struct msi_desc *entry; - BUG_ON(list_empty(&dev->msi_list)); - entry = list_entry(dev->msi_list.next, struct msi_desc, list); - msi_free_irq(dev, entry->irq); - } + if (dev->msi_enabled) + msi_free_irqs(dev); if (dev->msix_enabled) msix_free_all_irqs(dev); @@ -719,3 +706,19 @@ arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) return 0; } + +void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) +{ + return; +} + +void __attribute__ ((weak)) +arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *entry; + + list_for_each_entry(entry, &dev->msi_list, list) { + if (entry->irq != 0) + arch_teardown_msi_irq(entry->irq); + } +} diff --git a/include/linux/msi.h b/include/linux/msi.h index 494627ae021f..94bb46d82efd 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -42,6 +42,7 @@ struct msi_desc { int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); extern int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); +extern void arch_teardown_msi_irqs(struct pci_dev *dev); extern int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); -- cgit v1.2.3 From 5adc55da4a7758021bcc374904b0f8b076508a11 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Tue, 27 Mar 2007 03:02:51 +0200 Subject: PCI: remove the broken PCI_MULTITHREAD_PROBE option This patch removes the PCI_MULTITHREAD_PROBE option that had already been marked as broken. 
Signed-off-by: Adrian Bunk Signed-off-by: Greg Kroah-Hartman --- Documentation/pci.txt | 4 ---- drivers/base/dd.c | 41 +++-------------------------------------- drivers/pci/Kconfig | 25 ------------------------- drivers/pci/pci-driver.c | 15 --------------- include/linux/device.h | 1 - 5 files changed, 3 insertions(+), 83 deletions(-) (limited to 'include/linux') diff --git a/Documentation/pci.txt b/Documentation/pci.txt index a8ded1a8bd62..40c4717d236b 100644 --- a/Documentation/pci.txt +++ b/Documentation/pci.txt @@ -124,10 +124,6 @@ initialization with a pointer to a structure describing the driver err_handler See Documentation/pci-error-recovery.txt - multithread_probe Enable multi-threaded probe/scan. Driver must - provide its own locking/syncronization for init - operations if this is enabled. - The ID table is an array of struct pci_device_id entries ending with an all-zero entry. Each entry consists of: diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 18dba8e78da7..92428e55b0c2 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -226,12 +226,10 @@ static int device_probe_drivers(void *data) * * Walk the list of drivers that the bus has and call * driver_probe_device() for each pair. If a compatible - * pair is found, break out and return. If the bus specifies - * multithreaded probing, walking the list of drivers is done - * on a probing thread. + * pair is found, break out and return. * * Returns 1 if the device was bound to a driver; - * 0 if no matching device was found or multithreaded probing is done; + * 0 if no matching device was found; * -ENODEV if the device is not registered. * * When called for a USB interface, @dev->parent->sem must be held. @@ -239,7 +237,6 @@ static int device_probe_drivers(void *data) int device_attach(struct device * dev) { int ret = 0; - struct task_struct *probe_task = ERR_PTR(-ENOMEM); down(&dev->sem); if (dev->driver) { @@ -251,12 +248,7 @@ int device_attach(struct device * dev) ret = 0; } } else { - if (dev->bus->multithread_probe) - probe_task = kthread_run(device_probe_drivers, dev, - "probe-%s", dev->bus_id); - if(IS_ERR(probe_task)) - ret = bus_for_each_drv(dev->bus, NULL, dev, - __device_attach); + ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); } up(&dev->sem); return ret; @@ -383,33 +375,6 @@ void driver_detach(struct device_driver * drv) } } -#ifdef CONFIG_PCI_MULTITHREAD_PROBE -static int __init wait_for_probes(void) -{ - DEFINE_WAIT(wait); - - printk(KERN_INFO "%s: waiting for %d threads\n", __FUNCTION__, - atomic_read(&probe_count)); - if (!atomic_read(&probe_count)) - return 0; - while (atomic_read(&probe_count)) { - prepare_to_wait(&probe_waitqueue, &wait, TASK_UNINTERRUPTIBLE); - if (atomic_read(&probe_count)) - schedule(); - } - finish_wait(&probe_waitqueue, &wait); - return 0; -} - -core_initcall_sync(wait_for_probes); -postcore_initcall_sync(wait_for_probes); -arch_initcall_sync(wait_for_probes); -subsys_initcall_sync(wait_for_probes); -fs_initcall_sync(wait_for_probes); -device_initcall_sync(wait_for_probes); -late_initcall_sync(wait_for_probes); -#endif - EXPORT_SYMBOL_GPL(device_bind_driver); EXPORT_SYMBOL_GPL(device_release_driver); EXPORT_SYMBOL_GPL(device_attach); diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 70efe8f285a6..7a1d6d512837 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -21,31 +21,6 @@ config PCI_MSI If you don't know what to do here, say N. 
-config PCI_MULTITHREAD_PROBE - bool "PCI Multi-threaded probe (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL && BROKEN - help - Say Y here if you want the PCI core to spawn a new thread for - every PCI device that is probed. This can cause a huge - speedup in boot times on multiprocessor machines, and even a - smaller speedup on single processor machines. - - But it can also cause lots of bad things to happen. A number - of PCI drivers cannot properly handle running in this way, - some will just not work properly at all, while others might - decide to blow up power supplies with a huge load all at once, - so use this option at your own risk. - - It is very unwise to use this option if you are not using a - boot process that can handle devices being created in any - order. A program that can create persistent block and network - device names (like udev) is a good idea if you wish to use - this option. - - Again, use this option at your own risk, you have been warned! - - When in doubt, say N. - config PCI_DEBUG bool "PCI Debugging" depends on PCI && DEBUG_KERNEL diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 07fdb3cd6172..3bb7739d26a5 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -13,20 +13,6 @@ #include #include "pci.h" -/* - * Registration of PCI drivers and handling of hot-pluggable devices. - */ - -/* multithreaded probe logic */ -static int pci_multithread_probe = -#ifdef CONFIG_PCI_MULTITHREAD_PROBE - 1; -#else - 0; -#endif -__module_param_call("", pci_multithread_probe, param_set_bool, param_get_bool, &pci_multithread_probe, 0644); - - /* * Dynamic device IDs are disabled for !CONFIG_HOTPLUG */ @@ -569,7 +555,6 @@ struct bus_type pci_bus_type = { static int __init pci_driver_init(void) { - pci_bus_type.multithread_probe = pci_multithread_probe; return bus_register(&pci_bus_type); } diff --git a/include/linux/device.h b/include/linux/device.h index a0cd2ced31a9..ff83b61d283d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -80,7 +80,6 @@ struct bus_type { int (*resume)(struct device * dev); unsigned int drivers_autoprobe:1; - unsigned int multithread_probe:1; }; extern int __must_check bus_register(struct bus_type * bus); -- cgit v1.2.3 From ecf36501bc4ad399e6df2e0bdaa513a2d510b7ec Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Fri, 6 Apr 2007 12:19:48 +0200 Subject: PCI: the overdue removal of pci_module_init() Unless we finally completely remove it, people will always add new users. Signed-off-by: Adrian Bunk Signed-off-by: Greg Kroah-Hartman --- Documentation/feature-removal-schedule.txt | 7 ------- Documentation/pci.txt | 2 -- include/linux/pci.h | 6 ------ 3 files changed, 15 deletions(-) (limited to 'include/linux') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 5c88ba1ea262..144058cf849a 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -117,13 +117,6 @@ Who: Adrian Bunk --------------------------- -What: pci_module_init(driver) -When: January 2007 -Why: Is replaced by pci_register_driver(pci_driver). -Who: Richard Knutsson and Greg Kroah-Hartman - ---------------------------- - What: Usage of invalid timevals in setitimer When: March 2007 Why: POSIX requires to validate timevals in the setitimer call. 
This diff --git a/Documentation/pci.txt b/Documentation/pci.txt index 40c4717d236b..e2c9d0a0c43d 100644 --- a/Documentation/pci.txt +++ b/Documentation/pci.txt @@ -545,8 +545,6 @@ pci_find_slot() Find pci_dev corresponding to given bus and pci_set_power_state() Set PCI Power Management state (0=D0 ... 3=D3) pci_find_capability() Find specified capability in device's capability list. -pci_module_init() Inline helper function for ensuring correct - pci_driver initialization and error handling. pci_resource_start() Returns bus start address for a given PCI region pci_resource_end() Returns bus end address for a given PCI region pci_resource_len() Returns the byte length of a PCI region diff --git a/include/linux/pci.h b/include/linux/pci.h index a15569bf78b5..fbf3766dac1e 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -407,12 +407,6 @@ struct pci_driver { .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID -/* - * pci_module_init is obsolete, this stays here till we fix up all usages of it - * in the tree. - */ -#define pci_module_init pci_register_driver - /** * PCI_VDEVICE - macro used to describe a specific pci device in short form * @vend: the vendor name -- cgit v1.2.3 From a830df367cc8cd802b45baed2449bea267727721 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Thu, 3 May 2007 00:55:34 -0400 Subject: Input: pull input.h into uinput.h uinput.h relies on structures found in input.h, so pull in the header Signed-off-by: Mike Frysinger Signed-off-by: Dmitry Torokhov --- drivers/input/misc/uinput.c | 1 - include/linux/uinput.h | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 031467eadd31..a56ad4ba8fe2 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include diff --git a/include/linux/uinput.h b/include/linux/uinput.h index 1fd61eeed664..a6c1e8eed226 100644 --- a/include/linux/uinput.h +++ b/include/linux/uinput.h @@ -32,6 +32,8 @@ * - first public version */ +#include + #define UINPUT_VERSION 3 #ifdef __KERNEL__ -- cgit v1.2.3 From bbe4432e669ab94fc8059e7ab878cafad7b8d123 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Mar 2007 13:27:36 +0200 Subject: KVM: Use own minor number Use the minor number (232) allocated to kvm by lanana. Signed-off-by: Avi Kivity --- drivers/kvm/kvm_main.c | 2 +- include/linux/miscdevice.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index ff7c836ff001..946ed86a0595 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -2299,7 +2299,7 @@ static struct file_operations kvm_chardev_ops = { }; static struct miscdevice kvm_dev = { - MISC_DYNAMIC_MINOR, + KVM_MINOR, "kvm", &kvm_chardev_ops, }; diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 326da7d500c7..dff9ea32606a 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -29,6 +29,7 @@ #define TUN_MINOR 200 #define HPET_MINOR 228 +#define KVM_MINOR 232 struct device; -- cgit v1.2.3 From ff42697436ddf5bd026e2cb4f117656b967f0709 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 7 Mar 2007 09:29:48 +0200 Subject: KVM: Export This allows users to actually build programs that use kvm without the entire source tree. 
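A minimal sketch of such a userspace program, needing nothing from the kernel source tree beyond the exported header (error handling trimmed to the essentials):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);

		if (kvm < 0) {
			perror("/dev/kvm");
			return 1;
		}
		printf("KVM API version %d\n", ioctl(kvm, KVM_GET_API_VERSION, 0));
		return 0;
	}
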
Signed-off-by: Avi Kivity --- include/linux/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 4ff0f57d0add..9f05279e7dd3 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -96,6 +96,7 @@ header-y += iso_fs.h header-y += ixjuser.h header-y += jffs2.h header-y += keyctl.h +header-y += kvm.h header-y += limits.h header-y += lock_dlm_plock.h header-y += magic.h -- cgit v1.2.3 From 9a2bb7f486dc639a1cf2ad803bf2227f0dc0809d Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 22 Feb 2007 12:58:31 +0200 Subject: KVM: Use a shared page for kernel/user communication when runing a vcpu Instead of passing a 'struct kvm_run' back and forth between the kernel and userspace, allocate a page and allow the user to mmap() it. This reduces needless copying and makes the interface expandable by providing lots of free space. Signed-off-by: Avi Kivity --- drivers/kvm/kvm.h | 1 + drivers/kvm/kvm_main.c | 54 +++++++++++++++++++++++++++++++++++++------------- include/linux/kvm.h | 6 +++--- 3 files changed, 44 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index 0d122bf889db..901b8d917b55 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h @@ -228,6 +228,7 @@ struct kvm_vcpu { struct mutex mutex; int cpu; int launched; + struct kvm_run *run; int interrupt_window_open; unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ #define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long) diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index 946ed86a0595..42be8a8f299d 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -355,6 +355,8 @@ static void kvm_free_vcpu(struct kvm_vcpu *vcpu) kvm_mmu_destroy(vcpu); vcpu_put(vcpu); kvm_arch_ops->vcpu_free(vcpu); + free_page((unsigned long)vcpu->run); + vcpu->run = NULL; } static void kvm_free_vcpus(struct kvm *kvm) @@ -1887,6 +1889,33 @@ static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, return r; } +static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma, + unsigned long address, + int *type) +{ + struct kvm_vcpu *vcpu = vma->vm_file->private_data; + unsigned long pgoff; + struct page *page; + + *type = VM_FAULT_MINOR; + pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + if (pgoff != 0) + return NOPAGE_SIGBUS; + page = virt_to_page(vcpu->run); + get_page(page); + return page; +} + +static struct vm_operations_struct kvm_vcpu_vm_ops = { + .nopage = kvm_vcpu_nopage, +}; + +static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) +{ + vma->vm_ops = &kvm_vcpu_vm_ops; + return 0; +} + static int kvm_vcpu_release(struct inode *inode, struct file *filp) { struct kvm_vcpu *vcpu = filp->private_data; @@ -1899,6 +1928,7 @@ static struct file_operations kvm_vcpu_fops = { .release = kvm_vcpu_release, .unlocked_ioctl = kvm_vcpu_ioctl, .compat_ioctl = kvm_vcpu_ioctl, + .mmap = kvm_vcpu_mmap, }; /* @@ -1947,6 +1977,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n) { int r; struct kvm_vcpu *vcpu; + struct page *page; r = -EINVAL; if (!valid_vcpu(n)) @@ -1961,6 +1992,12 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n) return -EEXIST; } + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + r = -ENOMEM; + if (!page) + goto out_unlock; + vcpu->run = page_address(page); + vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf, FX_IMAGE_ALIGN); vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE; @@ -1990,6 +2027,7 @@ 
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n) out_free_vcpus: kvm_free_vcpu(vcpu); +out_unlock: mutex_unlock(&vcpu->mutex); out: return r; @@ -2003,21 +2041,9 @@ static long kvm_vcpu_ioctl(struct file *filp, int r = -EINVAL; switch (ioctl) { - case KVM_RUN: { - struct kvm_run kvm_run; - - r = -EFAULT; - if (copy_from_user(&kvm_run, argp, sizeof kvm_run)) - goto out; - r = kvm_vcpu_ioctl_run(vcpu, &kvm_run); - if (r < 0 && r != -EINTR) - goto out; - if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) { - r = -EFAULT; - goto out; - } + case KVM_RUN: + r = kvm_vcpu_ioctl_run(vcpu, vcpu->run); break; - } case KVM_GET_REGS: { struct kvm_regs kvm_regs; diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 275354ffa1cb..d88e7508ee0a 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -11,7 +11,7 @@ #include #include -#define KVM_API_VERSION 4 +#define KVM_API_VERSION 5 /* * Architectural interrupt line count, and the size of the bitmap needed @@ -49,7 +49,7 @@ enum kvm_exit_reason { KVM_EXIT_SHUTDOWN = 8, }; -/* for KVM_RUN */ +/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { /* in */ __u32 emulated; /* skip current instruction */ @@ -233,7 +233,7 @@ struct kvm_dirty_log { /* * ioctls for vcpu fds */ -#define KVM_RUN _IOWR(KVMIO, 2, struct kvm_run) +#define KVM_RUN _IO(KVMIO, 16) #define KVM_GET_REGS _IOR(KVMIO, 3, struct kvm_regs) #define KVM_SET_REGS _IOW(KVMIO, 4, struct kvm_regs) #define KVM_GET_SREGS _IOR(KVMIO, 5, struct kvm_sregs) -- cgit v1.2.3 From 46fc1477887c41c8e900f2c95485e222b9a54822 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 22 Feb 2007 19:39:30 +0200 Subject: KVM: Do not communicate to userspace through cpu registers during PIO Currently when passing the a PIO emulation request to userspace, we rely on userspace updating %rax (on 'in' instructions) and %rsi/%rdi/%rcx (on string instructions). This (a) requires two extra ioctls for getting and setting the registers and (b) is unfriendly to non-x86 archs, when they get kvm ports. So fix by doing the register fixups in the kernel and passing to userspace only an abstract description of the PIO to be done. Signed-off-by: Avi Kivity --- drivers/kvm/kvm.h | 1 + drivers/kvm/kvm_main.c | 48 +++++++++++++++++++++++++++++++++++++++++++++--- drivers/kvm/svm.c | 2 ++ drivers/kvm/vmx.c | 2 ++ include/linux/kvm.h | 6 +++--- 5 files changed, 53 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index 901b8d917b55..59cbc5b1d905 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h @@ -274,6 +274,7 @@ struct kvm_vcpu { int mmio_size; unsigned char mmio_data[8]; gpa_t mmio_phys_addr; + int pio_pending; struct { int active; diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index 42be8a8f299d..ff8bcfee76e5 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -1504,6 +1504,44 @@ void save_msrs(struct vmx_msr_entry *e, int n) } EXPORT_SYMBOL_GPL(save_msrs); +static void complete_pio(struct kvm_vcpu *vcpu) +{ + struct kvm_io *io = &vcpu->run->io; + long delta; + + kvm_arch_ops->cache_regs(vcpu); + + if (!io->string) { + if (io->direction == KVM_EXIT_IO_IN) + memcpy(&vcpu->regs[VCPU_REGS_RAX], &io->value, + io->size); + } else { + delta = 1; + if (io->rep) { + delta *= io->count; + /* + * The size of the register should really depend on + * current address size. 
+ */ + vcpu->regs[VCPU_REGS_RCX] -= delta; + } + if (io->string_down) + delta = -delta; + delta *= io->size; + if (io->direction == KVM_EXIT_IO_IN) + vcpu->regs[VCPU_REGS_RDI] += delta; + else + vcpu->regs[VCPU_REGS_RSI] += delta; + } + + vcpu->pio_pending = 0; + vcpu->run->io_completed = 0; + + kvm_arch_ops->decache_regs(vcpu); + + kvm_arch_ops->skip_emulated_instruction(vcpu); +} + static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; @@ -1518,9 +1556,13 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_run->emulated = 0; } - if (kvm_run->mmio_completed) { - memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); - vcpu->mmio_read_completed = 1; + if (kvm_run->io_completed) { + if (vcpu->pio_pending) + complete_pio(vcpu); + else { + memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); + vcpu->mmio_read_completed = 1; + } } vcpu->mmio_needed = 0; diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index 6787f11738cf..c35b8c83bf3f 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c @@ -1037,6 +1037,7 @@ static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_run->io.size = ((io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT); kvm_run->io.string = (io_info & SVM_IOIO_STR_MASK) != 0; kvm_run->io.rep = (io_info & SVM_IOIO_REP_MASK) != 0; + kvm_run->io.count = 1; if (kvm_run->io.string) { unsigned addr_mask; @@ -1056,6 +1057,7 @@ static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } } else kvm_run->io.value = vcpu->svm->vmcb->save.rax; + vcpu->pio_pending = 1; return 0; } diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index a721b60f7385..4d5f40fcb651 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c @@ -1459,12 +1459,14 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0; kvm_run->io.rep = (exit_qualification & 32) != 0; kvm_run->io.port = exit_qualification >> 16; + kvm_run->io.count = 1; if (kvm_run->io.string) { if (!get_io_count(vcpu, &kvm_run->io.count)) return 1; kvm_run->io.address = vmcs_readl(GUEST_LINEAR_ADDRESS); } else kvm_run->io.value = vcpu->regs[VCPU_REGS_RAX]; /* rax */ + vcpu->pio_pending = 1; return 0; } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index d88e7508ee0a..19aeb3385188 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -11,7 +11,7 @@ #include #include -#define KVM_API_VERSION 5 +#define KVM_API_VERSION 6 /* * Architectural interrupt line count, and the size of the bitmap needed @@ -53,7 +53,7 @@ enum kvm_exit_reason { struct kvm_run { /* in */ __u32 emulated; /* skip current instruction */ - __u32 mmio_completed; /* mmio request completed */ + __u32 io_completed; /* mmio/pio request completed */ __u8 request_interrupt_window; __u8 padding1[7]; @@ -80,7 +80,7 @@ struct kvm_run { __u32 error_code; } ex; /* KVM_EXIT_IO */ - struct { + struct kvm_io { #define KVM_EXIT_IO_IN 0 #define KVM_EXIT_IO_OUT 1 __u8 direction; -- cgit v1.2.3 From 06465c5a3aa9948a7b00af49cd22ed8f235cdb0f Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 28 Feb 2007 20:46:53 +0200 Subject: KVM: Handle cpuid in the kernel instead of punting to userspace KVM used to handle cpuid by letting userspace decide what values to return to the guest. We now handle cpuid completely in the kernel. 
We still let userspace decide which values the guest will see by having userspace set up the value table beforehand (this is necessary to allow management software to set the cpu features to the least common denominator, so that live migration can work). The motivation for the change is that kvm kernel code can be impacted by cpuid features, for example the x86 emulator. Signed-off-by: Avi Kivity --- drivers/kvm/kvm.h | 5 ++++ drivers/kvm/kvm_main.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++ drivers/kvm/svm.c | 4 +-- drivers/kvm/vmx.c | 4 +-- include/linux/kvm.h | 18 ++++++++++++- 5 files changed, 95 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index 59cbc5b1d905..be3a0e7ecae4 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h @@ -55,6 +55,7 @@ #define KVM_NUM_MMU_PAGES 256 #define KVM_MIN_FREE_MMU_PAGES 5 #define KVM_REFILL_PAGES 25 +#define KVM_MAX_CPUID_ENTRIES 40 #define FX_IMAGE_SIZE 512 #define FX_IMAGE_ALIGN 16 @@ -286,6 +287,9 @@ struct kvm_vcpu { u32 ar; } tr, es, ds, fs, gs; } rmode; + + int cpuid_nent; + struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES]; }; struct kvm_memory_slot { @@ -446,6 +450,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value, struct x86_emulate_ctxt; +void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address); int emulate_clts(struct kvm_vcpu *vcpu); int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index ff8bcfee76e5..caec54fbf07f 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -1504,6 +1504,43 @@ void save_msrs(struct vmx_msr_entry *e, int n) } EXPORT_SYMBOL_GPL(save_msrs); +void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) +{ + int i; + u32 function; + struct kvm_cpuid_entry *e, *best; + + kvm_arch_ops->cache_regs(vcpu); + function = vcpu->regs[VCPU_REGS_RAX]; + vcpu->regs[VCPU_REGS_RAX] = 0; + vcpu->regs[VCPU_REGS_RBX] = 0; + vcpu->regs[VCPU_REGS_RCX] = 0; + vcpu->regs[VCPU_REGS_RDX] = 0; + best = NULL; + for (i = 0; i < vcpu->cpuid_nent; ++i) { + e = &vcpu->cpuid_entries[i]; + if (e->function == function) { + best = e; + break; + } + /* + * Both basic or both extended? 
+ */ + if (((e->function ^ function) & 0x80000000) == 0) + if (!best || e->function > best->function) + best = e; + } + if (best) { + vcpu->regs[VCPU_REGS_RAX] = best->eax; + vcpu->regs[VCPU_REGS_RBX] = best->ebx; + vcpu->regs[VCPU_REGS_RCX] = best->ecx; + vcpu->regs[VCPU_REGS_RDX] = best->edx; + } + kvm_arch_ops->decache_regs(vcpu); + kvm_arch_ops->skip_emulated_instruction(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); + static void complete_pio(struct kvm_vcpu *vcpu) { struct kvm_io *io = &vcpu->run->io; @@ -2075,6 +2112,26 @@ out: return r; } +static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, + struct kvm_cpuid *cpuid, + struct kvm_cpuid_entry __user *entries) +{ + int r; + + r = -E2BIG; + if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) + goto out; + r = -EFAULT; + if (copy_from_user(&vcpu->cpuid_entries, entries, + cpuid->nent * sizeof(struct kvm_cpuid_entry))) + goto out; + vcpu->cpuid_nent = cpuid->nent; + return 0; + +out: + return r; +} + static long kvm_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -2181,6 +2238,18 @@ static long kvm_vcpu_ioctl(struct file *filp, case KVM_SET_MSRS: r = msr_io(vcpu, argp, do_set_msr, 0); break; + case KVM_SET_CPUID: { + struct kvm_cpuid __user *cpuid_arg = argp; + struct kvm_cpuid cpuid; + + r = -EFAULT; + if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) + goto out; + r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); + if (r) + goto out; + break; + } default: ; } diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index c35b8c83bf3f..d4b2936479d9 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c @@ -1101,8 +1101,8 @@ static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_r static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2; - kvm_run->exit_reason = KVM_EXIT_CPUID; - return 0; + kvm_emulate_cpuid(vcpu); + return 1; } static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index 4d5f40fcb651..71410a65bb90 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c @@ -1585,8 +1585,8 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { - kvm_run->exit_reason = KVM_EXIT_CPUID; - return 0; + kvm_emulate_cpuid(vcpu); + return 1; } static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 19aeb3385188..15e23bc06e8b 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -41,7 +41,6 @@ enum kvm_exit_reason { KVM_EXIT_UNKNOWN = 0, KVM_EXIT_EXCEPTION = 1, KVM_EXIT_IO = 2, - KVM_EXIT_CPUID = 3, KVM_EXIT_DEBUG = 4, KVM_EXIT_HLT = 5, KVM_EXIT_MMIO = 6, @@ -210,6 +209,22 @@ struct kvm_dirty_log { }; }; +struct kvm_cpuid_entry { + __u32 function; + __u32 eax; + __u32 ebx; + __u32 ecx; + __u32 edx; + __u32 padding; +}; + +/* for KVM_SET_CPUID */ +struct kvm_cpuid { + __u32 nent; + __u32 padding; + struct kvm_cpuid_entry entries[0]; +}; + #define KVMIO 0xAE /* @@ -243,5 +258,6 @@ struct kvm_dirty_log { #define KVM_DEBUG_GUEST _IOW(KVMIO, 9, struct kvm_debug_guest) #define KVM_GET_MSRS _IOWR(KVMIO, 13, struct kvm_msrs) #define KVM_SET_MSRS _IOW(KVMIO, 14, struct kvm_msrs) +#define KVM_SET_CPUID _IOW(KVMIO, 17, struct kvm_cpuid) #endif -- cgit v1.2.3 From 106b552b43beac2694df5fbafc8f125a72df5f65 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 1 Mar 2007 16:20:40 
+0200 Subject: KVM: Remove the 'emulated' field from the userspace interface We no longer emulate single instructions in userspace. Instead, we service mmio or pio requests. Signed-off-by: Avi Kivity --- drivers/kvm/kvm_main.c | 5 ----- include/linux/kvm.h | 3 +-- 2 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index caec54fbf07f..5d24203afd20 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -1588,11 +1588,6 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) /* re-sync apic's tpr */ vcpu->cr8 = kvm_run->cr8; - if (kvm_run->emulated) { - kvm_arch_ops->skip_emulated_instruction(vcpu); - kvm_run->emulated = 0; - } - if (kvm_run->io_completed) { if (vcpu->pio_pending) complete_pio(vcpu); diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 15e23bc06e8b..c6dd4a79b74b 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -51,10 +51,9 @@ enum kvm_exit_reason { /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { /* in */ - __u32 emulated; /* skip current instruction */ __u32 io_completed; /* mmio/pio request completed */ __u8 request_interrupt_window; - __u8 padding1[7]; + __u8 padding1[3]; /* out */ __u32 exit_type; -- cgit v1.2.3 From 2a4dac3952468157297b81ae0a29815c02ead179 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 1 Mar 2007 16:47:06 +0200 Subject: KVM: Remove minor wart from KVM_CREATE_VCPU ioctl That ioctl does not transfer any data, so it should be an _IO rather than an _IOW. Signed-off-by: Avi Kivity --- include/linux/kvm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/kvm.h b/include/linux/kvm.h index c6dd4a79b74b..d89189a81aba 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -241,7 +241,7 @@ struct kvm_cpuid { * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns * a vcpu fd. */ -#define KVM_CREATE_VCPU _IOW(KVMIO, 11, int) +#define KVM_CREATE_VCPU _IO(KVMIO, 11) #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 12, struct kvm_dirty_log) /* -- cgit v1.2.3 From 739872c56f3322c38320c7a5a543ef6f56f174bc Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 1 Mar 2007 17:20:13 +0200 Subject: KVM: Renumber ioctls The recent changes have left the ioctl numbers in complete disarray. Signed-off-by: Avi Kivity --- include/linux/kvm.h | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kvm.h b/include/linux/kvm.h index d89189a81aba..93472daec120 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -229,34 +229,34 @@ struct kvm_cpuid { /* * ioctls for /dev/kvm fds: */ -#define KVM_GET_API_VERSION _IO(KVMIO, 1) -#define KVM_CREATE_VM _IO(KVMIO, 2) /* returns a VM fd */ -#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 15, struct kvm_msr_list) +#define KVM_GET_API_VERSION _IO(KVMIO, 0x00) +#define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */ +#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list) /* * ioctls for VM fds */ -#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 10, struct kvm_memory_region) +#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region) /* * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns * a vcpu fd. 
*/ -#define KVM_CREATE_VCPU _IO(KVMIO, 11) -#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 12, struct kvm_dirty_log) +#define KVM_CREATE_VCPU _IO(KVMIO, 0x41) +#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) /* * ioctls for vcpu fds */ -#define KVM_RUN _IO(KVMIO, 16) -#define KVM_GET_REGS _IOR(KVMIO, 3, struct kvm_regs) -#define KVM_SET_REGS _IOW(KVMIO, 4, struct kvm_regs) -#define KVM_GET_SREGS _IOR(KVMIO, 5, struct kvm_sregs) -#define KVM_SET_SREGS _IOW(KVMIO, 6, struct kvm_sregs) -#define KVM_TRANSLATE _IOWR(KVMIO, 7, struct kvm_translation) -#define KVM_INTERRUPT _IOW(KVMIO, 8, struct kvm_interrupt) -#define KVM_DEBUG_GUEST _IOW(KVMIO, 9, struct kvm_debug_guest) -#define KVM_GET_MSRS _IOWR(KVMIO, 13, struct kvm_msrs) -#define KVM_SET_MSRS _IOW(KVMIO, 14, struct kvm_msrs) -#define KVM_SET_CPUID _IOW(KVMIO, 17, struct kvm_cpuid) +#define KVM_RUN _IO(KVMIO, 0x80) +#define KVM_GET_REGS _IOR(KVMIO, 0x81, struct kvm_regs) +#define KVM_SET_REGS _IOW(KVMIO, 0x82, struct kvm_regs) +#define KVM_GET_SREGS _IOR(KVMIO, 0x83, struct kvm_sregs) +#define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs) +#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation) +#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) +#define KVM_DEBUG_GUEST _IOW(KVMIO, 0x87, struct kvm_debug_guest) +#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs) +#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs) +#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid) #endif -- cgit v1.2.3 From 5d308f4550d9dc4c236e08b0377b610b9578577b Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 1 Mar 2007 17:56:20 +0200 Subject: KVM: Add method to check for backwards-compatible API extensions Signed-off-by: Avi Kivity --- drivers/kvm/kvm_main.c | 6 ++++++ include/linux/kvm.h | 5 +++++ 2 files changed, 11 insertions(+) (limited to 'include/linux') diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index 5d24203afd20..39cf8fd343a3 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -2416,6 +2416,12 @@ static long kvm_dev_ioctl(struct file *filp, r = 0; break; } + case KVM_CHECK_EXTENSION: + /* + * No extensions defined at present. + */ + r = 0; + break; default: ; } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 93472daec120..c93cf53953af 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -232,6 +232,11 @@ struct kvm_cpuid { #define KVM_GET_API_VERSION _IO(KVMIO, 0x00) #define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */ #define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list) +/* + * Check if a kvm extension is available. Argument is extension number, + * return is 1 (yes) or 0 (no, sorry). + */ +#define KVM_CHECK_EXTENSION _IO(KVMIO, 0x03) /* * ioctls for VM fds -- cgit v1.2.3 From b4e63f560beb187cffdaf706e534a1e2f9effb66 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Mar 2007 13:59:30 +0200 Subject: KVM: Allow userspace to process hypercalls which have no kernel handler This is useful for paravirtualized graphics devices, for example. 
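For illustration only, a minimal userspace sketch of servicing such an exit could look like the following; vcpu_fd, the mmap()ed kvm_run pointer run, and handle_guest_hypercall() are assumed names, not part of this patch, and the exit_type test shown here disappears with the later exit_type/exit_reason fold:

	/* Hypothetical userspace fragment: service a hypercall the kernel
	 * declined to handle, then hand the result back so the next KVM_RUN
	 * copies it into the guest's RAX. */
	ioctl(vcpu_fd, KVM_RUN, 0);
	if (run->exit_type == KVM_EXIT_TYPE_VM_EXIT &&
	    run->exit_reason == KVM_EXIT_HYPERCALL)
		run->hypercall.ret = handle_guest_hypercall(run->hypercall.args,
							    run->hypercall.longmode);
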
Signed-off-by: Avi Kivity --- drivers/kvm/kvm_main.c | 18 +++++++++++++++++- include/linux/kvm.h | 10 +++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index 39cf8fd343a3..de93117f1e97 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -1203,7 +1203,16 @@ int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run) } switch (nr) { default: - ; + run->hypercall.args[0] = a0; + run->hypercall.args[1] = a1; + run->hypercall.args[2] = a2; + run->hypercall.args[3] = a3; + run->hypercall.args[4] = a4; + run->hypercall.args[5] = a5; + run->hypercall.ret = ret; + run->hypercall.longmode = is_long_mode(vcpu); + kvm_arch_ops->decache_regs(vcpu); + return 0; } vcpu->regs[VCPU_REGS_RAX] = ret; kvm_arch_ops->decache_regs(vcpu); @@ -1599,6 +1608,13 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu->mmio_needed = 0; + if (kvm_run->exit_type == KVM_EXIT_TYPE_VM_EXIT + && kvm_run->exit_reason == KVM_EXIT_HYPERCALL) { + kvm_arch_ops->cache_regs(vcpu); + vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret; + kvm_arch_ops->decache_regs(vcpu); + } + r = kvm_arch_ops->run(vcpu, kvm_run); vcpu_put(vcpu); diff --git a/include/linux/kvm.h b/include/linux/kvm.h index c93cf53953af..9151ebfa22e9 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -11,7 +11,7 @@ #include #include -#define KVM_API_VERSION 6 +#define KVM_API_VERSION 7 /* * Architectural interrupt line count, and the size of the bitmap needed @@ -41,6 +41,7 @@ enum kvm_exit_reason { KVM_EXIT_UNKNOWN = 0, KVM_EXIT_EXCEPTION = 1, KVM_EXIT_IO = 2, + KVM_EXIT_HYPERCALL = 3, KVM_EXIT_DEBUG = 4, KVM_EXIT_HLT = 5, KVM_EXIT_MMIO = 6, @@ -103,6 +104,13 @@ struct kvm_run { __u32 len; __u8 is_write; } mmio; + /* KVM_EXIT_HYPERCALL */ + struct { + __u64 args[6]; + __u64 ret; + __u32 longmode; + __u32 pad; + } hypercall; }; }; -- cgit v1.2.3 From 8eb7d334bd8e693340ee198280f7d45035cdab8c Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Mar 2007 14:17:08 +0200 Subject: KVM: Fold kvm_run::exit_type into kvm_run::exit_reason Currently, userspace is told about the nature of the last exit from the guest using two fields, exit_type and exit_reason, where exit_type has just two enumerations (and no need for more). So fold exit_type into exit_reason, reducing the complexity of determining what really happened. 
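A hedged sketch of the simplified userspace dispatch after this change, assuming run points at the mmap()ed kvm_run area:

	/* With exit_type gone, a single switch on exit_reason is enough. */
	switch (run->exit_reason) {
	case KVM_EXIT_FAIL_ENTRY:
		fprintf(stderr, "entry failed, hardware reason 0x%llx\n",
			(unsigned long long)run->fail_entry.hardware_entry_failure_reason);
		break;
	case KVM_EXIT_HYPERCALL:
		/* ... as in the previous patch ... */
		break;
	default:
		/* other exit reasons */
		break;
	}
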
Signed-off-by: Avi Kivity --- drivers/kvm/kvm_main.c | 3 +-- drivers/kvm/svm.c | 7 +++---- drivers/kvm/vmx.c | 7 +++---- include/linux/kvm.h | 15 ++++++++------- 4 files changed, 15 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index de93117f1e97..ac44df551aa8 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -1608,8 +1608,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu->mmio_needed = 0; - if (kvm_run->exit_type == KVM_EXIT_TYPE_VM_EXIT - && kvm_run->exit_reason == KVM_EXIT_HYPERCALL) { + if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) { kvm_arch_ops->cache_regs(vcpu); vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret; kvm_arch_ops->decache_regs(vcpu); diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index d4b2936479d9..b09928f14219 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c @@ -1298,8 +1298,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u32 exit_code = vcpu->svm->vmcb->control.exit_code; - kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT; - if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) && exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR) printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x " @@ -1609,8 +1607,9 @@ again: vcpu->svm->next_rip = 0; if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) { - kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY; - kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code; + kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; + kvm_run->fail_entry.hardware_entry_failure_reason + = vcpu->svm->vmcb->control.exit_code; post_kvm_run_save(vcpu, kvm_run); return 0; } diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index 71410a65bb90..cf9568fbe8a5 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c @@ -1922,10 +1922,10 @@ again: asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); - kvm_run->exit_type = 0; if (fail) { - kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY; - kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR); + kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; + kvm_run->fail_entry.hardware_entry_failure_reason + = vmcs_read32(VM_INSTRUCTION_ERROR); r = 0; } else { /* @@ -1935,7 +1935,6 @@ again: profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP)); vcpu->launched = 1; - kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT; r = kvm_handle_exit(kvm_run, vcpu); if (r > 0) { /* Give scheduler a change to reschedule. 
*/ diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 9151ebfa22e9..57f47ef93829 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -11,7 +11,7 @@ #include #include -#define KVM_API_VERSION 7 +#define KVM_API_VERSION 8 /* * Architectural interrupt line count, and the size of the bitmap needed @@ -34,9 +34,6 @@ struct kvm_memory_region { #define KVM_MEM_LOG_DIRTY_PAGES 1UL -#define KVM_EXIT_TYPE_FAIL_ENTRY 1 -#define KVM_EXIT_TYPE_VM_EXIT 2 - enum kvm_exit_reason { KVM_EXIT_UNKNOWN = 0, KVM_EXIT_EXCEPTION = 1, @@ -47,6 +44,7 @@ enum kvm_exit_reason { KVM_EXIT_MMIO = 6, KVM_EXIT_IRQ_WINDOW_OPEN = 7, KVM_EXIT_SHUTDOWN = 8, + KVM_EXIT_FAIL_ENTRY = 9, }; /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ @@ -57,12 +55,11 @@ struct kvm_run { __u8 padding1[3]; /* out */ - __u32 exit_type; __u32 exit_reason; __u32 instruction_length; __u8 ready_for_interrupt_injection; __u8 if_flag; - __u16 padding2; + __u8 padding2[6]; /* in (pre_kvm_run), out (post_kvm_run) */ __u64 cr8; @@ -71,8 +68,12 @@ struct kvm_run { union { /* KVM_EXIT_UNKNOWN */ struct { - __u32 hardware_exit_reason; + __u64 hardware_exit_reason; } hw; + /* KVM_EXIT_FAIL_ENTRY */ + struct { + __u64 hardware_entry_failure_reason; + } fail_entry; /* KVM_EXIT_EXCEPTION */ struct { __u32 exception; -- cgit v1.2.3 From 1b19f3e61d7e1edb395dd64bf7d63621a37af8ca Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Mar 2007 14:24:03 +0200 Subject: KVM: Add a special exit reason when exiting due to an interrupt This is redundant, as we also return -EINTR from the ioctl, but it allows us to examine the exit_reason field on resume without seeing old data. Signed-off-by: Avi Kivity --- drivers/kvm/svm.c | 2 ++ drivers/kvm/vmx.c | 2 ++ include/linux/kvm.h | 3 ++- 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index b09928f14219..0311665e3c83 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c @@ -1619,12 +1619,14 @@ again: if (signal_pending(current)) { ++kvm_stat.signal_exits; post_kvm_run_save(vcpu, kvm_run); + kvm_run->exit_reason = KVM_EXIT_INTR; return -EINTR; } if (dm_request_for_irq_injection(vcpu, kvm_run)) { ++kvm_stat.request_irq_exits; post_kvm_run_save(vcpu, kvm_run); + kvm_run->exit_reason = KVM_EXIT_INTR; return -EINTR; } kvm_resched(vcpu); diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index cf9568fbe8a5..e69bab6d811d 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c @@ -1941,12 +1941,14 @@ again: if (signal_pending(current)) { ++kvm_stat.signal_exits; post_kvm_run_save(vcpu, kvm_run); + kvm_run->exit_reason = KVM_EXIT_INTR; return -EINTR; } if (dm_request_for_irq_injection(vcpu, kvm_run)) { ++kvm_stat.request_irq_exits; post_kvm_run_save(vcpu, kvm_run); + kvm_run->exit_reason = KVM_EXIT_INTR; return -EINTR; } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 57f47ef93829..b3af92e7bf5d 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -11,7 +11,7 @@ #include #include -#define KVM_API_VERSION 8 +#define KVM_API_VERSION 9 /* * Architectural interrupt line count, and the size of the bitmap needed @@ -45,6 +45,7 @@ enum kvm_exit_reason { KVM_EXIT_IRQ_WINDOW_OPEN = 7, KVM_EXIT_SHUTDOWN = 8, KVM_EXIT_FAIL_ENTRY = 9, + KVM_EXIT_INTR = 10, }; /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ -- cgit v1.2.3 From 1961d276c877b99f5f16aaf36377c75e0e191c3a Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Mar 2007 19:46:05 +0200 Subject: KVM: Add guest mode signal mask Allow a special signal mask to 
be used while executing in guest mode. This allows signals to be used to interrupt a vcpu without requiring signal delivery to a userspace handler, which is quite expensive. Userspace still receives -EINTR and can get the signal via sigwait(). Signed-off-by: Avi Kivity --- drivers/kvm/kvm.h | 3 +++ drivers/kvm/kvm_main.c | 41 +++++++++++++++++++++++++++++++++++++++++ include/linux/kvm.h | 7 +++++++ 3 files changed, 51 insertions(+) (limited to 'include/linux') diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index be3a0e7ecae4..1c4a581938bf 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h @@ -277,6 +277,9 @@ struct kvm_vcpu { gpa_t mmio_phys_addr; int pio_pending; + int sigset_active; + sigset_t sigset; + struct { int active; u8 save_iopl; diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index ac44df551aa8..df85f5f65489 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -1591,9 +1591,13 @@ static void complete_pio(struct kvm_vcpu *vcpu) static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; + sigset_t sigsaved; vcpu_load(vcpu); + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + /* re-sync apic's tpr */ vcpu->cr8 = kvm_run->cr8; @@ -1616,6 +1620,9 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) r = kvm_arch_ops->run(vcpu, kvm_run); + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &sigsaved, NULL); + vcpu_put(vcpu); return r; } @@ -2142,6 +2149,17 @@ out: return r; } +static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) +{ + if (sigset) { + sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); + vcpu->sigset_active = 1; + vcpu->sigset = *sigset; + } else + vcpu->sigset_active = 0; + return 0; +} + static long kvm_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -2260,6 +2278,29 @@ static long kvm_vcpu_ioctl(struct file *filp, goto out; break; } + case KVM_SET_SIGNAL_MASK: { + struct kvm_signal_mask __user *sigmask_arg = argp; + struct kvm_signal_mask kvm_sigmask; + sigset_t sigset, *p; + + p = NULL; + if (argp) { + r = -EFAULT; + if (copy_from_user(&kvm_sigmask, argp, + sizeof kvm_sigmask)) + goto out; + r = -EINVAL; + if (kvm_sigmask.len != sizeof sigset) + goto out; + r = -EFAULT; + if (copy_from_user(&sigset, sigmask_arg->sigset, + sizeof sigset)) + goto out; + p = &sigset; + } + r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); + break; + } default: ; } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index b3af92e7bf5d..c0d10cd8088e 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -234,6 +234,12 @@ struct kvm_cpuid { struct kvm_cpuid_entry entries[0]; }; +/* for KVM_SET_SIGNAL_MASK */ +struct kvm_signal_mask { + __u32 len; + __u8 sigset[0]; +}; + #define KVMIO 0xAE /* @@ -273,5 +279,6 @@ struct kvm_cpuid { #define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs) #define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs) #define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid) +#define KVM_SET_SIGNAL_MASK _IOW(KVMIO, 0x8b, struct kvm_signal_mask) #endif -- cgit v1.2.3 From 07c45a366d89f8eaec5d9890e810171b408f9a52 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 7 Mar 2007 13:05:38 +0200 Subject: KVM: Allow kernel to select size of mmap() buffer This allows us to store offsets in the kernel/user kvm_run area, and be sure that userspace has them mapped. As offsets can be outside the kvm_run struct, userspace has no way of knowing how much to mmap. 
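A minimal usage sketch, assuming kvm_fd is the /dev/kvm descriptor and vcpu_fd a vcpu descriptor (both names illustrative): userspace asks the kernel how much to map instead of assuming sizeof(struct kvm_run).

	/* Hypothetical userspace fragment: size the vcpu mapping from the
	 * kernel's answer rather than from the kvm_run structure. */
	long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);
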
Signed-off-by: Avi Kivity --- drivers/kvm/kvm_main.c | 8 +++++++- include/linux/kvm.h | 4 ++++ 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index df85f5f65489..cba0b87c34e4 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -2436,7 +2436,7 @@ static long kvm_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; - int r = -EINVAL; + long r = -EINVAL; switch (ioctl) { case KVM_GET_API_VERSION: @@ -2478,6 +2478,12 @@ static long kvm_dev_ioctl(struct file *filp, */ r = 0; break; + case KVM_GET_VCPU_MMAP_SIZE: + r = -EINVAL; + if (arg) + goto out; + r = PAGE_SIZE; + break; default: ; } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index c0d10cd8088e..dad90816cad8 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -253,6 +253,10 @@ struct kvm_signal_mask { * return is 1 (yes) or 0 (no, sorry). */ #define KVM_CHECK_EXTENSION _IO(KVMIO, 0x03) +/* + * Get size for mmap(vcpu_fd) + */ +#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ /* * ioctls for VM fds -- cgit v1.2.3 From 039576c03c35e2f990ad9bb9c39e1bad3cd60d34 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 20 Mar 2007 12:46:50 +0200 Subject: KVM: Avoid guest virtual addresses in string pio userspace interface The current string pio interface communicates using guest virtual addresses, relying on userspace to translate addresses and to check permissions. This interface cannot fully support guest smp, as the check needs to take into account two pages at one in case an unaligned string transfer straddles a page boundary. Change the interface not to communicate guest addresses at all; instead use a buffer page (mmaped by userspace) and do transfers there. The kernel manages the virtual to physical translation and can perform the checks atomically by taking the appropriate locks. 
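A rough userspace sketch of servicing KVM_EXIT_IO under the new interface, assuming run is the mmap()ed kvm_run area and handle_in()/handle_out() are illustrative port handlers; the transfer data now lives inside the mapped region at io.data_offset, never at a guest virtual address:

	/* Hypothetical userspace fragment for a string pio exit. */
	if (run->exit_reason == KVM_EXIT_IO) {
		char *data = (char *)run + run->io.data_offset;
		unsigned i;

		for (i = 0; i < run->io.count; i++, data += run->io.size) {
			if (run->io.direction == KVM_EXIT_IO_OUT)
				handle_out(run->io.port, data, run->io.size);
			else
				handle_in(run->io.port, data, run->io.size);
		}
	}
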
Signed-off-by: Avi Kivity --- drivers/kvm/kvm.h | 21 +++++- drivers/kvm/kvm_main.c | 183 +++++++++++++++++++++++++++++++++++++++++++++---- drivers/kvm/mmu.c | 9 +++ drivers/kvm/svm.c | 40 +++++------ drivers/kvm/vmx.c | 40 +++++------ include/linux/kvm.h | 11 +-- 6 files changed, 238 insertions(+), 66 deletions(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index 1c4a581938bf..7866b34b6c96 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h @@ -74,6 +74,8 @@ #define IOPL_SHIFT 12 +#define KVM_PIO_PAGE_OFFSET 1 + /* * Address types: * @@ -220,6 +222,18 @@ enum { VCPU_SREG_LDTR, }; +struct kvm_pio_request { + unsigned long count; + int cur_count; + struct page *guest_pages[2]; + unsigned guest_page_offset; + int in; + int size; + int string; + int down; + int rep; +}; + struct kvm_vcpu { struct kvm *kvm; union { @@ -275,7 +289,8 @@ struct kvm_vcpu { int mmio_size; unsigned char mmio_data[8]; gpa_t mmio_phys_addr; - int pio_pending; + struct kvm_pio_request pio; + void *pio_data; int sigset_active; sigset_t sigset; @@ -421,6 +436,7 @@ hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa); #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva); +struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); void kvm_emulator_want_group7_invlpg(void); @@ -453,6 +469,9 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value, struct x86_emulate_ctxt; +int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, + int size, unsigned long count, int string, int down, + gva_t address, int rep, unsigned port); void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address); int emulate_clts(struct kvm_vcpu *vcpu); diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index ba7f43a4459e..205998c141fb 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -346,6 +346,17 @@ static void kvm_free_physmem(struct kvm *kvm) kvm_free_physmem_slot(&kvm->memslots[i], NULL); } +static void free_pio_guest_pages(struct kvm_vcpu *vcpu) +{ + int i; + + for (i = 0; i < 2; ++i) + if (vcpu->pio.guest_pages[i]) { + __free_page(vcpu->pio.guest_pages[i]); + vcpu->pio.guest_pages[i] = NULL; + } +} + static void kvm_free_vcpu(struct kvm_vcpu *vcpu) { if (!vcpu->vmcs) @@ -357,6 +368,9 @@ static void kvm_free_vcpu(struct kvm_vcpu *vcpu) kvm_arch_ops->vcpu_free(vcpu); free_page((unsigned long)vcpu->run); vcpu->run = NULL; + free_page((unsigned long)vcpu->pio_data); + vcpu->pio_data = NULL; + free_pio_guest_pages(vcpu); } static void kvm_free_vcpus(struct kvm *kvm) @@ -1550,44 +1564,168 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); -static void complete_pio(struct kvm_vcpu *vcpu) +static int pio_copy_data(struct kvm_vcpu *vcpu) { - struct kvm_io *io = &vcpu->run->io; + void *p = vcpu->pio_data; + void *q; + unsigned bytes; + int nr_pages = vcpu->pio.guest_pages[1] ? 
2 : 1; + + kvm_arch_ops->vcpu_put(vcpu); + q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE, + PAGE_KERNEL); + if (!q) { + kvm_arch_ops->vcpu_load(vcpu); + free_pio_guest_pages(vcpu); + return -ENOMEM; + } + q += vcpu->pio.guest_page_offset; + bytes = vcpu->pio.size * vcpu->pio.cur_count; + if (vcpu->pio.in) + memcpy(q, p, bytes); + else + memcpy(p, q, bytes); + q -= vcpu->pio.guest_page_offset; + vunmap(q); + kvm_arch_ops->vcpu_load(vcpu); + free_pio_guest_pages(vcpu); + return 0; +} + +static int complete_pio(struct kvm_vcpu *vcpu) +{ + struct kvm_pio_request *io = &vcpu->pio; long delta; + int r; kvm_arch_ops->cache_regs(vcpu); if (!io->string) { - if (io->direction == KVM_EXIT_IO_IN) - memcpy(&vcpu->regs[VCPU_REGS_RAX], &io->value, + if (io->in) + memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data, io->size); } else { + if (io->in) { + r = pio_copy_data(vcpu); + if (r) { + kvm_arch_ops->cache_regs(vcpu); + return r; + } + } + delta = 1; if (io->rep) { - delta *= io->count; + delta *= io->cur_count; /* * The size of the register should really depend on * current address size. */ vcpu->regs[VCPU_REGS_RCX] -= delta; } - if (io->string_down) + if (io->down) delta = -delta; delta *= io->size; - if (io->direction == KVM_EXIT_IO_IN) + if (io->in) vcpu->regs[VCPU_REGS_RDI] += delta; else vcpu->regs[VCPU_REGS_RSI] += delta; } - vcpu->pio_pending = 0; vcpu->run->io_completed = 0; kvm_arch_ops->decache_regs(vcpu); - kvm_arch_ops->skip_emulated_instruction(vcpu); + io->count -= io->cur_count; + io->cur_count = 0; + + if (!io->count) + kvm_arch_ops->skip_emulated_instruction(vcpu); + return 0; } +int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, + int size, unsigned long count, int string, int down, + gva_t address, int rep, unsigned port) +{ + unsigned now, in_page; + int i; + int nr_pages = 1; + struct page *page; + + vcpu->run->exit_reason = KVM_EXIT_IO; + vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; + vcpu->run->io.size = size; + vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; + vcpu->run->io.count = count; + vcpu->run->io.port = port; + vcpu->pio.count = count; + vcpu->pio.cur_count = count; + vcpu->pio.size = size; + vcpu->pio.in = in; + vcpu->pio.string = string; + vcpu->pio.down = down; + vcpu->pio.guest_page_offset = offset_in_page(address); + vcpu->pio.rep = rep; + + if (!string) { + kvm_arch_ops->cache_regs(vcpu); + memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4); + kvm_arch_ops->decache_regs(vcpu); + return 0; + } + + if (!count) { + kvm_arch_ops->skip_emulated_instruction(vcpu); + return 1; + } + + now = min(count, PAGE_SIZE / size); + + if (!down) + in_page = PAGE_SIZE - offset_in_page(address); + else + in_page = offset_in_page(address) + size; + now = min(count, (unsigned long)in_page / size); + if (!now) { + /* + * String I/O straddles page boundary. Pin two guest pages + * so that we satisfy atomicity constraints. Do just one + * transaction to avoid complexity. + */ + nr_pages = 2; + now = 1; + } + if (down) { + /* + * String I/O in reverse. Yuck. Kill the guest, fix later. 
+ */ + printk(KERN_ERR "kvm: guest string pio down\n"); + inject_gp(vcpu); + return 1; + } + vcpu->run->io.count = now; + vcpu->pio.cur_count = now; + + for (i = 0; i < nr_pages; ++i) { + spin_lock(&vcpu->kvm->lock); + page = gva_to_page(vcpu, address + i * PAGE_SIZE); + if (page) + get_page(page); + vcpu->pio.guest_pages[i] = page; + spin_unlock(&vcpu->kvm->lock); + if (!page) { + inject_gp(vcpu); + free_pio_guest_pages(vcpu); + return 1; + } + } + + if (!vcpu->pio.in) + return pio_copy_data(vcpu); + return 0; +} +EXPORT_SYMBOL_GPL(kvm_setup_pio); + static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; @@ -1602,9 +1740,11 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu->cr8 = kvm_run->cr8; if (kvm_run->io_completed) { - if (vcpu->pio_pending) - complete_pio(vcpu); - else { + if (vcpu->pio.cur_count) { + r = complete_pio(vcpu); + if (r) + goto out; + } else { memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); vcpu->mmio_read_completed = 1; } @@ -1620,6 +1760,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) r = kvm_arch_ops->run(vcpu, kvm_run); +out: if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); @@ -1995,9 +2136,12 @@ static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma, *type = VM_FAULT_MINOR; pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - if (pgoff != 0) + if (pgoff == 0) + page = virt_to_page(vcpu->run); + else if (pgoff == KVM_PIO_PAGE_OFFSET) + page = virt_to_page(vcpu->pio_data); + else return NOPAGE_SIGBUS; - page = virt_to_page(vcpu->run); get_page(page); return page; } @@ -2094,6 +2238,12 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n) goto out_unlock; vcpu->run = page_address(page); + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + r = -ENOMEM; + if (!page) + goto out_free_run; + vcpu->pio_data = page_address(page); + vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf, FX_IMAGE_ALIGN); vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE; @@ -2123,6 +2273,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n) out_free_vcpus: kvm_free_vcpu(vcpu); +out_free_run: + free_page((unsigned long)vcpu->run); + vcpu->run = NULL; out_unlock: mutex_unlock(&vcpu->mutex); out: @@ -2491,7 +2644,7 @@ static long kvm_dev_ioctl(struct file *filp, r = -EINVAL; if (arg) goto out; - r = PAGE_SIZE; + r = 2 * PAGE_SIZE; break; default: ; diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c index 2d905770fd88..4843e95e54e1 100644 --- a/drivers/kvm/mmu.c +++ b/drivers/kvm/mmu.c @@ -735,6 +735,15 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva) return gpa_to_hpa(vcpu, gpa); } +struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva) +{ + gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva); + + if (gpa == UNMAPPED_GVA) + return NULL; + return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT); +} + static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) { } diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index 2396ada23777..64afc5cf890d 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c @@ -984,7 +984,7 @@ static int io_get_override(struct kvm_vcpu *vcpu, return 0; } -static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, u64 *address) +static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address) { unsigned long addr_mask; unsigned long *reg; @@ -1028,40 +1028,38 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, u64 *address) static int io_interception(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run) { u32 io_info = vcpu->svm->vmcb->control.exit_info_1; //address size bug? - int _in = io_info & SVM_IOIO_TYPE_MASK; + int size, down, in, string, rep; + unsigned port; + unsigned long count; + gva_t address = 0; ++kvm_stat.io_exits; vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2; - kvm_run->exit_reason = KVM_EXIT_IO; - kvm_run->io.port = io_info >> 16; - kvm_run->io.direction = (_in) ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; - kvm_run->io.size = ((io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT); - kvm_run->io.string = (io_info & SVM_IOIO_STR_MASK) != 0; - kvm_run->io.rep = (io_info & SVM_IOIO_REP_MASK) != 0; - kvm_run->io.count = 1; + in = (io_info & SVM_IOIO_TYPE_MASK) != 0; + port = io_info >> 16; + size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; + string = (io_info & SVM_IOIO_STR_MASK) != 0; + rep = (io_info & SVM_IOIO_REP_MASK) != 0; + count = 1; + down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0; - if (kvm_run->io.string) { + if (string) { unsigned addr_mask; - addr_mask = io_adress(vcpu, _in, &kvm_run->io.address); + addr_mask = io_adress(vcpu, in, &address); if (!addr_mask) { printk(KERN_DEBUG "%s: get io address failed\n", __FUNCTION__); return 1; } - if (kvm_run->io.rep) { - kvm_run->io.count - = vcpu->regs[VCPU_REGS_RCX] & addr_mask; - kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags - & X86_EFLAGS_DF) != 0; - } - } else - kvm_run->io.value = vcpu->svm->vmcb->save.rax; - vcpu->pio_pending = 1; - return 0; + if (rep) + count = vcpu->regs[VCPU_REGS_RCX] & addr_mask; + } + return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down, + address, rep, port); } static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index e69bab6d811d..0d9bf0b36d37 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c @@ -1394,7 +1394,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return 0; } -static int get_io_count(struct kvm_vcpu *vcpu, u64 *count) +static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count) { u64 inst; gva_t rip; @@ -1439,35 +1439,35 @@ static int get_io_count(struct kvm_vcpu *vcpu, u64 *count) done: countr_size *= 8; *count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size)); + //printk("cx: %lx\n", vcpu->regs[VCPU_REGS_RCX]); return 1; } static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u64 exit_qualification; + int size, down, in, string, rep; + unsigned port; + unsigned long count; + gva_t address; ++kvm_stat.io_exits; exit_qualification = vmcs_read64(EXIT_QUALIFICATION); - kvm_run->exit_reason = KVM_EXIT_IO; - if (exit_qualification & 8) - kvm_run->io.direction = KVM_EXIT_IO_IN; - else - kvm_run->io.direction = KVM_EXIT_IO_OUT; - kvm_run->io.size = (exit_qualification & 7) + 1; - kvm_run->io.string = (exit_qualification & 16) != 0; - kvm_run->io.string_down - = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0; - kvm_run->io.rep = (exit_qualification & 32) != 0; - kvm_run->io.port = exit_qualification >> 16; - kvm_run->io.count = 1; - if (kvm_run->io.string) { - if (!get_io_count(vcpu, &kvm_run->io.count)) + in = (exit_qualification & 8) != 0; + size = (exit_qualification & 7) + 1; + string = (exit_qualification & 16) != 0; + down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0; + count = 1; + rep = (exit_qualification & 32) != 0; + port = exit_qualification >> 16; + address = 0; + if (string) { + if (rep && !get_io_count(vcpu, &count)) return 1; - 
kvm_run->io.address = vmcs_readl(GUEST_LINEAR_ADDRESS); - } else - kvm_run->io.value = vcpu->regs[VCPU_REGS_RAX]; /* rax */ - vcpu->pio_pending = 1; - return 0; + address = vmcs_readl(GUEST_LINEAR_ADDRESS); + } + return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down, + address, rep, port); } static void diff --git a/include/linux/kvm.h b/include/linux/kvm.h index dad90816cad8..728b24cf5d77 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -86,16 +86,9 @@ struct kvm_run { #define KVM_EXIT_IO_OUT 1 __u8 direction; __u8 size; /* bytes */ - __u8 string; - __u8 string_down; - __u8 rep; - __u8 pad; __u16 port; - __u64 count; - union { - __u64 address; - __u32 value; - }; + __u32 count; + __u64 data_offset; /* relative to kvm_run start */ } io; struct { } debug; -- cgit v1.2.3 From e8207547d2f7b2f557bdb73015c1f74c32474438 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Fri, 30 Mar 2007 16:54:30 +0300 Subject: KVM: Add physical memory aliasing feature With this, we can specify that accesses to one physical memory range will be remapped to another. This is useful for the vga window at 0xa0000 which is used as a movable window into the (much larger) framebuffer. Signed-off-by: Avi Kivity --- drivers/kvm/kvm.h | 9 +++++ drivers/kvm/kvm_main.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++-- include/linux/kvm.h | 10 +++++- 3 files changed, 104 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index d19985a5508a..fceeb840a255 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h @@ -51,6 +51,7 @@ #define UNMAPPED_GVA (~(gpa_t)0) #define KVM_MAX_VCPUS 1 +#define KVM_ALIAS_SLOTS 4 #define KVM_MEMORY_SLOTS 4 #define KVM_NUM_MMU_PAGES 256 #define KVM_MIN_FREE_MMU_PAGES 5 @@ -312,6 +313,12 @@ struct kvm_vcpu { struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES]; }; +struct kvm_mem_alias { + gfn_t base_gfn; + unsigned long npages; + gfn_t target_gfn; +}; + struct kvm_memory_slot { gfn_t base_gfn; unsigned long npages; @@ -322,6 +329,8 @@ struct kvm_memory_slot { struct kvm { spinlock_t lock; /* protects everything except vcpus */ + int naliases; + struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; int nmemslots; struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS]; /* diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index a0ec5dda3305..e495855727e2 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -846,7 +846,73 @@ out: return r; } -struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) +/* + * Set a new alias region. Aliases map a portion of physical memory into + * another portion. This is useful for memory windows, for example the PC + * VGA region. 
+ */ +static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, + struct kvm_memory_alias *alias) +{ + int r, n; + struct kvm_mem_alias *p; + + r = -EINVAL; + /* General sanity checks */ + if (alias->memory_size & (PAGE_SIZE - 1)) + goto out; + if (alias->guest_phys_addr & (PAGE_SIZE - 1)) + goto out; + if (alias->slot >= KVM_ALIAS_SLOTS) + goto out; + if (alias->guest_phys_addr + alias->memory_size + < alias->guest_phys_addr) + goto out; + if (alias->target_phys_addr + alias->memory_size + < alias->target_phys_addr) + goto out; + + spin_lock(&kvm->lock); + + p = &kvm->aliases[alias->slot]; + p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; + p->npages = alias->memory_size >> PAGE_SHIFT; + p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT; + + for (n = KVM_ALIAS_SLOTS; n > 0; --n) + if (kvm->aliases[n - 1].npages) + break; + kvm->naliases = n; + + spin_unlock(&kvm->lock); + + vcpu_load(&kvm->vcpus[0]); + spin_lock(&kvm->lock); + kvm_mmu_zap_all(&kvm->vcpus[0]); + spin_unlock(&kvm->lock); + vcpu_put(&kvm->vcpus[0]); + + return 0; + +out: + return r; +} + +static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) +{ + int i; + struct kvm_mem_alias *alias; + + for (i = 0; i < kvm->naliases; ++i) { + alias = &kvm->aliases[i]; + if (gfn >= alias->base_gfn + && gfn < alias->base_gfn + alias->npages) + return alias->target_gfn + gfn - alias->base_gfn; + } + return gfn; +} + +static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn) { int i; @@ -859,13 +925,19 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) } return NULL; } -EXPORT_SYMBOL_GPL(gfn_to_memslot); + +struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) +{ + gfn = unalias_gfn(kvm, gfn); + return __gfn_to_memslot(kvm, gfn); +} struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; - slot = gfn_to_memslot(kvm, gfn); + gfn = unalias_gfn(kvm, gfn); + slot = __gfn_to_memslot(kvm, gfn); if (!slot) return NULL; return slot->phys_mem[gfn - slot->base_gfn]; @@ -2512,6 +2584,17 @@ static long kvm_vm_ioctl(struct file *filp, goto out; break; } + case KVM_SET_MEMORY_ALIAS: { + struct kvm_memory_alias alias; + + r = -EFAULT; + if (copy_from_user(&alias, argp, sizeof alias)) + goto out; + r = kvm_vm_ioctl_set_memory_alias(kvm, &alias); + if (r) + goto out; + break; + } default: ; } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 728b24cf5d77..da9b23fa4b6c 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -11,7 +11,7 @@ #include #include -#define KVM_API_VERSION 9 +#define KVM_API_VERSION 10 /* * Architectural interrupt line count, and the size of the bitmap needed @@ -33,6 +33,13 @@ struct kvm_memory_region { /* for kvm_memory_region::flags */ #define KVM_MEM_LOG_DIRTY_PAGES 1UL +struct kvm_memory_alias { + __u32 slot; /* this has a different namespace than memory slots */ + __u32 flags; + __u64 guest_phys_addr; + __u64 memory_size; + __u64 target_phys_addr; +}; enum kvm_exit_reason { KVM_EXIT_UNKNOWN = 0, @@ -261,6 +268,7 @@ struct kvm_signal_mask { */ #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) +#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) /* * ioctls for vcpu fds -- cgit v1.2.3 From b8836737d92c139be770eae3d6574e33d1224caf Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 1 Apr 2007 16:34:31 +0300 Subject: KVM: Add fpu get/set operations These are really helpful when migrating an floating point app to another machine. 
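A minimal sketch of how a migration path might use the new ioctls, assuming src_vcpu_fd and dst_vcpu_fd are illustrative vcpu descriptors on the source and destination hosts:

	/* Hypothetical userspace fragment: save guest FPU state on the source
	 * and restore it on the destination. */
	struct kvm_fpu fpu;

	if (ioctl(src_vcpu_fd, KVM_GET_FPU, &fpu) < 0)
		perror("KVM_GET_FPU");
	/* ... transfer 'fpu' to the destination host ... */
	if (ioctl(dst_vcpu_fd, KVM_SET_FPU, &fpu) < 0)
		perror("KVM_SET_FPU");
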
Signed-off-by: Avi Kivity --- drivers/kvm/kvm_main.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/kvm.h | 17 ++++++++++ 2 files changed, 103 insertions(+) (limited to 'include/linux') diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index e495855727e2..b065c4991568 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -2398,6 +2398,67 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) return 0; } +/* + * fxsave fpu state. Taken from x86_64/processor.h. To be killed when + * we have asm/x86/processor.h + */ +struct fxsave { + u16 cwd; + u16 swd; + u16 twd; + u16 fop; + u64 rip; + u64 rdp; + u32 mxcsr; + u32 mxcsr_mask; + u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ +#ifdef CONFIG_X86_64 + u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ +#else + u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ +#endif +}; + +static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image; + + vcpu_load(vcpu); + + memcpy(fpu->fpr, fxsave->st_space, 128); + fpu->fcw = fxsave->cwd; + fpu->fsw = fxsave->swd; + fpu->ftwx = fxsave->twd; + fpu->last_opcode = fxsave->fop; + fpu->last_ip = fxsave->rip; + fpu->last_dp = fxsave->rdp; + memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); + + vcpu_put(vcpu); + + return 0; +} + +static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image; + + vcpu_load(vcpu); + + memcpy(fxsave->st_space, fpu->fpr, 128); + fxsave->cwd = fpu->fcw; + fxsave->swd = fpu->fsw; + fxsave->twd = fpu->ftwx; + fxsave->fop = fpu->last_opcode; + fxsave->rip = fpu->last_ip; + fxsave->rdp = fpu->last_dp; + memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); + + vcpu_put(vcpu); + + return 0; +} + static long kvm_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -2542,6 +2603,31 @@ static long kvm_vcpu_ioctl(struct file *filp, r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); break; } + case KVM_GET_FPU: { + struct kvm_fpu fpu; + + memset(&fpu, 0, sizeof fpu); + r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu); + if (r) + goto out; + r = -EFAULT; + if (copy_to_user(argp, &fpu, sizeof fpu)) + goto out; + r = 0; + break; + } + case KVM_SET_FPU: { + struct kvm_fpu fpu; + + r = -EFAULT; + if (copy_from_user(&fpu, argp, sizeof fpu)) + goto out; + r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu); + if (r) + goto out; + r = 0; + break; + } default: ; } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index da9b23fa4b6c..07bf353eeb6f 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -126,6 +126,21 @@ struct kvm_regs { __u64 rip, rflags; }; +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { + __u8 fpr[8][16]; + __u16 fcw; + __u16 fsw; + __u8 ftwx; /* in fxsave format */ + __u8 pad1; + __u16 last_opcode; + __u64 last_ip; + __u64 last_dp; + __u8 xmm[16][16]; + __u32 mxcsr; + __u32 pad2; +}; + struct kvm_segment { __u64 base; __u32 limit; @@ -285,5 +300,7 @@ struct kvm_signal_mask { #define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs) #define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid) #define KVM_SET_SIGNAL_MASK _IOW(KVMIO, 0x8b, struct kvm_signal_mask) +#define KVM_GET_FPU _IOR(KVMIO, 0x8c, struct kvm_fpu) +#define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu) #endif -- cgit v1.2.3 From 02c83209726270ddf9597deabc45e08f6fc3942c Mon Sep 17 00:00:00 2001 From: Avi 
Kivity Date: Sun, 29 Apr 2007 15:02:17 +0300 Subject: KVM: Don't require explicit indication of completion of mmio or pio It is illegal not to return from a pio or mmio request without completing it, as mmio or pio is an atomic operation. Therefore, we can simplify the userspace interface by avoiding the completion indication. Signed-off-by: Avi Kivity --- drivers/kvm/kvm_main.c | 44 ++++++++++++++++++++++---------------------- include/linux/kvm.h | 5 ++--- 2 files changed, 24 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index f267dbb52845..c8b8cfa332bb 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -1237,8 +1237,10 @@ int emulate_instruction(struct kvm_vcpu *vcpu, kvm_arch_ops->decache_regs(vcpu); kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags); - if (vcpu->mmio_is_write) + if (vcpu->mmio_is_write) { + vcpu->mmio_needed = 0; return EMULATE_DO_MMIO; + } return EMULATE_DONE; } @@ -1692,8 +1694,6 @@ static int complete_pio(struct kvm_vcpu *vcpu) vcpu->regs[VCPU_REGS_RSI] += delta; } - vcpu->run->io_completed = 0; - kvm_arch_ops->decache_regs(vcpu); io->count -= io->cur_count; @@ -1800,25 +1800,25 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) /* re-sync apic's tpr */ vcpu->cr8 = kvm_run->cr8; - if (kvm_run->io_completed) { - if (vcpu->pio.cur_count) { - r = complete_pio(vcpu); - if (r) - goto out; - } else if (!vcpu->mmio_is_write) { - memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); - vcpu->mmio_read_completed = 1; - vcpu->mmio_needed = 0; - r = emulate_instruction(vcpu, kvm_run, - vcpu->mmio_fault_cr2, 0); - if (r == EMULATE_DO_MMIO) { - /* - * Read-modify-write. Back to userspace. - */ - kvm_run->exit_reason = KVM_EXIT_MMIO; - r = 0; - goto out; - } + if (vcpu->pio.cur_count) { + r = complete_pio(vcpu); + if (r) + goto out; + } + + if (vcpu->mmio_needed) { + memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); + vcpu->mmio_read_completed = 1; + vcpu->mmio_needed = 0; + r = emulate_instruction(vcpu, kvm_run, + vcpu->mmio_fault_cr2, 0); + if (r == EMULATE_DO_MMIO) { + /* + * Read-modify-write. Back to userspace. + */ + kvm_run->exit_reason = KVM_EXIT_MMIO; + r = 0; + goto out; } } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 07bf353eeb6f..738c2f50c774 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -11,7 +11,7 @@ #include #include -#define KVM_API_VERSION 10 +#define KVM_API_VERSION 11 /* * Architectural interrupt line count, and the size of the bitmap needed @@ -58,9 +58,8 @@ enum kvm_exit_reason { /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { /* in */ - __u32 io_completed; /* mmio/pio request completed */ __u8 request_interrupt_window; - __u8 padding1[3]; + __u8 padding1[7]; /* out */ __u32 exit_reason; -- cgit v1.2.3 From 2ff81f70b56dc1cdd3bf2f08414608069db6ef1a Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 29 Apr 2007 16:25:49 +0300 Subject: KVM: Remove unused 'instruction_length' As we no longer emulate in userspace, this is meaningless. We don't compute it on SVM anyway. 
Signed-off-by: Avi Kivity --- drivers/kvm/vmx.c | 1 - include/linux/kvm.h | 5 ++--- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index 5a2a68dec6bf..724db0027f00 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c @@ -1783,7 +1783,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) exit_reason != EXIT_REASON_EXCEPTION_NMI ) printk(KERN_WARNING "%s: unexpected, valid vectoring info and " "exit reason is 0x%x\n", __FUNCTION__, exit_reason); - kvm_run->instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); if (exit_reason < kvm_vmx_max_exit_handlers && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run); diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 738c2f50c774..e6edca81ab84 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -11,7 +11,7 @@ #include #include -#define KVM_API_VERSION 11 +#define KVM_API_VERSION 12 /* * Architectural interrupt line count, and the size of the bitmap needed @@ -63,10 +63,9 @@ struct kvm_run { /* out */ __u32 exit_reason; - __u32 instruction_length; __u8 ready_for_interrupt_injection; __u8 if_flag; - __u8 padding2[6]; + __u8 padding2[2]; /* in (pre_kvm_run), out (post_kvm_run) */ __u64 cr8; -- cgit v1.2.3 From ef4533f8af7a8798cb8f52b06f47acf0c0d2d767 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 3 May 2007 03:10:39 -0700 Subject: [AFS]: Make the match_*() functions take const options. Make the match_*() functions take a const pointer to the options table and make strings pointers in the options table const too. Signed-off-by: David Howells Signed-off-by: David S. Miller --- include/linux/parser.h | 8 ++++---- lib/parser.c | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/parser.h b/include/linux/parser.h index fa3332861a09..86676f600992 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -11,10 +11,10 @@ /* associates an integer enumerator with a pattern string. */ struct match_token { int token; - char *pattern; + const char *pattern; }; -typedef struct match_token match_table_t[]; +typedef const struct match_token match_table_t[]; /* Maximum number of arguments that match_token will find in a pattern */ enum {MAX_OPT_ARGS = 3}; @@ -29,5 +29,5 @@ int match_token(char *, match_table_t table, substring_t args[]); int match_int(substring_t *, int *result); int match_octal(substring_t *, int *result); int match_hex(substring_t *, int *result); -void match_strcpy(char *, substring_t *); -char *match_strdup(substring_t *); +void match_strcpy(char *, const substring_t *); +char *match_strdup(const substring_t *); diff --git a/lib/parser.c b/lib/parser.c index 7ad2a48abc5e..703c8c13b346 100644 --- a/lib/parser.c +++ b/lib/parser.c @@ -22,7 +22,7 @@ * match extremely simple token=arg style patterns. If the pattern is found, * the location(s) of the arguments will be returned in the @args array. 
*/ -static int match_one(char *s, char *p, substring_t args[]) +static int match_one(char *s, const char *p, substring_t args[]) { char *meta; int argc = 0; @@ -43,7 +43,7 @@ static int match_one(char *s, char *p, substring_t args[]) p = meta + 1; if (isdigit(*p)) - len = simple_strtoul(p, &p, 10); + len = simple_strtoul(p, (char **) &p, 10); else if (*p == '%') { if (*s++ != '%') return 0; @@ -102,7 +102,7 @@ static int match_one(char *s, char *p, substring_t args[]) */ int match_token(char *s, match_table_t table, substring_t args[]) { - struct match_token *p; + const struct match_token *p; for (p = table; !match_one(s, p->pattern, args) ; p++) ; @@ -190,7 +190,7 @@ int match_hex(substring_t *s, int *result) * &substring_t @s to the c-style string @to. Caller guarantees that @to is * large enough to hold the characters of @s. */ -void match_strcpy(char *to, substring_t *s) +void match_strcpy(char *to, const substring_t *s) { memcpy(to, s->from, s->to - s->from); to[s->to - s->from] = '\0'; @@ -204,7 +204,7 @@ void match_strcpy(char *to, substring_t *s) * the &substring_t @s. The caller is responsible for freeing the returned * string with kfree(). */ -char *match_strdup(substring_t *s) +char *match_strdup(const substring_t *s) { char *p = kmalloc(s->to - s->from + 1, GFP_KERNEL); if (p) -- cgit v1.2.3 From be52178b9f73969b583c6a781ca613f4e601221a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 3 May 2007 03:16:20 -0700 Subject: [NET] skbuff: fix kernel-doc Fix skbuff.h kernel-doc: linux-2.6.21-git4//include/linux/skbuff.h:316): No description found for parameter 'transport_header' Signed-off-by: Randy Dunlap Signed-off-by: David S. Miller --- include/linux/skbuff.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 253a2b9be9d6..e7367c74e1bb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -197,7 +197,7 @@ typedef unsigned char *sk_buff_data_t; * @tstamp: Time we arrived * @dev: Device we arrived on/are leaving by * @iif: ifindex of device we arrived on - * @h: Transport layer header + * @transport_header: Transport layer header * @network_header: Network layer header * @mac_header: Link layer header * @dst: destination entry -- cgit v1.2.3 From 4e9cac2ba437fcb093c7417b1cd91a77ebd1756a Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Thu, 3 May 2007 03:28:13 -0700 Subject: [NET]: Add __dev_getfirstbyhwtype Add __dev_getfirstbyhwtype for callers that don't want a reference but some data from the device and thus need to take the rtnl anyway. Signed-off-by: Patrick McHardy Signed-off-by: David Howells Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 1 + net/core/dev.c | 21 ++++++++++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ac0c92b1e002..4428f1c3c13c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -582,6 +582,7 @@ extern int netdev_boot_setup_check(struct net_device *dev); extern unsigned long netdev_boot_base(const char *prefix, int unit); extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr); extern struct net_device *dev_getfirstbyhwtype(unsigned short type); +extern struct net_device *__dev_getfirstbyhwtype(unsigned short type); extern void dev_add_pack(struct packet_type *pt); extern void dev_remove_pack(struct packet_type *pt); extern void __dev_remove_pack(struct packet_type *pt); diff --git a/net/core/dev.c b/net/core/dev.c index eb999003bbb7..c305819b7266 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -576,17 +576,28 @@ struct net_device *dev_getbyhwaddr(unsigned short type, char *ha) EXPORT_SYMBOL(dev_getbyhwaddr); -struct net_device *dev_getfirstbyhwtype(unsigned short type) +struct net_device *__dev_getfirstbyhwtype(unsigned short type) { struct net_device *dev; - rtnl_lock(); + ASSERT_RTNL(); for (dev = dev_base; dev; dev = dev->next) { - if (dev->type == type) { - dev_hold(dev); + if (dev->type == type) break; - } } + return dev; +} + +EXPORT_SYMBOL(__dev_getfirstbyhwtype); + +struct net_device *dev_getfirstbyhwtype(unsigned short type) +{ + struct net_device *dev; + + rtnl_lock(); + dev = __dev_getfirstbyhwtype(type); + if (dev) + dev_hold(dev); rtnl_unlock(); return dev; } -- cgit v1.2.3 From c2a1910b06fed96db77bb358c18c52a1fcf2b7fe Mon Sep 17 00:00:00 2001 From: Jorge Boncompte Date: Thu, 3 May 2007 03:34:42 -0700 Subject: [NETFILTER]: nf_nat_proto_gre: do not modify/corrupt GREv0 packets through NAT While porting some changes of the 2.6.21-rc7 pptp/proto_gre conntrack and nat modules to a 2.4.32 kernel I noticed that the gre_key function returns a wrong pointer to the GRE key of a version 0 packet thus corrupting the packet payload. The intended behaviour for GREv0 packets is to act like nf_conntrack_proto_generic/nf_nat_proto_unknown so I have ripped the offending functions (not used anymore) and modified the nf_nat_proto_gre modules to not touch version 0 (non PPTP) packets. Signed-off-by: Jorge Boncompte Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/netfilter/nf_conntrack_proto_gre.h | 18 ------------------ net/ipv4/netfilter/nf_nat_proto_gre.c | 20 ++++++++------------ 2 files changed, 8 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index 4e6bbce04ff8..535e4219d2bb 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -87,24 +87,6 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, /* delete keymap entries */ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); -/* get pointer to gre key, if present */ -static inline __be32 *gre_key(struct gre_hdr *greh) -{ - if (!greh->key) - return NULL; - if (greh->csum || greh->routing) - return (__be32 *)(greh+sizeof(*greh)+4); - return (__be32 *)(greh+sizeof(*greh)); -} - -/* get pointer ot gre csum, if present */ -static inline __sum16 *gre_csum(struct gre_hdr *greh) -{ - if (!greh->csum) - return NULL; - return (__sum16 *)(greh+sizeof(*greh)); -} - extern void nf_ct_gre_keymap_flush(void); extern void nf_nat_need_gre(void); diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c index e5a34c17d927..c3908bc5a709 100644 --- a/net/ipv4/netfilter/nf_nat_proto_gre.c +++ b/net/ipv4/netfilter/nf_nat_proto_gre.c @@ -72,6 +72,11 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple, __be16 *keyptr; unsigned int min, i, range_size; + /* If there is no master conntrack we are not PPTP, + do not change tuples */ + if (!conntrack->master) + return 0; + if (maniptype == IP_NAT_MANIP_SRC) keyptr = &tuple->src.u.gre.key; else @@ -122,18 +127,9 @@ gre_manip_pkt(struct sk_buff **pskb, unsigned int iphdroff, if (maniptype != IP_NAT_MANIP_DST) return 1; switch (greh->version) { - case 0: - if (!greh->key) { - DEBUGP("can't nat GRE w/o key\n"); - break; - } - if (greh->csum) { - /* FIXME: Never tested this code... */ - nf_proto_csum_replace4(gre_csum(greh), *pskb, - *(gre_key(greh)), - tuple->dst.u.gre.key, 0); - } - *(gre_key(greh)) = tuple->dst.u.gre.key; + case GRE_VERSION_1701: + /* We do not currently NAT any GREv0 packets. + * Try to behave like "nf_nat_proto_unknown" */ break; case GRE_VERSION_PPTP: DEBUGP("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key)); -- cgit v1.2.3 From fc38582db98533066f4ba64f948720483fbfe7b2 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Thu, 3 May 2007 03:36:16 -0700 Subject: [NETFILTER]: bridge netfilter: consolidate header pushing/pulling code Consolidate the common push/pull sequences into a few helper functions. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/netfilter_bridge.h | 25 ++++--- net/bridge/br_netfilter.c | 138 ++++++++++++--------------------------- 2 files changed, 56 insertions(+), 107 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 19060030bac9..533ee351a273 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -55,18 +55,25 @@ static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) return 0; } +static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) +{ + switch (skb->protocol) { + case __constant_htons(ETH_P_8021Q): + return VLAN_HLEN; + case __constant_htons(ETH_P_PPP_SES): + return PPPOE_SES_HLEN; + default: + return 0; + } +} + /* This is called by the IP fragmenting code and it ensures there is * enough room for the encapsulating header (if there is one). */ -static inline int nf_bridge_pad(const struct sk_buff *skb) +static inline unsigned int nf_bridge_pad(const struct sk_buff *skb) { - int padding = 0; - - if (skb->nf_bridge && skb->protocol == htons(ETH_P_8021Q)) - padding = VLAN_HLEN; - else if (skb->nf_bridge && skb->protocol == htons(ETH_P_PPP_SES)) - padding = PPPOE_SES_HLEN; - - return padding; + if (skb->nf_bridge) + return nf_bridge_encap_header_len(skb); + return 0; } struct bridge_skb_cb { diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 9b2986b182ba..fa779874b9dd 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -142,14 +142,33 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) return skb->nf_bridge; } -static inline void nf_bridge_save_header(struct sk_buff *skb) +static inline void nf_bridge_push_encap_header(struct sk_buff *skb) +{ + unsigned int len = nf_bridge_encap_header_len(skb); + + skb_push(skb, len); + skb->network_header -= len; +} + +static inline void nf_bridge_pull_encap_header(struct sk_buff *skb) { - int header_size = ETH_HLEN; + unsigned int len = nf_bridge_encap_header_len(skb); + + skb_pull(skb, len); + skb->network_header += len; +} - if (skb->protocol == htons(ETH_P_8021Q)) - header_size += VLAN_HLEN; - else if (skb->protocol == htons(ETH_P_PPP_SES)) - header_size += PPPOE_SES_HLEN; +static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb) +{ + unsigned int len = nf_bridge_encap_header_len(skb); + + skb_pull_rcsum(skb, len); + skb->network_header += len; +} + +static inline void nf_bridge_save_header(struct sk_buff *skb) +{ + int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); skb_copy_from_linear_data_offset(skb, -header_size, skb->nf_bridge->data, header_size); @@ -162,12 +181,7 @@ static inline void nf_bridge_save_header(struct sk_buff *skb) int nf_bridge_copy_header(struct sk_buff *skb) { int err; - int header_size = ETH_HLEN; - - if (skb->protocol == htons(ETH_P_8021Q)) - header_size += VLAN_HLEN; - else if (skb->protocol == htons(ETH_P_PPP_SES)) - header_size += PPPOE_SES_HLEN; + int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); err = skb_cow(skb, header_size); if (err) @@ -175,11 +189,7 @@ int nf_bridge_copy_header(struct sk_buff *skb) skb_copy_to_linear_data_offset(skb, -header_size, skb->nf_bridge->data, header_size); - - if (skb->protocol == htons(ETH_P_8021Q)) - __skb_push(skb, VLAN_HLEN); - else if (skb->protocol == htons(ETH_P_PPP_SES)) - __skb_push(skb, PPPOE_SES_HLEN); + __skb_push(skb, nf_bridge_encap_header_len(skb)); return 0; } @@ -200,13 +210,7 @@ static int 
br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) dst_hold(skb->dst); skb->dev = nf_bridge->physindev; - if (skb->protocol == htons(ETH_P_8021Q)) { - skb_push(skb, VLAN_HLEN); - skb->network_header -= VLAN_HLEN; - } else if (skb->protocol == htons(ETH_P_PPP_SES)) { - skb_push(skb, PPPOE_SES_HLEN); - skb->network_header -= PPPOE_SES_HLEN; - } + nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); @@ -284,13 +288,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) if (!skb->dev) kfree_skb(skb); else { - if (skb->protocol == htons(ETH_P_8021Q)) { - skb_pull(skb, VLAN_HLEN); - skb->network_header += VLAN_HLEN; - } else if (skb->protocol == htons(ETH_P_PPP_SES)) { - skb_pull(skb, PPPOE_SES_HLEN); - skb->network_header += PPPOE_SES_HLEN; - } + nf_bridge_pull_encap_header(skb); skb->dst->output(skb); } return 0; @@ -356,15 +354,7 @@ bridged_dnat: * bridged frame */ nf_bridge->mask |= BRNF_BRIDGED_DNAT; skb->dev = nf_bridge->physindev; - if (skb->protocol == - htons(ETH_P_8021Q)) { - skb_push(skb, VLAN_HLEN); - skb->network_header -= VLAN_HLEN; - } else if(skb->protocol == - htons(ETH_P_PPP_SES)) { - skb_push(skb, PPPOE_SES_HLEN); - skb->network_header -= PPPOE_SES_HLEN; - } + nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish_bridge, @@ -380,13 +370,7 @@ bridged_dnat: } skb->dev = nf_bridge->physindev; - if (skb->protocol == htons(ETH_P_8021Q)) { - skb_push(skb, VLAN_HLEN); - skb->network_header -= VLAN_HLEN; - } else if (skb->protocol == htons(ETH_P_PPP_SES)) { - skb_push(skb, PPPOE_SES_HLEN); - skb->network_header -= PPPOE_SES_HLEN; - } + nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); @@ -536,14 +520,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, #endif if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL) goto out; - - if (skb->protocol == htons(ETH_P_8021Q)) { - skb_pull_rcsum(skb, VLAN_HLEN); - skb->network_header += VLAN_HLEN; - } else if (skb->protocol == htons(ETH_P_PPP_SES)) { - skb_pull_rcsum(skb, PPPOE_SES_HLEN); - skb->network_header += PPPOE_SES_HLEN; - } + nf_bridge_pull_encap_header_rcsum(skb); return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn); } #ifdef CONFIG_SYSCTL @@ -557,14 +534,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL) goto out; - - if (skb->protocol == htons(ETH_P_8021Q)) { - skb_pull_rcsum(skb, VLAN_HLEN); - skb->network_header += VLAN_HLEN; - } else if (skb->protocol == htons(ETH_P_PPP_SES)) { - skb_pull_rcsum(skb, PPPOE_SES_HLEN); - skb->network_header += PPPOE_SES_HLEN; - } + nf_bridge_pull_encap_header_rcsum(skb); if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto inhdr_error; @@ -642,13 +612,7 @@ static int br_nf_forward_finish(struct sk_buff *skb) } else { in = *((struct net_device **)(skb->cb)); } - if (skb->protocol == htons(ETH_P_8021Q)) { - skb_push(skb, VLAN_HLEN); - skb->network_header -= VLAN_HLEN; - } else if (skb->protocol == htons(ETH_P_PPP_SES)) { - skb_push(skb, PPPOE_SES_HLEN); - skb->network_header -= PPPOE_SES_HLEN; - } + nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in, skb->dev, br_forward_finish, 1); return 0; @@ -682,13 +646,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb, else pf = PF_INET6; - 
if (skb->protocol == htons(ETH_P_8021Q)) { - skb_pull(*pskb, VLAN_HLEN); - (*pskb)->network_header += VLAN_HLEN; - } else if (skb->protocol == htons(ETH_P_PPP_SES)) { - skb_pull(*pskb, PPPOE_SES_HLEN); - (*pskb)->network_header += PPPOE_SES_HLEN; - } + nf_bridge_pull_encap_header(*pskb); nf_bridge = skb->nf_bridge; if (skb->pkt_type == PACKET_OTHERHOST) { @@ -722,15 +680,12 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb, if (skb->protocol != htons(ETH_P_ARP)) { if (!IS_VLAN_ARP(skb)) return NF_ACCEPT; - skb_pull(*pskb, VLAN_HLEN); - (*pskb)->network_header += VLAN_HLEN; + nf_bridge_pull_encap_header(*pskb); } if (arp_hdr(skb)->ar_pln != 4) { - if (IS_VLAN_ARP(skb)) { - skb_push(*pskb, VLAN_HLEN); - (*pskb)->network_header -= VLAN_HLEN; - } + if (IS_VLAN_ARP(skb)) + nf_bridge_push_encap_header(*pskb); return NF_ACCEPT; } *d = (struct net_device *)in; @@ -777,13 +732,7 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } - if (skb->protocol == htons(ETH_P_8021Q)) { - skb_push(skb, VLAN_HLEN); - skb->network_header -= VLAN_HLEN; - } else if (skb->protocol == htons(ETH_P_PPP_SES)) { - skb_push(skb, PPPOE_SES_HLEN); - skb->network_header -= PPPOE_SES_HLEN; - } + nf_bridge_push_encap_header(skb); NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, skb->dev, br_forward_finish); @@ -848,14 +797,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, nf_bridge->mask |= BRNF_PKT_TYPE; } - if (skb->protocol == htons(ETH_P_8021Q)) { - skb_pull(skb, VLAN_HLEN); - skb->network_header += VLAN_HLEN; - } else if (skb->protocol == htons(ETH_P_PPP_SES)) { - skb_pull(skb, PPPOE_SES_HLEN); - skb->network_header += PPPOE_SES_HLEN; - } - + nf_bridge_pull_encap_header(skb); nf_bridge_save_header(skb); #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) -- cgit v1.2.3 From 427c2196b92697a4a8ee87959ebc16bfac024f6b Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 3 May 2007 13:17:25 -0700 Subject: [ETHTOOL]: Add 2.5G bit definitions. Add 2.5G supported and advertising bit definitions. 2.5G is supported by the bnx2 driver. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- include/linux/ethtool.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c6310aef5ab0..f2d248f8cc92 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -434,6 +434,7 @@ struct ethtool_ops { #define SUPPORTED_10000baseT_Full (1 << 12) #define SUPPORTED_Pause (1 << 13) #define SUPPORTED_Asym_Pause (1 << 14) +#define SUPPORTED_2500baseX_Full (1 << 15) /* Indicates what features are advertised by the interface. */ #define ADVERTISED_10baseT_Half (1 << 0) @@ -451,6 +452,7 @@ struct ethtool_ops { #define ADVERTISED_10000baseT_Full (1 << 12) #define ADVERTISED_Pause (1 << 13) #define ADVERTISED_Asym_Pause (1 << 14) +#define ADVERTISED_2500baseX_Full (1 << 15) /* The following are all involved in forcing a particular link * mode for the device for setting things. When getting the -- cgit v1.2.3 From 27a005b883984ef3a3cf24e7ddd78eb78902f494 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 3 May 2007 13:23:41 -0700 Subject: [BNX2]: Add support for 5709 Serdes. Add PCI ID and code to support the 5709 Serdes PHY. Signed-off-by: Michael Chan Signed-off-by: David S. 
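The 5709 SerDes registers added by this patch sit behind a block-address register: the driver first writes MII_BNX2_BLK_ADDR to select a block, then touches a register inside that block, then restores the default IEEE combo block. The following self-contained C sketch shows that banked access pattern; the phy_read/phy_write helpers and the shadow register value are hypothetical stand-ins rather than the bnx2 driver API, while the register numbers are taken from the definitions in the diff below.

/* Banked PHY register access: select block, access register, restore. */
#include <stdio.h>
#include <stdint.h>

#define MII_BLK_ADDR          0x1f     /* block-address register     */
#define BLK_ADDR_SERDES_DIG   0x8300   /* SERDES digital block       */
#define BLK_ADDR_COMBO_IEEEB0 0xffe0   /* default IEEE combo block   */
#define SERDES_DIG_MISC1      0x18     /* register inside the block  */
#define SD_MISC1_FORCE        0x10     /* force bit, as in the patch */

static uint16_t current_block;
static uint16_t misc1_shadow = 0x0005; /* fake register contents */

static void phy_write(uint16_t reg, uint16_t val)
{
	if (reg == MII_BLK_ADDR)
		current_block = val;
	else if (current_block == BLK_ADDR_SERDES_DIG && reg == SERDES_DIG_MISC1)
		misc1_shadow = val;
}

static uint16_t phy_read(uint16_t reg)
{
	if (current_block == BLK_ADDR_SERDES_DIG && reg == SERDES_DIG_MISC1)
		return misc1_shadow;
	return 0;
}

int main(void)
{
	uint16_t val;

	phy_write(MII_BLK_ADDR, BLK_ADDR_SERDES_DIG);   /* select block */
	val = phy_read(SERDES_DIG_MISC1);
	val |= SD_MISC1_FORCE;
	phy_write(SERDES_DIG_MISC1, val);
	phy_write(MII_BLK_ADDR, BLK_ADDR_COMBO_IEEEB0); /* restore default block */

	printf("misc1 is now 0x%04x\n", (unsigned)misc1_shadow);
	return 0;
}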
Miller --- drivers/net/bnx2.c | 203 +++++++++++++++++++++++++++++++++++++++++++++--- drivers/net/bnx2.h | 36 +++++++++ include/linux/pci_ids.h | 1 + 3 files changed, 228 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 944f547a7406..ab589090d634 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -84,6 +84,7 @@ typedef enum { BCM5708, BCM5708S, BCM5709, + BCM5709S, } board_t; /* indexed by board_t, above */ @@ -98,6 +99,7 @@ static const struct { { "Broadcom NetXtreme II BCM5708 1000Base-T" }, { "Broadcom NetXtreme II BCM5708 1000Base-SX" }, { "Broadcom NetXtreme II BCM5709 1000Base-T" }, + { "Broadcom NetXtreme II BCM5709 1000Base-SX" }, }; static struct pci_device_id bnx2_pci_tbl[] = { @@ -117,6 +119,8 @@ static struct pci_device_id bnx2_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S }, { 0, } }; @@ -699,6 +703,45 @@ bnx2_resolve_flow_ctrl(struct bnx2 *bp) } } +static int +bnx2_5709s_linkup(struct bnx2 *bp) +{ + u32 val, speed; + + bp->link_up = 1; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS); + bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val); + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + + if ((bp->autoneg & AUTONEG_SPEED) == 0) { + bp->line_speed = bp->req_line_speed; + bp->duplex = bp->req_duplex; + return 0; + } + speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK; + switch (speed) { + case MII_BNX2_GP_TOP_AN_SPEED_10: + bp->line_speed = SPEED_10; + break; + case MII_BNX2_GP_TOP_AN_SPEED_100: + bp->line_speed = SPEED_100; + break; + case MII_BNX2_GP_TOP_AN_SPEED_1G: + case MII_BNX2_GP_TOP_AN_SPEED_1GKV: + bp->line_speed = SPEED_1000; + break; + case MII_BNX2_GP_TOP_AN_SPEED_2_5G: + bp->line_speed = SPEED_2500; + break; + } + if (val & MII_BNX2_GP_TOP_AN_FD) + bp->duplex = DUPLEX_FULL; + else + bp->duplex = DUPLEX_HALF; + return 0; +} + static int bnx2_5708s_linkup(struct bnx2 *bp) { @@ -898,6 +941,24 @@ bnx2_set_mac_link(struct bnx2 *bp) return 0; } +static void +bnx2_enable_bmsr1(struct bnx2 *bp) +{ + if ((bp->phy_flags & PHY_SERDES_FLAG) && + (CHIP_NUM(bp) == CHIP_NUM_5709)) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_GP_STATUS); +} + +static void +bnx2_disable_bmsr1(struct bnx2 *bp) +{ + if ((bp->phy_flags & PHY_SERDES_FLAG) && + (CHIP_NUM(bp) == CHIP_NUM_5709)) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); +} + static int bnx2_test_and_enable_2g5(struct bnx2 *bp) { @@ -910,6 +971,9 @@ bnx2_test_and_enable_2g5(struct bnx2 *bp) if (bp->autoneg & AUTONEG_SPEED) bp->advertising |= ADVERTISED_2500baseX_Full; + if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); + bnx2_read_phy(bp, bp->mii_up1, &up1); if (!(up1 & BCM5708S_UP1_2G5)) { up1 |= BCM5708S_UP1_2G5; @@ -917,6 +981,10 @@ bnx2_test_and_enable_2g5(struct bnx2 *bp) ret = 0; } + if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + return ret; } @@ -929,6 +997,9 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp) if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) return 0; + if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); + bnx2_read_phy(bp, bp->mii_up1, &up1); if (up1 & BCM5708S_UP1_2G5) { up1 &= ~BCM5708S_UP1_2G5; @@ 
-936,6 +1007,10 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp) ret = 1; } + if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + return ret; } @@ -947,7 +1022,21 @@ bnx2_enable_forced_2g5(struct bnx2 *bp) if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) return; - if (CHIP_NUM(bp) == CHIP_NUM_5708) { + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + u32 val; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_SERDES_DIG); + bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); + val &= ~MII_BNX2_SD_MISC1_FORCE_MSK; + val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bmcr |= BCM5708S_BMCR_FORCE_2500; } @@ -968,7 +1057,20 @@ bnx2_disable_forced_2g5(struct bnx2 *bp) if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) return; - if (CHIP_NUM(bp) == CHIP_NUM_5708) { + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + u32 val; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_SERDES_DIG); + bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); + val &= ~MII_BNX2_SD_MISC1_FORCE; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bmcr &= ~BCM5708S_BMCR_FORCE_2500; } @@ -991,8 +1093,10 @@ bnx2_set_link(struct bnx2 *bp) link_up = bp->link_up; - bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); - bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + bnx2_enable_bmsr1(bp); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_disable_bmsr1(bp); if ((bp->phy_flags & PHY_SERDES_FLAG) && (CHIP_NUM(bp) == CHIP_NUM_5706)) { @@ -1013,6 +1117,8 @@ bnx2_set_link(struct bnx2 *bp) bnx2_5706s_linkup(bp); else if (CHIP_NUM(bp) == CHIP_NUM_5708) bnx2_5708s_linkup(bp); + else if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_5709s_linkup(bp); } else { bnx2_copper_linkup(bp); @@ -1119,7 +1225,15 @@ bnx2_setup_serdes_phy(struct bnx2 *bp) new_bmcr = bmcr & ~BMCR_ANENABLE; new_bmcr |= BMCR_SPEED1000; - if (CHIP_NUM(bp) == CHIP_NUM_5708) { + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (bp->req_line_speed == SPEED_2500) + bnx2_enable_forced_2g5(bp); + else if (bp->req_line_speed == SPEED_1000) { + bnx2_disable_forced_2g5(bp); + new_bmcr &= ~0x2000; + } + + } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { if (bp->req_line_speed == SPEED_2500) new_bmcr |= BCM5708S_BMCR_FORCE_2500; else @@ -1302,6 +1416,9 @@ bnx2_setup_copper_phy(struct bnx2 *bp) bnx2_resolve_flow_ctrl(bp); bnx2_set_mac_link(bp); } + } else { + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); } return 0; } @@ -1320,11 +1437,64 @@ bnx2_setup_phy(struct bnx2 *bp) } } +static int +bnx2_init_5709s_phy(struct bnx2 *bp) +{ + u32 val; + + bp->mii_bmcr = MII_BMCR + 0x10; + bp->mii_bmsr = MII_BMSR + 0x10; + bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1; + bp->mii_adv = MII_ADVERTISE + 0x10; + bp->mii_lpa = MII_LPA + 0x10; + bp->mii_up1 = MII_BNX2_OVER1G_UP1; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER); + bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + bnx2_reset_phy(bp); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 
MII_BNX2_BLK_ADDR_SERDES_DIG); + + bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val); + val &= ~MII_BNX2_SD_1000XCTL1_AUTODET; + val |= MII_BNX2_SD_1000XCTL1_FIBER; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); + bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val); + if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) + val |= BCM5708S_UP1_2G5; + else + val &= ~BCM5708S_UP1_2G5; + bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG); + bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val); + val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM; + bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0); + + val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN | + MII_BNX2_CL73_BAM_NP_AFT_BP_EN; + bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + + return 0; +} + static int bnx2_init_5708s_phy(struct bnx2 *bp) { u32 val; + bnx2_reset_phy(bp); + + bp->mii_up1 = BCM5708S_UP1; + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3); bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE); bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); @@ -1377,6 +1547,8 @@ bnx2_init_5708s_phy(struct bnx2 *bp) static int bnx2_init_5706s_phy(struct bnx2 *bp) { + bnx2_reset_phy(bp); + bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG; if (CHIP_NUM(bp) == CHIP_NUM_5706) @@ -1414,6 +1586,8 @@ bnx2_init_copper_phy(struct bnx2 *bp) { u32 val; + bnx2_reset_phy(bp); + if (bp->phy_flags & PHY_CRC_FIX_FLAG) { bnx2_write_phy(bp, 0x18, 0x0c00); bnx2_write_phy(bp, 0x17, 0x000a); @@ -1470,13 +1644,12 @@ bnx2_init_phy(struct bnx2 *bp) bp->mii_bmcr = MII_BMCR; bp->mii_bmsr = MII_BMSR; + bp->mii_bmsr1 = MII_BMSR; bp->mii_adv = MII_ADVERTISE; bp->mii_lpa = MII_LPA; REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); - bnx2_reset_phy(bp); - bnx2_read_phy(bp, MII_PHYSID1, &val); bp->phy_id = val << 16; bnx2_read_phy(bp, MII_PHYSID2, &val); @@ -1487,6 +1660,8 @@ bnx2_init_phy(struct bnx2 *bp) rc = bnx2_init_5706s_phy(bp); else if (CHIP_NUM(bp) == CHIP_NUM_5708) rc = bnx2_init_5708s_phy(bp); + else if (CHIP_NUM(bp) == CHIP_NUM_5709) + rc = bnx2_init_5709s_phy(bp); } else { rc = bnx2_init_copper_phy(bp); @@ -4262,8 +4437,10 @@ bnx2_test_link(struct bnx2 *bp) u32 bmsr; spin_lock_bh(&bp->phy_lock); - bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); - bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + bnx2_enable_bmsr1(bp); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_disable_bmsr1(bp); spin_unlock_bh(&bp->phy_lock); if (bmsr & BMSR_LSTATUS) { @@ -4407,7 +4584,7 @@ bnx2_timer(unsigned long data) if (bp->phy_flags & PHY_SERDES_FLAG) { if (CHIP_NUM(bp) == CHIP_NUM_5706) bnx2_5706_serdes_timer(bp); - else if (CHIP_NUM(bp) == CHIP_NUM_5708) + else bnx2_5708_serdes_timer(bp); } @@ -4910,8 +5087,10 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) advertising = cmd->advertising; - } - else if (cmd->advertising == ADVERTISED_1000baseT_Full) { + } else if (cmd->advertising == ADVERTISED_2500baseX_Full) { + if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) + return -EINVAL; + } else if (cmd->advertising == ADVERTISED_1000baseT_Full) { advertising = cmd->advertising; } else if (cmd->advertising == ADVERTISED_1000baseT_Half) { diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 
d4a85d7b5ebe..124bd03cff3e 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -6296,6 +6296,41 @@ struct l2_fhdr { #define MII_BNX2_DSP_ADDRESS 0x17 #define MII_BNX2_DSP_EXPAND_REG 0x0f00 +#define MII_BNX2_BLK_ADDR 0x1f +#define MII_BNX2_BLK_ADDR_IEEE0 0x0000 +#define MII_BNX2_BLK_ADDR_GP_STATUS 0x8120 +#define MII_BNX2_GP_TOP_AN_STATUS1 0x1b +#define MII_BNX2_GP_TOP_AN_SPEED_MSK 0x3f00 +#define MII_BNX2_GP_TOP_AN_SPEED_10 0x0000 +#define MII_BNX2_GP_TOP_AN_SPEED_100 0x0100 +#define MII_BNX2_GP_TOP_AN_SPEED_1G 0x0200 +#define MII_BNX2_GP_TOP_AN_SPEED_2_5G 0x0300 +#define MII_BNX2_GP_TOP_AN_SPEED_1GKV 0x0d00 +#define MII_BNX2_GP_TOP_AN_FD 0x8 +#define MII_BNX2_BLK_ADDR_SERDES_DIG 0x8300 +#define MII_BNX2_SERDES_DIG_1000XCTL1 0x10 +#define MII_BNX2_SD_1000XCTL1_FIBER 0x01 +#define MII_BNX2_SD_1000XCTL1_AUTODET 0x10 +#define MII_BNX2_SERDES_DIG_MISC1 0x18 +#define MII_BNX2_SD_MISC1_FORCE_MSK 0xf +#define MII_BNX2_SD_MISC1_FORCE_2_5G 0x0 +#define MII_BNX2_SD_MISC1_FORCE 0x10 +#define MII_BNX2_BLK_ADDR_OVER1G 0x8320 +#define MII_BNX2_OVER1G_UP1 0x19 +#define MII_BNX2_BLK_ADDR_BAM_NXTPG 0x8350 +#define MII_BNX2_BAM_NXTPG_CTL 0x10 +#define MII_BNX2_NXTPG_CTL_BAM 0x1 +#define MII_BNX2_NXTPG_CTL_T2 0x2 +#define MII_BNX2_BLK_ADDR_CL73_USERB0 0x8370 +#define MII_BNX2_CL73_BAM_CTL1 0x12 +#define MII_BNX2_CL73_BAM_EN 0x8000 +#define MII_BNX2_CL73_BAM_STA_MGR_EN 0x4000 +#define MII_BNX2_CL73_BAM_NP_AFT_BP_EN 0x2000 +#define MII_BNX2_BLK_ADDR_AER 0xffd0 +#define MII_BNX2_AER_AER 0x1e +#define MII_BNX2_AER_AER_AN_MMD 0x3800 +#define MII_BNX2_BLK_ADDR_COMBO_IEEEB0 0xffe0 + #define MIN_ETHERNET_PACKET_SIZE 60 #define MAX_ETHERNET_PACKET_SIZE 1514 #define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014 @@ -6500,6 +6535,7 @@ struct bnx2 { u32 mii_bmcr; u32 mii_bmsr; + u32 mii_bmsr1; u32 mii_adv; u32 mii_lpa; u32 mii_up1; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1b0ddbb8a804..840a7e543c70 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1924,6 +1924,7 @@ #define PCI_DEVICE_ID_TIGON3_5752 0x1600 #define PCI_DEVICE_ID_TIGON3_5752M 0x1601 #define PCI_DEVICE_ID_NX2_5709 0x1639 +#define PCI_DEVICE_ID_NX2_5709S 0x163a #define PCI_DEVICE_ID_TIGON3_5700 0x1644 #define PCI_DEVICE_ID_TIGON3_5701 0x1645 #define PCI_DEVICE_ID_TIGON3_5702 0x1646 -- cgit v1.2.3 From 7562f876cd93800f2f8c89445f2a563590b24e09 Mon Sep 17 00:00:00 2001 From: Pavel Emelianov Date: Thu, 3 May 2007 15:13:45 -0700 Subject: [NET]: Rework dev_base via list_head (v3) Cleanup of dev_base list use, with the aim to simplify making device list per-namespace. In almost every occasion, use of dev_base variable and dev->next pointer could be easily replaced by for_each_netdev loop. A few most complicated places were converted to using first_netdev()/next_netdev(). Signed-off-by: Pavel Emelianov Acked-by: Kirill Korotaev Signed-off-by: David S. 
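The conversion described above boils down to replacing the hand-rolled dev_base/dev->next chain with an embedded list node plus a for_each_netdev() iterator. Here is a small userspace sketch of that shape, assuming a stripped-down stand-in for the kernel's <linux/list.h>; the iteration macro is hand-expanded where the kernel version simply wraps list_for_each_entry().

/* Devices embed a list node; iteration goes through one macro. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct net_device {
	char name[16];
	struct list_head dev_list;   /* replaces the old ->next pointer */
};

static struct list_head dev_base_head = LIST_HEAD_INIT(dev_base_head);

#define net_device_entry(lh) container_of(lh, struct net_device, dev_list)
#define for_each_netdev(d) \
	for ((d) = net_device_entry(dev_base_head.next); \
	     &(d)->dev_list != &dev_base_head; \
	     (d) = net_device_entry((d)->dev_list.next))

int main(void)
{
	struct net_device lo = { .name = "lo" }, eth0 = { .name = "eth0" };
	struct net_device *dev;

	list_add_tail(&lo.dev_list, &dev_base_head);
	list_add_tail(&eth0.dev_list, &dev_base_head);

	for_each_netdev(dev)         /* the loop every old dev_base walk becomes */
		printf("%s\n", dev->name);
	return 0;
}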
Miller --- arch/s390/appldata/appldata_net_sum.c | 2 +- arch/sparc64/solaris/ioctl.c | 3 +- drivers/block/aoe/aoecmd.c | 8 ++-- drivers/net/wireless/strip.c | 4 +- drivers/parisc/led.c | 2 +- fs/afs/netdevices.c | 2 +- include/linux/netdevice.h | 26 ++++++++++- net/8021q/vlan.c | 3 +- net/8021q/vlanproc.c | 36 ++++++++++----- net/bridge/br_if.c | 4 +- net/bridge/br_ioctl.c | 4 +- net/bridge/br_netlink.c | 3 +- net/core/dev.c | 84 ++++++++++++++++------------------ net/core/dev_mcast.c | 5 +-- net/core/rtnetlink.c | 7 ++- net/decnet/af_decnet.c | 11 +++-- net/decnet/dn_dev.c | 85 +++++++++++++++++------------------ net/decnet/dn_fib.c | 2 +- net/decnet/dn_route.c | 14 +++--- net/ipv4/devinet.c | 17 ++++--- net/ipv4/igmp.c | 15 +++---- net/ipv4/ipconfig.c | 2 +- net/ipv6/addrconf.c | 28 +++++++----- net/ipv6/anycast.c | 17 ++++--- net/ipv6/mcast.c | 15 +++---- net/llc/llc_core.c | 10 ++++- net/netrom/nr_route.c | 5 ++- net/rose/rose_route.c | 8 ++-- net/sched/sch_api.c | 7 ++- net/sctp/protocol.c | 2 +- net/tipc/eth_media.c | 12 ++--- 31 files changed, 249 insertions(+), 194 deletions(-) (limited to 'include/linux') diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index a43f3488fecf..2180ac105b05 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c @@ -107,7 +107,7 @@ static void appldata_get_net_sum_data(void *data) tx_dropped = 0; collisions = 0; read_lock(&dev_base_lock); - for (dev = dev_base; dev != NULL; dev = dev->next) { + for_each_netdev(dev) { stats = dev->get_stats(dev); rx_packets += stats->rx_packets; tx_packets += stats->tx_packets; diff --git a/arch/sparc64/solaris/ioctl.c b/arch/sparc64/solaris/ioctl.c index 330743c5b3d8..18352a498628 100644 --- a/arch/sparc64/solaris/ioctl.c +++ b/arch/sparc64/solaris/ioctl.c @@ -686,7 +686,8 @@ static inline int solaris_i(unsigned int fd, unsigned int cmd, u32 arg) int i = 0; read_lock_bh(&dev_base_lock); - for (d = dev_base; d; d = d->next) i++; + for_each_netdev(d) + i++; read_unlock_bh(&dev_base_lock); if (put_user (i, (int __user *)A(arg))) diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 1a6aeac5a1c3..01fbdd38e3be 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -194,15 +194,15 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail) sl = sl_tail = NULL; read_lock(&dev_base_lock); - for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) { + for_each_netdev(ifp) { dev_hold(ifp); if (!is_aoe_netif(ifp)) - continue; + goto cont; skb = new_skb(sizeof *h + sizeof *ch); if (skb == NULL) { printk(KERN_INFO "aoe: skb alloc failure\n"); - continue; + goto cont; } skb_put(skb, sizeof *h + sizeof *ch); skb->dev = ifp; @@ -221,6 +221,8 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail) skb->next = sl; sl = skb; +cont: + dev_put(ifp); } read_unlock(&dev_base_lock); diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index 2a299a0676a6..ef32a5c1e818 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c @@ -1971,8 +1971,7 @@ static struct net_device *get_strip_dev(struct strip *strip_info) sizeof(zero_address))) { struct net_device *dev; read_lock_bh(&dev_base_lock); - dev = dev_base; - while (dev) { + for_each_netdev(dev) { if (dev->type == strip_info->dev->type && !memcmp(dev->dev_addr, &strip_info->true_dev_addr, @@ -1983,7 +1982,6 @@ static struct net_device *get_strip_dev(struct strip *strip_info) 
read_unlock_bh(&dev_base_lock); return (dev); } - dev = dev->next; } read_unlock_bh(&dev_base_lock); } diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 3df82fe9ce8c..98be2880757d 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -365,7 +365,7 @@ static __inline__ int led_get_net_activity(void) * for reading should be OK */ read_lock(&dev_base_lock); rcu_read_lock(); - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { struct net_device_stats *stats; struct in_device *in_dev = __in_dev_get_rcu(dev); if (!in_dev || !in_dev->ifa_list) diff --git a/fs/afs/netdevices.c b/fs/afs/netdevices.c index ce08977858e0..fc27d4b52e5f 100644 --- a/fs/afs/netdevices.c +++ b/fs/afs/netdevices.c @@ -47,7 +47,7 @@ int afs_get_ipv4_interfaces(struct afs_interface *bufs, size_t maxbufs, ASSERT(maxbufs > 0); rtnl_lock(); - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { if (dev->type == ARPHRD_LOOPBACK && !wantloopback) continue; idev = __in_dev_get_rtnl(dev); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 4428f1c3c13c..30446222b396 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -304,7 +304,7 @@ struct net_device unsigned long state; - struct net_device *next; + struct list_head dev_list; /* The device initialization function. Called only once. */ int (*init)(struct net_device *dev); @@ -575,9 +575,31 @@ struct packet_type { #include extern struct net_device loopback_dev; /* The loopback */ -extern struct net_device *dev_base; /* All devices */ +extern struct list_head dev_base_head; /* All devices */ extern rwlock_t dev_base_lock; /* Device list lock */ +#define for_each_netdev(d) \ + list_for_each_entry(d, &dev_base_head, dev_list) +#define for_each_netdev_safe(d, n) \ + list_for_each_entry_safe(d, n, &dev_base_head, dev_list) +#define for_each_netdev_continue(d) \ + list_for_each_entry_continue(d, &dev_base_head, dev_list) +#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) + +static inline struct net_device *next_net_device(struct net_device *dev) +{ + struct list_head *lh; + + lh = dev->dev_list.next; + return lh == &dev_base_head ? NULL : net_device_entry(lh); +} + +static inline struct net_device *first_net_device(void) +{ + return list_empty(&dev_base_head) ? 
NULL : + net_device_entry(dev_base_head.next); +} + extern int netdev_boot_setup_check(struct net_device *dev); extern unsigned long netdev_boot_base(const char *prefix, int unit); extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index c0c7bb8e9f07..bd93c45778d4 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -117,8 +117,7 @@ static void __exit vlan_cleanup_devices(void) struct net_device *dev, *nxt; rtnl_lock(); - for (dev = dev_base; dev; dev = nxt) { - nxt = dev->next; + for_each_netdev_safe(dev, nxt) { if (dev->priv_flags & IFF_802_1Q_VLAN) { unregister_vlan_dev(VLAN_DEV_INFO(dev)->real_dev, VLAN_DEV_INFO(dev)->vlan_id); diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index 5e24f72602a1..d216a64421cd 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -237,13 +237,9 @@ int vlan_proc_rem_dev(struct net_device *vlandev) * The following few functions build the content of /proc/net/vlan/config */ -/* starting at dev, find a VLAN device */ -static struct net_device *vlan_skip(struct net_device *dev) +static inline int is_vlan_dev(struct net_device *dev) { - while (dev && !(dev->priv_flags & IFF_802_1Q_VLAN)) - dev = dev->next; - - return dev; + return dev->priv_flags & IFF_802_1Q_VLAN; } /* start read of /proc/net/vlan/config */ @@ -257,19 +253,35 @@ static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) if (*pos == 0) return SEQ_START_TOKEN; - for (dev = vlan_skip(dev_base); dev && i < *pos; - dev = vlan_skip(dev->next), ++i); + for_each_netdev(dev) { + if (!is_vlan_dev(dev)) + continue; + + if (i++ == *pos) + return dev; + } - return (i == *pos) ? dev : NULL; + return NULL; } static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct net_device *dev; + ++*pos; - return vlan_skip((v == SEQ_START_TOKEN) - ? 
dev_base - : ((struct net_device *)v)->next); + dev = (struct net_device *)v; + if (v == SEQ_START_TOKEN) + dev = net_device_entry(&dev_base_head); + + for_each_netdev_continue(dev) { + if (!is_vlan_dev(dev)) + continue; + + return dev; + } + + return NULL; } static void vlan_seq_stop(struct seq_file *seq, void *v) diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 690573bbf012..849deaf14108 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -475,11 +475,9 @@ void __exit br_cleanup_bridges(void) struct net_device *dev, *nxt; rtnl_lock(); - for (dev = dev_base; dev; dev = nxt) { - nxt = dev->next; + for_each_netdev_safe(dev, nxt) if (dev->priv_flags & IFF_EBRIDGE) del_br(dev->priv); - } rtnl_unlock(); } diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index eda0fbfc923a..bb15e9e259b1 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -27,7 +27,9 @@ static int get_bridge_ifindices(int *indices, int num) struct net_device *dev; int i = 0; - for (dev = dev_base; dev && i < num; dev = dev->next) { + for_each_netdev(dev) { + if (i >= num) + break; if (dev->priv_flags & IFF_EBRIDGE) indices[i++] = dev->ifindex; } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 35facc0c11c2..0fcf6f073064 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -109,7 +109,8 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) struct net_device *dev; int idx; - for (dev = dev_base, idx = 0; dev; dev = dev->next) { + idx = 0; + for_each_netdev(dev) { /* not a bridge port */ if (dev->br_port == NULL || idx < cb->args[0]) goto skip; diff --git a/net/core/dev.c b/net/core/dev.c index c305819b7266..f27d4ab181e6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -156,13 +156,13 @@ static spinlock_t net_dma_event_lock; #endif /* - * The @dev_base list is protected by @dev_base_lock and the rtnl + * The @dev_base_head list is protected by @dev_base_lock and the rtnl * semaphore. * * Pure readers hold dev_base_lock for reading. * * Writers must hold the rtnl semaphore while they loop through the - * dev_base list, and hold dev_base_lock for writing when they do the + * dev_base_head list, and hold dev_base_lock for writing when they do the * actual updates. This allows pure readers to access the list even * while a writer is preparing to update it. * @@ -174,11 +174,10 @@ static spinlock_t net_dma_event_lock; * unregister_netdevice(), which must be called with the rtnl * semaphore held. 
*/ -struct net_device *dev_base; -static struct net_device **dev_tail = &dev_base; +LIST_HEAD(dev_base_head); DEFINE_RWLOCK(dev_base_lock); -EXPORT_SYMBOL(dev_base); +EXPORT_SYMBOL(dev_base_head); EXPORT_SYMBOL(dev_base_lock); #define NETDEV_HASHBITS 8 @@ -567,11 +566,12 @@ struct net_device *dev_getbyhwaddr(unsigned short type, char *ha) ASSERT_RTNL(); - for (dev = dev_base; dev; dev = dev->next) + for_each_netdev(dev) if (dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len)) - break; - return dev; + return dev; + + return NULL; } EXPORT_SYMBOL(dev_getbyhwaddr); @@ -581,11 +581,11 @@ struct net_device *__dev_getfirstbyhwtype(unsigned short type) struct net_device *dev; ASSERT_RTNL(); - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) if (dev->type == type) - break; - } - return dev; + return dev; + + return NULL; } EXPORT_SYMBOL(__dev_getfirstbyhwtype); @@ -617,17 +617,19 @@ EXPORT_SYMBOL(dev_getfirstbyhwtype); struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask) { - struct net_device *dev; + struct net_device *dev, *ret; + ret = NULL; read_lock(&dev_base_lock); - for (dev = dev_base; dev != NULL; dev = dev->next) { + for_each_netdev(dev) { if (((dev->flags ^ if_flags) & mask) == 0) { dev_hold(dev); + ret = dev; break; } } read_unlock(&dev_base_lock); - return dev; + return ret; } /** @@ -693,7 +695,7 @@ int dev_alloc_name(struct net_device *dev, const char *name) if (!inuse) return -ENOMEM; - for (d = dev_base; d; d = d->next) { + for_each_netdev(d) { if (!sscanf(d->name, name, &i)) continue; if (i < 0 || i >= max_netdevices) @@ -975,7 +977,7 @@ int register_netdevice_notifier(struct notifier_block *nb) rtnl_lock(); err = raw_notifier_chain_register(&netdev_chain, nb); if (!err) { - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { nb->notifier_call(nb, NETDEV_REGISTER, dev); if (dev->flags & IFF_UP) @@ -2049,7 +2051,7 @@ static int dev_ifconf(char __user *arg) */ total = 0; - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { for (i = 0; i < NPROTO; i++) { if (gifconf_list[i]) { int done; @@ -2081,26 +2083,28 @@ static int dev_ifconf(char __user *arg) * This is invoked by the /proc filesystem handler to display a device * in detail. */ -static struct net_device *dev_get_idx(loff_t pos) +void *dev_seq_start(struct seq_file *seq, loff_t *pos) { + loff_t off; struct net_device *dev; - loff_t i; - for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next); + read_lock(&dev_base_lock); + if (!*pos) + return SEQ_START_TOKEN; - return i == pos ? dev : NULL; -} + off = 1; + for_each_netdev(dev) + if (off++ == *pos) + return dev; -void *dev_seq_start(struct seq_file *seq, loff_t *pos) -{ - read_lock(&dev_base_lock); - return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN; + return NULL; } void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; - return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next; + return v == SEQ_START_TOKEN ? 
+ first_net_device() : next_net_device((struct net_device *)v); } void dev_seq_stop(struct seq_file *seq, void *v) @@ -3082,11 +3086,9 @@ int register_netdevice(struct net_device *dev) set_bit(__LINK_STATE_PRESENT, &dev->state); - dev->next = NULL; dev_init_scheduler(dev); write_lock_bh(&dev_base_lock); - *dev_tail = dev; - dev_tail = &dev->next; + list_add_tail(&dev->dev_list, &dev_base_head); hlist_add_head(&dev->name_hlist, head); hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex)); dev_hold(dev); @@ -3360,8 +3362,6 @@ void synchronize_net(void) void unregister_netdevice(struct net_device *dev) { - struct net_device *d, **dp; - BUG_ON(dev_boot_phase); ASSERT_RTNL(); @@ -3381,19 +3381,11 @@ void unregister_netdevice(struct net_device *dev) dev_close(dev); /* And unlink it from device chain. */ - for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) { - if (d == dev) { - write_lock_bh(&dev_base_lock); - hlist_del(&dev->name_hlist); - hlist_del(&dev->index_hlist); - if (dev_tail == &dev->next) - dev_tail = dp; - *dp = d->next; - write_unlock_bh(&dev_base_lock); - break; - } - } - BUG_ON(!d); + write_lock_bh(&dev_base_lock); + list_del(&dev->dev_list); + hlist_del(&dev->name_hlist); + hlist_del(&dev->index_hlist); + write_unlock_bh(&dev_base_lock); dev->reg_state = NETREG_UNREGISTERING; diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c index 7d57bf77f3a3..5a54053386c8 100644 --- a/net/core/dev_mcast.c +++ b/net/core/dev_mcast.c @@ -223,7 +223,7 @@ static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos) loff_t off = 0; read_lock(&dev_base_lock); - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { if (off++ == *pos) return dev; } @@ -232,9 +232,8 @@ static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos) static void *dev_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct net_device *dev = v; ++*pos; - return dev->next; + return next_net_device((struct net_device *)v); } static void dev_mc_seq_stop(struct seq_file *seq, void *v) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index cec111109155..8c971a2efe2a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -539,13 +539,16 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) int s_idx = cb->args[0]; struct net_device *dev; - for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) { + idx = 0; + for_each_netdev(dev) { if (idx < s_idx) - continue; + goto cont; if (rtnl_fill_ifinfo(skb, dev, NULL, 0, RTM_NEWLINK, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0) break; +cont: + idx++; } cb->args[0] = idx; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index a205eaa87f52..9fbe87c93802 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -721,7 +721,7 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr; - struct net_device *dev; + struct net_device *dev, *ldev; int rv; if (addr_len != sizeof(struct sockaddr_dn)) @@ -746,14 +746,17 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (!(saddr->sdn_flags & SDF_WILD)) { if (dn_ntohs(saddr->sdn_nodeaddrl)) { read_lock(&dev_base_lock); - for(dev = dev_base; dev; dev = dev->next) { + ldev = NULL; + for_each_netdev(dev) { if (!dev->dn_ptr) continue; - if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) + if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) { + ldev = dev; break; + 
} } read_unlock(&dev_base_lock); - if (dev == NULL) + if (ldev == NULL) return -EADDRNOTAVAIL; } } diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 5c2a9951b638..764a56a13e38 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -799,9 +799,10 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) skip_ndevs = cb->args[0]; skip_naddr = cb->args[1]; - for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) { + idx = 0; + for_each_netdev(dev) { if (idx < skip_ndevs) - continue; + goto cont; else if (idx > skip_ndevs) { /* Only skip over addresses for first dev dumped * in this iteration (idx == skip_ndevs) */ @@ -809,18 +810,20 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) } if ((dn_db = dev->dn_ptr) == NULL) - continue; + goto cont; for (ifa = dn_db->ifa_list, dn_idx = 0; ifa; ifa = ifa->ifa_next, dn_idx++) { if (dn_idx < skip_naddr) - continue; + goto cont; if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWADDR, NLM_F_MULTI) < 0) goto done; } +cont: + idx++; } done: cb->args[0] = idx; @@ -1296,7 +1299,7 @@ void dn_dev_devices_off(void) struct net_device *dev; rtnl_lock(); - for(dev = dev_base; dev; dev = dev->next) + for_each_netdev(dev) dn_dev_down(dev); rtnl_unlock(); @@ -1307,7 +1310,7 @@ void dn_dev_devices_on(void) struct net_device *dev; rtnl_lock(); - for(dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { if (dev->flags & IFF_UP) dn_dev_up(dev); } @@ -1325,62 +1328,56 @@ int unregister_dnaddr_notifier(struct notifier_block *nb) } #ifdef CONFIG_PROC_FS -static inline struct net_device *dn_dev_get_next(struct seq_file *seq, struct net_device *dev) +static inline int is_dn_dev(struct net_device *dev) { - do { - dev = dev->next; - } while(dev && !dev->dn_ptr); - - return dev; + return dev->dn_ptr != NULL; } -static struct net_device *dn_dev_get_idx(struct seq_file *seq, loff_t pos) +static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) { + int i; struct net_device *dev; - dev = dev_base; - if (dev && !dev->dn_ptr) - dev = dn_dev_get_next(seq, dev); - if (pos) { - while(dev && (dev = dn_dev_get_next(seq, dev))) - --pos; - } - return dev; -} + read_lock(&dev_base_lock); -static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) -{ - if (*pos) { - struct net_device *dev; - read_lock(&dev_base_lock); - dev = dn_dev_get_idx(seq, *pos - 1); - if (dev == NULL) - read_unlock(&dev_base_lock); - return dev; + if (*pos == 0) + return SEQ_START_TOKEN; + + i = 1; + for_each_netdev(dev) { + if (!is_dn_dev(dev)) + continue; + + if (i++ == *pos) + return dev; } - return SEQ_START_TOKEN; + + return NULL; } static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct net_device *dev = v; - loff_t one = 1; + struct net_device *dev; - if (v == SEQ_START_TOKEN) { - dev = dn_dev_seq_start(seq, &one); - } else { - dev = dn_dev_get_next(seq, dev); - if (dev == NULL) - read_unlock(&dev_base_lock); - } ++*pos; - return dev; + + dev = (struct net_device *)v; + if (v == SEQ_START_TOKEN) + dev = net_device_entry(&dev_base_head); + + for_each_netdev_continue(dev) { + if (!is_dn_dev(dev)) + continue; + + return dev; + } + + return NULL; } static void dn_dev_seq_stop(struct seq_file *seq, void *v) { - if (v && v != SEQ_START_TOKEN) - read_unlock(&dev_base_lock); + read_unlock(&dev_base_lock); } static char *dn_type2asc(char type) diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index 310a86268d2b..d2bc19d47950 100644 --- 
a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -602,7 +602,7 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa) /* Scan device list */ read_lock(&dev_base_lock); - for(dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { dn_db = dev->dn_ptr; if (dn_db == NULL) continue; diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 5d7337bcf0fe..a8bf106b7a61 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -886,7 +886,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old .iif = loopback_dev.ifindex, .oif = oldflp->oif }; struct dn_route *rt = NULL; - struct net_device *dev_out = NULL; + struct net_device *dev_out = NULL, *dev; struct neighbour *neigh = NULL; unsigned hash; unsigned flags = 0; @@ -925,15 +925,17 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old goto out; } read_lock(&dev_base_lock); - for(dev_out = dev_base; dev_out; dev_out = dev_out->next) { - if (!dev_out->dn_ptr) + for_each_netdev(dev) { + if (!dev->dn_ptr) continue; - if (!dn_dev_islocal(dev_out, oldflp->fld_src)) + if (!dn_dev_islocal(dev, oldflp->fld_src)) continue; - if ((dev_out->flags & IFF_LOOPBACK) && + if ((dev->flags & IFF_LOOPBACK) && oldflp->fld_dst && - !dn_dev_islocal(dev_out, oldflp->fld_dst)) + !dn_dev_islocal(dev, oldflp->fld_dst)) continue; + + dev_out = dev; break; } read_unlock(&dev_base_lock); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 088888db8b3d..7f95e6e9beeb 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -910,7 +910,7 @@ no_in_dev: */ read_lock(&dev_base_lock); rcu_read_lock(); - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { if ((in_dev = __in_dev_get_rcu(dev)) == NULL) continue; @@ -989,7 +989,7 @@ __be32 inet_confirm_addr(const struct net_device *dev, __be32 dst, __be32 local, read_lock(&dev_base_lock); rcu_read_lock(); - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { if ((in_dev = __in_dev_get_rcu(dev))) { addr = confirm_addr_indev(in_dev, dst, local, scope); if (addr) @@ -1182,23 +1182,26 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) int s_ip_idx, s_idx = cb->args[0]; s_ip_idx = ip_idx = cb->args[1]; - for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) { + idx = 0; + for_each_netdev(dev) { if (idx < s_idx) - continue; + goto cont; if (idx > s_idx) s_ip_idx = 0; if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) - continue; + goto cont; for (ifa = in_dev->ifa_list, ip_idx = 0; ifa; ifa = ifa->ifa_next, ip_idx++) { if (ip_idx < s_ip_idx) - continue; + goto cont; if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWADDR, NLM_F_MULTI) <= 0) goto done; } +cont: + idx++; } done: @@ -1243,7 +1246,7 @@ void inet_forward_change(void) ipv4_devconf_dflt.forwarding = on; read_lock(&dev_base_lock); - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { struct in_device *in_dev; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 2506021c2935..f4dd47453108 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2288,9 +2288,8 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq) struct ip_mc_list *im = NULL; struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); - for (state->dev = dev_base, state->in_dev = NULL; - state->dev; - state->dev = state->dev->next) { + state->in_dev = NULL; + for_each_netdev(state->dev) { struct in_device *in_dev; in_dev = 
in_dev_get(state->dev); if (!in_dev) @@ -2316,7 +2315,7 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li read_unlock(&state->in_dev->mc_list_lock); in_dev_put(state->in_dev); } - state->dev = state->dev->next; + state->dev = next_net_device(state->dev); if (!state->dev) { state->in_dev = NULL; break; @@ -2450,9 +2449,9 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq) struct ip_mc_list *im = NULL; struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); - for (state->dev = dev_base, state->idev = NULL, state->im = NULL; - state->dev; - state->dev = state->dev->next) { + state->idev = NULL; + state->im = NULL; + for_each_netdev(state->dev) { struct in_device *idev; idev = in_dev_get(state->dev); if (unlikely(idev == NULL)) @@ -2488,7 +2487,7 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l read_unlock(&state->idev->mc_list_lock); in_dev_put(state->idev); } - state->dev = state->dev->next; + state->dev = next_net_device(state->dev); if (!state->dev) { state->idev = NULL; goto out; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 597c800b2fdc..342ca8d89458 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -192,7 +192,7 @@ static int __init ic_open_devs(void) if (dev_change_flags(&loopback_dev, loopback_dev.flags | IFF_UP) < 0) printk(KERN_ERR "IP-Config: Failed to open %s\n", loopback_dev.name); - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { if (dev == &loopback_dev) continue; if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) : diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3452433cbc96..d02685c6bc69 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -449,7 +449,7 @@ static void addrconf_forward_change(void) struct inet6_dev *idev; read_lock(&dev_base_lock); - for (dev=dev_base; dev; dev=dev->next) { + for_each_netdev(dev) { rcu_read_lock(); idev = __in6_dev_get(dev); if (idev) { @@ -911,7 +911,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, read_lock(&dev_base_lock); rcu_read_lock(); - for (dev = dev_base; dev; dev=dev->next) { + for_each_netdev(dev) { struct inet6_dev *idev; struct inet6_ifaddr *ifa; @@ -2064,7 +2064,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) return; } - for (dev = dev_base; dev != NULL; dev = dev->next) { + for_each_netdev(dev) { struct in_device * in_dev = __in_dev_get_rtnl(dev); if (in_dev && (dev->flags & IFF_UP)) { struct in_ifaddr * ifa; @@ -2225,7 +2225,7 @@ static void ip6_tnl_add_linklocal(struct inet6_dev *idev) return; } /* then try to inherit it from any device */ - for (link_dev = dev_base; link_dev; link_dev = link_dev->next) { + for_each_netdev(link_dev) { if (!ipv6_inherit_linklocal(idev, link_dev)) return; } @@ -3257,14 +3257,15 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, s_idx = cb->args[0]; s_ip_idx = ip_idx = cb->args[1]; - for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) { + idx = 0; + for_each_netdev(dev) { if (idx < s_idx) - continue; + goto cont; if (idx > s_idx) s_ip_idx = 0; ip_idx = 0; if ((idev = in6_dev_get(dev)) == NULL) - continue; + goto cont; read_lock_bh(&idev->lock); switch (type) { case UNICAST_ADDR: @@ -3311,6 +3312,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, } read_unlock_bh(&idev->lock); in6_dev_put(idev); +cont: + idx++; } done: if (err <= 0) { @@ -3575,16 +3578,19 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 
struct inet6_dev *idev; read_lock(&dev_base_lock); - for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) { + idx = 0; + for_each_netdev(dev) { if (idx < s_idx) - continue; + goto cont; if ((idev = in6_dev_get(dev)) == NULL) - continue; + goto cont; err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); in6_dev_put(idev); if (err <= 0) break; +cont: + idx++; } read_unlock(&dev_base_lock); cb->args[0] = idx; @@ -4247,7 +4253,7 @@ void __exit addrconf_cleanup(void) * clean dev list. */ - for (dev=dev_base; dev; dev=dev->next) { + for_each_netdev(dev) { if ((idev = __in6_dev_get(dev)) == NULL) continue; addrconf_ifdown(dev, 1); diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 09117d63256f..9b81264eb78f 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -423,14 +423,18 @@ static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr) */ int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr) { + int found = 0; + if (dev) return ipv6_chk_acast_dev(dev, addr); read_lock(&dev_base_lock); - for (dev=dev_base; dev; dev=dev->next) - if (ipv6_chk_acast_dev(dev, addr)) + for_each_netdev(dev) + if (ipv6_chk_acast_dev(dev, addr)) { + found = 1; break; + } read_unlock(&dev_base_lock); - return dev != 0; + return found; } @@ -447,9 +451,8 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq) struct ifacaddr6 *im = NULL; struct ac6_iter_state *state = ac6_seq_private(seq); - for (state->dev = dev_base, state->idev = NULL; - state->dev; - state->dev = state->dev->next) { + state->idev = NULL; + for_each_netdev(state->dev) { struct inet6_dev *idev; idev = in6_dev_get(state->dev); if (!idev) @@ -476,7 +479,7 @@ static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im read_unlock_bh(&state->idev->lock); in6_dev_put(state->idev); } - state->dev = state->dev->next; + state->dev = next_net_device(state->dev); if (!state->dev) { state->idev = NULL; break; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 6c2758951d60..3e308fb41b49 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2331,9 +2331,8 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq) struct ifmcaddr6 *im = NULL; struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); - for (state->dev = dev_base, state->idev = NULL; - state->dev; - state->dev = state->dev->next) { + state->idev = NULL; + for_each_netdev(state->dev) { struct inet6_dev *idev; idev = in6_dev_get(state->dev); if (!idev) @@ -2360,7 +2359,7 @@ static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr read_unlock_bh(&state->idev->lock); in6_dev_put(state->idev); } - state->dev = state->dev->next; + state->dev = next_net_device(state->dev); if (!state->dev) { state->idev = NULL; break; @@ -2475,9 +2474,9 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq) struct ifmcaddr6 *im = NULL; struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); - for (state->dev = dev_base, state->idev = NULL, state->im = NULL; - state->dev; - state->dev = state->dev->next) { + state->idev = NULL; + state->im = NULL; + for_each_netdev(state->dev) { struct inet6_dev *idev; idev = in6_dev_get(state->dev); if (unlikely(idev == NULL)) @@ -2513,7 +2512,7 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s read_unlock_bh(&state->idev->lock); in6_dev_put(state->idev); } - state->dev = state->dev->next; + state->dev = next_net_device(state->dev); if 
(!state->dev) { state->idev = NULL; goto out; diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index d12413cff5bd..d4b13a031fd5 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c @@ -160,8 +160,14 @@ static struct packet_type llc_tr_packet_type = { static int __init llc_init(void) { - if (dev_base->next) - memcpy(llc_station_mac_sa, dev_base->next->dev_addr, ETH_ALEN); + struct net_device *dev; + + dev = first_net_device(); + if (dev != NULL) + dev = next_net_device(dev); + + if (dev != NULL) + memcpy(llc_station_mac_sa, dev->dev_addr, ETH_ALEN); else memset(llc_station_mac_sa, 0, ETH_ALEN); dev_add_pack(&llc_packet_type); diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 8e6bd4e9d82c..2f76e062609d 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -598,7 +598,7 @@ struct net_device *nr_dev_first(void) struct net_device *dev, *first = NULL; read_lock(&dev_base_lock); - for (dev = dev_base; dev != NULL; dev = dev->next) { + for_each_netdev(dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) if (first == NULL || strncmp(dev->name, first->name, 3) < 0) first = dev; @@ -618,12 +618,13 @@ struct net_device *nr_dev_get(ax25_address *addr) struct net_device *dev; read_lock(&dev_base_lock); - for (dev = dev_base; dev != NULL; dev = dev->next) { + for_each_netdev(dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) { dev_hold(dev); goto out; } } + dev = NULL; out: read_unlock(&dev_base_lock); return dev; diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 1f9aefd95a99..929a784a86d7 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -596,7 +596,7 @@ struct net_device *rose_dev_first(void) struct net_device *dev, *first = NULL; read_lock(&dev_base_lock); - for (dev = dev_base; dev != NULL; dev = dev->next) { + for_each_netdev(dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE) if (first == NULL || strncmp(dev->name, first->name, 3) < 0) first = dev; @@ -614,12 +614,13 @@ struct net_device *rose_dev_get(rose_address *addr) struct net_device *dev; read_lock(&dev_base_lock); - for (dev = dev_base; dev != NULL; dev = dev->next) { + for_each_netdev(dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) { dev_hold(dev); goto out; } } + dev = NULL; out: read_unlock(&dev_base_lock); return dev; @@ -630,10 +631,11 @@ static int rose_dev_exists(rose_address *addr) struct net_device *dev; read_lock(&dev_base_lock); - for (dev = dev_base; dev != NULL; dev = dev->next) { + for_each_netdev(dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) goto out; } + dev = NULL; out: read_unlock(&dev_base_lock); return dev != NULL; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 8699e7006d80..bec600af03ca 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -894,9 +894,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) s_idx = cb->args[0]; s_q_idx = q_idx = cb->args[1]; read_lock(&dev_base_lock); - for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) { + idx = 0; + for_each_netdev(dev) { if (idx < s_idx) - continue; + goto cont; if (idx > s_idx) s_q_idx = 0; q_idx = 0; @@ -910,6 +911,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) goto done; q_idx++; } +cont: + idx++; } done: diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index c361deb6cea9..d4afafc39138 100644 
--- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -170,7 +170,7 @@ static void sctp_get_local_addr_list(void) struct sctp_af *af; read_lock(&dev_base_lock); - for (dev = dev_base; dev; dev = dev->next) { + for_each_netdev(dev) { __list_for_each(pos, &sctp_address_families) { af = list_entry(pos, struct sctp_af, list); af->copy_addrlist(&sctp_local_addr_list, dev); diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 67bb29b44d1b..0ee6ded18f3a 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -120,16 +120,18 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev, static int enable_bearer(struct tipc_bearer *tb_ptr) { - struct net_device *dev = dev_base; + struct net_device *dev, *pdev; struct eth_bearer *eb_ptr = ð_bearers[0]; struct eth_bearer *stop = ð_bearers[MAX_ETH_BEARERS]; char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1; /* Find device with specified name */ - - while (dev && dev->name && strncmp(dev->name, driver_name, IFNAMSIZ)) { - dev = dev->next; - } + dev = NULL; + for_each_netdev(pdev) + if (!strncmp(dev->name, driver_name, IFNAMSIZ)) { + dev = pdev; + break; + } if (!dev) return -ENODEV; -- cgit v1.2.3 From af11e31609d93765c1b22611592543e028f7aa54 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Fri, 4 May 2007 12:55:13 -0700 Subject: [XFRM] SAD info TLV aggregationx Aggregate the SAD info TLVs. Signed-off-by: Jamal Hadi Salim Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/xfrm.h | 21 +++++++-------------- include/net/xfrm.h | 17 ++++++++--------- net/xfrm/xfrm_state.c | 2 +- net/xfrm/xfrm_user.c | 22 +++++++++------------- 4 files changed, 25 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index a5d53e0fe152..c237fe3af8ce 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -243,17 +243,6 @@ enum xfrm_ae_ftype_t { #define XFRM_AE_MAX (__XFRM_AE_MAX - 1) }; -/* SAD Table filter flags */ -enum xfrm_sad_ftype_t { - XFRM_SAD_UNSPEC, - XFRM_SAD_HMASK=1, - XFRM_SAD_HMAX=2, - XFRM_SAD_CNT=4, - __XFRM_SAD_MAX - -#define XFRM_SAD_MAX (__XFRM_SAD_MAX - 1) -}; - struct xfrm_userpolicy_type { __u8 type; __u16 reserved1; @@ -287,14 +276,18 @@ enum xfrm_attr_type_t { enum xfrm_sadattr_type_t { XFRMA_SAD_UNSPEC, - XFRMA_SADHMASK, - XFRMA_SADHMAX, - XFRMA_SADCNT, + XFRMA_SAD_CNT, + XFRMA_SAD_HINFO, __XFRMA_SAD_MAX #define XFRMA_SAD_MAX (__XFRMA_SAD_MAX - 1) }; +struct xfrmu_sadhinfo { + __u32 sadhcnt; /* current hash bkts */ + __u32 sadhmcnt; /* max allowed hash bkts */ +}; + /* SPD Table filter flags */ enum xfrm_spd_ftype_t { XFRM_SPD_UNSPEC, diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 66c2d3eec03c..d0d7db51d3fc 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -416,14 +416,6 @@ struct xfrm_audit u32 secid; }; -/* SAD metadata, add more later */ -struct xfrm_sadinfo -{ - u32 sadhcnt; /* current hash bkts */ - u32 sadhmcnt; /* max allowed hash bkts */ - u32 sadcnt; /* current running count */ -}; - struct xfrm_spdinfo { u32 incnt; @@ -964,10 +956,17 @@ static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **s return -ENOSYS; } #endif + +struct xfrmk_sadinfo { + u32 sadhcnt; /* current hash bkts */ + u32 sadhmcnt; /* max allowed hash bkts */ + u32 sadcnt; /* current running count */ +}; + extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq); extern int xfrm_state_delete(struct xfrm_state *x); extern void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info); -extern 
void xfrm_sad_getinfo(struct xfrm_sadinfo *si); +extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si); extern void xfrm_spd_getinfo(struct xfrm_spdinfo *si); extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq); extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f3a61ebd8d65..9955ff4da0a2 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -421,7 +421,7 @@ restart: } EXPORT_SYMBOL(xfrm_state_flush); -void xfrm_sad_getinfo(struct xfrm_sadinfo *si) +void xfrm_sad_getinfo(struct xfrmk_sadinfo *si) { spin_lock_bh(&xfrm_state_lock); si->sadcnt = xfrm_state_num; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 4210d91624cd..c35b9ea3b62b 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -749,7 +749,8 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) { - struct xfrm_sadinfo si; + struct xfrmk_sadinfo si; + struct xfrmu_sadhinfo sh; struct nlmsghdr *nlh; u32 *f; @@ -761,12 +762,11 @@ static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) *f = flags; xfrm_sad_getinfo(&si); - if (flags & XFRM_SAD_HMASK) - NLA_PUT_U32(skb, XFRMA_SADHMASK, si.sadhcnt); - if (flags & XFRM_SAD_HMAX) - NLA_PUT_U32(skb, XFRMA_SADHMAX, si.sadhmcnt); - if (flags & XFRM_SAD_CNT) - NLA_PUT_U32(skb, XFRMA_SADCNT, si.sadcnt); + sh.sadhmcnt = si.sadhmcnt; + sh.sadhcnt = si.sadhcnt; + + NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt); + NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh); return nlmsg_end(skb, nlh); @@ -784,12 +784,8 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, u32 seq = nlh->nlmsg_seq; int len = NLMSG_LENGTH(sizeof(u32)); - if (*flags & XFRM_SAD_HMASK) - len += RTA_SPACE(sizeof(u32)); - if (*flags & XFRM_SAD_HMAX) - len += RTA_SPACE(sizeof(u32)); - if (*flags & XFRM_SAD_CNT) - len += RTA_SPACE(sizeof(u32)); + len += RTA_SPACE(sizeof(struct xfrmu_sadhinfo)); + len += RTA_SPACE(sizeof(u32)); r_skb = alloc_skb(len, GFP_ATOMIC); -- cgit v1.2.3 From 5a6d34162f5c6f522f857df274f1c8240f161e11 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Fri, 4 May 2007 12:55:39 -0700 Subject: [XFRM] SPD info TLV aggregation Aggregate the SPD info TLVs. Signed-off-by: Jamal Hadi Salim Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/xfrm.h | 39 ++++++++++++++++---------------------- include/net/xfrm.h | 24 ++++++++++++------------ net/xfrm/xfrm_policy.c | 2 +- net/xfrm/xfrm_user.c | 51 ++++++++++++++++---------------------------------- 4 files changed, 45 insertions(+), 71 deletions(-) (limited to 'include/linux') diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index c237fe3af8ce..b58adc52448d 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -288,36 +288,29 @@ struct xfrmu_sadhinfo { __u32 sadhmcnt; /* max allowed hash bkts */ }; -/* SPD Table filter flags */ -enum xfrm_spd_ftype_t { - XFRM_SPD_UNSPEC, - XFRM_SPD_HMASK=1, - XFRM_SPD_HMAX=2, - XFRM_SPD_ICNT=4, - XFRM_SPD_OCNT=8, - XFRM_SPD_FCNT=16, - XFRM_SPD_ISCNT=32, - XFRM_SPD_OSCNT=64, - XFRM_SPD_FSCNT=128, - __XFRM_SPD_MAX - -#define XFRM_SPD_MAX (__XFRM_SPD_MAX - 1) -}; enum xfrm_spdattr_type_t { XFRMA_SPD_UNSPEC, - XFRMA_SPDHMASK, - XFRMA_SPDHMAX, - XFRMA_SPDICNT, - XFRMA_SPDOCNT, - XFRMA_SPDFCNT, - XFRMA_SPDISCNT, - XFRMA_SPDOSCNT, - XFRMA_SPDFSCNT, + XFRMA_SPD_INFO, + XFRMA_SPD_HINFO, __XFRMA_SPD_MAX #define XFRMA_SPD_MAX (__XFRMA_SPD_MAX - 1) }; +struct xfrmu_spdinfo { + __u32 incnt; + __u32 outcnt; + __u32 fwdcnt; + __u32 inscnt; + __u32 outscnt; + __u32 fwdscnt; +}; + +struct xfrmu_spdhinfo { + __u32 spdhcnt; + __u32 spdhmcnt; +}; + struct xfrm_usersa_info { struct xfrm_selector sel; struct xfrm_id id; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index d0d7db51d3fc..39ef925d39dd 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -416,17 +416,6 @@ struct xfrm_audit u32 secid; }; -struct xfrm_spdinfo -{ - u32 incnt; - u32 outcnt; - u32 fwdcnt; - u32 inscnt; - u32 outscnt; - u32 fwdscnt; - u32 spdhcnt; - u32 spdhmcnt; -}; #ifdef CONFIG_AUDITSYSCALL extern void xfrm_audit_log(uid_t auid, u32 secid, int type, int result, struct xfrm_policy *xp, struct xfrm_state *x); @@ -963,11 +952,22 @@ struct xfrmk_sadinfo { u32 sadcnt; /* current running count */ }; +struct xfrmk_spdinfo { + u32 incnt; + u32 outcnt; + u32 fwdcnt; + u32 inscnt; + u32 outscnt; + u32 fwdscnt; + u32 spdhcnt; + u32 spdhmcnt; +}; + extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq); extern int xfrm_state_delete(struct xfrm_state *x); extern void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info); extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si); -extern void xfrm_spd_getinfo(struct xfrm_spdinfo *si); +extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si); extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq); extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq); extern void xfrm_replay_notify(struct xfrm_state *x, int event); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 263e34e45265..95271e8426a1 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -579,7 +579,7 @@ static inline int xfrm_byidx_should_resize(int total) return 0; } -void xfrm_spd_getinfo(struct xfrm_spdinfo *si) +void xfrm_spd_getinfo(struct xfrmk_spdinfo *si) { read_lock_bh(&xfrm_policy_lock); si->incnt = xfrm_policy_count[XFRM_POLICY_IN]; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index c35b9ea3b62b..b14c7e590c31 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -674,7 +674,9 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) { - struct xfrm_spdinfo si; + struct xfrmk_spdinfo si; + struct xfrmu_spdinfo spc; + struct xfrmu_spdhinfo sph; struct nlmsghdr *nlh; u32 *f; @@ 
-685,23 +687,17 @@ static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) f = nlmsg_data(nlh); *f = flags; xfrm_spd_getinfo(&si); - - if (flags & XFRM_SPD_HMASK) - NLA_PUT_U32(skb, XFRMA_SPDHMASK, si.spdhcnt); - if (flags & XFRM_SPD_HMAX) - NLA_PUT_U32(skb, XFRMA_SPDHMAX, si.spdhmcnt); - if (flags & XFRM_SPD_ICNT) - NLA_PUT_U32(skb, XFRMA_SPDICNT, si.incnt); - if (flags & XFRM_SPD_OCNT) - NLA_PUT_U32(skb, XFRMA_SPDOCNT, si.outcnt); - if (flags & XFRM_SPD_FCNT) - NLA_PUT_U32(skb, XFRMA_SPDFCNT, si.fwdcnt); - if (flags & XFRM_SPD_ISCNT) - NLA_PUT_U32(skb, XFRMA_SPDISCNT, si.inscnt); - if (flags & XFRM_SPD_OSCNT) - NLA_PUT_U32(skb, XFRMA_SPDOSCNT, si.inscnt); - if (flags & XFRM_SPD_FSCNT) - NLA_PUT_U32(skb, XFRMA_SPDFSCNT, si.inscnt); + spc.incnt = si.incnt; + spc.outcnt = si.outcnt; + spc.fwdcnt = si.fwdcnt; + spc.inscnt = si.inscnt; + spc.outscnt = si.outscnt; + spc.fwdscnt = si.fwdscnt; + sph.spdhcnt = si.spdhcnt; + sph.spdhmcnt = si.spdhmcnt; + + NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); + NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); return nlmsg_end(skb, nlh); @@ -719,23 +715,8 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, u32 seq = nlh->nlmsg_seq; int len = NLMSG_LENGTH(sizeof(u32)); - - if (*flags & XFRM_SPD_HMASK) - len += RTA_SPACE(sizeof(u32)); - if (*flags & XFRM_SPD_HMAX) - len += RTA_SPACE(sizeof(u32)); - if (*flags & XFRM_SPD_ICNT) - len += RTA_SPACE(sizeof(u32)); - if (*flags & XFRM_SPD_OCNT) - len += RTA_SPACE(sizeof(u32)); - if (*flags & XFRM_SPD_FCNT) - len += RTA_SPACE(sizeof(u32)); - if (*flags & XFRM_SPD_ISCNT) - len += RTA_SPACE(sizeof(u32)); - if (*flags & XFRM_SPD_OSCNT) - len += RTA_SPACE(sizeof(u32)); - if (*flags & XFRM_SPD_FSCNT) - len += RTA_SPACE(sizeof(u32)); + len += RTA_SPACE(sizeof(struct xfrmu_spdinfo)); + len += RTA_SPACE(sizeof(struct xfrmu_spdhinfo)); r_skb = alloc_skb(len, GFP_ATOMIC); if (r_skb == NULL) -- cgit v1.2.3 From c04cb856e20a8bf68762d60737b84328c1ab5900 Mon Sep 17 00:00:00 2001 From: Michael Schmitz Date: Tue, 1 May 2007 22:32:38 +0200 Subject: m68k: Atari keyboard and mouse support. Atari keyboard and mouse support. (reformating and Kconfig fixes by Roman Zippel) Signed-off-by: Michael Schmitz Signed-off-by: Roman Zippel Signed-off-by: Geert Uytterhoeven Signed-off-by: Linus Torvalds --- arch/m68k/Kconfig | 3 + arch/m68k/atari/Makefile | 1 + arch/m68k/atari/atakeyb.c | 730 +++++++++++++++++++++++++++++++++++++++ drivers/input/keyboard/Kconfig | 11 + drivers/input/keyboard/Makefile | 1 + drivers/input/keyboard/atakbd.c | 134 +++++++ drivers/input/mouse/Kconfig | 11 + drivers/input/mouse/Makefile | 1 + drivers/input/mouse/atarimouse.c | 160 +++++++++ include/asm-m68k/atarikb.h | 6 + include/linux/input.h | 1 + 11 files changed, 1059 insertions(+) create mode 100644 arch/m68k/atari/atakeyb.c create mode 100644 drivers/input/keyboard/atakbd.c create mode 100644 drivers/input/mouse/atarimouse.c (limited to 'include/linux') diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index a8e1e604dfa8..b8536c7c0877 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -409,6 +409,9 @@ config STRAM_PROC help Say Y here to report ST-RAM usage statistics in /proc/stram. 
+config ATARI_KBD_CORE + bool + config HEARTBEAT bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40 default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300 diff --git a/arch/m68k/atari/Makefile b/arch/m68k/atari/Makefile index 8cb6236b39db..2cb86191f0aa 100644 --- a/arch/m68k/atari/Makefile +++ b/arch/m68k/atari/Makefile @@ -8,3 +8,4 @@ obj-y := config.o time.o debug.o ataints.o stdma.o \ ifeq ($(CONFIG_PCI),y) obj-$(CONFIG_HADES) += hades-pci.o endif +obj-$(CONFIG_ATARI_KBD_CORE) += atakeyb.o diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c new file mode 100644 index 000000000000..1c29603b16b3 --- /dev/null +++ b/arch/m68k/atari/atakeyb.c @@ -0,0 +1,730 @@ +/* + * linux/atari/atakeyb.c + * + * Atari Keyboard driver for 680x0 Linux + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +/* + * Atari support by Robert de Vries + * enhanced by Bjoern Brauel and Roman Hodek + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static void atakeyb_rep(unsigned long ignore); +extern unsigned int keymap_count; + +/* Hook for MIDI serial driver */ +void (*atari_MIDI_interrupt_hook) (void); +/* Hook for mouse driver */ +void (*atari_mouse_interrupt_hook) (char *); +/* Hook for keyboard inputdev driver */ +void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); +/* Hook for mouse inputdev driver */ +void (*atari_input_mouse_interrupt_hook) (char *); + +/* variables for IKBD self test: */ + +/* state: 0: off; >0: in progress; >1: 0xf1 received */ +static volatile int ikbd_self_test; +/* timestamp when last received a char */ +static volatile unsigned long self_test_last_rcv; +/* bitmap of keys reported as broken */ +static unsigned long broken_keys[128/(sizeof(unsigned long)*8)] = { 0, }; + +#define BREAK_MASK (0x80) + +/* + * ++roman: The following changes were applied manually: + * + * - The Alt (= Meta) key works in combination with Shift and + * Control, e.g. Alt+Shift+a sends Meta-A (0xc1), Alt+Control+A sends + * Meta-Ctrl-A (0x81) ... + * + * - The parentheses on the keypad send '(' and ')' with all + * modifiers (as would do e.g. keypad '+'), but they cannot be used as + * application keys (i.e. sending Esc O c). + * + * - HELP and UNDO are mapped to be F21 and F24, resp, that send the + * codes "\E[M" and "\E[P". (This is better than the old mapping to + * F11 and F12, because these codes are on Shift+F1/2 anyway.) This + * way, applications that allow their own keyboard mappings + * (e.g. tcsh, X Windows) can be configured to use them in the way + * the label suggests (providing help or undoing). + * + * - Console switching is done with Alt+Fx (consoles 1..10) and + * Shift+Alt+Fx (consoles 11..20). + * + * - The misc. 
special function implemented in the kernel are mapped + * to the following key combinations: + * + * ClrHome -> Home/Find + * Shift + ClrHome -> End/Select + * Shift + Up -> Page Up + * Shift + Down -> Page Down + * Alt + Help -> show system status + * Shift + Help -> show memory info + * Ctrl + Help -> show registers + * Ctrl + Alt + Del -> Reboot + * Alt + Undo -> switch to last console + * Shift + Undo -> send interrupt + * Alt + Insert -> stop/start output (same as ^S/^Q) + * Alt + Up -> Scroll back console (if implemented) + * Alt + Down -> Scroll forward console (if implemented) + * Alt + CapsLock -> NumLock + * + * ++Andreas: + * + * - Help mapped to K_HELP + * - Undo mapped to K_UNDO (= K_F246) + * - Keypad Left/Right Parenthesis mapped to new K_PPAREN[LR] + */ + +static u_short ataplain_map[NR_KEYS] __initdata = { + 0xf200, 0xf01b, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, + 0xf037, 0xf038, 0xf039, 0xf030, 0xf02d, 0xf03d, 0xf008, 0xf009, + 0xfb71, 0xfb77, 0xfb65, 0xfb72, 0xfb74, 0xfb79, 0xfb75, 0xfb69, + 0xfb6f, 0xfb70, 0xf05b, 0xf05d, 0xf201, 0xf702, 0xfb61, 0xfb73, + 0xfb64, 0xfb66, 0xfb67, 0xfb68, 0xfb6a, 0xfb6b, 0xfb6c, 0xf03b, + 0xf027, 0xf060, 0xf700, 0xf05c, 0xfb7a, 0xfb78, 0xfb63, 0xfb76, + 0xfb62, 0xfb6e, 0xfb6d, 0xf02c, 0xf02e, 0xf02f, 0xf700, 0xf200, + 0xf703, 0xf020, 0xf207, 0xf100, 0xf101, 0xf102, 0xf103, 0xf104, + 0xf105, 0xf106, 0xf107, 0xf108, 0xf109, 0xf200, 0xf200, 0xf114, + 0xf603, 0xf200, 0xf30b, 0xf601, 0xf200, 0xf602, 0xf30a, 0xf200, + 0xf600, 0xf200, 0xf115, 0xf07f, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf1ff, 0xf11b, 0xf312, 0xf313, 0xf30d, 0xf30c, 0xf307, + 0xf308, 0xf309, 0xf304, 0xf305, 0xf306, 0xf301, 0xf302, 0xf303, + 0xf300, 0xf310, 0xf30e, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200 +}; + +typedef enum kb_state_t { + KEYBOARD, AMOUSE, RMOUSE, JOYSTICK, CLOCK, RESYNC +} KB_STATE_T; + +#define IS_SYNC_CODE(sc) ((sc) >= 0x04 && (sc) <= 0xfb) + +typedef struct keyboard_state { + unsigned char buf[6]; + int len; + KB_STATE_T state; +} KEYBOARD_STATE; + +KEYBOARD_STATE kb_state; + +#define DEFAULT_KEYB_REP_DELAY (HZ/4) +#define DEFAULT_KEYB_REP_RATE (HZ/25) + +/* These could be settable by some ioctl() in future... */ +static unsigned int key_repeat_delay = DEFAULT_KEYB_REP_DELAY; +static unsigned int key_repeat_rate = DEFAULT_KEYB_REP_RATE; + +static unsigned char rep_scancode; +static struct timer_list atakeyb_rep_timer = { + .function = atakeyb_rep, +}; + +static void atakeyb_rep(unsigned long ignore) +{ + /* Disable keyboard for the time we call handle_scancode(), else a race + * in the keyboard tty queue may happen */ + atari_disable_irq(IRQ_MFP_ACIA); + del_timer(&atakeyb_rep_timer); + + /* A keyboard int may have come in before we disabled the irq, so + * double-check whether rep_scancode is still != 0 */ + if (rep_scancode) { + init_timer(&atakeyb_rep_timer); + atakeyb_rep_timer.expires = jiffies + key_repeat_rate; + add_timer(&atakeyb_rep_timer); + + //handle_scancode(rep_scancode, 1); + if (atari_input_keyboard_interrupt_hook) + atari_input_keyboard_interrupt_hook(rep_scancode, 1); + } + + atari_enable_irq(IRQ_MFP_ACIA); +} + + +/* ++roman: If a keyboard overrun happened, we can't tell in general how much + * bytes have been lost and in which state of the packet structure we are now. + * This usually causes keyboards bytes to be interpreted as mouse movements + * and vice versa, which is very annoying. 
It seems better to throw away some + * bytes (that are usually mouse bytes) than to misinterpret them. Therefor I + * introduced the RESYNC state for IKBD data. In this state, the bytes up to + * one that really looks like a key event (0x04..0xf2) or the start of a mouse + * packet (0xf8..0xfb) are thrown away, but at most 2 bytes. This at least + * speeds up the resynchronization of the event structure, even if maybe a + * mouse movement is lost. However, nothing is perfect. For bytes 0x01..0x03, + * it's really hard to decide whether they're mouse or keyboard bytes. Since + * overruns usually occur when moving the Atari mouse rapidly, they're seen as + * mouse bytes here. If this is wrong, only a make code of the keyboard gets + * lost, which isn't too bad. Loosing a break code would be disastrous, + * because then the keyboard repeat strikes... + */ + +static irqreturn_t atari_keyboard_interrupt(int irq, void *dummy) +{ + u_char acia_stat; + int scancode; + int break_flag; + +repeat: + if (acia.mid_ctrl & ACIA_IRQ) + if (atari_MIDI_interrupt_hook) + atari_MIDI_interrupt_hook(); + acia_stat = acia.key_ctrl; + /* check out if the interrupt came from this ACIA */ + if (!((acia_stat | acia.mid_ctrl) & ACIA_IRQ)) + return IRQ_HANDLED; + + if (acia_stat & ACIA_OVRN) { + /* a very fast typist or a slow system, give a warning */ + /* ...happens often if interrupts were disabled for too long */ + printk(KERN_DEBUG "Keyboard overrun\n"); + scancode = acia.key_data; + /* Turn off autorepeating in case a break code has been lost */ + del_timer(&atakeyb_rep_timer); + rep_scancode = 0; + if (ikbd_self_test) + /* During self test, don't do resyncing, just process the code */ + goto interpret_scancode; + else if (IS_SYNC_CODE(scancode)) { + /* This code seem already to be the start of a new packet or a + * single scancode */ + kb_state.state = KEYBOARD; + goto interpret_scancode; + } else { + /* Go to RESYNC state and skip this byte */ + kb_state.state = RESYNC; + kb_state.len = 1; /* skip max. 1 another byte */ + goto repeat; + } + } + + if (acia_stat & ACIA_RDRF) { + /* received a character */ + scancode = acia.key_data; /* get it or reset the ACIA, I'll get it! */ + tasklet_schedule(&keyboard_tasklet); + interpret_scancode: + switch (kb_state.state) { + case KEYBOARD: + switch (scancode) { + case 0xF7: + kb_state.state = AMOUSE; + kb_state.len = 0; + break; + + case 0xF8: + case 0xF9: + case 0xFA: + case 0xFB: + kb_state.state = RMOUSE; + kb_state.len = 1; + kb_state.buf[0] = scancode; + break; + + case 0xFC: + kb_state.state = CLOCK; + kb_state.len = 0; + break; + + case 0xFE: + case 0xFF: + kb_state.state = JOYSTICK; + kb_state.len = 1; + kb_state.buf[0] = scancode; + break; + + case 0xF1: + /* during self-test, note that 0xf1 received */ + if (ikbd_self_test) { + ++ikbd_self_test; + self_test_last_rcv = jiffies; + break; + } + /* FALL THROUGH */ + + default: + break_flag = scancode & BREAK_MASK; + scancode &= ~BREAK_MASK; + if (ikbd_self_test) { + /* Scancodes sent during the self-test stand for broken + * keys (keys being down). The code *should* be a break + * code, but nevertheless some AT keyboard interfaces send + * make codes instead. Therefore, simply ignore + * break_flag... 
+ */ + int keyval = plain_map[scancode], keytyp; + + set_bit(scancode, broken_keys); + self_test_last_rcv = jiffies; + keyval = plain_map[scancode]; + keytyp = KTYP(keyval) - 0xf0; + keyval = KVAL(keyval); + + printk(KERN_WARNING "Key with scancode %d ", scancode); + if (keytyp == KT_LATIN || keytyp == KT_LETTER) { + if (keyval < ' ') + printk("('^%c') ", keyval + '@'); + else + printk("('%c') ", keyval); + } + printk("is broken -- will be ignored.\n"); + break; + } else if (test_bit(scancode, broken_keys)) + break; + +#if 0 // FIXME; hangs at boot + if (break_flag) { + del_timer(&atakeyb_rep_timer); + rep_scancode = 0; + } else { + del_timer(&atakeyb_rep_timer); + rep_scancode = scancode; + atakeyb_rep_timer.expires = jiffies + key_repeat_delay; + add_timer(&atakeyb_rep_timer); + } +#endif + + // handle_scancode(scancode, !break_flag); + if (atari_input_keyboard_interrupt_hook) + atari_input_keyboard_interrupt_hook((unsigned char)scancode, !break_flag); + break; + } + break; + + case AMOUSE: + kb_state.buf[kb_state.len++] = scancode; + if (kb_state.len == 5) { + kb_state.state = KEYBOARD; + /* not yet used */ + /* wake up someone waiting for this */ + } + break; + + case RMOUSE: + kb_state.buf[kb_state.len++] = scancode; + if (kb_state.len == 3) { + kb_state.state = KEYBOARD; + if (atari_mouse_interrupt_hook) + atari_mouse_interrupt_hook(kb_state.buf); + } + break; + + case JOYSTICK: + kb_state.buf[1] = scancode; + kb_state.state = KEYBOARD; +#ifdef FIXED_ATARI_JOYSTICK + atari_joystick_interrupt(kb_state.buf); +#endif + break; + + case CLOCK: + kb_state.buf[kb_state.len++] = scancode; + if (kb_state.len == 6) { + kb_state.state = KEYBOARD; + /* wake up someone waiting for this. + But will this ever be used, as Linux keeps its own time. + Perhaps for synchronization purposes? */ + /* wake_up_interruptible(&clock_wait); */ + } + break; + + case RESYNC: + if (kb_state.len <= 0 || IS_SYNC_CODE(scancode)) { + kb_state.state = KEYBOARD; + goto interpret_scancode; + } + kb_state.len--; + break; + } + } + +#if 0 + if (acia_stat & ACIA_CTS) + /* cannot happen */; +#endif + + if (acia_stat & (ACIA_FE | ACIA_PE)) { + printk("Error in keyboard communication\n"); + } + + /* handle_scancode() can take a lot of time, so check again if + * some character arrived + */ + goto repeat; +} + +/* + * I write to the keyboard without using interrupts, I poll instead. + * This takes for the maximum length string allowed (7) at 7812.5 baud + * 8 data 1 start 1 stop bit: 9.0 ms + * If this takes too long for normal operation, interrupt driven writing + * is the solution. (I made a feeble attempt in that direction but I + * kept it simple for now.) 
+ */ +void ikbd_write(const char *str, int len) +{ + u_char acia_stat; + + if ((len < 1) || (len > 7)) + panic("ikbd: maximum string length exceeded"); + while (len) { + acia_stat = acia.key_ctrl; + if (acia_stat & ACIA_TDRE) { + acia.key_data = *str++; + len--; + } + } +} + +/* Reset (without touching the clock) */ +void ikbd_reset(void) +{ + static const char cmd[2] = { 0x80, 0x01 }; + + ikbd_write(cmd, 2); + + /* + * if all's well code 0xF1 is returned, else the break codes of + * all keys making contact + */ +} + +/* Set mouse button action */ +void ikbd_mouse_button_action(int mode) +{ + char cmd[2] = { 0x07, mode }; + + ikbd_write(cmd, 2); +} + +/* Set relative mouse position reporting */ +void ikbd_mouse_rel_pos(void) +{ + static const char cmd[1] = { 0x08 }; + + ikbd_write(cmd, 1); +} + +/* Set absolute mouse position reporting */ +void ikbd_mouse_abs_pos(int xmax, int ymax) +{ + char cmd[5] = { 0x09, xmax>>8, xmax&0xFF, ymax>>8, ymax&0xFF }; + + ikbd_write(cmd, 5); +} + +/* Set mouse keycode mode */ +void ikbd_mouse_kbd_mode(int dx, int dy) +{ + char cmd[3] = { 0x0A, dx, dy }; + + ikbd_write(cmd, 3); +} + +/* Set mouse threshold */ +void ikbd_mouse_thresh(int x, int y) +{ + char cmd[3] = { 0x0B, x, y }; + + ikbd_write(cmd, 3); +} + +/* Set mouse scale */ +void ikbd_mouse_scale(int x, int y) +{ + char cmd[3] = { 0x0C, x, y }; + + ikbd_write(cmd, 3); +} + +/* Interrogate mouse position */ +void ikbd_mouse_pos_get(int *x, int *y) +{ + static const char cmd[1] = { 0x0D }; + + ikbd_write(cmd, 1); + + /* wait for returning bytes */ +} + +/* Load mouse position */ +void ikbd_mouse_pos_set(int x, int y) +{ + char cmd[6] = { 0x0E, 0x00, x>>8, x&0xFF, y>>8, y&0xFF }; + + ikbd_write(cmd, 6); +} + +/* Set Y=0 at bottom */ +void ikbd_mouse_y0_bot(void) +{ + static const char cmd[1] = { 0x0F }; + + ikbd_write(cmd, 1); +} + +/* Set Y=0 at top */ +void ikbd_mouse_y0_top(void) +{ + static const char cmd[1] = { 0x10 }; + + ikbd_write(cmd, 1); +} + +/* Resume */ +void ikbd_resume(void) +{ + static const char cmd[1] = { 0x11 }; + + ikbd_write(cmd, 1); +} + +/* Disable mouse */ +void ikbd_mouse_disable(void) +{ + static const char cmd[1] = { 0x12 }; + + ikbd_write(cmd, 1); +} + +/* Pause output */ +void ikbd_pause(void) +{ + static const char cmd[1] = { 0x13 }; + + ikbd_write(cmd, 1); +} + +/* Set joystick event reporting */ +void ikbd_joystick_event_on(void) +{ + static const char cmd[1] = { 0x14 }; + + ikbd_write(cmd, 1); +} + +/* Set joystick interrogation mode */ +void ikbd_joystick_event_off(void) +{ + static const char cmd[1] = { 0x15 }; + + ikbd_write(cmd, 1); +} + +/* Joystick interrogation */ +void ikbd_joystick_get_state(void) +{ + static const char cmd[1] = { 0x16 }; + + ikbd_write(cmd, 1); +} + +#if 0 +/* This disables all other ikbd activities !!!! 
*/ +/* Set joystick monitoring */ +void ikbd_joystick_monitor(int rate) +{ + static const char cmd[2] = { 0x17, rate }; + + ikbd_write(cmd, 2); + + kb_state.state = JOYSTICK_MONITOR; +} +#endif + +/* some joystick routines not in yet (0x18-0x19) */ + +/* Disable joysticks */ +void ikbd_joystick_disable(void) +{ + static const char cmd[1] = { 0x1A }; + + ikbd_write(cmd, 1); +} + +/* Time-of-day clock set */ +void ikbd_clock_set(int year, int month, int day, int hour, int minute, int second) +{ + char cmd[7] = { 0x1B, year, month, day, hour, minute, second }; + + ikbd_write(cmd, 7); +} + +/* Interrogate time-of-day clock */ +void ikbd_clock_get(int *year, int *month, int *day, int *hour, int *minute, int second) +{ + static const char cmd[1] = { 0x1C }; + + ikbd_write(cmd, 1); +} + +/* Memory load */ +void ikbd_mem_write(int address, int size, char *data) +{ + panic("Attempt to write data into keyboard memory"); +} + +/* Memory read */ +void ikbd_mem_read(int address, char data[6]) +{ + char cmd[3] = { 0x21, address>>8, address&0xFF }; + + ikbd_write(cmd, 3); + + /* receive data and put it in data */ +} + +/* Controller execute */ +void ikbd_exec(int address) +{ + char cmd[3] = { 0x22, address>>8, address&0xFF }; + + ikbd_write(cmd, 3); +} + +/* Status inquiries (0x87-0x9A) not yet implemented */ + +/* Set the state of the caps lock led. */ +void atari_kbd_leds(unsigned int leds) +{ + char cmd[6] = {32, 0, 4, 1, 254 + ((leds & 4) != 0), 0}; + + ikbd_write(cmd, 6); +} + +/* + * The original code sometimes left the interrupt line of + * the ACIAs low forever. I hope, it is fixed now. + * + * Martin Rogge, 20 Aug 1995 + */ + +static int atari_keyb_done = 0; + +int __init atari_keyb_init(void) +{ + if (atari_keyb_done) + return 0; + + /* setup key map */ + memcpy(key_maps[0], ataplain_map, sizeof(plain_map)); + + kb_state.state = KEYBOARD; + kb_state.len = 0; + + request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, IRQ_TYPE_SLOW, + "keyboard/mouse/MIDI", atari_keyboard_interrupt); + + atari_turnoff_irq(IRQ_MFP_ACIA); + do { + /* reset IKBD ACIA */ + acia.key_ctrl = ACIA_RESET | + (atari_switches & ATARI_SWITCH_IKBD) ? ACIA_RHTID : 0; + (void)acia.key_ctrl; + (void)acia.key_data; + + /* reset MIDI ACIA */ + acia.mid_ctrl = ACIA_RESET | + (atari_switches & ATARI_SWITCH_MIDI) ? ACIA_RHTID : 0; + (void)acia.mid_ctrl; + (void)acia.mid_data; + + /* divide 500kHz by 64 gives 7812.5 baud */ + /* 8 data no parity 1 start 1 stop bit */ + /* receive interrupt enabled */ + /* RTS low (except if switch selected), transmit interrupt disabled */ + acia.key_ctrl = (ACIA_DIV64|ACIA_D8N1S|ACIA_RIE) | + ((atari_switches & ATARI_SWITCH_IKBD) ? + ACIA_RHTID : ACIA_RLTID); + + acia.mid_ctrl = ACIA_DIV16 | ACIA_D8N1S | + (atari_switches & ATARI_SWITCH_MIDI) ? 
ACIA_RHTID : 0; + + /* make sure the interrupt line is up */ + } while ((mfp.par_dt_reg & 0x10) == 0); + + /* enable ACIA Interrupts */ + mfp.active_edge &= ~0x10; + atari_turnon_irq(IRQ_MFP_ACIA); + + ikbd_self_test = 1; + ikbd_reset(); + /* wait for a period of inactivity (here: 0.25s), then assume the IKBD's + * self-test is finished */ + self_test_last_rcv = jiffies; + while (time_before(jiffies, self_test_last_rcv + HZ/4)) + barrier(); + /* if not incremented: no 0xf1 received */ + if (ikbd_self_test == 1) + printk(KERN_ERR "WARNING: keyboard self test failed!\n"); + ikbd_self_test = 0; + + ikbd_mouse_disable(); + ikbd_joystick_disable(); + +#ifdef FIXED_ATARI_JOYSTICK + atari_joystick_init(); +#endif + + // flag init done + atari_keyb_done = 1; + return 0; +} + + +int atari_kbdrate(struct kbd_repeat *k) +{ + if (k->delay > 0) { + /* convert from msec to jiffies */ + key_repeat_delay = (k->delay * HZ + 500) / 1000; + if (key_repeat_delay < 1) + key_repeat_delay = 1; + } + if (k->period > 0) { + key_repeat_rate = (k->period * HZ + 500) / 1000; + if (key_repeat_rate < 1) + key_repeat_rate = 1; + } + + k->delay = key_repeat_delay * 1000 / HZ; + k->period = key_repeat_rate * 1000 / HZ; + + return 0; +} + +int atari_kbd_translate(unsigned char keycode, unsigned char *keycodep, char raw_mode) +{ +#ifdef CONFIG_MAGIC_SYSRQ + /* ALT+HELP pressed? */ + if ((keycode == 98) && ((shift_state & 0xff) == 8)) + *keycodep = 0xff; + else +#endif + *keycodep = keycode; + return 1; +} diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index f17e9c7d4b36..c845d5c4cbba 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -164,6 +164,17 @@ config KEYBOARD_AMIGA To compile this driver as a module, choose M here: the module will be called amikbd. +config KEYBOARD_ATARI + tristate "Atari keyboard" + depends on ATARI + select ATARI_KBD_CORE + help + Say Y here if you are running Linux on any Atari and have a keyboard + attached. + + To compile this driver as a module, choose M here: the + module will be called atakbd. + config KEYBOARD_HIL_OLD tristate "HP HIL keyboard support (simple driver)" depends on GSC || HP300 diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 586a0fe53be6..7aa59ee5a0b6 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o +obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c new file mode 100644 index 000000000000..ded1d6ac6ff3 --- /dev/null +++ b/drivers/input/keyboard/atakbd.c @@ -0,0 +1,134 @@ +/* + * atakbd.c + * + * Copyright (c) 2005 Michael Schmitz + * + * Based on amikbd.c, which is + * + * Copyright (c) 2000-2001 Vojtech Pavlik + * + * Based on the work of: + * Hamish Macdonald + */ + +/* + * Atari keyboard driver for Linux/m68k + * + * The low level init and interrupt stuff is handled in arch/mm68k/atari/atakeyb.c + * (the keyboard ACIA also handles the mouse and joystick data, and the keyboard + * interrupt is shared with the MIDI ACIA so MIDI data also get handled there). + * This driver only deals with handing key events off to the input layer. 
+ */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Should you need to contact me, the author, you can do so either by + * e-mail - mail your message to , or by paper mail: + * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +MODULE_AUTHOR("Michael Schmitz "); +MODULE_DESCRIPTION("Atari keyboard driver"); +MODULE_LICENSE("GPL"); + +static unsigned char atakbd_keycode[0x72]; + +static struct input_dev *atakbd_dev; + +static void atakbd_interrupt(unsigned char scancode, char down) +{ + + if (scancode < 0x72) { /* scancodes < 0xf2 are keys */ + + // report raw events here? + + scancode = atakbd_keycode[scancode]; + + if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */ + input_report_key(atakbd_dev, scancode, 1); + input_report_key(atakbd_dev, scancode, 0); + input_sync(atakbd_dev); + } else { + input_report_key(atakbd_dev, scancode, down); + input_sync(atakbd_dev); + } + } else /* scancodes >= 0xf2 are mouse data, most likely */ + printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode); + + return; +} + +static int __init atakbd_init(void) +{ + int i; + + if (!ATARIHW_PRESENT(ST_MFP)) + return -EIO; + + // TODO: request_mem_region if not done in arch code + + if (!(atakbd_dev = input_allocate_device())) + return -ENOMEM; + + // need to init core driver if not already done so + if (atari_keyb_init()) + return -ENODEV; + + atakbd_dev->name = "Atari Keyboard"; + atakbd_dev->phys = "atakbd/input0"; + atakbd_dev->id.bustype = BUS_ATARI; + atakbd_dev->id.vendor = 0x0001; + atakbd_dev->id.product = 0x0001; + atakbd_dev->id.version = 0x0100; + + atakbd_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP); + atakbd_dev->keycode = atakbd_keycode; + atakbd_dev->keycodesize = sizeof(unsigned char); + atakbd_dev->keycodemax = ARRAY_SIZE(atakbd_keycode); + + for (i = 1; i < 0x72; i++) { + atakbd_keycode[i] = i; + set_bit(atakbd_keycode[i], atakbd_dev->keybit); + } + + input_register_device(atakbd_dev); + + atari_input_keyboard_interrupt_hook = atakbd_interrupt; + + printk(KERN_INFO "input: %s at IKBD ACIA\n", atakbd_dev->name); + + return 0; +} + +static void __exit atakbd_exit(void) +{ + atari_input_keyboard_interrupt_hook = NULL; + input_unregister_device(atakbd_dev); +} + +module_init(atakbd_init); +module_exit(atakbd_exit); diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index 35d998c3e578..b40784a60aa1 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig @@ -96,6 +96,17 @@ config MOUSE_AMIGA To compile this driver as a module, choose M here: the module will be called amimouse. 
+config MOUSE_ATARI + tristate "Atari mouse" + depends on ATARI + select ATARI_KBD_CORE + help + Say Y here if you have an Atari and want its native mouse + supported by the kernel. + + To compile this driver as a module, choose M here: the + module will be called atarimouse. + config MOUSE_RISCPC tristate "Acorn RiscPC mouse" depends on ARCH_ACORN diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile index 21a1de61a79b..700f61200dfc 100644 --- a/drivers/input/mouse/Makefile +++ b/drivers/input/mouse/Makefile @@ -5,6 +5,7 @@ # Each configuration option enables a list of files. obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o +obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o obj-$(CONFIG_MOUSE_INPORT) += inport.o obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o diff --git a/drivers/input/mouse/atarimouse.c b/drivers/input/mouse/atarimouse.c new file mode 100644 index 000000000000..43ab6566fb65 --- /dev/null +++ b/drivers/input/mouse/atarimouse.c @@ -0,0 +1,160 @@ +/* + * Atari mouse driver for Linux/m68k + * + * Copyright (c) 2005 Michael Schmitz + * + * Based on: + * Amiga mouse driver for Linux/m68k + * + * Copyright (c) 2000-2002 Vojtech Pavlik + * + */ +/* + * The low level init and interrupt stuff is handled in arch/mm68k/atari/atakeyb.c + * (the keyboard ACIA also handles the mouse and joystick data, and the keyboard + * interrupt is shared with the MIDI ACIA so MIDI data also get handled there). + * This driver only deals with handing key events off to the input layer. + * + * Largely based on the old: + * + * Atari Mouse Driver for Linux + * by Robert de Vries (robert@and.nl) 19Jul93 + * + * 16 Nov 1994 Andreas Schwab + * Compatibility with busmouse + * Support for three button mouse (shamelessly stolen from MiNT) + * third button wired to one of the joystick directions on joystick 1 + * + * 1996/02/11 Andreas Schwab + * Module support + * Allow multiple open's + * + * Converted to use new generic busmouse code. 
5 Apr 1998 + * Russell King + */ + + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Michael Schmitz "); +MODULE_DESCRIPTION("Atari mouse driver"); +MODULE_LICENSE("GPL"); + +static int mouse_threshold[2] = {2,2}; + +#ifdef __MODULE__ +MODULE_PARM(mouse_threshold, "2i"); +#endif +#ifdef FIXED_ATARI_JOYSTICK +extern int atari_mouse_buttons; +#endif +static int atamouse_used = 0; + +static struct input_dev *atamouse_dev; + +static void atamouse_interrupt(char *buf) +{ + int buttons, dx, dy; + +/* ikbd_mouse_disable(); */ + + buttons = (buf[0] & 1) | ((buf[0] & 2) << 1); +#ifdef FIXED_ATARI_JOYSTICK + buttons |= atari_mouse_buttons & 2; + atari_mouse_buttons = buttons; +#endif +/* ikbd_mouse_rel_pos(); */ + + /* only relative events get here */ + dx = buf[1]; + dy = -buf[2]; + + input_report_rel(atamouse_dev, REL_X, dx); + input_report_rel(atamouse_dev, REL_Y, dy); + + input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x1); + input_report_key(atamouse_dev, BTN_MIDDLE, buttons & 0x2); + input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x4); + + input_sync(atamouse_dev); + + return; +} + +static int atamouse_open(struct input_dev *dev) +{ + if (atamouse_used++) + return 0; + +#ifdef FIXED_ATARI_JOYSTICK + atari_mouse_buttons = 0; +#endif + ikbd_mouse_y0_top(); + ikbd_mouse_thresh(mouse_threshold[0], mouse_threshold[1]); + ikbd_mouse_rel_pos(); + atari_input_mouse_interrupt_hook = atamouse_interrupt; + return 0; +} + +static void atamouse_close(struct input_dev *dev) +{ + if (!--atamouse_used) { + ikbd_mouse_disable(); + atari_mouse_interrupt_hook = NULL; + } +} + +static int __init atamouse_init(void) +{ + if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) + return -ENODEV; + + if (!(atamouse_dev = input_allocate_device())) + return -ENOMEM; + + if (!(atari_keyb_init())) + return -ENODEV; + + atamouse_dev->name = "Atari mouse"; + atamouse_dev->phys = "atamouse/input0"; + atamouse_dev->id.bustype = BUS_ATARI; + atamouse_dev->id.vendor = 0x0001; + atamouse_dev->id.product = 0x0002; + atamouse_dev->id.version = 0x0100; + + atamouse_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REL); + atamouse_dev->relbit[0] = BIT(REL_X) | BIT(REL_Y); + atamouse_dev->keybit[LONG(BTN_LEFT)] = BIT(BTN_LEFT) | BIT(BTN_MIDDLE) | BIT(BTN_RIGHT); + atamouse_dev->open = atamouse_open; + atamouse_dev->close = atamouse_close; + + input_register_device(atamouse_dev); + + printk(KERN_INFO "input: %s at keyboard ACIA\n", atamouse_dev->name); + return 0; +} + +static void __exit atamouse_exit(void) +{ + input_unregister_device(atamouse_dev); +} + +module_init(atamouse_init); +module_exit(atamouse_exit); diff --git a/include/asm-m68k/atarikb.h b/include/asm-m68k/atarikb.h index 18926058fde7..546e7da5804f 100644 --- a/include/asm-m68k/atarikb.h +++ b/include/asm-m68k/atarikb.h @@ -36,5 +36,11 @@ void ikbd_joystick_disable(void); extern void (*atari_MIDI_interrupt_hook) (void); /* Hook for mouse driver */ extern void (*atari_mouse_interrupt_hook) (char *); +/* Hook for keyboard inputdev driver */ +extern void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); +/* Hook for mouse inputdev driver */ +extern void (*atari_input_mouse_interrupt_hook) (char *); + +int atari_keyb_init(void); #endif /* _LINUX_ATARIKB_H */ diff --git a/include/linux/input.h 
b/include/linux/input.h index bde65c8a3519..327122548c4d 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -676,6 +676,7 @@ struct input_absinfo { #define BUS_I2C 0x18 #define BUS_HOST 0x19 #define BUS_GSC 0x1A +#define BUS_ATARI 0x1B /* * Values describing the status of a force-feedback effect -- cgit v1.2.3 From b3e2fd9cebcf4e82d0306fe7e796eeca5aac0614 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Tue, 1 May 2007 22:32:42 +0200 Subject: lockdep: Add missing disable/enable irq variant Add missing disable/enable irq variant Signed-off-by: Roman Zippel Signed-off-by: Geert Uytterhoeven Signed-off-by: Linus Torvalds --- include/linux/interrupt.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 838cf5a5bd7f..0319f665dd3f 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -185,10 +185,14 @@ static inline int disable_irq_wake(unsigned int irq) * validator need to define the methods below in their asm/irq.h * files, under an #ifdef CONFIG_LOCKDEP section. */ -# ifndef CONFIG_LOCKDEP +#ifndef CONFIG_LOCKDEP # define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq) +# define disable_irq_nosync_lockdep_irqsave(irq, flags) \ + disable_irq_nosync(irq) # define disable_irq_lockdep(irq) disable_irq(irq) # define enable_irq_lockdep(irq) enable_irq(irq) +# define enable_irq_lockdep_irqrestore(irq, flags) \ + enable_irq(irq) # endif #endif /* CONFIG_GENERIC_HARDIRQS */ -- cgit v1.2.3 From f877958879d413c37bfbeb7517614f3625cdea38 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 1 May 2007 22:32:53 +0200 Subject: NuBus header update Sync the nubus defines with the latest code in the mac68k repo. Some of these are needed for DP8390 driver update in the next patch. Signed-off-by: Finn Thain Signed-off-by: Geert Uytterhoeven Signed-off-by: Linus Torvalds --- drivers/net/macsonic.c | 2 +- include/linux/nubus.h | 126 +++++++++++++++++++++++++++++-------------------- 2 files changed, 76 insertions(+), 52 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index 8ca57a0a4c11..a3d24a39b5f1 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c @@ -402,7 +402,7 @@ int __init macsonic_ident(struct nubus_dev* ndev) ndev->dr_sw == NUBUS_DRSW_DAYNA) return MACSONIC_DAYNA; - if (ndev->dr_hw == NUBUS_DRHW_SONIC_LC && + if (ndev->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC && ndev->dr_sw == 0) { /* huh? 
*/ return MACSONIC_APPLE16; } diff --git a/include/linux/nubus.h b/include/linux/nubus.h index 870e66a96286..cdb3e9b8db54 100644 --- a/include/linux/nubus.h +++ b/include/linux/nubus.h @@ -28,18 +28,18 @@ enum nubus_category { }; enum nubus_type_network { - NUBUS_TYPE_ETHERNET = 0x0001, - NUBUS_TYPE_RS232 = 0x0002 + NUBUS_TYPE_ETHERNET = 0x0001, + NUBUS_TYPE_RS232 = 0x0002 }; enum nubus_type_display { - NUBUS_TYPE_VIDEO = 0x0001 + NUBUS_TYPE_VIDEO = 0x0001 }; enum nubus_type_cpu { - NUBUS_TYPE_68020 = 0x0003, - NUBUS_TYPE_68030 = 0x0004, - NUBUS_TYPE_68040 = 0x0005 + NUBUS_TYPE_68020 = 0x0003, + NUBUS_TYPE_68030 = 0x0004, + NUBUS_TYPE_68040 = 0x0005 }; /* Known tuples: (according to TattleTech and Slots) @@ -56,6 +56,7 @@ enum nubus_type_cpu { * * SONIC comm-slot/on-board and DuoDock Ethernet: <4,1,1,272> * SONIC LC-PDS Ethernet (Dayna, but like Apple 16-bit, sort of): <4,1,1,271> + * Apple SONIC LC-PDS Ethernet ("Apple Ethernet LC Twisted-Pair Card"): <4,1,0,281> * Sonic Systems Ethernet A-Series Card: <4,1,268,256> * Asante MacCon NuBus-A: <4,1,260,256> (alpha-1.0,1.1 revision) * ROM on the above card: <2,1,0,0> @@ -80,24 +81,26 @@ enum nubus_type_cpu { /* Add known DrSW values here */ enum nubus_drsw { /* NUBUS_CAT_DISPLAY */ - NUBUS_DRSW_APPLE = 0x0001, - NUBUS_DRSW_APPLE_HIRES = 0x0013, /* MacII HiRes card driver */ + NUBUS_DRSW_APPLE = 0x0001, + NUBUS_DRSW_APPLE_HIRES = 0x0013, /* MacII HiRes card driver */ /* NUBUS_CAT_NETWORK */ - NUBUS_DRSW_CABLETRON = 0x0001, - NUBUS_DRSW_SONIC_LC = 0x0001, - NUBUS_DRSW_KINETICS = 0x0103, - NUBUS_DRSW_ASANTE = 0x0104, - NUBUS_DRSW_DAYNA = 0x010b, - NUBUS_DRSW_FARALLON = 0x010c, - NUBUS_DRSW_APPLE_SN = 0x010f, - NUBUS_DRSW_DAYNA2 = 0x0115, + NUBUS_DRSW_3COM = 0x0000, + NUBUS_DRSW_CABLETRON = 0x0001, + NUBUS_DRSW_SONIC_LC = 0x0001, + NUBUS_DRSW_KINETICS = 0x0103, + NUBUS_DRSW_ASANTE = 0x0104, + NUBUS_DRSW_TECHWORKS = 0x0109, + NUBUS_DRSW_DAYNA = 0x010b, + NUBUS_DRSW_FARALLON = 0x010c, + NUBUS_DRSW_APPLE_SN = 0x010f, + NUBUS_DRSW_DAYNA2 = 0x0115, NUBUS_DRSW_FOCUS = 0x011a, NUBUS_DRSW_ASANTE_CS = 0x011d, /* use asante SMC9194 driver */ - NUBUS_DRSW_DAYNA_LC = 0x011e, + NUBUS_DRSW_DAYNA_LC = 0x011e, /* NUBUS_CAT_CPU */ - NUBUS_DRSW_NONE = 0x0000, + NUBUS_DRSW_NONE = 0x0000, }; /* DrHW: Uniquely identifies the hardware interface to a board (or at @@ -107,27 +110,48 @@ enum nubus_drsw { /* Add known DrHW values here */ enum nubus_drhw { /* NUBUS_CAT_DISPLAY */ - NUBUS_DRHW_APPLE_TFB = 0x0001, /* Toby frame buffer card */ - NUBUS_DRHW_APPLE_HRVC = 0x0013, /* Mac II High Res Video card */ - NUBUS_DRHW_APPLE_RBV1 = 0x0018, /* IIci RBV video */ - NUBUS_DRHW_APPLE_MDC = 0x0019, /* Macintosh Display Card */ - NUBUS_DRHW_APPLE_SONORA = 0x0022, /* Sonora built-in video */ - NUBUS_DRHW_APPLE_JET = 0x0029, /* Jet framebuffer (DuoDock) */ + NUBUS_DRHW_APPLE_TFB = 0x0001, /* Toby frame buffer card */ + NUBUS_DRHW_APPLE_WVC = 0x0006, /* Apple Workstation Video Card */ + NUBUS_DRHW_SIGMA_CLRMAX = 0x0007, /* Sigma Design ColorMax */ + NUBUS_DRHW_APPLE_SE30 = 0x0009, /* Apple SE/30 video */ + NUBUS_DRHW_APPLE_HRVC = 0x0013, /* Mac II High-Res Video Card */ + NUBUS_DRHW_APPLE_PVC = 0x0017, /* Mac II Portrait Video Card */ + NUBUS_DRHW_APPLE_RBV1 = 0x0018, /* IIci RBV video */ + NUBUS_DRHW_APPLE_MDC = 0x0019, /* Macintosh Display Card */ + NUBUS_DRHW_APPLE_SONORA = 0x0022, /* Sonora built-in video */ + NUBUS_DRHW_APPLE_24AC = 0x002b, /* Mac 24AC Video Card */ NUBUS_DRHW_APPLE_VALKYRIE = 0x002e, - NUBUS_DRHW_THUNDER24 = 0x02cb, /* SuperMac Thunder/24 */ + 
NUBUS_DRHW_APPLE_JET = 0x0029, /* Jet framebuffer (DuoDock) */ + NUBUS_DRHW_SMAC_GFX = 0x0105, /* SuperMac GFX */ + NUBUS_DRHW_RASTER_CB264 = 0x013B, /* RasterOps ColorBoard 264 */ + NUBUS_DRHW_MICRON_XCEED = 0x0146, /* Micron Exceed color */ + NUBUS_DRHW_RDIUS_GSC = 0x0153, /* Radius GS/C */ + NUBUS_DRHW_SMAC_SPEC8 = 0x017B, /* SuperMac Spectrum/8 */ + NUBUS_DRHW_SMAC_SPEC24 = 0x017C, /* SuperMac Spectrum/24 */ + NUBUS_DRHW_RASTER_CB364 = 0x026F, /* RasterOps ColorBoard 364 */ + NUBUS_DRHW_RDIUS_DCGX = 0x027C, /* Radius DirectColor/GX */ + NUBUS_DRHW_RDIUS_PC8 = 0x0291, /* Radius PrecisionColor 8 */ + NUBUS_DRHW_LAPIS_PCS8 = 0x0292, /* Lapis ProColorServer 8 */ + NUBUS_DRHW_RASTER_24LXI = 0x02A0, /* RasterOps 8/24 XLi */ + NUBUS_DRHW_RASTER_PBPGT = 0x02A5, /* RasterOps PaintBoard Prism GT */ + NUBUS_DRHW_EMACH_FSX = 0x02AE, /* E-Machines Futura SX */ + NUBUS_DRHW_SMAC_THUND24 = 0x02CB, /* SuperMac Thunder/24 */ + NUBUS_DRHW_RDIUS_PC24XP = 0x0406, /* Radius PrecisionColor 24Xp */ + NUBUS_DRHW_RDIUS_PC24X = 0x040A, /* Radius PrecisionColor 24X */ + NUBUS_DRHW_RDIUS_PC8XJ = 0x040B, /* Radius PrecisionColor 8XJ */ /* NUBUS_CAT_NETWORK */ - NUBUS_DRHW_INTERLAN = 0x0100, - NUBUS_DRHW_SMC9194 = 0x0101, - NUBUS_DRHW_KINETICS = 0x0106, - NUBUS_DRHW_CABLETRON = 0x0109, - NUBUS_DRHW_ASANTE_LC = 0x010f, - NUBUS_DRHW_SONIC = 0x0110, - NUBUS_DRHW_SONIC_NB = 0x0118, - NUBUS_DRHW_SONIC_LC = 0x0119, - - /* NUBUS_CAT_COMMUNICATIONS */ - NUBUS_DRHW_DOVEFAX = 0x0100, + NUBUS_DRHW_INTERLAN = 0x0100, + NUBUS_DRHW_SMC9194 = 0x0101, + NUBUS_DRHW_KINETICS = 0x0106, + NUBUS_DRHW_CABLETRON = 0x0109, + NUBUS_DRHW_ASANTE_LC = 0x010f, + NUBUS_DRHW_SONIC = 0x0110, + NUBUS_DRHW_TECHWORKS = 0x0112, + NUBUS_DRHW_APPLE_SONIC_NB = 0x0118, + NUBUS_DRHW_APPLE_SONIC_LC = 0x0119, + NUBUS_DRHW_FOCUS = 0x011c, + NUBUS_DRHW_SONNET = 0x011d, }; /* Resource IDs: These are the identifiers for the various weird and @@ -153,17 +177,17 @@ enum nubus_res_id { /* Category-specific resources. 
*/ enum nubus_board_res_id { - NUBUS_RESID_BOARDID = 0x0020, + NUBUS_RESID_BOARDID = 0x0020, NUBUS_RESID_PRAMINITDATA = 0x0021, - NUBUS_RESID_PRIMARYINIT = 0x0022, + NUBUS_RESID_PRIMARYINIT = 0x0022, NUBUS_RESID_TIMEOUTCONST = 0x0023, - NUBUS_RESID_VENDORINFO = 0x0024, - NUBUS_RESID_BOARDFLAGS = 0x0025, - NUBUS_RESID_SECONDINIT = 0x0026, + NUBUS_RESID_VENDORINFO = 0x0024, + NUBUS_RESID_BOARDFLAGS = 0x0025, + NUBUS_RESID_SECONDINIT = 0x0026, /* Not sure why Apple put these next two in here */ - NUBUS_RESID_VIDNAMES = 0x0041, - NUBUS_RESID_VIDMODES = 0x007e + NUBUS_RESID_VIDNAMES = 0x0041, + NUBUS_RESID_VIDMODES = 0x007e }; /* Fields within the vendor info directory */ @@ -185,13 +209,13 @@ enum nubus_cpu_res_id { }; enum nubus_display_res_id { - NUBUS_RESID_GAMMADIR = 0x0040, - NUBUS_RESID_FIRSTMODE = 0x0080, - NUBUS_RESID_SECONDMODE = 0x0081, - NUBUS_RESID_THIRDMODE = 0x0082, - NUBUS_RESID_FOURTHMODE = 0x0083, - NUBUS_RESID_FIFTHMODE = 0x0084, - NUBUS_RESID_SIXTHMODE = 0x0085 + NUBUS_RESID_GAMMADIR = 0x0040, + NUBUS_RESID_FIRSTMODE = 0x0080, + NUBUS_RESID_SECONDMODE = 0x0081, + NUBUS_RESID_THIRDMODE = 0x0082, + NUBUS_RESID_FOURTHMODE = 0x0083, + NUBUS_RESID_FIFTHMODE = 0x0084, + NUBUS_RESID_SIXTHMODE = 0x0085 }; struct nubus_dir @@ -214,7 +238,7 @@ struct nubus_board { struct nubus_board* next; struct nubus_dev* first_dev; - /* Only 9-E actually exist, though 0-8 are also theoretically + /* Only 9-E actually exist, though 0-8 are also theoretically possible, and 0 is a special case which represents the motherboard and onboard peripherals (Ethernet, video) */ int slot; -- cgit v1.2.3 From 254f9c5cd2d3b41e64f59df816630f7ca5548a8a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 1 May 2007 22:33:07 +0200 Subject: Convert non-highmem kmap_atomic() to static inline function Convert kmap_atomic() in the non-highmem case from a macro to a static inline function, for better type-checking and the ability to pass void pointers instead of struct page pointers. Signed-off-by: Geert Uytterhoeven Signed-off-by: Linus Torvalds --- include/linux/highmem.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 645d440807c2..7bab8eae2341 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -42,8 +42,14 @@ static inline void *kmap(struct page *page) #define kunmap(page) do { (void) (page); } while (0) -#define kmap_atomic(page, idx) \ - ({ pagefault_disable(); page_address(page); }) +#include + +static inline void *kmap_atomic(struct page *page, enum km_type idx) +{ + pagefault_disable(); + return page_address(page); +} + #define kunmap_atomic(addr, idx) do { pagefault_enable(); } while (0) #define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) -- cgit v1.2.3 From cf130cb102487723bdfc53e4abde1227a7563797 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 5 May 2007 11:42:03 -0700 Subject: [NETLINK]: Remove references to process ID People treating the *_pid fields in netlink as a process ID has caused endless confusion over the years. The fact that our own netlink.h does this only adds to the confusion. So here is a patch to change the comments to refer to it as the port ID which hopefully will make it clear what the purpose of the fields really is. Signed-off-by: Herbert Xu Signed-off-by: David S. 
Miller --- include/linux/netlink.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index f41688f56632..2e23353c28a5 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -31,7 +31,7 @@ struct sockaddr_nl { sa_family_t nl_family; /* AF_NETLINK */ unsigned short nl_pad; /* zero */ - __u32 nl_pid; /* process pid */ + __u32 nl_pid; /* port ID */ __u32 nl_groups; /* multicast groups mask */ }; @@ -41,7 +41,7 @@ struct nlmsghdr __u16 nlmsg_type; /* Message content */ __u16 nlmsg_flags; /* Additional flags */ __u32 nlmsg_seq; /* Sequence number */ - __u32 nlmsg_pid; /* Sending process PID */ + __u32 nlmsg_pid; /* Sending process port ID */ }; /* Flags values */ -- cgit v1.2.3 From a9de8ce0943e03b425be18561f51159fcceb873d Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Sat, 5 May 2007 11:43:04 -0700 Subject: [MAC80211]: Add generic include/linux/ieee80211.h Add generic IEEE 802.11 definitions. Signed-off-by: Jiri Benc Signed-off-by: John W. Linville Signed-off-by: David S. Miller --- include/linux/ieee80211.h | 342 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 342 insertions(+) create mode 100644 include/linux/ieee80211.h (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h new file mode 100644 index 000000000000..ecd61e8438a5 --- /dev/null +++ b/include/linux/ieee80211.h @@ -0,0 +1,342 @@ +/* + * IEEE 802.11 defines + * + * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen + * + * Copyright (c) 2002-2003, Jouni Malinen + * Copyright (c) 2005, Devicescape Software, Inc. + * Copyright (c) 2006, Michael Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
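
For illustration only, and not taken from any patch in this series: with the non-highmem kmap_atomic() now a static inline (see the highmem.h change above), a typical caller still uses the two-argument form with a km_type slot such as KM_USER0. The helper below is a hypothetical sketch of that usage, nothing more.

#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch: copy a buffer into a page through a temporary atomic mapping.
 * On non-highmem configurations this now goes through the static inline
 * above, so the struct page argument gets real type checking. */
static void copy_buf_to_page(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(vaddr, src, len);
	kunmap_atomic(vaddr, KM_USER0);
}
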
+ */ + +#ifndef IEEE80211_H +#define IEEE80211_H + +#include + +#define FCS_LEN 4 + +#define IEEE80211_FCTL_VERS 0x0003 +#define IEEE80211_FCTL_FTYPE 0x000c +#define IEEE80211_FCTL_STYPE 0x00f0 +#define IEEE80211_FCTL_TODS 0x0100 +#define IEEE80211_FCTL_FROMDS 0x0200 +#define IEEE80211_FCTL_MOREFRAGS 0x0400 +#define IEEE80211_FCTL_RETRY 0x0800 +#define IEEE80211_FCTL_PM 0x1000 +#define IEEE80211_FCTL_MOREDATA 0x2000 +#define IEEE80211_FCTL_PROTECTED 0x4000 +#define IEEE80211_FCTL_ORDER 0x8000 + +#define IEEE80211_SCTL_FRAG 0x000F +#define IEEE80211_SCTL_SEQ 0xFFF0 + +#define IEEE80211_FTYPE_MGMT 0x0000 +#define IEEE80211_FTYPE_CTL 0x0004 +#define IEEE80211_FTYPE_DATA 0x0008 + +/* management */ +#define IEEE80211_STYPE_ASSOC_REQ 0x0000 +#define IEEE80211_STYPE_ASSOC_RESP 0x0010 +#define IEEE80211_STYPE_REASSOC_REQ 0x0020 +#define IEEE80211_STYPE_REASSOC_RESP 0x0030 +#define IEEE80211_STYPE_PROBE_REQ 0x0040 +#define IEEE80211_STYPE_PROBE_RESP 0x0050 +#define IEEE80211_STYPE_BEACON 0x0080 +#define IEEE80211_STYPE_ATIM 0x0090 +#define IEEE80211_STYPE_DISASSOC 0x00A0 +#define IEEE80211_STYPE_AUTH 0x00B0 +#define IEEE80211_STYPE_DEAUTH 0x00C0 +#define IEEE80211_STYPE_ACTION 0x00D0 + +/* control */ +#define IEEE80211_STYPE_PSPOLL 0x00A0 +#define IEEE80211_STYPE_RTS 0x00B0 +#define IEEE80211_STYPE_CTS 0x00C0 +#define IEEE80211_STYPE_ACK 0x00D0 +#define IEEE80211_STYPE_CFEND 0x00E0 +#define IEEE80211_STYPE_CFENDACK 0x00F0 + +/* data */ +#define IEEE80211_STYPE_DATA 0x0000 +#define IEEE80211_STYPE_DATA_CFACK 0x0010 +#define IEEE80211_STYPE_DATA_CFPOLL 0x0020 +#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030 +#define IEEE80211_STYPE_NULLFUNC 0x0040 +#define IEEE80211_STYPE_CFACK 0x0050 +#define IEEE80211_STYPE_CFPOLL 0x0060 +#define IEEE80211_STYPE_CFACKPOLL 0x0070 +#define IEEE80211_STYPE_QOS_DATA 0x0080 +#define IEEE80211_STYPE_QOS_DATA_CFACK 0x0090 +#define IEEE80211_STYPE_QOS_DATA_CFPOLL 0x00A0 +#define IEEE80211_STYPE_QOS_DATA_CFACKPOLL 0x00B0 +#define IEEE80211_STYPE_QOS_NULLFUNC 0x00C0 +#define IEEE80211_STYPE_QOS_CFACK 0x00D0 +#define IEEE80211_STYPE_QOS_CFPOLL 0x00E0 +#define IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0 + + +/* miscellaneous IEEE 802.11 constants */ +#define IEEE80211_MAX_FRAG_THRESHOLD 2346 +#define IEEE80211_MAX_RTS_THRESHOLD 2347 +#define IEEE80211_MAX_AID 2007 +#define IEEE80211_MAX_TIM_LEN 251 +#define IEEE80211_MAX_DATA_LEN 2304 +/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section + 6.2.1.1.2. + + The figure in section 7.1.2 suggests a body size of up to 2312 + bytes is allowed, which is a bit confusing, I suspect this + represents the 2304 bytes of real data, plus a possible 8 bytes of + WEP IV and ICV. 
(this interpretation suggested by Ramiro Barreiro) */ + +#define IEEE80211_MAX_SSID_LEN 32 + +struct ieee80211_hdr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + __le16 seq_ctrl; + u8 addr4[6]; +} __attribute__ ((packed)); + + +struct ieee80211_mgmt { + __le16 frame_control; + __le16 duration; + u8 da[6]; + u8 sa[6]; + u8 bssid[6]; + __le16 seq_ctrl; + union { + struct { + __le16 auth_alg; + __le16 auth_transaction; + __le16 status_code; + /* possibly followed by Challenge text */ + u8 variable[0]; + } __attribute__ ((packed)) auth; + struct { + __le16 reason_code; + } __attribute__ ((packed)) deauth; + struct { + __le16 capab_info; + __le16 listen_interval; + /* followed by SSID and Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) assoc_req; + struct { + __le16 capab_info; + __le16 status_code; + __le16 aid; + /* followed by Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) assoc_resp, reassoc_resp; + struct { + __le16 capab_info; + __le16 listen_interval; + u8 current_ap[6]; + /* followed by SSID and Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) reassoc_req; + struct { + __le16 reason_code; + } __attribute__ ((packed)) disassoc; + struct { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + /* followed by some of SSID, Supported rates, + * FH Params, DS Params, CF Params, IBSS Params, TIM */ + u8 variable[0]; + } __attribute__ ((packed)) beacon; + struct { + /* only variable items: SSID, Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) probe_req; + struct { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + /* followed by some of SSID, Supported rates, + * FH Params, DS Params, CF Params, IBSS Params */ + u8 variable[0]; + } __attribute__ ((packed)) probe_resp; + struct { + u8 category; + union { + struct { + u8 action_code; + u8 dialog_token; + u8 status_code; + u8 variable[0]; + } __attribute__ ((packed)) wme_action; + struct{ + u8 action_code; + u8 element_id; + u8 length; + u8 switch_mode; + u8 new_chan; + u8 switch_count; + } __attribute__((packed)) chan_switch; + } u; + } __attribute__ ((packed)) action; + } u; +} __attribute__ ((packed)); + + +/* Control frames */ +struct ieee80211_rts { + __le16 frame_control; + __le16 duration; + u8 ra[6]; + u8 ta[6]; +} __attribute__ ((packed)); + +struct ieee80211_cts { + __le16 frame_control; + __le16 duration; + u8 ra[6]; +} __attribute__ ((packed)); + + +/* Authentication algorithms */ +#define WLAN_AUTH_OPEN 0 +#define WLAN_AUTH_SHARED_KEY 1 +#define WLAN_AUTH_FAST_BSS_TRANSITION 2 +#define WLAN_AUTH_LEAP 128 + +#define WLAN_AUTH_CHALLENGE_LEN 128 + +#define WLAN_CAPABILITY_ESS (1<<0) +#define WLAN_CAPABILITY_IBSS (1<<1) +#define WLAN_CAPABILITY_CF_POLLABLE (1<<2) +#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3) +#define WLAN_CAPABILITY_PRIVACY (1<<4) +#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5) +#define WLAN_CAPABILITY_PBCC (1<<6) +#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7) +/* 802.11h */ +#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8) +#define WLAN_CAPABILITY_QOS (1<<9) +#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) +#define WLAN_CAPABILITY_DSSS_OFDM (1<<13) + +/* Status codes */ +enum ieee80211_statuscode { + WLAN_STATUS_SUCCESS = 0, + WLAN_STATUS_UNSPECIFIED_FAILURE = 1, + WLAN_STATUS_CAPS_UNSUPPORTED = 10, + WLAN_STATUS_REASSOC_NO_ASSOC = 11, + WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12, + WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13, + WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14, + 
WLAN_STATUS_CHALLENGE_FAIL = 15, + WLAN_STATUS_AUTH_TIMEOUT = 16, + WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17, + WLAN_STATUS_ASSOC_DENIED_RATES = 18, + /* 802.11b */ + WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19, + WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20, + WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21, + /* 802.11h */ + WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22, + WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23, + WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24, + /* 802.11g */ + WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25, + WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26, + /* 802.11i */ + WLAN_STATUS_INVALID_IE = 40, + WLAN_STATUS_INVALID_GROUP_CIPHER = 41, + WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42, + WLAN_STATUS_INVALID_AKMP = 43, + WLAN_STATUS_UNSUPP_RSN_VERSION = 44, + WLAN_STATUS_INVALID_RSN_IE_CAP = 45, + WLAN_STATUS_CIPHER_SUITE_REJECTED = 46, +}; + + +/* Reason codes */ +enum ieee80211_reasoncode { + WLAN_REASON_UNSPECIFIED = 1, + WLAN_REASON_PREV_AUTH_NOT_VALID = 2, + WLAN_REASON_DEAUTH_LEAVING = 3, + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4, + WLAN_REASON_DISASSOC_AP_BUSY = 5, + WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6, + WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7, + WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8, + WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9, + /* 802.11h */ + WLAN_REASON_DISASSOC_BAD_POWER = 10, + WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11, + /* 802.11i */ + WLAN_REASON_INVALID_IE = 13, + WLAN_REASON_MIC_FAILURE = 14, + WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15, + WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16, + WLAN_REASON_IE_DIFFERENT = 17, + WLAN_REASON_INVALID_GROUP_CIPHER = 18, + WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19, + WLAN_REASON_INVALID_AKMP = 20, + WLAN_REASON_UNSUPP_RSN_VERSION = 21, + WLAN_REASON_INVALID_RSN_IE_CAP = 22, + WLAN_REASON_IEEE8021X_FAILED = 23, + WLAN_REASON_CIPHER_SUITE_REJECTED = 24, +}; + + +/* Information Element IDs */ +enum ieee80211_eid { + WLAN_EID_SSID = 0, + WLAN_EID_SUPP_RATES = 1, + WLAN_EID_FH_PARAMS = 2, + WLAN_EID_DS_PARAMS = 3, + WLAN_EID_CF_PARAMS = 4, + WLAN_EID_TIM = 5, + WLAN_EID_IBSS_PARAMS = 6, + WLAN_EID_CHALLENGE = 16, + /* 802.11d */ + WLAN_EID_COUNTRY = 7, + WLAN_EID_HP_PARAMS = 8, + WLAN_EID_HP_TABLE = 9, + WLAN_EID_REQUEST = 10, + /* 802.11h */ + WLAN_EID_PWR_CONSTRAINT = 32, + WLAN_EID_PWR_CAPABILITY = 33, + WLAN_EID_TPC_REQUEST = 34, + WLAN_EID_TPC_REPORT = 35, + WLAN_EID_SUPPORTED_CHANNELS = 36, + WLAN_EID_CHANNEL_SWITCH = 37, + WLAN_EID_MEASURE_REQUEST = 38, + WLAN_EID_MEASURE_REPORT = 39, + WLAN_EID_QUIET = 40, + WLAN_EID_IBSS_DFS = 41, + /* 802.11g */ + WLAN_EID_ERP_INFO = 42, + WLAN_EID_EXT_SUPP_RATES = 50, + /* 802.11i */ + WLAN_EID_RSN = 48, + WLAN_EID_WPA = 221, + WLAN_EID_GENERIC = 221, + WLAN_EID_VENDOR_SPECIFIC = 221, + WLAN_EID_QOS_PARAMETER = 222 +}; + +/* cipher suite selectors */ +#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00 +#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01 +#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02 +/* reserved: 0x000FAC03 */ +#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04 +#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 + +#define WLAN_MAX_KEY_LEN 32 + +#endif /* IEEE80211_H */ -- cgit v1.2.3 From e93df705af1992dbf5956a8c80fcb9987bc595c0 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sat, 5 May 2007 22:03:49 +0200 Subject: sl82c105: rework PIO support (take 2) Get rid of the 'pio_speed' member of 'ide_drive_t' that was only used by this driver by storing the PIO mode timings in the 'drive_data' instead -- this allows us to greatly simplify the process of "reloading" of the chip's timing register and 
do it right in sl82c150_dma_off_quietly() and to get rid of two extra arguments to config_for_pio() -- which got renamed to sl82c105_tune_pio() and now returns a PIO mode selected, with ide_config_drive_speed() call moved into the tuneproc() method, now called sl82c105_tune_drive() with the code to set drive's 'io_32bit' and 'unmask' flags in its turn moved to its proper place in the init_hwif() method. Also, while at it, rename get_timing_sl82c105() into get_pio_timings() and get rid of the code in it clamping cycle counts to 32 which was both incorrect and never executed anyway... Signed-off-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/pci/sl82c105.c | 99 +++++++++++++++++++--------------------------- include/linux/ide.h | 1 - 2 files changed, 41 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c index 3a8a76fc78c7..ee6dd45a9ba7 100644 --- a/drivers/ide/pci/sl82c105.c +++ b/drivers/ide/pci/sl82c105.c @@ -11,6 +11,8 @@ * Merge in Russell's HW workarounds, fix various problems * with the timing registers setup. * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org + * + * Copyright (C) 2006-2007 MontaVista Software, Inc. */ #include @@ -47,25 +49,19 @@ #define CTRL_P0EN (1 << 0) /* - * Convert a PIO mode and cycle time to the required on/off - * times for the interface. This has protection against run-away - * timings. + * Convert a PIO mode and cycle time to the required on/off times + * for the interface. This has protection against runaway timings. */ -static unsigned int get_timing_sl82c105(ide_pio_data_t *p) +static unsigned int get_pio_timings(ide_pio_data_t *p) { - unsigned int cmd_on; - unsigned int cmd_off; + unsigned int cmd_on, cmd_off; - cmd_on = (ide_pio_timings[p->pio_mode].active_time + 29) / 30; + cmd_on = (ide_pio_timings[p->pio_mode].active_time + 29) / 30; cmd_off = (p->cycle_time - 30 * cmd_on + 29) / 30; - if (cmd_on > 32) - cmd_on = 32; if (cmd_on == 0) cmd_on = 1; - if (cmd_off > 32) - cmd_off = 32; if (cmd_off == 0) cmd_off = 1; @@ -73,44 +69,34 @@ static unsigned int get_timing_sl82c105(ide_pio_data_t *p) } /* - * Configure the drive and chipset for PIO + * Configure the chipset for PIO mode. */ -static void config_for_pio(ide_drive_t *drive, int pio, int report, int chipset_only) +static u8 sl82c105_tune_pio(ide_drive_t *drive, u8 pio) { - ide_hwif_t *hwif = HWIF(drive); - struct pci_dev *dev = hwif->pci_dev; + struct pci_dev *dev = HWIF(drive)->pci_dev; + int reg = 0x44 + drive->dn * 4; ide_pio_data_t p; - u16 drv_ctrl = 0x909; - unsigned int xfer_mode, reg; + u16 drv_ctrl; - DBG(("config_for_pio(drive:%s, pio:%d, report:%d, chipset_only:%d)\n", - drive->name, pio, report, chipset_only)); - - reg = (hwif->channel ? 0x4c : 0x44) + (drive->select.b.unit ? 4 : 0); + DBG(("sl82c105_tune_pio(drive:%s, pio:%u)\n", drive->name, pio)); pio = ide_get_best_pio_mode(drive, pio, 5, &p); - xfer_mode = XFER_PIO_0 + pio; + drive->drive_data = drv_ctrl = get_pio_timings(&p); - if (chipset_only || ide_config_drive_speed(drive, xfer_mode) == 0) { - drv_ctrl = get_timing_sl82c105(&p); - drive->pio_speed = xfer_mode; - } else - drive->pio_speed = XFER_PIO_0; - - if (drive->using_dma == 0) { + if (!drive->using_dma) { /* * If we are actually using MW DMA, then we can not * reprogram the interface drive control register. 
*/ - pci_write_config_word(dev, reg, drv_ctrl); - pci_read_config_word(dev, reg, &drv_ctrl); - - if (report) { - printk("%s: selected %s (%dns) (%04X)\n", drive->name, - ide_xfer_verbose(xfer_mode), p.cycle_time, drv_ctrl); - } + pci_write_config_word(dev, reg, drv_ctrl); + pci_read_config_word (dev, reg, &drv_ctrl); } + + printk(KERN_DEBUG "%s: selected %s (%dns) (%04X)\n", drive->name, + ide_xfer_verbose(pio + XFER_PIO_0), p.cycle_time, drv_ctrl); + + return pio; } /* @@ -267,14 +253,14 @@ static int sl82c105_ide_dma_on (ide_drive_t *drive) static void sl82c105_dma_off_quietly(ide_drive_t *drive) { - u8 speed = XFER_PIO_0; + struct pci_dev *dev = HWIF(drive)->pci_dev; + int reg = 0x44 + drive->dn * 4; DBG(("sl82c105_dma_off_quietly(drive:%s)\n", drive->name)); + pci_write_config_word(dev, reg, drive->drive_data); + ide_dma_off_quietly(drive); - if (drive->pio_speed) - speed = drive->pio_speed - XFER_PIO_0; - config_for_pio(drive, speed, 0, 1); } /* @@ -323,18 +309,12 @@ static void sl82c105_resetproc(ide_drive_t *drive) * We only deal with PIO mode here - DMA mode 'using_dma' is not * initialised at the point that this function is called. */ -static void tune_sl82c105(ide_drive_t *drive, u8 pio) +static void sl82c105_tune_drive(ide_drive_t *drive, u8 pio) { - DBG(("tune_sl82c105(drive:%s)\n", drive->name)); + DBG(("sl82c105_tune_drive(drive:%s, pio:%u)\n", drive->name, pio)); - config_for_pio(drive, pio, 1, 0); - - /* - * We support 32-bit I/O on this interface, and it - * doesn't have problems with interrupts. - */ - drive->io_32bit = 1; - drive->unmask = 1; + pio = sl82c105_tune_pio(drive, pio); + (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio); } /* @@ -401,19 +381,22 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif) DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index)); - hwif->tuneproc = tune_sl82c105; - hwif->selectproc = sl82c105_selectproc; - hwif->resetproc = sl82c105_resetproc; + hwif->tuneproc = &sl82c105_tune_drive; + hwif->selectproc = &sl82c105_selectproc; + hwif->resetproc = &sl82c105_resetproc; + + /* + * We support 32-bit I/O on this interface, and + * it doesn't have problems with interrupts. + */ + hwif->drives[0].io_32bit = hwif->drives[1].io_32bit = 1; + hwif->drives[0].unmask = hwif->drives[1].unmask = 1; /* - * Default to PIO 0 for fallback unless tuned otherwise. 
* We always autotune PIO, this is done before DMA is checked, * so there's no risk of accidentally disabling DMA */ - hwif->drives[0].pio_speed = XFER_PIO_0; - hwif->drives[0].autotune = 1; - hwif->drives[1].pio_speed = XFER_PIO_0; - hwif->drives[1].autotune = 1; + hwif->drives[0].autotune = hwif->drives[1].autotune = 1; hwif->atapi_dma = 0; hwif->mwdma_mask = 0; diff --git a/include/linux/ide.h b/include/linux/ide.h index d3bbc7188b6a..418dfb5adadd 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -613,7 +613,6 @@ typedef struct ide_drive_s { u8 quirk_list; /* considered quirky, set for a specific host */ u8 init_speed; /* transfer rate set at boot */ - u8 pio_speed; /* unused by core, used by some drivers for fallback from DMA */ u8 current_speed; /* current transfer rate set */ u8 desired_speed; /* desired transfer rate set */ u8 dn; /* now wide spread use */ -- cgit v1.2.3 From 989485c190cc6a64e5a21d98ef2d752df1db8c27 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Sat, 5 May 2007 22:05:11 +0100 Subject: Fix nfsroot build CC fs/nfs/nfsroot.o fs/nfs/nfsroot.c:131: error: tokens causes a section type conflict make[2]: *** [fs/nfs/nfsroot.o] Error 1 This is due to mixing const and non-const content in the same section which halfway recent gccs absolutely hate. Fixed by dropping the const. Signed-off-by: Ralf Baechle Signed-off-by: Linus Torvalds --- include/linux/parser.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/parser.h b/include/linux/parser.h index 86676f600992..26b2bdfcaf06 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -14,7 +14,7 @@ struct match_token { const char *pattern; }; -typedef const struct match_token match_table_t[]; +typedef struct match_token match_table_t[]; /* Maximum number of arguments that match_token will find in a pattern */ enum {MAX_OPT_ARGS = 3}; -- cgit v1.2.3 From 9d6a8c5c213e34c475e72b245a8eb709258e968c Mon Sep 17 00:00:00 2001 From: Marc Eshel Date: Wed, 21 Feb 2007 00:55:18 -0500 Subject: locks: give posix_test_lock same interface as ->lock posix_test_lock() and ->lock() do the same job but have gratuitously different interfaces. Modify posix_test_lock() so the two agree, simplifying some code in the process. Signed-off-by: Marc Eshel Signed-off-by: "J. 
Bruce Fields" --- fs/fuse/file.c | 3 +-- fs/gfs2/locking/nolock/main.c | 8 +------- fs/gfs2/ops_file.c | 7 +------ fs/lockd/svclock.c | 13 ++++++++----- fs/locks.c | 37 ++++++++++++++++--------------------- fs/nfs/file.c | 7 +------ fs/nfsd/nfs4state.c | 6 +++--- include/linux/fs.h | 2 +- 8 files changed, 32 insertions(+), 51 deletions(-) (limited to 'include/linux') diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 2fd06927e851..acfad65a6e8e 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -738,8 +738,7 @@ static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) if (cmd == F_GETLK) { if (fc->no_lock) { - if (!posix_test_lock(file, fl, fl)) - fl->fl_type = F_UNLCK; + posix_test_lock(file, fl); err = 0; } else err = fuse_getlk(file, fl); diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c index acfbc941f319..5cc1dfa7944a 100644 --- a/fs/gfs2/locking/nolock/main.c +++ b/fs/gfs2/locking/nolock/main.c @@ -164,13 +164,7 @@ static void nolock_unhold_lvb(void *lock, char *lvb) static int nolock_plock_get(void *lockspace, struct lm_lockname *name, struct file *file, struct file_lock *fl) { - struct file_lock tmp; - int ret; - - ret = posix_test_lock(file, fl, &tmp); - fl->fl_type = F_UNLCK; - if (ret) - memcpy(fl, &tmp, sizeof(struct file_lock)); + posix_test_lock(file, fl); return 0; } diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c index b50180e22779..48b248d7c823 100644 --- a/fs/gfs2/ops_file.c +++ b/fs/gfs2/ops_file.c @@ -513,12 +513,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) if (sdp->sd_args.ar_localflocks) { if (IS_GETLK(cmd)) { - struct file_lock tmp; - int ret; - ret = posix_test_lock(file, fl, &tmp); - fl->fl_type = F_UNLCK; - if (ret) - memcpy(fl, &tmp, sizeof(struct file_lock)); + posix_test_lock(file, fl); return 0; } else { return posix_lock_file_wait(file, fl); diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index cf51f849e76c..97b0160ef10f 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -426,15 +426,18 @@ nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); - if (posix_test_lock(file->f_file, &lock->fl, &conflock->fl)) { + if (posix_test_lock(file->f_file, &lock->fl)) { dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", - conflock->fl.fl_type, - (long long)conflock->fl.fl_start, - (long long)conflock->fl.fl_end); + lock->fl.fl_type, + (long long)lock->fl.fl_start, + (long long)lock->fl.fl_end); conflock->caller = "somehost"; /* FIXME */ conflock->len = strlen(conflock->caller); conflock->oh.len = 0; /* don't return OH info */ - conflock->svid = conflock->fl.fl_pid; + conflock->svid = lock->fl.fl_pid; + conflock->fl.fl_type = lock->fl.fl_type; + conflock->fl.fl_start = lock->fl.fl_start; + conflock->fl.fl_end = lock->fl.fl_end; return nlm_lck_denied; } diff --git a/fs/locks.c b/fs/locks.c index b07e6e6f819b..749a0dc7cd4b 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -666,11 +666,11 @@ static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *w } int -posix_test_lock(struct file *filp, struct file_lock *fl, - struct file_lock *conflock) +posix_test_lock(struct file *filp, struct file_lock *fl) { struct file_lock *cfl; + fl->fl_type = F_UNLCK; lock_kernel(); for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) { if (!IS_POSIX(cfl)) @@ -679,7 +679,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl, break; } if (cfl) { - __locks_copy_lock(conflock, cfl); + 
__locks_copy_lock(fl, cfl); unlock_kernel(); return 1; } @@ -1648,7 +1648,7 @@ static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl) */ int fcntl_getlk(struct file *filp, struct flock __user *l) { - struct file_lock *fl, cfl, file_lock; + struct file_lock file_lock; struct flock flock; int error; @@ -1667,15 +1667,12 @@ int fcntl_getlk(struct file *filp, struct flock __user *l) error = filp->f_op->lock(filp, F_GETLK, &file_lock); if (error < 0) goto out; - else - fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); - } else { - fl = (posix_test_lock(filp, &file_lock, &cfl) ? &cfl : NULL); - } + } else + posix_test_lock(filp, &file_lock); - flock.l_type = F_UNLCK; - if (fl != NULL) { - error = posix_lock_to_flock(&flock, fl); + flock.l_type = file_lock.fl_type; + if (file_lock.fl_type != F_UNLCK) { + error = posix_lock_to_flock(&flock, &file_lock); if (error) goto out; } @@ -1785,7 +1782,7 @@ out: */ int fcntl_getlk64(struct file *filp, struct flock64 __user *l) { - struct file_lock *fl, cfl, file_lock; + struct file_lock file_lock; struct flock64 flock; int error; @@ -1804,15 +1801,13 @@ int fcntl_getlk64(struct file *filp, struct flock64 __user *l) error = filp->f_op->lock(filp, F_GETLK, &file_lock); if (error < 0) goto out; - else - fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); - } else { - fl = (posix_test_lock(filp, &file_lock, &cfl) ? &cfl : NULL); - } + } else + posix_test_lock(filp, &file_lock); - flock.l_type = F_UNLCK; - if (fl != NULL) - posix_lock_to_flock64(&flock, fl); + flock.l_type = file_lock.fl_type; + if (file_lock.fl_type != F_UNLCK) + posix_lock_to_flock64(&flock, &file_lock); + error = -EFAULT; if (!copy_to_user(l, &flock, sizeof(flock))) error = 0; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 8e66b5a2d490..5eaee6dd040b 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -391,17 +391,12 @@ out_swapfile: static int do_getlk(struct file *filp, int cmd, struct file_lock *fl) { - struct file_lock cfl; struct inode *inode = filp->f_mapping->host; int status = 0; lock_kernel(); /* Try local locking first */ - if (posix_test_lock(filp, fl, &cfl)) { - fl->fl_start = cfl.fl_start; - fl->fl_end = cfl.fl_end; - fl->fl_type = cfl.fl_type; - fl->fl_pid = cfl.fl_pid; + if (posix_test_lock(filp, fl)) { goto out; } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index af360705e551..e42c7a0eb6fa 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2813,7 +2813,6 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct inode *inode; struct file file; struct file_lock file_lock; - struct file_lock conflock; __be32 status; if (nfs4_in_grace()) @@ -2878,9 +2877,10 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, file.f_path.dentry = cstate->current_fh.fh_dentry; status = nfs_ok; - if (posix_test_lock(&file, &file_lock, &conflock)) { + posix_test_lock(&file, &file_lock); + if (file_lock.fl_type != F_UNLCK) { status = nfserr_denied; - nfs4_set_lock_denied(&conflock, &lockt->lt_denied); + nfs4_set_lock_denied(&file_lock, &lockt->lt_denied); } out: nfs4_unlock_state(); diff --git a/include/linux/fs.h b/include/linux/fs.h index 86ec3f4a7da6..9e1ddffe3884 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -851,7 +851,7 @@ extern void locks_init_lock(struct file_lock *); extern void locks_copy_lock(struct file_lock *, struct file_lock *); extern void locks_remove_posix(struct file *, fl_owner_t); extern void locks_remove_flock(struct file *); -extern int posix_test_lock(struct 
file *, struct file_lock *, struct file_lock *); +extern int posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *); extern int posix_lock_file_wait(struct file *, struct file_lock *); -- cgit v1.2.3 From 3ee17abd14c728d4e0ca7a991c58f2250cb091af Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Wed, 21 Feb 2007 00:58:50 -0500 Subject: locks: factor out generic/filesystem switch from test_lock Factor out the code that switches between generic and filesystem-specific lock methods; eventually we want to call this from lock managers (lockd and nfsd) too; currently they only call the generic methods. This patch does that for test_lock. Note that this hasn't been necessary until recently, because the few filesystems that define ->lock() (nfs, cifs...) aren't exportable via NFS. However GFS (and, in the future, other cluster filesystems) need to implement their own locking to get cluster-coherent locking, and also want to be able to export locking to NFS (lockd and NFSv4). So we accomplish this by factoring out code such as this and exporting it for the use of lockd and nfsd. Signed-off-by: "J. Bruce Fields" --- fs/locks.c | 38 +++++++++++++++++++++++++------------- include/linux/fs.h | 1 + 2 files changed, 26 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/locks.c b/fs/locks.c index 749a0dc7cd4b..a31648e3ec1b 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1611,6 +1611,24 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd) return error; } +/** + * vfs_test_lock - test file byte range lock + * @filp: The file to test lock for + * @fl: The lock to test + * @conf: Place to return a copy of the conflicting lock, if found + * + * Returns -ERRNO on failure. Indicates presence of conflicting lock by + * setting conf->fl_type to something other than F_UNLCK. 
+ */ +int vfs_test_lock(struct file *filp, struct file_lock *fl) +{ + if (filp->f_op && filp->f_op->lock) + return filp->f_op->lock(filp, F_GETLK, fl); + posix_test_lock(filp, fl); + return 0; +} +EXPORT_SYMBOL_GPL(vfs_test_lock); + static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl) { flock->l_pid = fl->fl_pid; @@ -1663,12 +1681,9 @@ int fcntl_getlk(struct file *filp, struct flock __user *l) if (error) goto out; - if (filp->f_op && filp->f_op->lock) { - error = filp->f_op->lock(filp, F_GETLK, &file_lock); - if (error < 0) - goto out; - } else - posix_test_lock(filp, &file_lock); + error = vfs_test_lock(filp, &file_lock); + if (error) + goto out; flock.l_type = file_lock.fl_type; if (file_lock.fl_type != F_UNLCK) { @@ -1797,13 +1812,10 @@ int fcntl_getlk64(struct file *filp, struct flock64 __user *l) if (error) goto out; - if (filp->f_op && filp->f_op->lock) { - error = filp->f_op->lock(filp, F_GETLK, &file_lock); - if (error < 0) - goto out; - } else - posix_test_lock(filp, &file_lock); - + error = vfs_test_lock(filp, &file_lock); + if (error) + goto out; + flock.l_type = file_lock.fl_type; if (file_lock.fl_type != F_UNLCK) posix_lock_to_flock64(&flock, &file_lock); diff --git a/include/linux/fs.h b/include/linux/fs.h index 9e1ddffe3884..2a2a43988f50 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -856,6 +856,7 @@ extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_l extern int posix_lock_file(struct file *, struct file_lock *); extern int posix_lock_file_wait(struct file *, struct file_lock *); extern int posix_unblock_lock(struct file *, struct file_lock *); +extern int vfs_test_lock(struct file *, struct file_lock *); extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags); extern void lease_get_mtime(struct inode *, struct timespec *time); -- cgit v1.2.3 From 7723ec9777d9832849b76475b1a21a2872a40d20 Mon Sep 17 00:00:00 2001 From: Marc Eshel Date: Thu, 18 Jan 2007 15:08:55 -0500 Subject: locks: factor out generic/filesystem switch from setlock code Factor out the code that switches between generic and filesystem-specific lock methods; eventually we want to call this from lock managers (lockd and nfsd) too; currently they only call the generic methods. This patch does that for all the setlk code. Signed-off-by: Marc Eshel Signed-off-by: "J. Bruce Fields" --- fs/locks.c | 68 +++++++++++++++++++++++++++++------------------------- include/linux/fs.h | 1 + 2 files changed, 37 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/fs/locks.c b/fs/locks.c index a31648e3ec1b..f4fd1515b6e2 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1698,6 +1698,21 @@ out: return error; } +/** + * vfs_lock_file - file byte range lock + * @filp: The file to apply the lock to + * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.) + * @fl: The lock to be applied + */ +int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl) +{ + if (filp->f_op && filp->f_op->lock) + return filp->f_op->lock(filp, cmd, fl); + else + return posix_lock_file(filp, fl); +} +EXPORT_SYMBOL_GPL(vfs_lock_file); + /* Apply the lock described by l to an open file descriptor. * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 
*/ @@ -1760,21 +1775,17 @@ again: if (error) goto out; - if (filp->f_op && filp->f_op->lock != NULL) - error = filp->f_op->lock(filp, cmd, file_lock); - else { - for (;;) { - error = posix_lock_file(filp, file_lock); - if (error != -EAGAIN || cmd == F_SETLK) - break; - error = wait_event_interruptible(file_lock->fl_wait, - !file_lock->fl_next); - if (!error) - continue; - - locks_delete_block(file_lock); + for (;;) { + error = vfs_lock_file(filp, cmd, file_lock); + if (error != -EAGAIN || cmd == F_SETLK) break; - } + error = wait_event_interruptible(file_lock->fl_wait, + !file_lock->fl_next); + if (!error) + continue; + + locks_delete_block(file_lock); + break; } /* @@ -1890,21 +1901,17 @@ again: if (error) goto out; - if (filp->f_op && filp->f_op->lock != NULL) - error = filp->f_op->lock(filp, cmd, file_lock); - else { - for (;;) { - error = posix_lock_file(filp, file_lock); - if (error != -EAGAIN || cmd == F_SETLK64) - break; - error = wait_event_interruptible(file_lock->fl_wait, - !file_lock->fl_next); - if (!error) - continue; - - locks_delete_block(file_lock); + for (;;) { + error = vfs_lock_file(filp, cmd, file_lock); + if (error != -EAGAIN || cmd == F_SETLK64) break; - } + error = wait_event_interruptible(file_lock->fl_wait, + !file_lock->fl_next); + if (!error) + continue; + + locks_delete_block(file_lock); + break; } /* @@ -1949,10 +1956,7 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner) lock.fl_ops = NULL; lock.fl_lmops = NULL; - if (filp->f_op && filp->f_op->lock != NULL) - filp->f_op->lock(filp, F_SETLK, &lock); - else - posix_lock_file(filp, &lock); + vfs_lock_file(filp, F_SETLK, &lock); if (lock.fl_ops && lock.fl_ops->fl_release_private) lock.fl_ops->fl_release_private(&lock); diff --git a/include/linux/fs.h b/include/linux/fs.h index 2a2a43988f50..1d5ccdd7c68d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -857,6 +857,7 @@ extern int posix_lock_file(struct file *, struct file_lock *); extern int posix_lock_file_wait(struct file *, struct file_lock *); extern int posix_unblock_lock(struct file *, struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); +extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *); extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags); extern void lease_get_mtime(struct inode *, struct timespec *time); -- cgit v1.2.3 From 150b393456e5a23513cace286a019e87151e47f0 Mon Sep 17 00:00:00 2001 From: Marc Eshel Date: Thu, 18 Jan 2007 16:15:35 -0500 Subject: locks: allow {vfs,posix}_lock_file to return conflicting lock The nfsv4 protocol's lock operation, in the case of a conflict, returns information about the conflicting lock. It's unclear how clients can use this, so for now we're not going so far as to add a filesystem method that can return a conflicting lock, but we may as well return something in the local case when it's easy to. Signed-off-by: Marc Eshel Signed-off-by: "J. 
Bruce Fields" --- fs/lockd/svclock.c | 6 +++--- fs/lockd/svcsubs.c | 2 +- fs/locks.c | 45 ++++++++++++++++++++------------------------- fs/nfsd/nfs4state.c | 10 ++++++---- include/linux/fs.h | 5 ++--- 5 files changed, 32 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 97b0160ef10f..3b0e7a4b817b 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -363,7 +363,7 @@ again: } else lock = &block->b_call->a_args.lock; - error = posix_lock_file(file->f_file, &lock->fl); + error = posix_lock_file(file->f_file, &lock->fl, NULL); lock->fl.fl_flags &= ~FL_SLEEP; dprintk("lockd: posix_lock_file returned %d\n", error); @@ -467,7 +467,7 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock) nlmsvc_cancel_blocked(file, lock); lock->fl.fl_type = F_UNLCK; - error = posix_lock_file(file->f_file, &lock->fl); + error = posix_lock_file(file->f_file, &lock->fl, NULL); return (error < 0)? nlm_lck_denied_nolocks : nlm_granted; } @@ -569,7 +569,7 @@ nlmsvc_grant_blocked(struct nlm_block *block) /* Try the lock operation again */ lock->fl.fl_flags |= FL_SLEEP; - error = posix_lock_file(file->f_file, &lock->fl); + error = posix_lock_file(file->f_file, &lock->fl, NULL); lock->fl.fl_flags &= ~FL_SLEEP; switch (error) { diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index c0df00c74ce3..50957089be1f 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -182,7 +182,7 @@ again: lock.fl_type = F_UNLCK; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; - if (posix_lock_file(file->f_file, &lock) < 0) { + if (posix_lock_file(file->f_file, &lock, NULL) < 0) { printk("lockd: unlock failure in %s:%d\n", __FILE__, __LINE__); return 1; diff --git a/fs/locks.c b/fs/locks.c index f4fd1515b6e2..ee46584c1a40 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -801,7 +801,7 @@ out: return error; } -static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock) +static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock) { struct file_lock *fl; struct file_lock *new_fl = NULL; @@ -1007,6 +1007,7 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request * posix_lock_file - Apply a POSIX-style lock to a file * @filp: The file to apply the lock to * @fl: The lock to be applied + * @conflock: Place to return a copy of the conflicting lock, if found. * * Add a POSIX style lock to a file. * We merge adjacent & overlapping locks whenever possible. @@ -1016,26 +1017,12 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request * whether or not a lock was successfully freed by testing the return * value for -ENOENT. */ -int posix_lock_file(struct file *filp, struct file_lock *fl) -{ - return __posix_lock_file_conf(filp->f_path.dentry->d_inode, fl, NULL); -} -EXPORT_SYMBOL(posix_lock_file); - -/** - * posix_lock_file_conf - Apply a POSIX-style lock to a file - * @filp: The file to apply the lock to - * @fl: The lock to be applied - * @conflock: Place to return a copy of the conflicting lock, if found. - * - * Except for the conflock parameter, acts just like posix_lock_file. 
- */ -int posix_lock_file_conf(struct file *filp, struct file_lock *fl, +int posix_lock_file(struct file *filp, struct file_lock *fl, struct file_lock *conflock) { - return __posix_lock_file_conf(filp->f_path.dentry->d_inode, fl, conflock); + return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock); } -EXPORT_SYMBOL(posix_lock_file_conf); +EXPORT_SYMBOL(posix_lock_file); /** * posix_lock_file_wait - Apply a POSIX-style lock to a file @@ -1051,7 +1038,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl) int error; might_sleep (); for (;;) { - error = posix_lock_file(filp, fl); + error = posix_lock_file(filp, fl, NULL); if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) break; error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); @@ -1123,7 +1110,7 @@ int locks_mandatory_area(int read_write, struct inode *inode, fl.fl_end = offset + count - 1; for (;;) { - error = __posix_lock_file_conf(inode, &fl, NULL); + error = __posix_lock_file(inode, &fl, NULL); if (error != -EAGAIN) break; if (!(fl.fl_flags & FL_SLEEP)) @@ -1703,13 +1690,21 @@ out: * @filp: The file to apply the lock to * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.) * @fl: The lock to be applied + * @conf: Place to return a copy of the conflicting lock, if found. + * + * A caller that doesn't care about the conflicting lock may pass NULL + * as the final argument. + * + * If the filesystem defines a private ->lock() method, then @conf will + * be left unchanged; so a caller that cares should initialize it to + * some acceptable default. */ -int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl) +int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) { if (filp->f_op && filp->f_op->lock) return filp->f_op->lock(filp, cmd, fl); else - return posix_lock_file(filp, fl); + return posix_lock_file(filp, fl, conf); } EXPORT_SYMBOL_GPL(vfs_lock_file); @@ -1776,7 +1771,7 @@ again: goto out; for (;;) { - error = vfs_lock_file(filp, cmd, file_lock); + error = vfs_lock_file(filp, cmd, file_lock, NULL); if (error != -EAGAIN || cmd == F_SETLK) break; error = wait_event_interruptible(file_lock->fl_wait, @@ -1902,7 +1897,7 @@ again: goto out; for (;;) { - error = vfs_lock_file(filp, cmd, file_lock); + error = vfs_lock_file(filp, cmd, file_lock, NULL); if (error != -EAGAIN || cmd == F_SETLK64) break; error = wait_event_interruptible(file_lock->fl_wait, @@ -1956,7 +1951,7 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner) lock.fl_ops = NULL; lock.fl_lmops = NULL; - vfs_lock_file(filp, F_SETLK, &lock); + vfs_lock_file(filp, F_SETLK, &lock, NULL); if (lock.fl_ops && lock.fl_ops->fl_release_private) lock.fl_ops->fl_release_private(&lock); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index e42c7a0eb6fa..03b0578781cb 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2657,6 +2657,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct file_lock conflock; __be32 status = 0; unsigned int strhashval; + unsigned int cmd; int err; dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", @@ -2739,10 +2740,12 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, case NFS4_READ_LT: case NFS4_READW_LT: file_lock.fl_type = F_RDLCK; + cmd = F_SETLK; break; case NFS4_WRITE_LT: case NFS4_WRITEW_LT: file_lock.fl_type = F_WRLCK; + cmd = F_SETLK; break; default: status = nfserr_inval; @@ -2769,9 +2772,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 
/* XXX?: Just to divert the locks_release_private at the start of * locks_copy_lock: */ - conflock.fl_ops = NULL; - conflock.fl_lmops = NULL; - err = posix_lock_file_conf(filp, &file_lock, &conflock); + locks_init_lock(&conflock); + err = posix_lock_file(filp, &file_lock, &conflock); switch (-err) { case 0: /* success! */ update_stateid(&lock_stp->st_stateid); @@ -2933,7 +2935,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, /* * Try to unlock the file in the VFS. */ - err = posix_lock_file(filp, &file_lock); + err = posix_lock_file(filp, &file_lock, NULL); if (err) { dprintk("NFSD: nfs4_locku: posix_lock_file failed!\n"); goto out_nfserr; diff --git a/include/linux/fs.h b/include/linux/fs.h index 1d5ccdd7c68d..c92d0bdff39f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -852,12 +852,11 @@ extern void locks_copy_lock(struct file_lock *, struct file_lock *); extern void locks_remove_posix(struct file *, fl_owner_t); extern void locks_remove_flock(struct file *); extern int posix_test_lock(struct file *, struct file_lock *); -extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_lock *); -extern int posix_lock_file(struct file *, struct file_lock *); +extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); extern int posix_lock_file_wait(struct file *, struct file_lock *); extern int posix_unblock_lock(struct file *, struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); -extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *); +extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags); extern void lease_get_mtime(struct inode *, struct timespec *time); -- cgit v1.2.3 From 9b9d2ab4154a42ea4a119f7d3e4e0288bfe0bb79 Mon Sep 17 00:00:00 2001 From: Marc Eshel Date: Thu, 18 Jan 2007 17:52:58 -0500 Subject: locks: add lock cancel command Lock managers need to be able to cancel pending lock requests. In the case where the exported filesystem manages its own locks, it's not sufficient just to call posix_unblock_lock(); we need to let the filesystem know what's happening too. We do this by adding a new fcntl lock command: FL_CANCELLK. Some day this might also be made available to userspace applications that could benefit from an asynchronous locking api. Signed-off-by: Marc Eshel Signed-off-by: "J. 
Bruce Fields" --- fs/locks.c | 16 ++++++++++++++++ include/linux/fcntl.h | 4 ++++ include/linux/fs.h | 1 + 3 files changed, 21 insertions(+) (limited to 'include/linux') diff --git a/fs/locks.c b/fs/locks.c index ee46584c1a40..242328e17f32 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2028,6 +2028,22 @@ posix_unblock_lock(struct file *filp, struct file_lock *waiter) EXPORT_SYMBOL(posix_unblock_lock); +/** + * vfs_cancel_lock - file byte range unblock lock + * @filp: The file to apply the unblock to + * @fl: The lock to be unblocked + * + * Used by lock managers to cancel blocked requests + */ +int vfs_cancel_lock(struct file *filp, struct file_lock *fl) +{ + if (filp->f_op && filp->f_op->lock) + return filp->f_op->lock(filp, F_CANCELLK, fl); + return 0; +} + +EXPORT_SYMBOL_GPL(vfs_cancel_lock); + static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx) { struct inode *inode = NULL; diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index 996f5611cd59..40b93265d4ba 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -3,6 +3,10 @@ #include +/* Cancel a blocking posix lock; internal use only until we expose an + * asynchronous lock api to userspace: */ +#define F_CANCELLK (F_LINUX_SPECIFIC_BASE+5) + #define F_SETLEASE (F_LINUX_SPECIFIC_BASE+0) #define F_GETLEASE (F_LINUX_SPECIFIC_BASE+1) diff --git a/include/linux/fs.h b/include/linux/fs.h index c92d0bdff39f..64b8ae205309 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -857,6 +857,7 @@ extern int posix_lock_file_wait(struct file *, struct file_lock *); extern int posix_unblock_lock(struct file *, struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); +extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags); extern void lease_get_mtime(struct inode *, struct timespec *time); -- cgit v1.2.3 From 2beb6614f5e36c6165b704c167d82ef3e4ceaa0c Mon Sep 17 00:00:00 2001 From: Marc Eshel Date: Tue, 5 Dec 2006 23:31:28 -0500 Subject: locks: add fl_grant callback for asynchronous lock return Acquiring a lock on a cluster filesystem may require communication with remote hosts, and to avoid blocking lockd or nfsd threads during such communication, we allow the results to be returned asynchronously. When a ->lock() call needs to block, the file system will return -EINPROGRESS, and then later return the results with a call to the routine in the fl_grant field of the lock_manager_operations struct. This differs from the case when ->lock returns -EAGAIN to a blocking lock request; in that case, the filesystem calls fl_notify when the lock is granted, and the caller retries the original lock. So while fl_notify is merely a hint to the caller that it should retry, fl_grant actually communicates the final result of the lock operation (with the lock already acquired in the succesful case). Therefore fl_grant takes a lock, a status and, for the test lock case, a conflicting lock. We also allow fl_grant to return an error to the filesystem, to handle the case where the fl_grant requests arrives after the lock manager has already given up waiting for it. Signed-off-by: Marc Eshel Signed-off-by: J. 
Bruce Fields --- fs/locks.c | 19 +++++++++++++++++++ include/linux/fs.h | 1 + 2 files changed, 20 insertions(+) (limited to 'include/linux') diff --git a/fs/locks.c b/fs/locks.c index 242328e17f32..53b0cd153202 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1698,6 +1698,25 @@ out: * If the filesystem defines a private ->lock() method, then @conf will * be left unchanged; so a caller that cares should initialize it to * some acceptable default. + * + * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX + * locks, the ->lock() interface may return asynchronously, before the lock has + * been granted or denied by the underlying filesystem, if (and only if) + * fl_grant is set. Callers expecting ->lock() to return asynchronously + * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if) + * the request is for a blocking lock. When ->lock() does return asynchronously, + * it must return -EINPROGRESS, and call ->fl_grant() when the lock + * request completes. + * If the request is for non-blocking lock the file system should return + * -EINPROGRESS then try to get the lock and call the callback routine with + * the result. If the request timed out the callback routine will return a + * nonzero return code and the file system should release the lock. The file + * system is also responsible to keep a corresponding posix lock when it + * grants a lock so the VFS can find out which locks are locally held and do + * the correct lock cleanup when required. + * The underlying filesystem must not drop the kernel lock or call + * ->fl_grant() before returning to the caller with a -EINPROGRESS + * return code. */ int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 64b8ae205309..b22991d5f16b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -785,6 +785,7 @@ struct file_lock_operations { struct lock_manager_operations { int (*fl_compare_owner)(struct file_lock *, struct file_lock *); void (*fl_notify)(struct file_lock *); /* unblock callback */ + int (*fl_grant)(struct file_lock *, struct file_lock *, int); void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); void (*fl_break)(struct file_lock *); -- cgit v1.2.3 From 2b36f412ab6f2e5b64af9832b20eb7ef67d025b4 Mon Sep 17 00:00:00 2001 From: Marc Eshel Date: Tue, 28 Nov 2006 16:26:47 -0500 Subject: lockd: save lock state on deferral We need to keep some state for a pending asynchronous lock request, so this patch adds that state to struct nlm_block. This also adds a function which defers the request, by calling rqstp->rq_chandle.defer and storing the resulting deferred request in a nlm_block structure which we insert into lockd's global block list. That new function isn't called yet, so it's dead code until a later patch. Signed-off-by: Marc Eshel Signed-off-by: J. 
Bruce Fields --- fs/lockd/svclock.c | 25 +++++++++++++++++++++++++ include/linux/lockd/lockd.h | 10 ++++++++++ 2 files changed, 35 insertions(+) (limited to 'include/linux') diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 3b0e7a4b817b..f2449265e2eb 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -330,6 +330,31 @@ static void nlmsvc_freegrantargs(struct nlm_rqst *call) kfree(call->a_args.lock.oh.data); } +/* + * Deferred lock request handling for non-blocking lock + */ +static u32 +nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block) +{ + u32 status = nlm_lck_denied_nolocks; + + block->b_flags |= B_QUEUED; + + nlmsvc_insert_block(block, NLM_TIMEOUT); + + block->b_cache_req = &rqstp->rq_chandle; + if (rqstp->rq_chandle.defer) { + block->b_deferred_req = + rqstp->rq_chandle.defer(block->b_cache_req); + if (block->b_deferred_req != NULL) + status = nlm_drop_reply; + } + dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n", + block, block->b_flags, status); + + return status; +} + /* * Attempt to establish a lock, and if it can't be granted, block it * if required. diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index ac25b5649c59..d4c4de753bca 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -119,6 +119,9 @@ struct nlm_file { * couldn't be granted because of a conflicting lock). */ #define NLM_NEVER (~(unsigned long) 0) +/* timeout on non-blocking call: */ +#define NLM_TIMEOUT (7 * HZ) + struct nlm_block { struct kref b_count; /* Reference count */ struct list_head b_list; /* linked list of all blocks */ @@ -130,6 +133,13 @@ struct nlm_block { unsigned int b_id; /* block id */ unsigned char b_granted; /* VFS granted lock */ struct nlm_file * b_file; /* file in question */ + struct cache_req * b_cache_req; /* deferred request handling */ + struct file_lock * b_fl; /* set for GETLK */ + struct cache_deferred_req * b_deferred_req; + unsigned int b_flags; /* block flags */ +#define B_QUEUED 1 /* lock queued */ +#define B_GOT_CALLBACK 2 /* got lock or conflicting lock */ +#define B_TIMED_OUT 4 /* filesystem too slow to respond */ }; /* -- cgit v1.2.3 From 85f3f1b3f7a6197b51a2ab98d927517df730214c Mon Sep 17 00:00:00 2001 From: Marc Eshel Date: Tue, 28 Nov 2006 16:27:06 -0500 Subject: lockd: pass cookie in nlmsvc_testlock Change NLM internal interface to pass more information for test lock; we need this to make sure the cookie information is pushed down to the place where we do request deferral, which is handled for testlock by the following patch. Signed-off-by: Marc Eshel Signed-off-by: J. Bruce Fields --- fs/lockd/svc4proc.c | 2 +- fs/lockd/svclock.c | 5 +++-- fs/lockd/svcproc.c | 2 +- include/linux/lockd/lockd.h | 4 ++-- 4 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index 47a66aa5d55b..a1bccad7ebd3 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -99,7 +99,7 @@ nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now check for conflicting locks */ - resp->status = nlmsvc_testlock(file, &argp->lock, &resp->lock); + resp->status = nlmsvc_testlock(rqstp, file, &argp->lock, &resp->lock, &resp->cookie); dprintk("lockd: TEST4 status %d\n", ntohl(resp->status)); nlm_release_host(host); diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 6e748573e0c6..7ab2e60c51f7 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -442,8 +442,9 @@ out: * Test for presence of a conflicting lock. */ __be32 -nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock, - struct nlm_lock *conflock) +nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, + struct nlm_lock *lock, struct nlm_lock *conflock, + struct nlm_cookie *cookie) { dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n", file->f_file->f_path.dentry->d_inode->i_sb->s_id, diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index 31cb48425733..d13b827dfd34 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -127,7 +127,7 @@ nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now check for conflicting locks */ - resp->status = cast_status(nlmsvc_testlock(file, &argp->lock, &resp->lock)); + resp->status = cast_status(nlmsvc_testlock(rqstp, file, &argp->lock, &resp->lock, &resp->cookie)); dprintk("lockd: TEST status %d vers %d\n", ntohl(resp->status), rqstp->rq_vers); diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index d4c4de753bca..424c3de333d3 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -195,8 +195,8 @@ typedef int (*nlm_host_match_fn_t)(struct nlm_host *cur, struct nlm_host *ref) __be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, struct nlm_lock *, int, struct nlm_cookie *); __be32 nlmsvc_unlock(struct nlm_file *, struct nlm_lock *); -__be32 nlmsvc_testlock(struct nlm_file *, struct nlm_lock *, - struct nlm_lock *); +__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, + struct nlm_lock *, struct nlm_lock *, struct nlm_cookie *); __be32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *); unsigned long nlmsvc_retry_blocked(void); void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, -- cgit v1.2.3 From cf4328cd949c2086091c62c5685f1580fe9b55e4 Mon Sep 17 00:00:00 2001 From: Ivo van Doorn Date: Mon, 7 May 2007 00:34:20 -0700 Subject: [NET]: rfkill: add support for input key to control wireless radio The RF kill patch that provides infrastructure for implementing switches controlling radio states on various network and other cards. [dtor@insightbb.com: address review comments] [akpm@linux-foundation.org: cleanups, build fixes] Signed-off-by: Ivo van Doorn Signed-off-by: Andrew Morton Signed-off-by: Dmitry Torokhov Signed-off-by: David S. 
Miller --- include/linux/rfkill.h | 89 ++++++++++ net/Kconfig | 2 + net/Makefile | 1 + net/rfkill/Kconfig | 24 +++ net/rfkill/Makefile | 6 + net/rfkill/rfkill-input.c | 174 ++++++++++++++++++++ net/rfkill/rfkill.c | 407 ++++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 703 insertions(+) create mode 100644 include/linux/rfkill.h create mode 100644 net/rfkill/Kconfig create mode 100644 net/rfkill/Makefile create mode 100644 net/rfkill/rfkill-input.c create mode 100644 net/rfkill/rfkill.c (limited to 'include/linux') diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h new file mode 100644 index 000000000000..7c1ffbab7865 --- /dev/null +++ b/include/linux/rfkill.h @@ -0,0 +1,89 @@ +#ifndef __RFKILL_H +#define __RFKILL_H + +/* + * Copyright (C) 2006 Ivo van Doorn + * Copyright (C) 2007 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include + +/** + * enum rfkill_type - type of rfkill switch. + * RFKILL_TYPE_WLAN: switch is on a wireless network device. + * RFKILL_TYPE_BLUETOOTH: switch is on a Bluetooth device. + * RFKILL_TYPE_IRDA: switch is on an infrared device. + */ +enum rfkill_type { + RFKILL_TYPE_WLAN = 0, + RFKILL_TYPE_BLUETOOTH = 1, + RFKILL_TYPE_IRDA = 2, + RFKILL_TYPE_MAX = 3, +}; + +enum rfkill_state { + RFKILL_STATE_OFF = 0, + RFKILL_STATE_ON = 1, +}; + +/** + * struct rfkill - rfkill control structure. + * @name: Name of the switch. + * @type: Radio type which the button controls, the value stored + * here should be a value from enum rfkill_type. + * @state: State of the switch (on/off). + * @user_claim: Set when the switch is controlled exclusively by userspace. + * @mutex: Guards switch state transitions + * @data: Pointer to the RF button driver's private data which will be + * passed along when toggling radio state. + * @toggle_radio(): Mandatory handler to control state of the radio. + * @dev: Device structure integrating the switch into device tree. + * @node: Used to place switch into list of all switches known to + * the system. + * + * This structure represents an RF switch located on a network device. 
+ */ +struct rfkill { + char *name; + enum rfkill_type type; + + enum rfkill_state state; + bool user_claim; + + struct mutex mutex; + + void *data; + int (*toggle_radio)(void *data, enum rfkill_state state); + + struct device dev; + struct list_head node; +}; +#define to_rfkill(d) container_of(d, struct rfkill, dev) + +struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type); +void rfkill_free(struct rfkill *rfkill); +int rfkill_register(struct rfkill *rfkill); +void rfkill_unregister(struct rfkill *rfkill); + +void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); + +#endif /* RFKILL_H */ diff --git a/net/Kconfig b/net/Kconfig index 5b2227813923..caeacd16656a 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -225,6 +225,8 @@ source "net/ieee80211/Kconfig" endmenu +source "net/rfkill/Kconfig" + endif # if NET endmenu # Networking diff --git a/net/Makefile b/net/Makefile index 9fdb60c2e4a1..34e5b2d7f877 100644 --- a/net/Makefile +++ b/net/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_IEEE80211) += ieee80211/ obj-$(CONFIG_TIPC) += tipc/ obj-$(CONFIG_NETLABEL) += netlabel/ obj-$(CONFIG_IUCV) += iucv/ +obj-$(CONFIG_RFKILL) += rfkill/ ifeq ($(CONFIG_NET),y) obj-$(CONFIG_SYSCTL) += sysctl_net.o diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig new file mode 100644 index 000000000000..8b31759ee8b0 --- /dev/null +++ b/net/rfkill/Kconfig @@ -0,0 +1,24 @@ +# +# RF switch subsystem configuration +# +menuconfig RFKILL + tristate "RF switch subsystem support" + help + Say Y here if you want to have control over RF switches + found on many WiFi, Bluetooth and IRDA cards. + + To compile this driver as a module, choose M here: the + module will be called rfkill. + +config RFKILL_INPUT + tristate "Input layer to RF switch connector" + depends on RFKILL && INPUT + help + Say Y here if you want kernel automatically toggle state + of RF switches on and off when user presses appropriate + button or a key on the keyboard. Without this module you + need a some kind of userspace application to control + state of the switches. + + To compile this driver as a module, choose M here: the + module will be called rfkill-input. diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile new file mode 100644 index 000000000000..b38c430be057 --- /dev/null +++ b/net/rfkill/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the RF switch subsystem. +# + +obj-$(CONFIG_RFKILL) += rfkill.o +obj-$(CONFIG_RFKILL_INPUT) += rfkill-input.o diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c new file mode 100644 index 000000000000..e5c840c30284 --- /dev/null +++ b/net/rfkill/rfkill-input.c @@ -0,0 +1,174 @@ +/* + * Input layer to RF Kill interface connector + * + * Copyright (c) 2007 Dmitry Torokhov + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Dmitry Torokhov "); +MODULE_DESCRIPTION("Input layer to RF switch connector"); +MODULE_LICENSE("GPL"); + +struct rfkill_task { + struct work_struct work; + enum rfkill_type type; + struct mutex mutex; /* ensures that task is serialized */ + spinlock_t lock; /* for accessing last and desired state */ + unsigned long last; /* last schedule */ + enum rfkill_state desired_state; /* on/off */ + enum rfkill_state current_state; /* on/off */ +}; + +static void rfkill_task_handler(struct work_struct *work) +{ + struct rfkill_task *task = container_of(work, struct rfkill_task, work); + enum rfkill_state state; + + mutex_lock(&task->mutex); + + /* + * Use temp variable to fetch desired state to keep it + * consistent even if rfkill_schedule_toggle() runs in + * another thread or interrupts us. + */ + state = task->desired_state; + + if (state != task->current_state) { + rfkill_switch_all(task->type, state); + task->current_state = state; + } + + mutex_unlock(&task->mutex); +} + +static void rfkill_schedule_toggle(struct rfkill_task *task) +{ + unsigned int flags; + + spin_lock_irqsave(&task->lock, flags); + + if (time_after(jiffies, task->last + msecs_to_jiffies(200))) { + task->desired_state = !task->desired_state; + task->last = jiffies; + schedule_work(&task->work); + } + + spin_unlock_irqrestore(&task->lock, flags); +} + +#define DEFINE_RFKILL_TASK(n, t) \ + struct rfkill_task n = { \ + .work = __WORK_INITIALIZER(n.work, \ + rfkill_task_handler), \ + .type = t, \ + .mutex = __MUTEX_INITIALIZER(n.mutex), \ + .lock = __SPIN_LOCK_UNLOCKED(n.lock), \ + .desired_state = RFKILL_STATE_ON, \ + .current_state = RFKILL_STATE_ON, \ + } + +static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN); +static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH); + +static void rfkill_event(struct input_handle *handle, unsigned int type, + unsigned int code, int down) +{ + if (type == EV_KEY && down == 1) { + switch (code) { + case KEY_WLAN: + rfkill_schedule_toggle(&rfkill_wlan); + break; + case KEY_BLUETOOTH: + rfkill_schedule_toggle(&rfkill_bt); + break; + default: + break; + } + } +} + +static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) +{ + struct input_handle *handle; + int error; + + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "rfkill"; + + error = input_register_handle(handle); + if (error) + goto err_free_handle; + + error = input_open_device(handle); + if (error) + goto err_unregister_handle; + + return 0; + + err_unregister_handle: + input_unregister_handle(handle); + err_free_handle: + kfree(handle); + return error; +} + +static void rfkill_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id rfkill_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT(EV_KEY) }, + .keybit = { [LONG(KEY_WLAN)] = BIT(KEY_WLAN) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT(EV_KEY) }, + .keybit = { [LONG(KEY_BLUETOOTH)] = BIT(KEY_BLUETOOTH) }, + }, + { } +}; + +static struct input_handler rfkill_handler = { + .event = rfkill_event, + .connect = rfkill_connect, + .disconnect = rfkill_disconnect, + .name = "rfkill", + .id_table = rfkill_ids, +}; + 
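
/*
 * A minimal driver-side sketch of the interface introduced by this patch;
 * this block is an illustration, not part of the patch itself.  It shows a
 * hypothetical "foo" wireless driver registering an rfkill switch so the
 * core above can toggle its radio.  Only rfkill_allocate(), rfkill_register(),
 * rfkill_free(), rfkill_unregister(), the toggle_radio() callback and the
 * RFKILL_TYPE_*/RFKILL_STATE_* values come from the new API; the foo_* names
 * and foo_set_radio_enabled() are made up for the example, and the usual
 * kernel headers (including <linux/rfkill.h>) are assumed.
 */
struct foo_wlan {
	struct device *dev;		/* device that owns the radio */
	struct rfkill *rfkill;		/* handle from rfkill_allocate() */
};

/* Hypothetical hardware helper: enable or disable the radio. */
static int foo_set_radio_enabled(struct foo_wlan *foo, bool enable)
{
	/* real hardware programming would go here */
	return 0;
}

/* Called by the rfkill core whenever the switch state should change. */
static int foo_toggle_radio(void *data, enum rfkill_state state)
{
	struct foo_wlan *foo = data;

	return foo_set_radio_enabled(foo, state == RFKILL_STATE_ON);
}

static int foo_register_rfkill(struct foo_wlan *foo)
{
	struct rfkill *rfkill;
	int error;

	rfkill = rfkill_allocate(foo->dev, RFKILL_TYPE_WLAN);
	if (!rfkill)
		return -ENOMEM;

	rfkill->name = "foo-wlan";
	rfkill->state = RFKILL_STATE_ON;	/* radio assumed on at probe */
	rfkill->data = foo;
	rfkill->toggle_radio = foo_toggle_radio;

	error = rfkill_register(rfkill);
	if (error) {
		rfkill_free(rfkill);
		return error;
	}

	foo->rfkill = rfkill;
	return 0;	/* teardown would call rfkill_unregister(foo->rfkill) */
}
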
+static int __init rfkill_handler_init(void) +{ + return input_register_handler(&rfkill_handler); +} + +static void __exit rfkill_handler_exit(void) +{ + input_unregister_handler(&rfkill_handler); + flush_scheduled_work(); +} + +module_init(rfkill_handler_init); +module_exit(rfkill_handler_exit); diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c new file mode 100644 index 000000000000..a973603e3880 --- /dev/null +++ b/net/rfkill/rfkill.c @@ -0,0 +1,407 @@ +/* + * Copyright (C) 2006 Ivo van Doorn + * Copyright (C) 2007 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Ivo van Doorn "); +MODULE_VERSION("1.0"); +MODULE_DESCRIPTION("RF switch support"); +MODULE_LICENSE("GPL"); + +static LIST_HEAD(rfkill_list); /* list of registered rf switches */ +static DEFINE_MUTEX(rfkill_mutex); + +static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; + +static int rfkill_toggle_radio(struct rfkill *rfkill, + enum rfkill_state state) +{ + int retval; + + retval = mutex_lock_interruptible(&rfkill->mutex); + if (retval) + return retval; + + if (state != rfkill->state) { + retval = rfkill->toggle_radio(rfkill->data, state); + if (!retval) + rfkill->state = state; + } + + mutex_unlock(&rfkill->mutex); + return retval; +} + +/** + * rfkill_switch_all - Toggle state of all switches of given type + * @type: type of interfaces to be affeceted + * @state: the new state + * + * This function toggles state of all switches of given type unless + * a specific switch is claimed by userspace in which case it is + * left alone. 
+ */ + +void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) +{ + struct rfkill *rfkill; + + mutex_lock(&rfkill_mutex); + + rfkill_states[type] = state; + + list_for_each_entry(rfkill, &rfkill_list, node) { + if (!rfkill->user_claim) + rfkill_toggle_radio(rfkill, state); + } + + mutex_unlock(&rfkill_mutex); +} +EXPORT_SYMBOL(rfkill_switch_all); + +static ssize_t rfkill_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + + return sprintf(buf, "%s\n", rfkill->name); +} + +static ssize_t rfkill_type_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + const char *type; + + switch (rfkill->type) { + case RFKILL_TYPE_WLAN: + type = "wlan"; + break; + case RFKILL_TYPE_BLUETOOTH: + type = "bluetooth"; + break; + case RFKILL_TYPE_IRDA: + type = "irda"; + break; + default: + BUG(); + } + + return sprintf(buf, "%s\n", type); +} + +static ssize_t rfkill_state_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + + return sprintf(buf, "%d\n", rfkill->state); +} + +static ssize_t rfkill_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rfkill *rfkill = to_rfkill(dev); + unsigned int state = simple_strtoul(buf, NULL, 0); + int error; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + error = rfkill_toggle_radio(rfkill, + state ? RFKILL_STATE_ON : RFKILL_STATE_OFF); + if (error) + return error; + + return count; +} + +static ssize_t rfkill_claim_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + + return sprintf(buf, "%d", rfkill->user_claim); +} + +static ssize_t rfkill_claim_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rfkill *rfkill = to_rfkill(dev); + bool claim = !!simple_strtoul(buf, NULL, 0); + int error; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* + * Take the global lock to make sure the kernel is not in + * the middle of rfkill_switch_all + */ + error = mutex_lock_interruptible(&rfkill_mutex); + if (error) + return error; + + if (rfkill->user_claim != claim) { + if (!claim) + rfkill_toggle_radio(rfkill, + rfkill_states[rfkill->type]); + rfkill->user_claim = claim; + } + + mutex_unlock(&rfkill_mutex); + + return count; +} + +static struct device_attribute rfkill_dev_attrs[] = { + __ATTR(name, S_IRUGO, rfkill_name_show, NULL), + __ATTR(type, S_IRUGO, rfkill_type_show, NULL), + __ATTR(state, S_IRUGO, rfkill_state_show, rfkill_state_store), + __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), + __ATTR_NULL +}; + +static void rfkill_release(struct device *dev) +{ + struct rfkill *rfkill = to_rfkill(dev); + + kfree(rfkill); + module_put(THIS_MODULE); +} + +#ifdef CONFIG_PM +static int rfkill_suspend(struct device *dev, pm_message_t state) +{ + struct rfkill *rfkill = to_rfkill(dev); + + if (dev->power.power_state.event != state.event) { + if (state.event == PM_EVENT_SUSPEND) { + mutex_lock(&rfkill->mutex); + + if (rfkill->state == RFKILL_STATE_ON) + rfkill->toggle_radio(rfkill->data, + RFKILL_STATE_OFF); + + mutex_unlock(&rfkill->mutex); + } + + dev->power.power_state = state; + } + + return 0; +} + +static int rfkill_resume(struct device *dev) +{ + struct rfkill *rfkill = to_rfkill(dev); + + if (dev->power.power_state.event != PM_EVENT_ON) { + 
mutex_lock(&rfkill->mutex); + + if (rfkill->state == RFKILL_STATE_ON) + rfkill->toggle_radio(rfkill->data, RFKILL_STATE_ON); + + mutex_unlock(&rfkill->mutex); + } + + dev->power.power_state = PMSG_ON; + return 0; +} +#else +#define rfkill_suspend NULL +#define rfkill_resume NULL +#endif + +static struct class rfkill_class = { + .name = "rfkill", + .dev_release = rfkill_release, + .dev_attrs = rfkill_dev_attrs, + .suspend = rfkill_suspend, + .resume = rfkill_resume, +}; + +static int rfkill_add_switch(struct rfkill *rfkill) +{ + int retval; + + retval = mutex_lock_interruptible(&rfkill_mutex); + if (retval) + return retval; + + retval = rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type]); + if (retval) + goto out; + + list_add_tail(&rfkill->node, &rfkill_list); + + out: + mutex_unlock(&rfkill_mutex); + return retval; +} + +static void rfkill_remove_switch(struct rfkill *rfkill) +{ + mutex_lock(&rfkill_mutex); + list_del_init(&rfkill->node); + rfkill_toggle_radio(rfkill, RFKILL_STATE_OFF); + mutex_unlock(&rfkill_mutex); +} + +/** + * rfkill_allocate - allocate memory for rfkill structure. + * @parent: device that has rf switch on it + * @type: type of the switch (wlan, bluetooth, irda) + * + * This function should be called by the network driver when it needs + * rfkill structure. Once the structure is allocated the driver should + * finish its initialization by setting name, private data and the + * toggle_radio method and then register it with rfkill_register(). + * NOTE: If registration fails the structure should be freed by calling + * rfkill_free() otherwise rfkill_unregister() should be used. + */ +struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type) +{ + struct rfkill *rfkill; + struct device *dev; + + rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); + if (!rfkill) + return NULL; + + mutex_init(&rfkill->mutex); + INIT_LIST_HEAD(&rfkill->node); + rfkill->type = type; + + dev = &rfkill->dev; + dev->class = &rfkill_class; + dev->parent = parent; + device_initialize(dev); + + __module_get(THIS_MODULE); + + return rfkill; +} +EXPORT_SYMBOL(rfkill_allocate); + +/** + * rfkill_free - Mark rfkill structure for deletion + * @rfkill: rfkill structure to be destroyed + * + * Decrements reference count of rfkill structure so it is destroyed. + * Note that rfkill_free() should _not_ be called after rfkill_unregister(). + */ +void rfkill_free(struct rfkill *rfkill) +{ + if (rfkill) + put_device(&rfkill->dev); +} +EXPORT_SYMBOL(rfkill_free); + +/** + * rfkill_register - Register a rfkill structure. + * @rfkill: rfkill structure to be registered + * + * This function should be called by the network driver when the rfkill + * structure needs to be registered. Immediately from registration the + * switch driver should be able to service calls to toggle_radio. + */ +int rfkill_register(struct rfkill *rfkill) +{ + static atomic_t rfkill_no = ATOMIC_INIT(0); + struct device *dev = &rfkill->dev; + int error; + + if (!rfkill->toggle_radio) + return -EINVAL; + + error = rfkill_add_switch(rfkill); + if (error) + return error; + + snprintf(dev->bus_id, sizeof(dev->bus_id), + "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1); + + error = device_add(dev); + if (error) { + rfkill_remove_switch(rfkill); + return error; + } + + return 0; +} +EXPORT_SYMBOL(rfkill_register); + +/** + * rfkill_unregister - Unregister a rfkill structure.
+ * @rfkill: rfkill structure to be unregistered + * + * This function should be called by the network driver during device + * teardown to destroy rfkill structure. Note that rfkill_free() should + * _not_ be called after rfkill_unregister(). + */ +void rfkill_unregister(struct rfkill *rfkill) +{ + device_del(&rfkill->dev); + rfkill_remove_switch(rfkill); + put_device(&rfkill->dev); +} +EXPORT_SYMBOL(rfkill_unregister); + +/* + * Rfkill module initialization/deinitialization. + */ +static int __init rfkill_init(void) +{ + int error; + int i; + + for (i = 0; i < ARRAY_SIZE(rfkill_states); i++) + rfkill_states[i] = RFKILL_STATE_ON; + + error = class_register(&rfkill_class); + if (error) { + printk(KERN_ERR "rfkill: unable to register rfkill class\n"); + return error; + } + + return 0; +} + +static void __exit rfkill_exit(void) +{ + class_unregister(&rfkill_class); +} + +module_init(rfkill_init); +module_exit(rfkill_exit); -- cgit v1.2.3 From 543b9fd3528f64c4b20439de0edb453764482de7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 3 May 2007 22:31:38 +1000 Subject: [POWERPC] powermac: Suspend to disk on G5 Powermac G5 suspend to disk implementation. The code is platform agnostic but only tested on powermac, no other 64-bit powerpc machines. Because nvidiafb still breaks suspend I have marked it EXPERIMENTAL on powermac and because I can't test it and some lowlevel code will need changes it is BROKEN on all other 64-bit platforms. Signed-off-by: Johannes Berg Acked-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/Kconfig | 5 + arch/powerpc/kernel/Makefile | 1 + arch/powerpc/kernel/asm-offsets.c | 4 +- arch/powerpc/kernel/idle.c | 5 +- arch/powerpc/kernel/swsusp.c | 9 ++ arch/powerpc/kernel/swsusp_64.c | 24 ++++ arch/powerpc/kernel/swsusp_asm64.S | 228 +++++++++++++++++++++++++++++++++++++ include/linux/suspend.h | 2 +- kernel/power/Kconfig | 4 +- 9 files changed, 276 insertions(+), 6 deletions(-) create mode 100644 arch/powerpc/kernel/swsusp_64.c create mode 100644 arch/powerpc/kernel/swsusp_asm64.S (limited to 'include/linux') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a8e08f4b62d3..49b1ea275eba 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -126,6 +126,11 @@ config DEFAULT_UIMAGE Used to allow a board to specify it wants a uImage built by default default n +config PPC64_SWSUSP + bool + depends on PPC64 && (BROKEN || (PPC_PMAC64 && EXPERIMENTAL)) + default y + menu "Processor support" choice prompt "Processor Type" diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 949f36a62aae..4dc73b8c25ba 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o +obj64-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_64.o swsusp_asm64.o obj32-$(CONFIG_MODULES) += module_32.o ifeq ($(CONFIG_PPC_MERGE),y) diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 0c5150c69175..8f48560b7ee2 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -21,12 +21,12 @@ #include #include #include +#include #ifdef CONFIG_PPC64 #include #include #else #include -#include #endif #include @@ -257,11 +257,11 @@ int main(void) DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore)); -#ifndef 
CONFIG_PPC64 DEFINE(pbe_address, offsetof(struct pbe, address)); DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); DEFINE(pbe_next, offsetof(struct pbe, next)); +#ifndef CONFIG_PPC64 DEFINE(TASK_SIZE, TASK_SIZE); DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); #endif /* ! CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 6e7f50967bab..a9e9cbd32975 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -33,8 +33,11 @@ #include #ifdef CONFIG_HOTPLUG_CPU +/* this is used for software suspend, and that shuts down + * CPUs even while the system is still booting... */ #define cpu_should_die() (cpu_is_offline(smp_processor_id()) && \ - system_state == SYSTEM_RUNNING) + (system_state == SYSTEM_RUNNING \ + || system_state == SYSTEM_BOOTING)) #else #define cpu_should_die() 0 #endif diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c index b89e4f5a0b08..064a7ba4f02c 100644 --- a/arch/powerpc/kernel/swsusp.c +++ b/arch/powerpc/kernel/swsusp.c @@ -24,6 +24,11 @@ void save_processor_state(void) flush_fp_to_thread(current); flush_altivec_to_thread(current); flush_spe_to_thread(current); + +#ifdef CONFIG_PPC64 + hard_irq_disable(); +#endif + } void restore_processor_state(void) @@ -31,4 +36,8 @@ void restore_processor_state(void) #ifdef CONFIG_PPC32 set_context(current->active_mm->context.id, current->active_mm->pgd); #endif + +#ifdef CONFIG_PPC64 + hard_irq_enable(); +#endif } diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c new file mode 100644 index 000000000000..6f3f0697274e --- /dev/null +++ b/arch/powerpc/kernel/swsusp_64.c @@ -0,0 +1,24 @@ +/* + * PowerPC 64-bit swsusp implementation + * + * Copyright 2006 Johannes Berg + * + * GPLv2 + */ + +#include +#include +#include +#include + +void do_after_copyback(void) +{ + iommu_restore(); + touch_softlockup_watchdog(); + mb(); +} + +void _iommu_save(void) +{ + iommu_save(); +} diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S new file mode 100644 index 000000000000..e092c3cbdb9b --- /dev/null +++ b/arch/powerpc/kernel/swsusp_asm64.S @@ -0,0 +1,228 @@ +/* + * PowerPC 64-bit swsusp implementation + * + * Copyright 2006 Johannes Berg + * + * GPLv2 + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Structure for storing CPU registers on the save area. 
+ */ +#define SL_r1 0x00 /* stack pointer */ +#define SL_PC 0x08 +#define SL_MSR 0x10 +#define SL_SDR1 0x18 +#define SL_XER 0x20 +#define SL_TB 0x40 +#define SL_r2 0x48 +#define SL_CR 0x50 +#define SL_LR 0x58 +#define SL_r12 0x60 +#define SL_r13 0x68 +#define SL_r14 0x70 +#define SL_r15 0x78 +#define SL_r16 0x80 +#define SL_r17 0x88 +#define SL_r18 0x90 +#define SL_r19 0x98 +#define SL_r20 0xa0 +#define SL_r21 0xa8 +#define SL_r22 0xb0 +#define SL_r23 0xb8 +#define SL_r24 0xc0 +#define SL_r25 0xc8 +#define SL_r26 0xd0 +#define SL_r27 0xd8 +#define SL_r28 0xe0 +#define SL_r29 0xe8 +#define SL_r30 0xf0 +#define SL_r31 0xf8 +#define SL_SIZE SL_r31+8 + +/* these macros rely on the save area being + * pointed to by r11 */ +#define SAVE_SPECIAL(special) \ + mf##special r0 ;\ + std r0, SL_##special(r11) +#define RESTORE_SPECIAL(special) \ + ld r0, SL_##special(r11) ;\ + mt##special r0 +#define SAVE_REGISTER(reg) \ + std reg, SL_##reg(r11) +#define RESTORE_REGISTER(reg) \ + ld reg, SL_##reg(r11) + +/* space for storing cpu state */ + .section .data + .align 5 +swsusp_save_area: + .space SL_SIZE + + .section ".toc","aw" +swsusp_save_area_ptr: + .tc swsusp_save_area[TC],swsusp_save_area +restore_pblist_ptr: + .tc restore_pblist[TC],restore_pblist + + .section .text + .align 5 +_GLOBAL(swsusp_arch_suspend) + ld r11,swsusp_save_area_ptr@toc(r2) + SAVE_SPECIAL(LR) + SAVE_REGISTER(r1) + SAVE_SPECIAL(CR) + SAVE_SPECIAL(TB) + SAVE_REGISTER(r2) + SAVE_REGISTER(r12) + SAVE_REGISTER(r13) + SAVE_REGISTER(r14) + SAVE_REGISTER(r15) + SAVE_REGISTER(r16) + SAVE_REGISTER(r17) + SAVE_REGISTER(r18) + SAVE_REGISTER(r19) + SAVE_REGISTER(r20) + SAVE_REGISTER(r21) + SAVE_REGISTER(r22) + SAVE_REGISTER(r23) + SAVE_REGISTER(r24) + SAVE_REGISTER(r25) + SAVE_REGISTER(r26) + SAVE_REGISTER(r27) + SAVE_REGISTER(r28) + SAVE_REGISTER(r29) + SAVE_REGISTER(r30) + SAVE_REGISTER(r31) + SAVE_SPECIAL(MSR) + SAVE_SPECIAL(SDR1) + SAVE_SPECIAL(XER) + + /* we push the stack up 128 bytes but don't store the + * stack pointer on the stack like a real stackframe */ + addi r1,r1,-128 + + bl _iommu_save + bl swsusp_save + + /* restore LR */ + ld r11,swsusp_save_area_ptr@toc(r2) + RESTORE_SPECIAL(LR) + addi r1,r1,128 + + blr + +/* Resume code */ +_GLOBAL(swsusp_arch_resume) + /* Stop pending alitvec streams and memory accesses */ +BEGIN_FTR_SECTION + DSSALL +END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) + sync + + ld r12,restore_pblist_ptr@toc(r2) + ld r12,0(r12) + + cmpdi r12,0 + beq- nothing_to_copy + li r15,512 +copyloop: + ld r13,pbe_address(r12) + ld r14,pbe_orig_address(r12) + + mtctr r15 + li r10,0 +copy_page_loop: + ldx r0,r10,r13 + stdx r0,r10,r14 + addi r10,r10,8 + bdnz copy_page_loop + + ld r12,pbe_next(r12) + cmpdi r12,0 + bne+ copyloop +nothing_to_copy: + + /* flush caches */ + lis r3, 0x10 + mtctr r3 + li r3, 0 + ori r3, r3, CONFIG_KERNEL_START>>48 + li r0, 48 + sld r3, r3, r0 + li r0, 0 +1: + dcbf r0,r3 + addi r3,r3,0x20 + bdnz 1b + + sync + + tlbia + + ld r11,swsusp_save_area_ptr@toc(r2) + + RESTORE_SPECIAL(CR) + + /* restore timebase */ + /* load saved tb */ + ld r1, SL_TB(r11) + /* get upper 32 bits of it */ + srdi r2, r1, 32 + /* clear tb lower to avoid wrap */ + li r0, 0 + mttbl r0 + /* set tb upper */ + mttbu r2 + /* set tb lower */ + mttbl r1 + + /* restore registers */ + RESTORE_REGISTER(r1) + RESTORE_REGISTER(r2) + RESTORE_REGISTER(r12) + RESTORE_REGISTER(r13) + RESTORE_REGISTER(r14) + RESTORE_REGISTER(r15) + RESTORE_REGISTER(r16) + RESTORE_REGISTER(r17) + RESTORE_REGISTER(r18) + RESTORE_REGISTER(r19) + RESTORE_REGISTER(r20) + 
RESTORE_REGISTER(r21) + RESTORE_REGISTER(r22) + RESTORE_REGISTER(r23) + RESTORE_REGISTER(r24) + RESTORE_REGISTER(r25) + RESTORE_REGISTER(r26) + RESTORE_REGISTER(r27) + RESTORE_REGISTER(r28) + RESTORE_REGISTER(r29) + RESTORE_REGISTER(r30) + RESTORE_REGISTER(r31) + /* can't use RESTORE_SPECIAL(MSR) */ + ld r0, SL_MSR(r11) + mtmsrd r0, 0 + RESTORE_SPECIAL(SDR1) + RESTORE_SPECIAL(XER) + + sync + + addi r1,r1,-128 + bl slb_flush_and_rebolt + bl do_after_copyback + addi r1,r1,128 + + ld r11,swsusp_save_area_ptr@toc(r2) + RESTORE_SPECIAL(LR) + + li r3, 0 + blr diff --git a/include/linux/suspend.h b/include/linux/suspend.h index bf99bd49f8ef..a34835ae0e75 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -1,7 +1,7 @@ #ifndef _LINUX_SWSUSP_H #define _LINUX_SWSUSP_H -#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) +#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64) #include #endif #include diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 51a4dd0f1b74..5001c652028c 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -79,7 +79,7 @@ config PM_SYSFS_DEPRECATED config SOFTWARE_SUSPEND bool "Software Suspend" - depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)) + depends on PM && SWAP && (((X86 || PPC64_SWSUSP) && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)) ---help--- Enable the suspend to disk (STD) functionality. @@ -134,7 +134,7 @@ config PM_STD_PARTITION config SUSPEND_SMP bool - depends on HOTPLUG_CPU && X86 && PM + depends on HOTPLUG_CPU && (X86 || PPC64) && PM default y config APM_EMULATION -- cgit v1.2.3 From fd76bab2fa6d8f3ef6b326a4c6ae442fa21d30a4 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 6 May 2007 14:48:40 -0700 Subject: slab: introduce krealloc This introduce krealloc() that reallocates memory while keeping the contents unchanged. The allocator avoids reallocation if the new size fits the currently used cache. I also added a simple non-optimized version for mm/slob.c for compatibility. [akpm@linux-foundation.org: fix warnings] Acked-by: Josef Sipek Acked-by: Matt Mackall Acked-by: Christoph Lameter Signed-off-by: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 3 ++- mm/slab.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++++- mm/slob.c | 35 ++++++++++++++++++++++++++++++++++- 3 files changed, 84 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 1ef822e31c77..2f8f60ff294a 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -72,8 +72,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, */ void *__kmalloc(size_t, gfp_t); void *__kzalloc(size_t, gfp_t); +void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); -unsigned int ksize(const void *); +size_t ksize(const void *); /** * kcalloc - allocate memory for an array. The memory is set to zero. diff --git a/mm/slab.c b/mm/slab.c index 168bfe9d8ffe..8b71a9c3daa4 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3739,6 +3739,53 @@ void *__kmalloc(size_t size, gfp_t flags) EXPORT_SYMBOL(__kmalloc); #endif +/** + * krealloc - reallocate memory. The contents will remain unchanged. + * + * @p: object to reallocate memory for. + * @new_size: how many bytes of memory are required. + * @flags: the type of memory to allocate. 
+ * + * The contents of the object pointed to are preserved up to the + * lesser of the new and old sizes. If @p is %NULL, krealloc() + * behaves exactly like kmalloc(). If @size is 0 and @p is not a + * %NULL pointer, the object pointed to is freed. + */ +void *krealloc(const void *p, size_t new_size, gfp_t flags) +{ + struct kmem_cache *cache, *new_cache; + void *ret; + + if (unlikely(!p)) + return kmalloc_track_caller(new_size, flags); + + if (unlikely(!new_size)) { + kfree(p); + return NULL; + } + + cache = virt_to_cache(p); + new_cache = __find_general_cachep(new_size, flags); + + /* + * If new size fits in the current cache, bail out. + */ + if (likely(cache == new_cache)) + return (void *)p; + + /* + * We are on the slow-path here so do not use __cache_alloc + * because it bloats kernel text. + */ + ret = kmalloc_track_caller(new_size, flags); + if (ret) { + memcpy(ret, p, min(new_size, ksize(p))); + kfree(p); + } + return ret; +} +EXPORT_SYMBOL(krealloc); + /** * kmem_cache_free - Deallocate an object * @cachep: The cache the allocation was from. @@ -4481,7 +4528,7 @@ const struct seq_operations slabstats_op = { * allocated with either kmalloc() or kmem_cache_alloc(). The object * must not be freed during the duration of the call. */ -unsigned int ksize(const void *objp) +size_t ksize(const void *objp) { if (unlikely(objp == NULL)) return 0; diff --git a/mm/slob.c b/mm/slob.c index 5adc29cb58dd..03cce3d3d986 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -190,6 +190,39 @@ void *__kmalloc(size_t size, gfp_t gfp) } EXPORT_SYMBOL(__kmalloc); +/** + * krealloc - reallocate memory. The contents will remain unchanged. + * + * @p: object to reallocate memory for. + * @new_size: how many bytes of memory are required. + * @flags: the type of memory to allocate. + * + * The contents of the object pointed to are preserved up to the + * lesser of the new and old sizes. If @p is %NULL, krealloc() + * behaves exactly like kmalloc(). If @size is 0 and @p is not a + * %NULL pointer, the object pointed to is freed. + */ +void *krealloc(const void *p, size_t new_size, gfp_t flags) +{ + void *ret; + + if (unlikely(!p)) + return kmalloc_track_caller(new_size, flags); + + if (unlikely(!new_size)) { + kfree(p); + return NULL; + } + + ret = kmalloc_track_caller(new_size, flags); + if (ret) { + memcpy(ret, p, min(new_size, ksize(p))); + kfree(p); + } + return ret; +} +EXPORT_SYMBOL(krealloc); + void kfree(const void *block) { bigblock_t *bb, **last = &bigblocks; @@ -219,7 +252,7 @@ void kfree(const void *block) EXPORT_SYMBOL(kfree); -unsigned int ksize(const void *block) +size_t ksize(const void *block) { bigblock_t *bb; unsigned long flags; -- cgit v1.2.3 From beab697ab4b2962e3d741b476abe443baad0933d Mon Sep 17 00:00:00 2001 From: Marc St-Jean Date: Sun, 6 May 2007 14:48:45 -0700 Subject: serial driver PMC MSP71xx Serial driver patch for the PMC-Sierra MSP71xx devices. There are three different fixes: 1 Fix for DesignWare APB THRE errata: In brief, this is a non-standard 16550 in that the THRE interrupt will not re-assert itself simply by disabling and re-enabling the THRI bit in the IER, it is only re-enabled if a character is actually sent out. It appears that the "8250-uart-backup-timer.patch" in the "mm" tree also fixes it so we have dropped our initial workaround. This patch now needs to be applied on top of that "mm" patch. 2 Fix for Busy Detect on LCR write: The DesignWare APB UART has a feature which causes a new Busy Detect interrupt to be generated if it's busy when the LCR is written. 
This fix saves the value of the LCR and rewrites it after clearing the interrupt. 3 Workaround for interrupt/data concurrency issue: The SoC needs to ensure that writes that can cause interrupts to be cleared reach the UART before returning from the ISR. This fix reads a non-destructive register on the UART so the read transaction completion ensures the previously queued write transaction has also completed. Signed-off-by: Marc St-Jean Cc: Russell King Cc: Ralf Baechle Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/mips/pmc-sierra/msp71xx/msp_serial.c | 165 ++++++++++++++++++++++++++++++ drivers/serial/8250.c | 31 ++++++ drivers/serial/serial_core.c | 2 + include/linux/serial_core.h | 2 + include/linux/serial_reg.h | 2 + 5 files changed, 202 insertions(+) create mode 100644 arch/mips/pmc-sierra/msp71xx/msp_serial.c (limited to 'include/linux') diff --git a/arch/mips/pmc-sierra/msp71xx/msp_serial.c b/arch/mips/pmc-sierra/msp71xx/msp_serial.c new file mode 100644 index 000000000000..c41b53faa8f6 --- /dev/null +++ b/arch/mips/pmc-sierra/msp71xx/msp_serial.c @@ -0,0 +1,165 @@ +/* + * The setup file for serial related hardware on PMC-Sierra MSP processors. + * + * Copyright 2005 PMC-Sierra, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_KGDB +/* + * kgdb uses serial port 1 so the console can remain on port 0. 
+ * To use port 0 change the definition to read as follows: + * #define DEBUG_PORT_BASE KSEG1ADDR(MSP_UART0_BASE) + */ +#define DEBUG_PORT_BASE KSEG1ADDR(MSP_UART1_BASE) + +int putDebugChar(char c) +{ + volatile uint32_t *uart = (volatile uint32_t *)DEBUG_PORT_BASE; + uint32_t val = (uint32_t)c; + + local_irq_disable(); + while( !(uart[5] & 0x20) ); /* Wait for TXRDY */ + uart[0] = val; + while( !(uart[5] & 0x20) ); /* Wait for TXRDY */ + local_irq_enable(); + + return 1; +} + +char getDebugChar(void) +{ + volatile uint32_t *uart = (volatile uint32_t *)DEBUG_PORT_BASE; + uint32_t val; + + while( !(uart[5] & 0x01) ); /* Wait for RXRDY */ + val = uart[0]; + + return (char)val; +} + +void initDebugPort(unsigned int uartclk, unsigned int baudrate) +{ + unsigned int baud_divisor = (uartclk + 8 * baudrate)/(16 * baudrate); + + /* Enable FIFOs */ + writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | + UART_FCR_CLEAR_XMIT | UART_FCR_TRIGGER_4, + (char *)DEBUG_PORT_BASE + (UART_FCR * 4)); + + /* Select brtc divisor */ + writeb(UART_LCR_DLAB, (char *)DEBUG_PORT_BASE + (UART_LCR * 4)); + + /* Store divisor lsb */ + writeb(baud_divisor, (char *)DEBUG_PORT_BASE + (UART_TX * 4)); + + /* Store divisor msb */ + writeb(baud_divisor >> 8, (char *)DEBUG_PORT_BASE + (UART_IER * 4)); + + /* Set 8N1 mode */ + writeb(UART_LCR_WLEN8, (char *)DEBUG_PORT_BASE + (UART_LCR * 4)); + + /* Disable flow control */ + writeb(0, (char *)DEBUG_PORT_BASE + (UART_MCR * 4)); + + /* Disable receive interrupt(!) */ + writeb(0, (char *)DEBUG_PORT_BASE + (UART_IER * 4)); +} +#endif + +void __init msp_serial_setup(void) +{ + char *s; + char *endp; + struct uart_port up; + unsigned int uartclk; + + memset(&up, 0, sizeof(up)); + + /* Check if clock was specified in environment */ + s = prom_getenv("uartfreqhz"); + if(!(s && *s && (uartclk = simple_strtoul(s, &endp, 10)) && *endp == 0)) + uartclk = MSP_BASE_BAUD; + ppfinit("UART clock set to %d\n", uartclk); + + /* Initialize first serial port */ + up.mapbase = MSP_UART0_BASE; + up.membase = ioremap_nocache(up.mapbase,MSP_UART_REG_LEN); + up.irq = MSP_INT_UART0; + up.uartclk = uartclk; + up.regshift = 2; + up.iotype = UPIO_DWAPB; /* UPIO_MEM like */ + up.flags = STD_COM_FLAGS; + up.type = PORT_16550A; + up.line = 0; + up.private_data = (void*)UART0_STATUS_REG; + if (early_serial_setup(&up)) + printk(KERN_ERR "Early serial init of port 0 failed\n"); + + /* Initialize the second serial port, if one exists */ + switch (mips_machtype) { + case MACH_MSP4200_EVAL: + case MACH_MSP4200_GW: + case MACH_MSP4200_FPGA: + case MACH_MSP7120_EVAL: + case MACH_MSP7120_GW: + case MACH_MSP7120_FPGA: + /* Enable UART1 on MSP4200 and MSP7120 */ + *GPIO_CFG2_REG = 0x00002299; + +#ifdef CONFIG_KGDB + /* Initialize UART1 for kgdb since PMON doesn't */ + if( DEBUG_PORT_BASE == KSEG1ADDR(MSP_UART1_BASE) ) { + if( mips_machtype == MACH_MSP4200_FPGA + || mips_machtype == MACH_MSP7120_FPGA ) + initDebugPort(uartclk,19200); + else + initDebugPort(uartclk,57600); + } +#endif + break; + + default: + return; /* No second serial port, good-bye. 
*/ + } + + up.mapbase = MSP_UART1_BASE; + up.membase = ioremap_nocache(up.mapbase,MSP_UART_REG_LEN); + up.irq = MSP_INT_UART1; + up.line = 1; + up.private_data = (void*)UART1_STATUS_REG; + if (early_serial_setup(&up)) + printk(KERN_ERR "Early serial init of port 1 failed\n"); +} diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 90621c3312bc..194362d9f5a3 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -308,6 +308,7 @@ static unsigned int serial_in(struct uart_8250_port *up, int offset) return inb(up->port.iobase + 1); case UPIO_MEM: + case UPIO_DWAPB: return readb(up->port.membase + offset); case UPIO_MEM32: @@ -333,6 +334,8 @@ static unsigned int serial_in(struct uart_8250_port *up, int offset) static void serial_out(struct uart_8250_port *up, int offset, int value) { + /* Save the offset before it's remapped */ + int save_offset = offset; offset = map_8250_out_reg(up, offset) << up->port.regshift; switch (up->port.iotype) { @@ -359,6 +362,18 @@ serial_out(struct uart_8250_port *up, int offset, int value) writeb(value, up->port.membase + offset); break; + case UPIO_DWAPB: + /* Save the LCR value so it can be re-written when a + * Busy Detect interrupt occurs. */ + if (save_offset == UART_LCR) + up->lcr = value; + writeb(value, up->port.membase + offset); + /* Read the IER to ensure any interrupt is cleared before + * returning from ISR. */ + if (save_offset == UART_TX || save_offset == UART_IER) + value = serial_in(up, UART_IER); + break; + default: outb(value, up->port.iobase + offset); } @@ -373,6 +388,7 @@ serial_out_sync(struct uart_8250_port *up, int offset, int value) #ifdef CONFIG_SERIAL_8250_AU1X00 case UPIO_AU: #endif + case UPIO_DWAPB: serial_out(up, offset, value); serial_in(up, UART_LCR); /* safe, no side-effects */ break; @@ -1388,6 +1404,19 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id) handled = 1; + end = NULL; + } else if (up->port.iotype == UPIO_DWAPB && + (iir & UART_IIR_BUSY) == UART_IIR_BUSY) { + /* The DesignWare APB UART has an Busy Detect (0x07) + * interrupt meaning an LCR write attempt occured while the + * UART was busy. The interrupt must be cleared by reading + * the UART status register (USR) and the LCR re-written. 
*/ + unsigned int status; + status = *(volatile u32 *)up->port.private_data; + serial_out(up, UART_LCR, up->lcr); + + handled = 1; + end = NULL; } else if (end == NULL) end = l; @@ -2090,6 +2119,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up) case UPIO_TSI: case UPIO_MEM32: case UPIO_MEM: + case UPIO_DWAPB: if (!up->port.mapbase) break; @@ -2127,6 +2157,7 @@ static void serial8250_release_std_resource(struct uart_8250_port *up) case UPIO_TSI: case UPIO_MEM32: case UPIO_MEM: + case UPIO_DWAPB: if (!up->port.mapbase) break; diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 0422c0f1f852..a677133ab2d4 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -2064,6 +2064,7 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port) case UPIO_MEM32: case UPIO_AU: case UPIO_TSI: + case UPIO_DWAPB: snprintf(address, sizeof(address), "MMIO 0x%lx", port->mapbase); break; @@ -2409,6 +2410,7 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2) case UPIO_MEM32: case UPIO_AU: case UPIO_TSI: + case UPIO_DWAPB: return (port1->mapbase == port2->mapbase); } return 0; diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 586aaba91720..8b5592e6aca4 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -230,6 +230,7 @@ struct uart_port { #define UPIO_MEM32 (3) #define UPIO_AU (4) /* Au1x00 type IO */ #define UPIO_TSI (5) /* Tsi108/109 type IO */ +#define UPIO_DWAPB (6) /* DesignWare APB UART */ unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ @@ -276,6 +277,7 @@ struct uart_port { struct device *dev; /* parent device */ unsigned char hub6; /* this should be in the 8250 driver */ unsigned char unused[3]; + void *private_data; /* generic platform data pointer */ }; /* diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h index 3c8a6aa77415..1c5ed7d92b0f 100644 --- a/include/linux/serial_reg.h +++ b/include/linux/serial_reg.h @@ -38,6 +38,8 @@ #define UART_IIR_RDI 0x04 /* Receiver data interrupt */ #define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */ +#define UART_IIR_BUSY 0x07 /* DesignWare APB Busy Detect */ + #define UART_FCR 2 /* Out: FIFO Control Register */ #define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */ #define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */ -- cgit v1.2.3 From bd71c182d5a02337305fc381831c11029dd17d64 Mon Sep 17 00:00:00 2001 From: Thomas Koeller Date: Sun, 6 May 2007 14:48:47 -0700 Subject: RM9000 serial driver Add support for the integrated serial ports of the MIPS RM9122 processor and its relatives. The patch also does some whitespace cleanup. 
[akpm@linux-foundation.org: cleanups] Signed-off-by: Thomas Koeller Cc: Ralf Baechle Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/mips/Kconfig | 3 ++ drivers/serial/8250.c | 92 +++++++++++++++++++++++++++++++++++++++------ drivers/serial/Kconfig | 9 +++++ include/linux/serial_core.h | 4 +- 4 files changed, 95 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 130d825e5438..7441a2cf523e 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1042,6 +1042,9 @@ config SOC_AU1X00 select SYS_SUPPORTS_APM_EMULATION select SYS_SUPPORTS_KGDB +config SERIAL_RM9000 + bool + config PNX8550 bool select SOC_PNX8550 diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 194362d9f5a3..c9832d963f1e 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -251,9 +251,16 @@ static const struct serial8250_config uart_config[] = { .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .flags = UART_CAP_FIFO | UART_CAP_UUE, }, + [PORT_RM9000] = { + .name = "RM9000", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO, + }, }; -#ifdef CONFIG_SERIAL_8250_AU1X00 +#if defined (CONFIG_SERIAL_8250_AU1X00) /* Au1x00 UART hardware has a weird register layout */ static const u8 au_io_in_map[] = { @@ -289,6 +296,44 @@ static inline int map_8250_out_reg(struct uart_8250_port *up, int offset) return au_io_out_map[offset]; } +#elif defined (CONFIG_SERIAL_8250_RM9K) + +static const u8 + regmap_in[8] = { + [UART_RX] = 0x00, + [UART_IER] = 0x0c, + [UART_IIR] = 0x14, + [UART_LCR] = 0x1c, + [UART_MCR] = 0x20, + [UART_LSR] = 0x24, + [UART_MSR] = 0x28, + [UART_SCR] = 0x2c + }, + regmap_out[8] = { + [UART_TX] = 0x04, + [UART_IER] = 0x0c, + [UART_FCR] = 0x18, + [UART_LCR] = 0x1c, + [UART_MCR] = 0x20, + [UART_LSR] = 0x24, + [UART_MSR] = 0x28, + [UART_SCR] = 0x2c + }; + +static inline int map_8250_in_reg(struct uart_8250_port *up, int offset) +{ + if (up->port.iotype != UPIO_RM9000) + return offset; + return regmap_in[offset]; +} + +static inline int map_8250_out_reg(struct uart_8250_port *up, int offset) +{ + if (up->port.iotype != UPIO_RM9000) + return offset; + return regmap_out[offset]; +} + #else /* sane hardware needs no mapping */ @@ -311,6 +356,7 @@ static unsigned int serial_in(struct uart_8250_port *up, int offset) case UPIO_DWAPB: return readb(up->port.membase + offset); + case UPIO_RM9000: case UPIO_MEM32: return readl(up->port.membase + offset); @@ -348,6 +394,7 @@ serial_out(struct uart_8250_port *up, int offset, int value) writeb(value, up->port.membase + offset); break; + case UPIO_RM9000: case UPIO_MEM32: writel(value, up->port.membase + offset); break; @@ -419,7 +466,7 @@ static inline void _serial_dl_write(struct uart_8250_port *up, int value) serial_outp(up, UART_DLM, value >> 8 & 0xff); } -#ifdef CONFIG_SERIAL_8250_AU1X00 +#if defined (CONFIG_SERIAL_8250_AU1X00) /* Au1x00 haven't got a standard divisor latch */ static int serial_dl_read(struct uart_8250_port *up) { @@ -436,6 +483,24 @@ static void serial_dl_write(struct uart_8250_port *up, int value) else _serial_dl_write(up, value); } +#elif defined (CONFIG_SERIAL_8250_RM9K) +static int serial_dl_read(struct uart_8250_port *up) +{ + return (up->port.iotype == UPIO_RM9000) ? 
+ (((__raw_readl(up->port.membase + 0x10) << 8) | + (__raw_readl(up->port.membase + 0x08) & 0xff)) & 0xffff) : + _serial_dl_read(up); +} + +static void serial_dl_write(struct uart_8250_port *up, int value) +{ + if (up->port.iotype == UPIO_RM9000) { + __raw_writel(value, up->port.membase + 0x08); + __raw_writel(value >> 8, up->port.membase + 0x10); + } else { + _serial_dl_write(up, value); + } +} #else #define serial_dl_read(up) _serial_dl_read(up) #define serial_dl_write(up, value) _serial_dl_write(up, value) @@ -637,7 +702,7 @@ static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p) * its clones. (We treat the broken original StarTech 16650 V1 as a * 16550, and why not? Startech doesn't seem to even acknowledge its * existence.) - * + * * What evil have men's minds wrought... */ static void autoconfig_has_efr(struct uart_8250_port *up) @@ -690,7 +755,7 @@ static void autoconfig_has_efr(struct uart_8250_port *up) up->bugs |= UART_BUG_QUOT; return; } - + /* * We check for a XR16C850 by setting DLL and DLM to 0, and then * reading back DLL and DLM. The chip type depends on the DLM @@ -833,7 +898,7 @@ static void autoconfig_16550a(struct uart_8250_port *up) status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ serial_outp(up, 0x04, status1); - + serial_dl_write(up, quot); serial_outp(up, UART_LCR, 0); @@ -938,7 +1003,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags) /* * Do a simple existence test first; if we fail this, * there's no point trying anything else. - * + * * 0x80 is used as a nonsense port to prevent against * false positives due to ISA bus float. The * assumption is that 0x80 is a non-existent port; @@ -977,7 +1042,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags) save_mcr = serial_in(up, UART_MCR); save_lcr = serial_in(up, UART_LCR); - /* + /* * Check to see if a UART is really there. Certain broken * internal modems based on the Rockwell chipset fail this * test, because they apparently don't implement the loopback @@ -1084,7 +1149,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags) else serial_outp(up, UART_IER, 0); - out: + out: spin_unlock_irqrestore(&up->port.lock, flags); // restore_flags(flags); DEBUG_AUTOCONF("type=%s\n", uart_config[up->port.type].name); @@ -1110,7 +1175,7 @@ static void autoconfig_irq(struct uart_8250_port *up) save_mcr = serial_inp(up, UART_MCR); save_ier = serial_inp(up, UART_IER); serial_outp(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2); - + irqs = probe_irq_on(); serial_outp(up, UART_MCR, 0); udelay (10); @@ -1175,8 +1240,11 @@ static void serial8250_start_tx(struct uart_port *port) if (up->bugs & UART_BUG_TXEN) { unsigned char lsr, iir; lsr = serial_in(up, UART_LSR); - iir = serial_in(up, UART_IIR); - if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) + iir = serial_in(up, UART_IIR) & 0x0f; + if ((up->port.type == PORT_RM9000) ? + (lsr & UART_LSR_THRE && + (iir == UART_IIR_NO_INT || iir == UART_IIR_THRI)) : + (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT)) transmit_chars(up); } } @@ -1957,7 +2025,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios, /* * Ask the core to calculate the divisor for us. 
*/ - baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); quot = serial8250_get_divisor(port, baud); /* diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index ad9f321968e1..e9d927e3b8e6 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -254,6 +254,15 @@ config SERIAL_8250_AU1X00 to this option. The driver can handle 1 or 2 serial ports. If unsure, say N. +config SERIAL_8250_RM9K + bool "Support for MIPS RM9xxx integrated serial port" + depends on SERIAL_8250 != n && SERIAL_RM9000 + select SERIAL_8250_SHARE_IRQ + help + Selecting this option will add support for the integrated serial + port hardware found on MIPS RM9122 and similar processors. + If unsure, say N. + comment "Non-8250 serial port support" config SERIAL_AMBA_PL010 diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 8b5592e6aca4..d242c731491f 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -39,7 +39,8 @@ #define PORT_RSA 13 #define PORT_NS16550A 14 #define PORT_XSCALE 15 -#define PORT_MAX_8250 15 /* max port ID */ +#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ +#define PORT_MAX_8250 16 /* max port ID */ /* * ARM specific type numbers. These are not currently guaranteed @@ -231,6 +232,7 @@ struct uart_port { #define UPIO_AU (4) /* Au1x00 type IO */ #define UPIO_TSI (5) /* Tsi108/109 type IO */ #define UPIO_DWAPB (6) /* DesignWare APB UART */ +#define UPIO_RM9000 (7) /* RM9000 type IO */ unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ -- cgit v1.2.3 From abb4a2390737867353ebafc012d45f2b03f3f944 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Sun, 6 May 2007 14:48:49 -0700 Subject: serial: define FIXED_PORT flag for serial_core At present, the serial core always allows setserial in userspace to change the port address, irq and base clock of any serial port. That makes sense for legacy ISA ports, but not for (say) embedded ns16550 compatible serial ports at peculiar addresses. In these cases, the kernel code configuring the ports must know exactly where they are, and their clocking arrangements (which can be unusual on embedded boards). It doesn't make sense for userspace to change these settings. Therefore, this patch defines a UPF_FIXED_PORT flag for the uart_port structure. If this flag is set when the serial port is configured, any attempts to alter the port's type, io address, irq or base clock with setserial are ignored. In addition this patch uses the new flag for on-chip serial ports probed in arch/powerpc/kernel/legacy_serial.c, and for other hard-wired serial ports probed by drivers/serial/of_serial.c. 
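For illustration only (this sketch is not part of the patch), a board port with a hard-wired on-chip UART could mark it fixed roughly as follows. The base address, IRQ and clock values are invented placeholders, and registration goes through early_serial_setup() as in the MSP71xx code earlier in this series:

#include <linux/init.h>
#include <linux/string.h>
#include <linux/io.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>

/* Placeholder values; a real board would take these from its documentation. */
#define BOARD_UART_BASE		0x1f000800UL
#define BOARD_UART_IRQ		17
#define BOARD_UART_CLK		14745600

/* Called from the board's setup code. */
static int __init board_fixed_uart_init(void)
{
	struct uart_port port;

	memset(&port, 0, sizeof(port));
	port.mapbase  = BOARD_UART_BASE;
	port.membase  = ioremap_nocache(port.mapbase, 0x100);
	port.irq      = BOARD_UART_IRQ;
	port.uartclk  = BOARD_UART_CLK;
	port.iotype   = UPIO_MEM;
	port.regshift = 2;
	port.type     = PORT_16550A;
	/*
	 * UPF_FIXED_PORT tells uart_set_info() to ignore any setserial
	 * request that would change the type, address, irq or base clock.
	 */
	port.flags    = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_FIXED_PORT;

	return early_serial_setup(&port);
}

With the flag set, the uart_set_info() checks changed below simply skip the irq, port and base-clock updates rather than failing, so existing setserial scripts keep working while the kernel-provided settings stay authoritative.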
Signed-off-by: David Gibson Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/kernel/legacy_serial.c | 3 ++- drivers/serial/of_serial.c | 3 ++- drivers/serial/serial_core.c | 22 +++++++++++++--------- include/linux/serial_core.h | 1 + 4 files changed, 18 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 63dd2c3ad95e..ae4836ea7442 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -115,7 +115,8 @@ static int __init add_legacy_soc_port(struct device_node *np, { u64 addr; const u32 *addrp; - upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; + upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ + | UPF_FIXED_PORT; struct device_node *tsi = of_get_parent(np); /* We only support ports that have a clock frequency properly diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c index 09b0b736a751..336d0f4580d9 100644 --- a/drivers/serial/of_serial.c +++ b/drivers/serial/of_serial.c @@ -48,7 +48,8 @@ static int __devinit of_platform_serial_setup(struct of_device *ofdev, port->iotype = UPIO_MEM; port->type = type; port->uartclk = *clk; - port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP; + port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP + | UPF_FIXED_PORT; port->dev = &ofdev->dev; port->custom_divisor = *clk / (16 * (*spd)); diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index a677133ab2d4..f409be37b62f 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -672,19 +672,21 @@ static int uart_set_info(struct uart_state *state, */ mutex_lock(&state->mutex); - change_irq = new_serial.irq != port->irq; + change_irq = !(port->flags & UPF_FIXED_PORT) + && new_serial.irq != port->irq; /* * Since changing the 'type' of the port changes its resource * allocations, we should treat type changes the same as * IO port changes. 
*/ - change_port = new_port != port->iobase || - (unsigned long)new_serial.iomem_base != port->mapbase || - new_serial.hub6 != port->hub6 || - new_serial.io_type != port->iotype || - new_serial.iomem_reg_shift != port->regshift || - new_serial.type != port->type; + change_port = !(port->flags & UPF_FIXED_PORT) + && (new_port != port->iobase || + (unsigned long)new_serial.iomem_base != port->mapbase || + new_serial.hub6 != port->hub6 || + new_serial.io_type != port->iotype || + new_serial.iomem_reg_shift != port->regshift || + new_serial.type != port->type); old_flags = port->flags; new_flags = new_serial.flags; @@ -796,8 +798,10 @@ static int uart_set_info(struct uart_state *state, } } - port->irq = new_serial.irq; - port->uartclk = new_serial.baud_base * 16; + if (change_irq) + port->irq = new_serial.irq; + if (!(port->flags & UPF_FIXED_PORT)) + port->uartclk = new_serial.baud_base * 16; port->flags = (port->flags & ~UPF_CHANGE_MASK) | (new_flags & UPF_CHANGE_MASK); port->custom_divisor = new_serial.custom_divisor; diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index d242c731491f..aadbfd30763f 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -263,6 +263,7 @@ struct uart_port { #define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) #define UPF_SHARE_IRQ ((__force upf_t) (1 << 24)) #define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28)) +#define UPF_FIXED_PORT ((__force upf_t) (1 << 29)) #define UPF_DEAD ((__force upf_t) (1 << 30)) #define UPF_IOREMAP ((__force upf_t) (1 << 31)) -- cgit v1.2.3 From aee16b3cee2746880e40945a9b5bff4f309cfbc4 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sun, 6 May 2007 14:48:54 -0700 Subject: Add apply_to_page_range() which applies a function to a pte range Add a new mm function apply_to_page_range() which applies a given function to every pte in a given virtual address range in a given mm structure. This is a generic alternative to cut-and-pasting the Linux idiomatic pagetable walking code in every place that a sequence of PTEs must be accessed. Although this interface is intended to be useful in a wide range of situations, it is currently used specifically by several Xen subsystems, for example: to ensure that pagetables have been allocated for a virtual address range, and to construct batched special pagetable update requests to map I/O memory (in ioremap()). 
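As a rough illustration (not part of the patch), the first use case above, forcing page tables to exist for a range of kernel virtual addresses, might look like this; the helper names are invented, and the callback signature is the pte_fn_t typedef added to include/linux/mm.h below:

#include <linux/mm.h>
#include <linux/sched.h>	/* for init_mm */

/* Callback invoked once per PTE slot; returning non-zero aborts the walk. */
static int noop_pte_fn(pte_t *pte, struct page *pmd_page,
		       unsigned long addr, void *data)
{
	return 0;
}

/*
 * Force allocation of page tables covering [addr, addr + size) in the
 * kernel address space: for &init_mm, apply_to_page_range() goes through
 * pte_alloc_kernel() and so fills in any missing intermediate levels.
 */
static int prealloc_kernel_ptes(unsigned long addr, unsigned long size)
{
	return apply_to_page_range(&init_mm, addr, size, noop_pte_fn, NULL);
}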
[akpm@linux-foundation.org: fix warning, unpleasantly] Signed-off-by: Ian Pratt Signed-off-by: Christian Limpach Signed-off-by: Chris Wright Signed-off-by: Jeremy Fitzhardinge Cc: Christoph Lameter Cc: Matt Mackall Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 5 +++ mm/memory.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 60e0e4a592d2..7bf0bd882fc3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1130,6 +1130,11 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address, #define FOLL_GET 0x04 /* do get_page on page */ #define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */ +typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr, + void *data); +extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, + unsigned long size, pte_fn_t fn, void *data); + #ifdef CONFIG_PROC_FS void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); #else diff --git a/mm/memory.c b/mm/memory.c index e7066e71dfa3..044feb7e7134 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1448,6 +1448,100 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, } EXPORT_SYMBOL(remap_pfn_range); +static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, unsigned long end, + pte_fn_t fn, void *data) +{ + pte_t *pte; + int err; + struct page *pmd_page; + spinlock_t *ptl = ptl; /* Suppress gcc warning */ + + pte = (mm == &init_mm) ? + pte_alloc_kernel(pmd, addr) : + pte_alloc_map_lock(mm, pmd, addr, &ptl); + if (!pte) + return -ENOMEM; + + BUG_ON(pmd_huge(*pmd)); + + pmd_page = pmd_page(*pmd); + + do { + err = fn(pte, pmd_page, addr, data); + if (err) + break; + } while (pte++, addr += PAGE_SIZE, addr != end); + + if (mm != &init_mm) + pte_unmap_unlock(pte-1, ptl); + return err; +} + +static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, + unsigned long addr, unsigned long end, + pte_fn_t fn, void *data) +{ + pmd_t *pmd; + unsigned long next; + int err; + + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { + next = pmd_addr_end(addr, end); + err = apply_to_pte_range(mm, pmd, addr, next, fn, data); + if (err) + break; + } while (pmd++, addr = next, addr != end); + return err; +} + +static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, + unsigned long addr, unsigned long end, + pte_fn_t fn, void *data) +{ + pud_t *pud; + unsigned long next; + int err; + + pud = pud_alloc(mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { + next = pud_addr_end(addr, end); + err = apply_to_pmd_range(mm, pud, addr, next, fn, data); + if (err) + break; + } while (pud++, addr = next, addr != end); + return err; +} + +/* + * Scan a region of virtual memory, filling in page tables as necessary + * and calling a provided function on each leaf page table. 
+ */ +int apply_to_page_range(struct mm_struct *mm, unsigned long addr, + unsigned long size, pte_fn_t fn, void *data) +{ + pgd_t *pgd; + unsigned long next; + unsigned long end = addr + size; + int err; + + BUG_ON(addr >= end); + pgd = pgd_offset(mm, addr); + do { + next = pgd_addr_end(addr, end); + err = apply_to_pud_range(mm, pgd, addr, next, fn, data); + if (err) + break; + } while (pgd++, addr = next, addr != end); + return err; +} +EXPORT_SYMBOL_GPL(apply_to_page_range); + /* * handle_pte_fault chooses page fault handler according to an entry * which was read non-atomically. Before making any commitment, on -- cgit v1.2.3 From d2ba27e8007b35d24764c0877ab2428e00a5c5ab Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Sun, 6 May 2007 14:49:00 -0700 Subject: proper prototype for hugetlb_get_unmapped_area() Add a proper prototype for hugetlb_get_unmapped_area() in include/linux/hugetlb.h. Signed-off-by: Adrian Bunk Acked-by: William Irwin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 5 +---- include/linux/hugetlb.h | 6 ++++++ 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 8c718a3d413f..9ba71b252f3e 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -98,10 +98,7 @@ out: * Called under down_write(mmap_sem). */ -#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA -unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags); -#else +#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA static unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 3f3e7a648da3..b4570b62ab85 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -189,4 +189,10 @@ static inline void set_file_hugepages(struct file *file) #endif /* !CONFIG_HUGETLBFS */ +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA +unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ + #endif /* _LINUX_HUGETLB_H */ -- cgit v1.2.3 From 5f22df00a009e3f86301366c0ecddb63ebd22af9 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sun, 6 May 2007 14:49:02 -0700 Subject: mm: remove gcc workaround Minimum gcc version is 3.2 now. However, with likely profiling, even modern gcc versions cannot always eliminate the call. Replace the placeholder functions with the more conventional empty static inlines, which should be optimal for everyone. 
Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 18 ++++++++++++++++++ mm/memory.c | 12 ------------ 2 files changed, 18 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 7bf0bd882fc3..c95d96ebd5ad 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -850,8 +850,26 @@ static inline int vma_wants_writenotify(struct vm_area_struct *vma) extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)); +#ifdef __PAGETABLE_PUD_FOLDED +static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, + unsigned long address) +{ + return 0; +} +#else int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); +#endif + +#ifdef __PAGETABLE_PMD_FOLDED +static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, + unsigned long address) +{ + return 0; +} +#else int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); +#endif + int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); diff --git a/mm/memory.c b/mm/memory.c index 044feb7e7134..c252aae544e9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2633,12 +2633,6 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) spin_unlock(&mm->page_table_lock); return 0; } -#else -/* Workaround for gcc 2.96 */ -int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) -{ - return 0; -} #endif /* __PAGETABLE_PUD_FOLDED */ #ifndef __PAGETABLE_PMD_FOLDED @@ -2667,12 +2661,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) spin_unlock(&mm->page_table_lock); return 0; } -#else -/* Workaround for gcc 2.96 */ -int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) -{ - return 0; -} #endif /* __PAGETABLE_PMD_FOLDED */ int make_pages_present(unsigned long addr, unsigned long end) -- cgit v1.2.3 From 6fe6900e1e5b6fa9e5c59aa5061f244fe3f467e2 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sun, 6 May 2007 14:49:04 -0700 Subject: mm: make read_cache_page synchronous Ensure pages are uptodate after returning from read_cache_page, which allows us to cut out most of the filesystem-internal PageUptodate calls. I didn't have a great look down the call chains, but this appears to fixes 7 possible use-before uptodate in hfs, 2 in hfsplus, 1 in jfs, a few in ecryptfs, 1 in jffs2, and a possible cleared data overwritten with readpage in block2mtd. All depending on whether the filler is async and/or can return with a !uptodate page. 
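As a sketch of the caller-side simplification (a hypothetical helper, not one of the hunks below):

#include <linux/err.h>
#include <linux/pagemap.h>

/*
 * With read_cache_page()/read_mapping_page() now returning only uptodate
 * pages (or an ERR_PTR), the per-caller wait_on_page_locked() plus
 * PageUptodate() dance goes away.
 */
static struct page *example_get_page(struct address_space *mapping, pgoff_t n)
{
	struct page *page = read_mapping_page(mapping, n, NULL);

	if (IS_ERR(page))
		return page;
	/*
	 * Previously a caller also had to do:
	 *	wait_on_page_locked(page);
	 *	if (!PageUptodate(page))
	 *		goto fail;
	 * Only page-specific checks such as PageError() remain, as in the
	 * afs and ext2 hunks below.
	 */
	return page;
}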
Signed-off-by: Nick Piggin Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mtd/devices/block2mtd.c | 6 ++--- fs/afs/dir.c | 3 --- fs/afs/mntpt.c | 11 ++++----- fs/cramfs/inode.c | 3 ++- fs/ecryptfs/mmap.c | 11 +-------- fs/ext2/dir.c | 3 --- fs/freevxfs/vxfs_subr.c | 3 --- fs/minix/dir.c | 1 - fs/namei.c | 12 +--------- fs/nfs/dir.c | 5 ----- fs/nfs/symlink.c | 6 ----- fs/ntfs/aops.h | 3 +-- fs/ntfs/attrib.c | 18 ++------------- fs/ntfs/file.c | 3 +-- fs/ntfs/super.c | 30 ++++--------------------- fs/ocfs2/symlink.c | 7 ------ fs/partitions/check.c | 3 --- fs/reiserfs/xattr.c | 4 ---- fs/sysv/dir.c | 10 +-------- fs/ufs/dir.c | 6 +---- fs/ufs/util.c | 6 ++--- include/linux/pagemap.h | 11 +++++++++ mm/filemap.c | 49 ++++++++++++++++++++++++++++++++--------- mm/swapfile.c | 3 --- 24 files changed, 71 insertions(+), 146 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index ce47544dc120..fc4cc8ba9e29 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -40,13 +40,11 @@ struct block2mtd_dev { static LIST_HEAD(blkmtd_device_list); -static struct page* page_read(struct address_space *mapping, int index) +static struct page *page_read(struct address_space *mapping, int index) { - filler_t *filler = (filler_t*)mapping->a_ops->readpage; - return read_cache_page(mapping, index, filler, NULL); + return read_mapping_page(mapping, index, NULL); } - /* erase a specified part of the device */ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len) { diff --git a/fs/afs/dir.c b/fs/afs/dir.c index dac5b990c0cd..0c1e902f17a3 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -194,10 +194,7 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index, page = read_mapping_page(dir->i_mapping, index, &file); if (!IS_ERR(page)) { - wait_on_page_locked(page); kmap(page); - if (!PageUptodate(page)) - goto fail; if (!PageChecked(page)) afs_dir_check_page(dir, page); if (PageError(page)) diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index b905ae37f912..034fcfd4e330 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -68,13 +68,11 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key) } ret = -EIO; - wait_on_page_locked(page); - buf = kmap(page); - if (!PageUptodate(page)) - goto out_free; if (PageError(page)) goto out_free; + buf = kmap(page); + /* examine the symlink's contents */ size = vnode->status.size; _debug("symlink to %*.*s", (int) size, (int) size, buf); @@ -91,8 +89,8 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key) ret = 0; -out_free: kunmap(page); +out_free: page_cache_release(page); out: _leave(" = %d", ret); @@ -171,8 +169,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) } ret = -EIO; - wait_on_page_locked(page); - if (!PageUptodate(page) || PageError(page)) + if (PageError(page)) goto error; buf = kmap(page); diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index facd0c89be8f..3d194a2be3f5 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -180,7 +180,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i struct page *page = NULL; if (blocknr + i < devsize) { - page = read_mapping_page(mapping, blocknr + i, NULL); + page = read_mapping_page_async(mapping, blocknr + i, + NULL); /* synchronous error? 
*/ if (IS_ERR(page)) page = NULL; diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index b731b09499cb..0770c4b66f53 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -46,7 +46,6 @@ struct kmem_cache *ecryptfs_lower_page_cache; */ static struct page *ecryptfs_get1page(struct file *file, int index) { - struct page *page; struct dentry *dentry; struct inode *inode; struct address_space *mapping; @@ -54,14 +53,7 @@ static struct page *ecryptfs_get1page(struct file *file, int index) dentry = file->f_path.dentry; inode = dentry->d_inode; mapping = inode->i_mapping; - page = read_cache_page(mapping, index, - (filler_t *)mapping->a_ops->readpage, - (void *)file); - if (IS_ERR(page)) - goto out; - wait_on_page_locked(page); -out: - return page; + return read_mapping_page(mapping, index, (void *)file); } static @@ -233,7 +225,6 @@ int ecryptfs_do_readpage(struct file *file, struct page *page, ecryptfs_printk(KERN_ERR, "Error reading from page cache\n"); goto out; } - wait_on_page_locked(lower_page); page_data = kmap_atomic(page, KM_USER0); lower_page_data = kmap_atomic(lower_page, KM_USER1); memcpy(page_data, lower_page_data, PAGE_CACHE_SIZE); diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index e89bfc8cf957..1d1e7e30d70e 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -161,10 +161,7 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n) struct address_space *mapping = dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) { - wait_on_page_locked(page); kmap(page); - if (!PageUptodate(page)) - goto fail; if (!PageChecked(page)) ext2_check_page(page); if (PageError(page)) diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c index decac62efe57..ed8f0b0dd880 100644 --- a/fs/freevxfs/vxfs_subr.c +++ b/fs/freevxfs/vxfs_subr.c @@ -74,10 +74,7 @@ vxfs_get_page(struct address_space *mapping, u_long n) pp = read_mapping_page(mapping, n, NULL); if (!IS_ERR(pp)) { - wait_on_page_locked(pp); kmap(pp); - if (!PageUptodate(pp)) - goto fail; /** if (!PageChecked(pp)) **/ /** vxfs_check_page(pp); **/ if (PageError(pp)) diff --git a/fs/minix/dir.c b/fs/minix/dir.c index cb4cb571fddf..e207cbe70951 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -65,7 +65,6 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n) struct address_space *mapping = dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) { - wait_on_page_locked(page); kmap(page); if (!PageUptodate(page)) goto fail; diff --git a/fs/namei.c b/fs/namei.c index 880052cadbcd..94b2f60aec22 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2671,19 +2671,9 @@ static char *page_getlink(struct dentry * dentry, struct page **ppage) struct address_space *mapping = dentry->d_inode->i_mapping; page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) - goto sync_fail; - wait_on_page_locked(page); - if (!PageUptodate(page)) - goto async_fail; + return (char*)page; *ppage = page; return kmap(page); - -async_fail: - page_cache_release(page); - return ERR_PTR(-EIO); - -sync_fail: - return (char*)page; } int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index e59fd31c9a22..625d8e5fb39d 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -334,8 +334,6 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc) status = PTR_ERR(page); goto out; } - if (!PageUptodate(page)) - goto read_error; /* NOTE: Someone else may have changed the READDIRPLUS flag */ desc->page = page; @@ -349,9 +347,6 
@@ int find_dirent_page(nfs_readdir_descriptor_t *desc) out: dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __FUNCTION__, status); return status; - read_error: - page_cache_release(page); - return -EIO; } /* diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c index f4a0548b9ce8..bc2821331c29 100644 --- a/fs/nfs/symlink.c +++ b/fs/nfs/symlink.c @@ -61,15 +61,9 @@ static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd) err = page; goto read_failed; } - if (!PageUptodate(page)) { - err = ERR_PTR(-EIO); - goto getlink_read_error; - } nd_set_link(nd, kmap(page)); return page; -getlink_read_error: - page_cache_release(page); read_failed: nd_set_link(nd, err); return NULL; diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h index 9393f4b1e298..caecc58f529c 100644 --- a/fs/ntfs/aops.h +++ b/fs/ntfs/aops.h @@ -89,9 +89,8 @@ static inline struct page *ntfs_map_page(struct address_space *mapping, struct page *page = read_mapping_page(mapping, index, NULL); if (!IS_ERR(page)) { - wait_on_page_locked(page); kmap(page); - if (PageUptodate(page) && !PageError(page)) + if (!PageError(page)) return page; ntfs_unmap_page(page); return ERR_PTR(-EIO); diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index 7659cc192995..1c08fefe487a 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -2532,14 +2532,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) page = read_mapping_page(mapping, idx, NULL); if (IS_ERR(page)) { ntfs_error(vol->sb, "Failed to read first partial " - "page (sync error, index 0x%lx).", idx); - return PTR_ERR(page); - } - wait_on_page_locked(page); - if (unlikely(!PageUptodate(page))) { - ntfs_error(vol->sb, "Failed to read first partial page " - "(async error, index 0x%lx).", idx); - page_cache_release(page); + "page (error, index 0x%lx).", idx); return PTR_ERR(page); } /* @@ -2602,14 +2595,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) page = read_mapping_page(mapping, idx, NULL); if (IS_ERR(page)) { ntfs_error(vol->sb, "Failed to read last partial page " - "(sync error, index 0x%lx).", idx); - return PTR_ERR(page); - } - wait_on_page_locked(page); - if (unlikely(!PageUptodate(page))) { - ntfs_error(vol->sb, "Failed to read last partial page " - "(async error, index 0x%lx).", idx); - page_cache_release(page); + "(error, index 0x%lx).", idx); return PTR_ERR(page); } kaddr = kmap_atomic(page, KM_USER0); diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index d69c4595ccd0..dbbac5593106 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -236,8 +236,7 @@ do_non_resident_extend: err = PTR_ERR(page); goto init_err_out; } - wait_on_page_locked(page); - if (unlikely(!PageUptodate(page) || PageError(page))) { + if (unlikely(PageError(page))) { page_cache_release(page); err = -EIO; goto init_err_out; diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 1594c90b7164..2ddde534db0a 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -2471,7 +2471,6 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) s64 nr_free = vol->nr_clusters; u32 *kaddr; struct address_space *mapping = vol->lcnbmp_ino->i_mapping; - filler_t *readpage = (filler_t*)mapping->a_ops->readpage; struct page *page; pgoff_t index, max_index; @@ -2494,24 +2493,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) * Read the page from page cache, getting it from backing store * if necessary, and increment the use count. 
*/ - page = read_cache_page(mapping, index, (filler_t*)readpage, - NULL); + page = read_mapping_page(mapping, index, NULL); /* Ignore pages which errored synchronously. */ if (IS_ERR(page)) { - ntfs_debug("Sync read_cache_page() error. Skipping " + ntfs_debug("read_mapping_page() error. Skipping " "page (index 0x%lx).", index); nr_free -= PAGE_CACHE_SIZE * 8; continue; } - wait_on_page_locked(page); - /* Ignore pages which errored asynchronously. */ - if (!PageUptodate(page)) { - ntfs_debug("Async read_cache_page() error. Skipping " - "page (index 0x%lx).", index); - page_cache_release(page); - nr_free -= PAGE_CACHE_SIZE * 8; - continue; - } kaddr = (u32*)kmap_atomic(page, KM_USER0); /* * For each 4 bytes, subtract the number of set bits. If this @@ -2562,7 +2551,6 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, { u32 *kaddr; struct address_space *mapping = vol->mftbmp_ino->i_mapping; - filler_t *readpage = (filler_t*)mapping->a_ops->readpage; struct page *page; pgoff_t index; @@ -2576,21 +2564,11 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, * Read the page from page cache, getting it from backing store * if necessary, and increment the use count. */ - page = read_cache_page(mapping, index, (filler_t*)readpage, - NULL); + page = read_mapping_page(mapping, index, NULL); /* Ignore pages which errored synchronously. */ if (IS_ERR(page)) { - ntfs_debug("Sync read_cache_page() error. Skipping " - "page (index 0x%lx).", index); - nr_free -= PAGE_CACHE_SIZE * 8; - continue; - } - wait_on_page_locked(page); - /* Ignore pages which errored asynchronously. */ - if (!PageUptodate(page)) { - ntfs_debug("Async read_cache_page() error. Skipping " + ntfs_debug("read_mapping_page() error. Skipping " "page (index 0x%lx).", index); - page_cache_release(page); nr_free -= PAGE_CACHE_SIZE * 8; continue; } diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 40dc1a51f4a9..7134007ba22f 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -67,16 +67,9 @@ static char *ocfs2_page_getlink(struct dentry * dentry, page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) goto sync_fail; - wait_on_page_locked(page); - if (!PageUptodate(page)) - goto async_fail; *ppage = page; return kmap(page); -async_fail: - page_cache_release(page); - return ERR_PTR(-EIO); - sync_fail: return (char*)page; } diff --git a/fs/partitions/check.c b/fs/partitions/check.c index f01572fca02f..6b9dae3f0e6c 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -569,9 +569,6 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)), NULL); if (!IS_ERR(page)) { - wait_on_page_locked(page); - if (!PageUptodate(page)) - goto fail; if (PageError(page)) goto fail; p->v = page; diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 2cac56210e2b..bf6e58214538 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -410,11 +410,7 @@ static struct page *reiserfs_get_page(struct inode *dir, unsigned long n) mapping_set_gfp_mask(mapping, GFP_NOFS); page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) { - wait_on_page_locked(page); kmap(page); - if (!PageUptodate(page)) - goto fail; - if (PageError(page)) goto fail; } diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index ebf7007fa161..e566b387fcf9 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c @@ -54,17 +54,9 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n) { struct address_space *mapping = 
dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); - if (!IS_ERR(page)) { - wait_on_page_locked(page); + if (!IS_ERR(page)) kmap(page); - if (!PageUptodate(page)) - goto fail; - } return page; - -fail: - dir_put_page(page); - return ERR_PTR(-EIO); } static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir) diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 4890ddf1518e..4fb8b2e077ee 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -180,13 +180,9 @@ fail: static struct page *ufs_get_page(struct inode *dir, unsigned long n) { struct address_space *mapping = dir->i_mapping; - struct page *page = read_cache_page(mapping, n, - (filler_t*)mapping->a_ops->readpage, NULL); + struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) { - wait_on_page_locked(page); kmap(page); - if (!PageUptodate(page)) - goto fail; if (!PageChecked(page)) ufs_check_page(page); if (PageError(page)) diff --git a/fs/ufs/util.c b/fs/ufs/util.c index 17437574f79c..84357f1ff0ec 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c @@ -251,13 +251,11 @@ struct page *ufs_get_locked_page(struct address_space *mapping, page = find_lock_page(mapping, index); if (!page) { - page = read_cache_page(mapping, index, - (filler_t*)mapping->a_ops->readpage, - NULL); + page = read_mapping_page(mapping, index, NULL); if (IS_ERR(page)) { printk(KERN_ERR "ufs_change_blocknr: " - "read_cache_page error: ino %lu, index: %lu\n", + "read_mapping_page error: ino %lu, index: %lu\n", mapping->host->i_ino, index); goto out; } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 7a8dcb82a699..b4def5e083ed 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -95,12 +95,23 @@ static inline struct page *grab_cache_page(struct address_space *mapping, unsign extern struct page * grab_cache_page_nowait(struct address_space *mapping, unsigned long index); +extern struct page * read_cache_page_async(struct address_space *mapping, + unsigned long index, filler_t *filler, + void *data); extern struct page * read_cache_page(struct address_space *mapping, unsigned long index, filler_t *filler, void *data); extern int read_cache_pages(struct address_space *mapping, struct list_head *pages, filler_t *filler, void *data); +static inline struct page *read_mapping_page_async( + struct address_space *mapping, + unsigned long index, void *data) +{ + filler_t *filler = (filler_t *)mapping->a_ops->readpage; + return read_cache_page_async(mapping, index, filler, data); +} + static inline struct page *read_mapping_page(struct address_space *mapping, unsigned long index, void *data) { diff --git a/mm/filemap.c b/mm/filemap.c index 5dfc093ceb3d..070e7547d5b5 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1726,7 +1726,7 @@ int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) EXPORT_SYMBOL(generic_file_mmap); EXPORT_SYMBOL(generic_file_readonly_mmap); -static inline struct page *__read_cache_page(struct address_space *mapping, +static struct page *__read_cache_page(struct address_space *mapping, unsigned long index, int (*filler)(void *,struct page*), void *data) @@ -1763,17 +1763,11 @@ repeat: return page; } -/** - * read_cache_page - read into page cache, fill it if needed - * @mapping: the page's address_space - * @index: the page index - * @filler: function to perform the read - * @data: destination for read data - * - * Read into the page cache. If a page already exists, - * and PageUptodate() is not set, try to fill the page. 
+/* + * Same as read_cache_page, but don't wait for page to become unlocked + * after submitting it to the filler. */ -struct page *read_cache_page(struct address_space *mapping, +struct page *read_cache_page_async(struct address_space *mapping, unsigned long index, int (*filler)(void *,struct page*), void *data) @@ -1804,6 +1798,39 @@ retry: page_cache_release(page); page = ERR_PTR(err); } + out: + mark_page_accessed(page); + return page; +} +EXPORT_SYMBOL(read_cache_page_async); + +/** + * read_cache_page - read into page cache, fill it if needed + * @mapping: the page's address_space + * @index: the page index + * @filler: function to perform the read + * @data: destination for read data + * + * Read into the page cache. If a page already exists, and PageUptodate() is + * not set, try to fill the page then wait for it to become unlocked. + * + * If the page does not get brought uptodate, return -EIO. + */ +struct page *read_cache_page(struct address_space *mapping, + unsigned long index, + int (*filler)(void *,struct page*), + void *data) +{ + struct page *page; + + page = read_cache_page_async(mapping, index, filler, data); + if (IS_ERR(page)) + goto out; + wait_on_page_locked(page); + if (!PageUptodate(page)) { + page_cache_release(page); + page = ERR_PTR(-EIO); + } out: return page; } diff --git a/mm/swapfile.c b/mm/swapfile.c index a2d9bb4e80df..acc172cbe3aa 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1531,9 +1531,6 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) error = PTR_ERR(page); goto bad_swap; } - wait_on_page_locked(page); - if (!PageUptodate(page)) - goto bad_swap; kmap(page); swap_header = page_address(page); -- cgit v1.2.3 From ac267728f13c55017ed5ee243c9c3166e27ab929 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Sun, 6 May 2007 14:49:12 -0700 Subject: mm/slab.c: proper prototypes Add proper prototypes in include/linux/slab.h. Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/proc_misc.c | 2 -- include/linux/slab.h | 3 +++ 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index e2c4c0a5c90d..75ec6523d29a 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -398,8 +398,6 @@ static const struct file_operations proc_modules_operations = { #endif #ifdef CONFIG_SLAB -extern struct seq_operations slabinfo_op; -extern ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); static int slabinfo_open(struct inode *inode, struct file *file) { return seq_open(file, &slabinfo_op); diff --git a/include/linux/slab.h b/include/linux/slab.h index 2f8f60ff294a..f9ed9346bfd6 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -219,6 +219,9 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); #endif /* DEBUG_SLAB */ +extern const struct seq_operations slabinfo_op; +ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); + #endif /* __KERNEL__ */ #endif /* _LINUX_SLAB_H */ -- cgit v1.2.3 From 14e072984179d3d421bf9ab75cc67e0961742841 Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Sun, 6 May 2007 14:49:14 -0700 Subject: add pfn_valid_within helper for sub-MAX_ORDER hole detection Generally we work under the assumption that memory the mem_map array is contigious and valid out to MAX_ORDER_NR_PAGES block of pages, ie. that if we have validated any page within this MAX_ORDER_NR_PAGES block we need not check any other. 
This is not true when CONFIG_HOLES_IN_ZONE is set and we must check each and every reference we make from a pfn. Add a pfn_valid_within() helper which should be used when scanning pages within a MAX_ORDER_NR_PAGES block when we have already checked the validility of the block normally with pfn_valid(). This can then be optimised away when we do not have holes within a MAX_ORDER_NR_PAGES block of pages. Signed-off-by: Andy Whitcroft Acked-by: Mel Gorman Acked-by: Bob Picco Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 12 ++++++++++++ mm/page_alloc.c | 8 ++------ 2 files changed, 14 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ee9e3143df4f..2f1544e83042 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -784,6 +784,18 @@ void sparse_init(void); void memory_present(int nid, unsigned long start, unsigned long end); unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); +/* + * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we + * need to check pfn validility within that MAX_ORDER_NR_PAGES block. + * pfn_valid_within() should be used in this case; we optimise this away + * when we have no holes within a MAX_ORDER_NR_PAGES block. + */ +#ifdef CONFIG_HOLES_IN_ZONE +#define pfn_valid_within(pfn) pfn_valid(pfn) +#else +#define pfn_valid_within(pfn) (1) +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _LINUX_MMZONE_H */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 019ceda6a8b6..f564717d22f3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -156,10 +156,8 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page) static int page_is_consistent(struct zone *zone, struct page *page) { -#ifdef CONFIG_HOLES_IN_ZONE - if (!pfn_valid(page_to_pfn(page))) + if (!pfn_valid_within(page_to_pfn(page))) return 0; -#endif if (zone != page_zone(page)) return 0; @@ -346,10 +344,8 @@ __find_combined_index(unsigned long page_idx, unsigned int order) static inline int page_is_buddy(struct page *page, struct page *buddy, int order) { -#ifdef CONFIG_HOLES_IN_ZONE - if (!pfn_valid(page_to_pfn(buddy))) + if (!pfn_valid_within(page_to_pfn(buddy))) return 0; -#endif if (page_zone_id(page) != page_zone_id(buddy)) return 0; -- cgit v1.2.3 From 9490991482a2091a828d997adbc088e24c310a4d Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sun, 6 May 2007 14:49:17 -0700 Subject: Add unitialized_var() macro for suppressing gcc warnings Introduce a macro for suppressing gcc from generating a warning about a probable uninitialized state of a variable. Example: - spinlock_t *ptl; + spinlock_t *uninitialized_var(ptl); Not a happy solution, but those warnings are obnoxious. - Using the usual pointlessly-set-it-to-zero approach wastes several bytes of text. - Using a macro means we can (hopefully) do something else if gcc changes cause the `x = x' hack to stop working - Using a macro means that people who are worried about hiding true bugs can easily turn it off. 
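As a hedged illustration of the false positive the macro targets (hypothetical function, not from the patch): the variable is only read on paths where it was already written, but gcc cannot always prove that and may warn; since uninitialized_var(x) expands to "x = x", the warning pass sees an initialization while no extra code is generated.

    static int example_first_positive(const int *tbl, int n)
    {
            int uninitialized_var(val);     /* expands to: int val = val; */
            int i, found = 0;

            for (i = 0; i < n; i++) {
                    if (tbl[i] > 0) {
                            val = tbl[i];
                            found = 1;
                            break;
                    }
            }
            /* val is only used when found is set, yet gcc may still warn. */
            return found ? val : -1;
    }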
Signed-off-by: Borislav Petkov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compiler-gcc3.h | 6 ++++++ include/linux/compiler-gcc4.h | 6 ++++++ include/linux/compiler-intel.h | 2 ++ mm/memory.c | 2 +- 4 files changed, 15 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h index 1698b845761f..ecd621fd27d2 100644 --- a/include/linux/compiler-gcc3.h +++ b/include/linux/compiler-gcc3.h @@ -13,4 +13,10 @@ #define __must_check __attribute__((warn_unused_result)) #endif +/* + * A trick to suppress uninitialized variable warning without generating any + * code + */ +#define uninitialized_var(x) x = x + #define __always_inline inline __attribute__((always_inline)) diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 6f5cc6f0e7a6..fd0cc7c4a636 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -16,3 +16,9 @@ #define __must_check __attribute__((warn_unused_result)) #define __compiler_offsetof(a,b) __builtin_offsetof(a,b) #define __always_inline inline __attribute__((always_inline)) + +/* + * A trick to suppress uninitialized variable warning without generating any + * code + */ +#define uninitialized_var(x) x = x diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 1d1c3ceaff4e..a436eea43912 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -22,3 +22,5 @@ (typeof(ptr)) (__ptr + (off)); }) #endif + +#define uninitialized_var(x) x diff --git a/mm/memory.c b/mm/memory.c index c252aae544e9..1d647ab0ee72 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1455,7 +1455,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, pte_t *pte; int err; struct page *pmd_page; - spinlock_t *ptl = ptl; /* Suppress gcc warning */ + spinlock_t *uninitialized_var(ptl); pte = (mm == &init_mm) ? pte_alloc_kernel(pmd, addr) : -- cgit v1.2.3 From b813e931b4c8235bb42e301096ea97dbdee3e8fe Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Sun, 6 May 2007 14:49:24 -0700 Subject: smaps: add clear_refs file to clear reference Adds /proc/pid/clear_refs. When any non-zero number is written to this file, pte_mkold() and ClearPageReferenced() is called for each pte and its corresponding page, respectively, in that task's VMAs. This file is only writable by the user who owns the task. It is now possible to measure _approximately_ how much memory a task is using by clearing the reference bits with echo 1 > /proc/pid/clear_refs and checking the reference count for each VMA from the /proc/pid/smaps output at a measured time interval. For example, to observe the approximate change in memory footprint for a task, write a script that clears the references (echo 1 > /proc/pid/clear_refs), sleeps, and then greps for Pgs_Referenced and extracts the size in kB. Add the sizes for each VMA together for the total referenced footprint. Moments later, repeat the process and observe the difference. For example, using an efficient Mozilla: accumulated time referenced memory ---------------- ----------------- 0 s 408 kB 1 s 408 kB 2 s 556 kB 3 s 1028 kB 4 s 872 kB 5 s 1956 kB 6 s 416 kB 7 s 1560 kB 8 s 2336 kB 9 s 1044 kB 10 s 416 kB This is a valuable tool to get an approximate measurement of the memory footprint for a task. 
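The measurement loop described above can also be driven from a small userspace helper; the sketch below is an assumption (helper names are made up) and relies on the smaps field being printed as "Referenced:" after this patch: write a non-zero value to clear_refs, wait, then sum the Referenced: lines from smaps.

    #include <stdio.h>
    #include <sys/types.h>

    static void example_clear_refs(pid_t pid)
    {
            char path[64];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
            f = fopen(path, "w");
            if (f) {
                    fputs("1\n", f);        /* any non-zero number clears the bits */
                    fclose(f);
            }
    }

    static long example_referenced_kb(pid_t pid)
    {
            char path[64], line[256];
            long kb, total = 0;
            FILE *f;

            snprintf(path, sizeof(path), "/proc/%d/smaps", (int)pid);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            while (fgets(line, sizeof(line), f))
                    if (sscanf(line, "Referenced: %ld kB", &kb) == 1)
                            total += kb;
            fclose(f);
            return total;
    }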
Cc: Hugh Dickins Cc: Paul Mundt Cc: Christoph Lameter Signed-off-by: David Rientjes [akpm@linux-foundation.org: build fixes] [mpm@selenic.com: rename for_each_pmd] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/proc.txt | 31 +++++++-------- fs/proc/base.c | 36 +++++++++++++++++ fs/proc/task_mmu.c | 79 ++++++++++++++++++++++++++++++-------- include/linux/proc_fs.h | 1 + 4 files changed, 116 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 7aaf09b86a55..3f4b226572e7 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -122,21 +122,22 @@ subdirectory has the entries listed in Table 1-1. Table 1-1: Process specific entries in /proc .............................................................................. - File Content - cmdline Command line arguments - cpu Current and last cpu in which it was executed (2.4)(smp) - cwd Link to the current working directory - environ Values of environment variables - exe Link to the executable of this process - fd Directory, which contains all file descriptors - maps Memory maps to executables and library files (2.4) - mem Memory held by this process - root Link to the root directory of this process - stat Process status - statm Process memory status information - status Process status in human readable form - wchan If CONFIG_KALLSYMS is set, a pre-decoded wchan - smaps Extension based on maps, presenting the rss size for each mapped file + File Content + clear_refs Clears page referenced bits shown in smaps output + cmdline Command line arguments + cpu Current and last cpu in which it was executed (2.4)(smp) + cwd Link to the current working directory + environ Values of environment variables + exe Link to the executable of this process + fd Directory, which contains all file descriptors + maps Memory maps to executables and library files (2.4) + mem Memory held by this process + root Link to the root directory of this process + stat Process status + statm Process memory status information + status Process status in human readable form + wchan If CONFIG_KALLSYMS is set, a pre-decoded wchan + smaps Extension based on maps, the rss size for each mapped file .............................................................................. 
For example, to get the status information of a process, all you have to do is diff --git a/fs/proc/base.c b/fs/proc/base.c index 989af5e55d1b..ec158dd02b3a 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -715,6 +715,40 @@ static const struct file_operations proc_oom_adjust_operations = { .write = oom_adjust_write, }; +static ssize_t clear_refs_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct task_struct *task; + char buffer[PROC_NUMBUF], *end; + struct mm_struct *mm; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) + return -EFAULT; + if (!simple_strtol(buffer, &end, 0)) + return -EINVAL; + if (*end == '\n') + end++; + task = get_proc_task(file->f_path.dentry->d_inode); + if (!task) + return -ESRCH; + mm = get_task_mm(task); + if (mm) { + clear_refs_smap(mm); + mmput(mm); + } + put_task_struct(task); + if (end - buffer == 0) + return -EIO; + return end - buffer; +} + +static struct file_operations proc_clear_refs_operations = { + .write = clear_refs_write, +}; + #ifdef CONFIG_AUDITSYSCALL #define TMPBUFLEN 21 static ssize_t proc_loginuid_read(struct file * file, char __user * buf, @@ -1851,6 +1885,7 @@ static struct pid_entry tgid_base_stuff[] = { REG("mounts", S_IRUGO, mounts), REG("mountstats", S_IRUSR, mountstats), #ifdef CONFIG_MMU + REG("clear_refs", S_IWUSR, clear_refs), REG("smaps", S_IRUGO, smaps), #endif #ifdef CONFIG_SECURITY @@ -2132,6 +2167,7 @@ static struct pid_entry tid_base_stuff[] = { LNK("exe", exe), REG("mounts", S_IRUGO, mounts), #ifdef CONFIG_MMU + REG("clear_refs", S_IWUSR, clear_refs), REG("smaps", S_IRUGO, smaps), #endif #ifdef CONFIG_SECURITY diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 199088ee969b..4008c060f7ef 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -195,7 +195,7 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats "Shared_Dirty: %8lu kB\n" "Private_Clean: %8lu kB\n" "Private_Dirty: %8lu kB\n" - "Pgs_Referenced: %8lu kB\n", + "Referenced: %8lu kB\n", (vma->vm_end - vma->vm_start) >> 10, mss->resident >> 10, mss->shared_clean >> 10, @@ -214,9 +214,9 @@ static int show_map(struct seq_file *m, void *v) return show_map_internal(m, v, NULL); } -static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long addr, unsigned long end, - void *private) +static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, unsigned long end, + void *private) { struct mem_size_stats *mss = private; pte_t *pte, ptent; @@ -254,8 +254,34 @@ static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd, cond_resched(); } -static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud, - unsigned long addr, unsigned long end) +static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, unsigned long end, + void *private) +{ + pte_t *pte, ptent; + spinlock_t *ptl; + struct page *page; + + pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + for (; addr != end; pte++, addr += PAGE_SIZE) { + ptent = *pte; + if (!pte_present(ptent)) + continue; + + page = vm_normal_page(vma, addr, ptent); + if (!page) + continue; + + /* Clear accessed and referenced bits. 
*/ + ptep_test_and_clear_young(vma, addr, pte); + ClearPageReferenced(page); + } + pte_unmap_unlock(pte - 1, ptl); + cond_resched(); +} + +static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud, + unsigned long addr, unsigned long end) { pmd_t *pmd; unsigned long next; @@ -269,8 +295,8 @@ static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud, } } -static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd, - unsigned long addr, unsigned long end) +static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd, + unsigned long addr, unsigned long end) { pud_t *pud; unsigned long next; @@ -280,15 +306,24 @@ static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd, next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; - for_each_pmd_in_pud(walker, pud, addr, next); + walk_pmd_range(walker, pud, addr, next); } } -static inline void for_each_pmd(struct vm_area_struct *vma, - void (*action)(struct vm_area_struct *, pmd_t *, - unsigned long, unsigned long, - void *), - void *private) +/* + * walk_page_range - walk the page tables of a VMA with a callback + * @vma - VMA to walk + * @action - callback invoked for every bottom-level (PTE) page table + * @private - private data passed to the callback function + * + * Recursively walk the page table for the memory area in a VMA, calling + * a callback for every bottom-level (PTE) page table. + */ +static inline void walk_page_range(struct vm_area_struct *vma, + void (*action)(struct vm_area_struct *, + pmd_t *, unsigned long, + unsigned long, void *), + void *private) { unsigned long addr = vma->vm_start; unsigned long end = vma->vm_end; @@ -305,7 +340,7 @@ static inline void for_each_pmd(struct vm_area_struct *vma, next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; - for_each_pud_in_pgd(&walker, pgd, addr, next); + walk_pud_range(&walker, pgd, addr, next); } } @@ -316,10 +351,22 @@ static int show_smap(struct seq_file *m, void *v) memset(&mss, 0, sizeof mss); if (vma->vm_mm && !is_vm_hugetlb_page(vma)) - for_each_pmd(vma, smaps_one_pmd, &mss); + walk_page_range(vma, smaps_pte_range, &mss); return show_map_internal(m, v, &mss); } +void clear_refs_smap(struct mm_struct *mm) +{ + struct vm_area_struct *vma; + + down_read(&mm->mmap_sem); + for (vma = mm->mmap; vma; vma = vma->vm_next) + if (vma->vm_mm && !is_vm_hugetlb_page(vma)) + walk_page_range(vma, clear_refs_pte_range, NULL); + flush_tlb_mm(mm); + up_read(&mm->mmap_sem); +} + static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_maps_private *priv = m->private; diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index be4652a0545a..f4f7a63cae1f 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -104,6 +104,7 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir); unsigned long task_vsize(struct mm_struct *); int task_statm(struct mm_struct *, int *, int *, int *, int *); char *task_mem(struct mm_struct *, char *); +void clear_refs_smap(struct mm_struct *mm); extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent); -- cgit v1.2.3 From ec0f16372277052a29a6c17527c6cae5e898b3fd Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sun, 6 May 2007 14:49:25 -0700 Subject: readahead: improve heuristic detecting sequential reads Introduce ra.offset and store in it an offset where the previous read ended. 
This way we can detect whether reads are really sequential (and thus we should not mark the page as accessed repeatedly) or whether they are random and just happen to be in the same page (and the page should really be marked accessed again). Signed-off-by: Jan Kara Acked-by: Nick Piggin Cc: WU Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/fs.h | 1 + mm/filemap.c | 9 ++++++--- mm/readahead.c | 3 +++ 3 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 7c0077f06e24..0949e243b8b9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -702,6 +702,7 @@ struct file_ra_state { unsigned long ra_pages; /* Maximum readahead window */ unsigned long mmap_hit; /* Cache hit stat for mmap accesses */ unsigned long mmap_miss; /* Cache miss stat for mmap accesses */ + unsigned int offset; /* Offset where last read() ended in a page */ }; #define RA_FLAG_MISS 0x01 /* a cache miss occured against this file */ #define RA_FLAG_INCACHE 0x02 /* file is already in cache */ diff --git a/mm/filemap.c b/mm/filemap.c index cbea95a25283..07f5b77114a3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -868,6 +868,7 @@ void do_generic_mapping_read(struct address_space *mapping, unsigned long last_index; unsigned long next_index; unsigned long prev_index; + unsigned int prev_offset; loff_t isize; struct page *cached_page; int error; @@ -877,6 +878,7 @@ void do_generic_mapping_read(struct address_space *mapping, index = *ppos >> PAGE_CACHE_SHIFT; next_index = index; prev_index = ra.prev_page; + prev_offset = ra.offset; last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; offset = *ppos & ~PAGE_CACHE_MASK; @@ -924,10 +926,10 @@ page_ok: flush_dcache_page(page); /* - * When (part of) the same page is read multiple times - * in succession, only mark it as accessed the first time. + * When a sequential read accesses a page several times, + * only mark it as accessed the first time. */ - if (prev_index != index) + if (prev_index != index || offset != prev_offset) mark_page_accessed(page); prev_index = index; @@ -945,6 +947,7 @@ page_ok: offset += ret; index += offset >> PAGE_CACHE_SHIFT; offset &= ~PAGE_CACHE_MASK; + prev_offset = ra.offset = offset; page_cache_release(page); if (ret == nr && desc->count) diff --git a/mm/readahead.c b/mm/readahead.c index 93d9ee692fd8..0a6fed9d365c 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -207,6 +207,8 @@ out: * If page_cache_readahead sees that it is again being called for * a page which it just looked at, it can return immediately without * making any state changes. + * offset: Offset in the prev_page where the last read ended - used for + * detection of sequential file reading. * ahead_start, * ahead_size: Together, these form the "ahead window". * ra_pages: The externally controlled max readahead for this fd. @@ -473,6 +475,7 @@ page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra, /* Note that prev_page == -1 if it is a first read */ sequential = (offset == ra->prev_page + 1); ra->prev_page = offset; + ra->offset = 0; max = get_max_readahead(ra); newsize = min(req_size, max); -- cgit v1.2.3 From 6ce745ed39d35f9d547d00d406db2be7c6c175b3 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sun, 6 May 2007 14:49:26 -0700 Subject: readahead: code cleanup Rename file_ra_state.prev_page to prev_index and file_ra_state.offset to prev_offset. 
Also update of prev_index in do_generic_mapping_read() is now moved close to the update of prev_offset. [wfg@mail.ustc.edu.cn: fix it] Signed-off-by: Jan Kara Cc: Nick Piggin Cc: WU Fengguang Signed-off-by: Fengguang Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/fs.h | 4 ++-- mm/filemap.c | 7 ++++--- mm/readahead.c | 30 +++++++++++++++--------------- 3 files changed, 21 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 0949e243b8b9..55a74ffa7e3b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -696,13 +696,13 @@ struct file_ra_state { unsigned long size; unsigned long flags; /* ra flags RA_FLAG_xxx*/ unsigned long cache_hit; /* cache hit count*/ - unsigned long prev_page; /* Cache last read() position */ + unsigned long prev_index; /* Cache last read() position */ unsigned long ahead_start; /* Ahead window */ unsigned long ahead_size; unsigned long ra_pages; /* Maximum readahead window */ unsigned long mmap_hit; /* Cache hit stat for mmap accesses */ unsigned long mmap_miss; /* Cache miss stat for mmap accesses */ - unsigned int offset; /* Offset where last read() ended in a page */ + unsigned int prev_offset; /* Offset where last read() ended in a page */ }; #define RA_FLAG_MISS 0x01 /* a cache miss occured against this file */ #define RA_FLAG_INCACHE 0x02 /* file is already in cache */ diff --git a/mm/filemap.c b/mm/filemap.c index 07f5b77114a3..5631d6b2a62d 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -877,8 +877,8 @@ void do_generic_mapping_read(struct address_space *mapping, cached_page = NULL; index = *ppos >> PAGE_CACHE_SHIFT; next_index = index; - prev_index = ra.prev_page; - prev_offset = ra.offset; + prev_index = ra.prev_index; + prev_offset = ra.prev_offset; last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; offset = *ppos & ~PAGE_CACHE_MASK; @@ -947,7 +947,8 @@ page_ok: offset += ret; index += offset >> PAGE_CACHE_SHIFT; offset &= ~PAGE_CACHE_MASK; - prev_offset = ra.offset = offset; + prev_offset = offset; + ra.prev_offset = offset; page_cache_release(page); if (ret == nr && desc->count) diff --git a/mm/readahead.c b/mm/readahead.c index 0a6fed9d365c..9861e883fe57 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -37,7 +37,7 @@ void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) { ra->ra_pages = mapping->backing_dev_info->ra_pages; - ra->prev_page = -1; + ra->prev_index = -1; } EXPORT_SYMBOL_GPL(file_ra_state_init); @@ -202,19 +202,19 @@ out: * size: Number of pages in that read * Together, these form the "current window". * Together, start and size represent the `readahead window'. - * prev_page: The page which the readahead algorithm most-recently inspected. + * prev_index: The page which the readahead algorithm most-recently inspected. * It is mainly used to detect sequential file reading. * If page_cache_readahead sees that it is again being called for * a page which it just looked at, it can return immediately without * making any state changes. - * offset: Offset in the prev_page where the last read ended - used for + * offset: Offset in the prev_index where the last read ended - used for * detection of sequential file reading. * ahead_start, * ahead_size: Together, these form the "ahead window". * ra_pages: The externally controlled max readahead for this fd. * * When readahead is in the off state (size == 0), readahead is disabled. 
- * In this state, prev_page is used to detect the resumption of sequential I/O. + * In this state, prev_index is used to detect the resumption of sequential I/O. * * The readahead code manages two windows - the "current" and the "ahead" * windows. The intent is that while the application is walking the pages @@ -417,7 +417,7 @@ static int make_ahead_window(struct address_space *mapping, struct file *filp, ra->ahead_size = get_next_ra_size(ra); ra->ahead_start = ra->start + ra->size; - block = force || (ra->prev_page >= ra->ahead_start); + block = force || (ra->prev_index >= ra->ahead_start); ret = blockable_page_cache_readahead(mapping, filp, ra->ahead_start, ra->ahead_size, ra, block); @@ -469,13 +469,13 @@ page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra, * We avoid doing extra work and bogusly perturbing the readahead * window expansion logic. */ - if (offset == ra->prev_page && --req_size) + if (offset == ra->prev_index && --req_size) ++offset; - /* Note that prev_page == -1 if it is a first read */ - sequential = (offset == ra->prev_page + 1); - ra->prev_page = offset; - ra->offset = 0; + /* Note that prev_index == -1 if it is a first read */ + sequential = (offset == ra->prev_index + 1); + ra->prev_index = offset; + ra->prev_offset = 0; max = get_max_readahead(ra); newsize = min(req_size, max); @@ -484,7 +484,7 @@ page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra, if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE)) goto out; - ra->prev_page += newsize - 1; + ra->prev_index += newsize - 1; /* * Special case - first read at start of file. We'll assume it's @@ -540,18 +540,18 @@ page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra, * we get called back on the first page of the ahead window which * will allow us to submit more IO. */ - if (ra->prev_page >= ra->ahead_start) { + if (ra->prev_index >= ra->ahead_start) { ra->start = ra->ahead_start; ra->size = ra->ahead_size; make_ahead_window(mapping, filp, ra, 0); recheck: - /* prev_page shouldn't overrun the ahead window */ - ra->prev_page = min(ra->prev_page, + /* prev_index shouldn't overrun the ahead window */ + ra->prev_index = min(ra->prev_index, ra->ahead_start + ra->ahead_size - 1); } out: - return ra->prev_page + 1; + return ra->prev_index + 1; } EXPORT_SYMBOL_GPL(page_cache_readahead); -- cgit v1.2.3 From 81819f0fc8285a2a5a921c019e3e3d7b6169d225 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:49:36 -0700 Subject: SLUB core This is a new slab allocator which was motivated by the complexity of the existing code in mm/slab.c. It attempts to address a variety of concerns with the existing implementation. A. Management of object queues A particular concern was the complex management of the numerous object queues in SLAB. SLUB has no such queues. Instead we dedicate a slab for each allocating CPU and use objects from a slab directly instead of queueing them up. B. Storage overhead of object queues SLAB Object queues exist per node, per CPU. The alien cache queue even has a queue array that contain a queue for each processor on each node. For very large systems the number of queues and the number of objects that may be caught in those queues grows exponentially. On our systems with 1k nodes / processors we have several gigabytes just tied up for storing references to objects for those queues This does not include the objects that could be on those queues. 
One fears that the whole memory of the machine could one day be consumed by those queues. C. SLAB meta data overhead SLAB has overhead at the beginning of each slab. This means that data cannot be naturally aligned at the beginning of a slab block. SLUB keeps all meta data in the corresponding page_struct. Objects can be naturally aligned in the slab. E.g. a 128 byte object will be aligned at 128 byte boundaries and can fit tightly into a 4k page with no bytes left over. SLAB cannot do this. D. SLAB has a complex cache reaper SLUB does not need a cache reaper for UP systems. On SMP systems the per CPU slab may be pushed back onto the partial list but that operation is simple and does not require an iteration over a list of objects. SLAB expires per CPU, shared and alien object queues during cache reaping which may cause strange hold-offs. E. SLAB has complex NUMA policy layer support SLUB pushes NUMA policy handling into the page allocator. This means that allocation is coarser (SLUB does interleave on a page level) but that situation was also present before 2.6.13. SLAB's application of policies to individual slab objects allocated in SLAB is certainly a performance concern due to the frequent references to memory policies which may lead a sequence of objects to come from one node after another. SLUB will get a slab full of objects from one node and then will switch to the next. F. Reduction of the size of partial slab lists SLAB has per node partial lists. This means that over time a large number of partial slabs may accumulate on those lists. These can only be reused if allocations occur on specific nodes. SLUB has a global pool of partial slabs and will consume slabs from that pool to decrease fragmentation. G. Tunables SLAB has sophisticated tuning abilities for each slab cache. One can manipulate the queue sizes in detail. However, filling the queues still requires the use of the spin lock to check out slabs. SLUB has a global parameter (min_slab_order) for tuning. Increasing the minimum slab order can decrease the locking overhead. The bigger the slab order, the fewer movements of pages between per-CPU and partial lists occur, and the better SLUB will scale. G. Slab merging We often have slab caches with similar parameters. SLUB detects those on boot up and merges them into the corresponding general caches. This leads to more effective memory use. About 50% of all caches can be eliminated through slab merging. This will also decrease slab fragmentation because partially allocated slabs can be filled up again. Slab merging can be switched off by specifying slub_nomerge on boot up. Note that merging can expose heretofore unknown bugs in the kernel because corrupted objects may now be placed differently and corrupt differing neighboring objects. Enable sanity checks to find those. H. Diagnostics The current slab diagnostics are difficult to use and require a recompilation of the kernel. SLUB contains debugging code that is always available (but is kept out of the hot code paths). SLUB diagnostics can be enabled via the "slab_debug" option. Parameters can be specified to select a single or a group of slab caches for diagnostics. This means that the system is running with the usual performance and it is much more likely that race conditions can be reproduced. I. Resiliency If basic sanity checks are on then SLUB is capable of detecting common error conditions and recovering as well as possible to allow the system to continue. J.
Tracing Tracing can be enabled via the slab_debug=T, option during boot. SLUB will then protocol all actions on that slabcache and dump the object contents on free. K. On demand DMA cache creation. Generally DMA caches are not needed. If a kmalloc is used with __GFP_DMA then just create this single slabcache that is needed. For systems that have no ZONE_DMA requirement the support is completely eliminated. L. Performance increase Some benchmarks have shown speed improvements on kernbench in the range of 5-10%. The locking overhead of slub is based on the underlying base allocation size. If we can reliably allocate larger order pages then it is possible to increase slub performance much further. The anti-fragmentation patches may enable further performance increases. Tested on: i386 UP + SMP, x86_64 UP + SMP + NUMA emulation, IA64 NUMA + Simulator SLUB Boot options slub_nomerge Disable merging of slabs slub_min_order=x Require a minimum order for slab caches. This increases the managed chunk size and therefore reduces meta data and locking overhead. slub_min_objects=x Mininum objects per slab. Default is 8. slub_max_order=x Avoid generating slabs larger than order specified. slub_debug Enable all diagnostics for all caches slub_debug= Enable selective options for all caches slub_debug=, Enable selective options for a certain set of caches Available Debug options F Double Free checking, sanity and resiliency R Red zoning P Object / padding poisoning U Track last free / alloc T Trace all allocs / frees (only use for individual slabs). To use SLUB: Apply this patch and then select SLUB as the default slab allocator. [hugh@veritas.com: fix an oops-causing locking error] [akpm@linux-foundation.org: various stupid cleanups and small fixes] Signed-off-by: Christoph Lameter Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/frv/Kconfig | 4 + arch/i386/Kconfig | 4 + include/linux/mm_types.h | 17 +- include/linux/poison.h | 3 + include/linux/slab.h | 14 +- include/linux/slub_def.h | 201 +++ init/Kconfig | 53 +- mm/Makefile | 1 + mm/slub.c | 3144 ++++++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 3422 insertions(+), 19 deletions(-) create mode 100644 include/linux/slub_def.h create mode 100644 mm/slub.c (limited to 'include/linux') diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index cea237413aa2..eed694312a79 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -53,6 +53,10 @@ config ARCH_HAS_ILOG2_U64 bool default y +config ARCH_USES_SLAB_PAGE_STRUCT + bool + default y + mainmenu "Fujitsu FR-V Kernel Configuration" source "init/Kconfig" diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index a9af760c7e5f..64ad10f984a1 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -79,6 +79,10 @@ config ARCH_MAY_HAVE_PC_FDC bool default y +config ARCH_USES_SLAB_PAGE_STRUCT + bool + default y + config DMI bool default y diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index c3852fd4a1cc..e30687bad075 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -19,10 +19,16 @@ struct page { unsigned long flags; /* Atomic flags, some possibly * updated asynchronously */ atomic_t _count; /* Usage count, see below. */ - atomic_t _mapcount; /* Count of ptes mapped in mms, + union { + atomic_t _mapcount; /* Count of ptes mapped in mms, * to show when page is mapped * & limit reverse map searches. 
*/ + struct { /* SLUB uses */ + short unsigned int inuse; + short unsigned int offset; + }; + }; union { struct { unsigned long private; /* Mapping-private opaque data: @@ -43,8 +49,15 @@ struct page { #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS spinlock_t ptl; #endif + struct { /* SLUB uses */ + struct page *first_page; /* Compound pages */ + struct kmem_cache *slab; /* Pointer to slab */ + }; + }; + union { + pgoff_t index; /* Our offset within mapping. */ + void *freelist; /* SLUB: pointer to free object */ }; - pgoff_t index; /* Our offset within mapping. */ struct list_head lru; /* Pageout list, eg. active_list * protected by zone->lru_lock ! */ diff --git a/include/linux/poison.h b/include/linux/poison.h index 89580b764959..95f518b17684 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -18,6 +18,9 @@ #define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */ #define RED_ACTIVE 0x170FC2A5UL /* when obj is active */ +#define SLUB_RED_INACTIVE 0xbb +#define SLUB_RED_ACTIVE 0xcc + /* ...and for poisoning */ #define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ #define POISON_FREE 0x6b /* for use-after-free poisoning */ diff --git a/include/linux/slab.h b/include/linux/slab.h index f9ed9346bfd6..67425c277e12 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -32,6 +32,7 @@ typedef struct kmem_cache kmem_cache_t __deprecated; #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ +#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ /* Flags passed to a constructor functions */ #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */ @@ -42,7 +43,7 @@ typedef struct kmem_cache kmem_cache_t __deprecated; * struct kmem_cache related prototypes */ void __init kmem_cache_init(void); -extern int slab_is_available(void); +int slab_is_available(void); struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, @@ -95,9 +96,14 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) * the appropriate general cache at compile time. */ -#ifdef CONFIG_SLAB +#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB) +#ifdef CONFIG_SLUB +#include +#else #include +#endif /* !CONFIG_SLUB */ #else + /* * Fallback definitions for an allocator not wanting to provide * its own optimized kmalloc definitions (like SLOB). @@ -184,7 +190,7 @@ static inline void *__kmalloc_node(size_t size, gfp_t flags, int node) * allocator where we care about the real place the memory allocation * request comes from. */ -#ifdef CONFIG_DEBUG_SLAB +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) extern void *__kmalloc_track_caller(size_t, gfp_t, void*); #define kmalloc_track_caller(size, flags) \ __kmalloc_track_caller(size, flags, __builtin_return_address(0)) @@ -202,7 +208,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*); * standard allocator where we care about the real place the memory * allocation request comes from. 
*/ -#ifdef CONFIG_DEBUG_SLAB +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h new file mode 100644 index 000000000000..30b154ce7289 --- /dev/null +++ b/include/linux/slub_def.h @@ -0,0 +1,201 @@ +#ifndef _LINUX_SLUB_DEF_H +#define _LINUX_SLUB_DEF_H + +/* + * SLUB : A Slab allocator without object queues. + * + * (C) 2007 SGI, Christoph Lameter + */ +#include +#include +#include +#include + +struct kmem_cache_node { + spinlock_t list_lock; /* Protect partial list and nr_partial */ + unsigned long nr_partial; + atomic_long_t nr_slabs; + struct list_head partial; +}; + +/* + * Slab cache management. + */ +struct kmem_cache { + /* Used for retriving partial slabs etc */ + unsigned long flags; + int size; /* The size of an object including meta data */ + int objsize; /* The size of an object without meta data */ + int offset; /* Free pointer offset. */ + unsigned int order; + + /* + * Avoid an extra cache line for UP, SMP and for the node local to + * struct kmem_cache. + */ + struct kmem_cache_node local_node; + + /* Allocation and freeing of slabs */ + int objects; /* Number of objects in slab */ + int refcount; /* Refcount for slab cache destroy */ + void (*ctor)(void *, struct kmem_cache *, unsigned long); + void (*dtor)(void *, struct kmem_cache *, unsigned long); + int inuse; /* Offset to metadata */ + int align; /* Alignment */ + const char *name; /* Name (only for display!) */ + struct list_head list; /* List of slab caches */ + struct kobject kobj; /* For sysfs */ + +#ifdef CONFIG_NUMA + int defrag_ratio; + struct kmem_cache_node *node[MAX_NUMNODES]; +#endif + struct page *cpu_slab[NR_CPUS]; +}; + +/* + * Kmalloc subsystem. + */ +#define KMALLOC_SHIFT_LOW 3 + +#ifdef CONFIG_LARGE_ALLOCS +#define KMALLOC_SHIFT_HIGH 25 +#else +#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256 +#define KMALLOC_SHIFT_HIGH 20 +#else +#define KMALLOC_SHIFT_HIGH 18 +#endif +#endif + +/* + * We keep the general caches in an array of slab caches that are used for + * 2^x bytes of allocations. + */ +extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; + +/* + * Sorry that the following has to be that ugly but some versions of GCC + * have trouble with constant propagation and loops. 
+ */ +static inline int kmalloc_index(int size) +{ + if (size == 0) + return 0; + if (size > 64 && size <= 96) + return 1; + if (size > 128 && size <= 192) + return 2; + if (size <= 8) return 3; + if (size <= 16) return 4; + if (size <= 32) return 5; + if (size <= 64) return 6; + if (size <= 128) return 7; + if (size <= 256) return 8; + if (size <= 512) return 9; + if (size <= 1024) return 10; + if (size <= 2 * 1024) return 11; + if (size <= 4 * 1024) return 12; + if (size <= 8 * 1024) return 13; + if (size <= 16 * 1024) return 14; + if (size <= 32 * 1024) return 15; + if (size <= 64 * 1024) return 16; + if (size <= 128 * 1024) return 17; + if (size <= 256 * 1024) return 18; +#if KMALLOC_SHIFT_HIGH > 18 + if (size <= 512 * 1024) return 19; + if (size <= 1024 * 1024) return 20; +#endif +#if KMALLOC_SHIFT_HIGH > 20 + if (size <= 2 * 1024 * 1024) return 21; + if (size <= 4 * 1024 * 1024) return 22; + if (size <= 8 * 1024 * 1024) return 23; + if (size <= 16 * 1024 * 1024) return 24; + if (size <= 32 * 1024 * 1024) return 25; +#endif + return -1; + +/* + * What we really wanted to do and cannot do because of compiler issues is: + * int i; + * for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) + * if (size <= (1 << i)) + * return i; + */ +} + +/* + * Find the slab cache for a given combination of allocation flags and size. + * + * This ought to end up with a global pointer to the right cache + * in kmalloc_caches. + */ +static inline struct kmem_cache *kmalloc_slab(size_t size) +{ + int index = kmalloc_index(size); + + if (index == 0) + return NULL; + + if (index < 0) { + /* + * Generate a link failure. Would be great if we could + * do something to stop the compile here. + */ + extern void __kmalloc_size_too_large(void); + __kmalloc_size_too_large(); + } + return &kmalloc_caches[index]; +} + +#ifdef CONFIG_ZONE_DMA +#define SLUB_DMA __GFP_DMA +#else +/* Disable DMA functionality */ +#define SLUB_DMA 0 +#endif + +static inline void *kmalloc(size_t size, gfp_t flags) +{ + if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) { + struct kmem_cache *s = kmalloc_slab(size); + + if (!s) + return NULL; + + return kmem_cache_alloc(s, flags); + } else + return __kmalloc(size, flags); +} + +static inline void *kzalloc(size_t size, gfp_t flags) +{ + if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) { + struct kmem_cache *s = kmalloc_slab(size); + + if (!s) + return NULL; + + return kmem_cache_zalloc(s, flags); + } else + return __kzalloc(size, flags); +} + +#ifdef CONFIG_NUMA +extern void *__kmalloc_node(size_t size, gfp_t flags, int node); + +static inline void *kmalloc_node(size_t size, gfp_t flags, int node) +{ + if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) { + struct kmem_cache *s = kmalloc_slab(size); + + if (!s) + return NULL; + + return kmem_cache_alloc_node(s, flags, node); + } else + return __kmalloc_node(size, flags, node); +} +#endif + +#endif /* _LINUX_SLUB_DEF_H */ diff --git a/init/Kconfig b/init/Kconfig index 29d9e47ee0da..7ce952052947 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -478,15 +478,6 @@ config SHMEM option replaces shmem and tmpfs with the much simpler ramfs code, which may be appropriate on small systems without swap. -config SLAB - default y - bool "Use full SLAB allocator" if (EMBEDDED && !SMP && !SPARSEMEM) - help - Disabling this replaces the advanced SLAB allocator and - kmalloc support with the drastically simpler SLOB allocator. - SLOB is more space efficient but does not scale well and is - more susceptible to fragmentation. 
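The compile-time size-class lookup above maps a kmalloc() size to one of the general caches: indices 1 and 2 cover the odd 96- and 192-byte sizes, everything else rounds up to the next power of two starting at 2^3. A userspace re-derivation of the same mapping, purely for illustration (the upper bound here is an assumption, not the kernel's KMALLOC_SHIFT_HIGH logic):

#include <stdio.h>

/* Index 0 is unused, 1 and 2 are the 96/192 byte caches, 3..20 are the
 * power-of-two caches from 8 bytes up to an assumed 1 MiB limit.
 */
static int size_class_index(unsigned long size)
{
	if (size == 0)
		return 0;
	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	for (int i = 3; i <= 20; i++)
		if (size <= (1UL << i))
			return i;
	return -1;	/* too large for the general caches */
}

int main(void)
{
	unsigned long sizes[] = { 8, 24, 96, 100, 192, 200, 4096, 70000 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %6lu -> index %d\n", sizes[i],
		       size_class_index(sizes[i]));
	return 0;
}

For constant sizes the inline kmalloc() above resolves this chain at compile time via __builtin_constant_p() and calls kmem_cache_alloc() directly on the selected cache.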
- config VM_EVENT_COUNTERS default y bool "Enable VM event counters for /proc/vmstat" if EMBEDDED @@ -496,6 +487,46 @@ config VM_EVENT_COUNTERS on EMBEDDED systems. /proc/vmstat will only show page counts if VM event counters are disabled. +choice + prompt "Choose SLAB allocator" + default SLAB + help + This option allows to select a slab allocator. + +config SLAB + bool "SLAB" + help + The regular slab allocator that is established and known to work + well in all environments. It organizes chache hot objects in + per cpu and per node queues. SLAB is the default choice for + slab allocator. + +config SLUB + depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT + bool "SLUB (Unqueued Allocator)" + help + SLUB is a slab allocator that minimizes cache line usage + instead of managing queues of cached objects (SLAB approach). + Per cpu caching is realized using slabs of objects instead + of queues of objects. SLUB can use memory efficiently + way and has enhanced diagnostics. + +config SLOB +# +# SLOB cannot support SMP because SLAB_DESTROY_BY_RCU does not work +# properly. +# + depends on EMBEDDED && !SMP && !SPARSEMEM + bool "SLOB (Simple Allocator)" + help + SLOB replaces the SLAB allocator with a drastically simpler + allocator. SLOB is more space efficient that SLAB but does not + scale well (single lock for all operations) and is more susceptible + to fragmentation. SLOB it is a great choice to reduce + memory usage and code size for embedded systems. + +endchoice + endmenu # General setup config RT_MUTEXES @@ -511,10 +542,6 @@ config BASE_SMALL default 0 if BASE_FULL default 1 if !BASE_FULL -config SLOB - default !SLAB - bool - menu "Loadable module support" config MODULES diff --git a/mm/Makefile b/mm/Makefile index f3c077eb0b8e..1887148e44e7 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_SLAB) += slab.o +obj-$(CONFIG_SLUB) += slub.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o diff --git a/mm/slub.c b/mm/slub.c new file mode 100644 index 000000000000..0cd56bd74b64 --- /dev/null +++ b/mm/slub.c @@ -0,0 +1,3144 @@ +/* + * SLUB: A slab allocator that limits cache line use instead of queuing + * objects in per cpu and per node lists. + * + * The allocator synchronizes using per slab locks and only + * uses a centralized lock to manage a pool of partial slabs. + * + * (C) 2007 SGI, Christoph Lameter + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Lock order: + * 1. slab_lock(page) + * 2. slab->list_lock + * + * The slab_lock protects operations on the object of a particular + * slab and its metadata in the page struct. If the slab lock + * has been taken then no allocations nor frees can be performed + * on the objects in the slab nor can the slab be added or removed + * from the partial or full lists since this would mean modifying + * the page_struct of the slab. + * + * The list_lock protects the partial and full list on each node and + * the partial slab counter. If taken then no new slabs may be added or + * removed from the lists nor make the number of partial slabs be modified. + * (Note that the total number of slabs is an atomic value that may be + * modified without taking the list lock). 
+ * + * The list_lock is a centralized lock and thus we avoid taking it as + * much as possible. As long as SLUB does not have to handle partial + * slabs, operations can continue without any centralized lock. F.e. + * allocating a long series of objects that fill up slabs does not require + * the list lock. + * + * The lock order is sometimes inverted when we are trying to get a slab + * off a list. We take the list_lock and then look for a page on the list + * to use. While we do that objects in the slabs may be freed. We can + * only operate on the slab if we have also taken the slab_lock. So we use + * a slab_trylock() on the slab. If trylock was successful then no frees + * can occur anymore and we can use the slab for allocations etc. If the + * slab_trylock() does not succeed then frees are in progress in the slab and + * we must stay away from it for a while since we may cause a bouncing + * cacheline if we try to acquire the lock. So go onto the next slab. + * If all pages are busy then we may allocate a new slab instead of reusing + * a partial slab. A new slab has noone operating on it and thus there is + * no danger of cacheline contention. + * + * Interrupts are disabled during allocation and deallocation in order to + * make the slab allocator safe to use in the context of an irq. In addition + * interrupts are disabled to ensure that the processor does not change + * while handling per_cpu slabs, due to kernel preemption. + * + * SLUB assigns one slab for allocation to each processor. + * Allocations only occur from these slabs called cpu slabs. + * + * Slabs with free elements are kept on a partial list. + * There is no list for full slabs. If an object in a full slab is + * freed then the slab will show up again on the partial lists. + * Otherwise there is no need to track full slabs unless we have to + * track full slabs for debugging purposes. + * + * Slabs are freed when they become empty. Teardown and setup is + * minimal so we rely on the page allocators per cpu caches for + * fast frees and allocs. + * + * Overloading of page flags that are otherwise used for LRU management. + * + * PageActive The slab is used as a cpu cache. Allocations + * may be performed from the slab. The slab is not + * on any slab list and cannot be moved onto one. + * + * PageError Slab requires special handling due to debug + * options set. This moves slab handling out of + * the fast path. + */ + +/* + * Issues still to be resolved: + * + * - The per cpu array is updated for each new slab and and is a remote + * cacheline for most nodes. This could become a bouncing cacheline given + * enough frequent updates. There are 16 pointers in a cacheline.so at + * max 16 cpus could compete. Likely okay. + * + * - Support PAGE_ALLOC_DEBUG. Should be easy to do. + * + * - Support DEBUG_SLAB_LEAK. Trouble is we do not know where the full + * slabs are in SLUB. + * + * - SLAB_DEBUG_INITIAL is not supported but I have never seen a use of + * it. + * + * - Variable sizing of the per node arrays + */ + +/* Enable to test recovery from slab corruption on boot */ +#undef SLUB_RESILIENCY_TEST + +#if PAGE_SHIFT <= 12 + +/* + * Small page size. Make sure that we do not fragment memory + */ +#define DEFAULT_MAX_ORDER 1 +#define DEFAULT_MIN_OBJECTS 4 + +#else + +/* + * Large page machines are customarily able to handle larger + * page orders. 
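The header comment above describes the inverted lock order used when pulling a slab off a partial list: hold the list lock, then only trylock the slab and skip it if that fails. A small pthread model of the pattern, with invented names, just to show why the trylock avoids deadlocking against the normal slab-then-list order:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t slab_lock = PTHREAD_MUTEX_INITIALIZER;

/* Normal order: slab lock first, list lock only if the slab changes lists. */
static void free_path(void)
{
	pthread_mutex_lock(&slab_lock);
	pthread_mutex_lock(&list_lock);
	/* ... put the slab back on the partial list ... */
	pthread_mutex_unlock(&list_lock);
	pthread_mutex_unlock(&slab_lock);
}

/* Inverted order when scanning the partial list: never block on the slab
 * lock while holding the list lock, just try it and move on if it is busy.
 */
static int try_grab_partial_slab(void)
{
	int got = 0;

	pthread_mutex_lock(&list_lock);
	if (pthread_mutex_trylock(&slab_lock) == 0) {
		/* ... unlink the slab from the partial list ... */
		got = 1;
		pthread_mutex_unlock(&slab_lock);
	}
	pthread_mutex_unlock(&list_lock);
	return got;
}

int main(void)
{
	free_path();
	printf("grabbed a partial slab: %d\n", try_grab_partial_slab());
	return 0;
}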
+ */ +#define DEFAULT_MAX_ORDER 2 +#define DEFAULT_MIN_OBJECTS 8 + +#endif + +/* + * Flags from the regular SLAB that SLUB does not support: + */ +#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL) + +#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \ + SLAB_POISON | SLAB_STORE_USER) +/* + * Set of flags that will prevent slab merging + */ +#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ + SLAB_TRACE | SLAB_DESTROY_BY_RCU) + +#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ + SLAB_CACHE_DMA) + +#ifndef ARCH_KMALLOC_MINALIGN +#define ARCH_KMALLOC_MINALIGN sizeof(void *) +#endif + +#ifndef ARCH_SLAB_MINALIGN +#define ARCH_SLAB_MINALIGN sizeof(void *) +#endif + +/* Internal SLUB flags */ +#define __OBJECT_POISON 0x80000000 /* Poison object */ + +static int kmem_size = sizeof(struct kmem_cache); + +#ifdef CONFIG_SMP +static struct notifier_block slab_notifier; +#endif + +static enum { + DOWN, /* No slab functionality available */ + PARTIAL, /* kmem_cache_open() works but kmalloc does not */ + UP, /* Everything works */ + SYSFS /* Sysfs up */ +} slab_state = DOWN; + +/* A list of all slab caches on the system */ +static DECLARE_RWSEM(slub_lock); +LIST_HEAD(slab_caches); + +#ifdef CONFIG_SYSFS +static int sysfs_slab_add(struct kmem_cache *); +static int sysfs_slab_alias(struct kmem_cache *, const char *); +static void sysfs_slab_remove(struct kmem_cache *); +#else +static int sysfs_slab_add(struct kmem_cache *s) { return 0; } +static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } +static void sysfs_slab_remove(struct kmem_cache *s) {} +#endif + +/******************************************************************** + * Core slab cache functions + *******************************************************************/ + +int slab_is_available(void) +{ + return slab_state >= UP; +} + +static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) +{ +#ifdef CONFIG_NUMA + return s->node[node]; +#else + return &s->local_node; +#endif +} + +/* + * Object debugging + */ +static void print_section(char *text, u8 *addr, unsigned int length) +{ + int i, offset; + int newline = 1; + char ascii[17]; + + ascii[16] = 0; + + for (i = 0; i < length; i++) { + if (newline) { + printk(KERN_ERR "%10s 0x%p: ", text, addr + i); + newline = 0; + } + printk(" %02x", addr[i]); + offset = i % 16; + ascii[offset] = isgraph(addr[i]) ? addr[i] : '.'; + if (offset == 15) { + printk(" %s\n",ascii); + newline = 1; + } + } + if (!newline) { + i %= 16; + while (i < 16) { + printk(" "); + ascii[i] = ' '; + i++; + } + printk(" %s\n", ascii); + } +} + +/* + * Slow version of get and set free pointer. + * + * This requires touching the cache lines of kmem_cache. + * The offset can also be obtained from the page. In that + * case it is in the cacheline that we already need to touch. + */ +static void *get_freepointer(struct kmem_cache *s, void *object) +{ + return *(void **)(object + s->offset); +} + +static void set_freepointer(struct kmem_cache *s, void *object, void *fp) +{ + *(void **)(object + s->offset) = fp; +} + +/* + * Tracking user of a slab. 
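get_freepointer()/set_freepointer() above keep the free list entirely inside the free objects themselves: the word at s->offset of a free object points at the next free object. A standalone sketch of that trick on a flat buffer (the sizes and the offset-0 placement are only for illustration):

#include <stdio.h>
#include <string.h>

#define OBJ_SIZE   32
#define NR_OBJECTS  4

/* The link lives inside the free object (here at offset 0), so tracking
 * free objects costs no memory beyond the objects themselves.
 */
static void *get_free_pointer(void *object)
{
	void *next;

	memcpy(&next, object, sizeof(next));
	return next;
}

static void set_free_pointer(void *object, void *next)
{
	memcpy(object, &next, sizeof(next));
}

int main(void)
{
	static unsigned char slab[NR_OBJECTS * OBJ_SIZE];
	void *freelist = NULL;

	/* Build the initial free list, linking each object to the next. */
	for (int i = NR_OBJECTS - 1; i >= 0; i--) {
		void *obj = slab + i * OBJ_SIZE;

		set_free_pointer(obj, freelist);
		freelist = obj;
	}

	/* Allocation pops the head of the list ... */
	void *a = freelist;
	freelist = get_free_pointer(a);
	printf("allocated %p, next free is %p\n", a, freelist);

	/* ... and freeing pushes the object back onto the head. */
	set_free_pointer(a, freelist);
	freelist = a;
	printf("freed %p, head is %p again\n", a, freelist);
	return 0;
}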
+ */ +struct track { + void *addr; /* Called from address */ + int cpu; /* Was running on cpu */ + int pid; /* Pid context */ + unsigned long when; /* When did the operation occur */ +}; + +enum track_item { TRACK_ALLOC, TRACK_FREE }; + +static struct track *get_track(struct kmem_cache *s, void *object, + enum track_item alloc) +{ + struct track *p; + + if (s->offset) + p = object + s->offset + sizeof(void *); + else + p = object + s->inuse; + + return p + alloc; +} + +static void set_track(struct kmem_cache *s, void *object, + enum track_item alloc, void *addr) +{ + struct track *p; + + if (s->offset) + p = object + s->offset + sizeof(void *); + else + p = object + s->inuse; + + p += alloc; + if (addr) { + p->addr = addr; + p->cpu = smp_processor_id(); + p->pid = current ? current->pid : -1; + p->when = jiffies; + } else + memset(p, 0, sizeof(struct track)); +} + +#define set_tracking(__s, __o, __a) set_track(__s, __o, __a, \ + __builtin_return_address(0)) + +static void init_tracking(struct kmem_cache *s, void *object) +{ + if (s->flags & SLAB_STORE_USER) { + set_track(s, object, TRACK_FREE, NULL); + set_track(s, object, TRACK_ALLOC, NULL); + } +} + +static void print_track(const char *s, struct track *t) +{ + if (!t->addr) + return; + + printk(KERN_ERR "%s: ", s); + __print_symbol("%s", (unsigned long)t->addr); + printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid); +} + +static void print_trailer(struct kmem_cache *s, u8 *p) +{ + unsigned int off; /* Offset of last byte */ + + if (s->flags & SLAB_RED_ZONE) + print_section("Redzone", p + s->objsize, + s->inuse - s->objsize); + + printk(KERN_ERR "FreePointer 0x%p -> 0x%p\n", + p + s->offset, + get_freepointer(s, p)); + + if (s->offset) + off = s->offset + sizeof(void *); + else + off = s->inuse; + + if (s->flags & SLAB_STORE_USER) { + print_track("Last alloc", get_track(s, p, TRACK_ALLOC)); + print_track("Last free ", get_track(s, p, TRACK_FREE)); + off += 2 * sizeof(struct track); + } + + if (off != s->size) + /* Beginning of the filler is the free pointer */ + print_section("Filler", p + off, s->size - off); +} + +static void object_err(struct kmem_cache *s, struct page *page, + u8 *object, char *reason) +{ + u8 *addr = page_address(page); + + printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n", + s->name, reason, object, page); + printk(KERN_ERR " offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n", + object - addr, page->flags, page->inuse, page->freelist); + if (object > addr + 16) + print_section("Bytes b4", object - 16, 16); + print_section("Object", object, min(s->objsize, 128)); + print_trailer(s, object); + dump_stack(); +} + +static void slab_err(struct kmem_cache *s, struct page *page, char *reason, ...) +{ + va_list args; + char buf[100]; + + va_start(args, reason); + vsnprintf(buf, sizeof(buf), reason, args); + va_end(args); + printk(KERN_ERR "*** SLUB %s: %s in slab @0x%p\n", s->name, buf, + page); + dump_stack(); +} + +static void init_object(struct kmem_cache *s, void *object, int active) +{ + u8 *p = object; + + if (s->flags & __OBJECT_POISON) { + memset(p, POISON_FREE, s->objsize - 1); + p[s->objsize -1] = POISON_END; + } + + if (s->flags & SLAB_RED_ZONE) + memset(p + s->objsize, + active ? 
SLUB_RED_ACTIVE : SLUB_RED_INACTIVE, + s->inuse - s->objsize); +} + +static int check_bytes(u8 *start, unsigned int value, unsigned int bytes) +{ + while (bytes) { + if (*start != (u8)value) + return 0; + start++; + bytes--; + } + return 1; +} + + +static int check_valid_pointer(struct kmem_cache *s, struct page *page, + void *object) +{ + void *base; + + if (!object) + return 1; + + base = page_address(page); + if (object < base || object >= base + s->objects * s->size || + (object - base) % s->size) { + return 0; + } + + return 1; +} + +/* + * Object layout: + * + * object address + * Bytes of the object to be managed. + * If the freepointer may overlay the object then the free + * pointer is the first word of the object. + * Poisoning uses 0x6b (POISON_FREE) and the last byte is + * 0xa5 (POISON_END) + * + * object + s->objsize + * Padding to reach word boundary. This is also used for Redzoning. + * Padding is extended to word size if Redzoning is enabled + * and objsize == inuse. + * We fill with 0xbb (RED_INACTIVE) for inactive objects and with + * 0xcc (RED_ACTIVE) for objects in use. + * + * object + s->inuse + * A. Free pointer (if we cannot overwrite object on free) + * B. Tracking data for SLAB_STORE_USER + * C. Padding to reach required alignment boundary + * Padding is done using 0x5a (POISON_INUSE) + * + * object + s->size + * + * If slabcaches are merged then the objsize and inuse boundaries are to + * be ignored. And therefore no slab options that rely on these boundaries + * may be used with merged slabcaches. + */ + +static void restore_bytes(struct kmem_cache *s, char *message, u8 data, + void *from, void *to) +{ + printk(KERN_ERR "@@@ SLUB: %s Restoring %s (0x%x) from 0x%p-0x%p\n", + s->name, message, data, from, to - 1); + memset(from, data, to - from); +} + +static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) +{ + unsigned long off = s->inuse; /* The end of info */ + + if (s->offset) + /* Freepointer is placed after the object. */ + off += sizeof(void *); + + if (s->flags & SLAB_STORE_USER) + /* We also have user information there */ + off += 2 * sizeof(struct track); + + if (s->size == off) + return 1; + + if (check_bytes(p + off, POISON_INUSE, s->size - off)) + return 1; + + object_err(s, page, p, "Object padding check fails"); + + /* + * Restore padding + */ + restore_bytes(s, "object padding", POISON_INUSE, p + off, p + s->size); + return 0; +} + +static int slab_pad_check(struct kmem_cache *s, struct page *page) +{ + u8 *p; + int length, remainder; + + if (!(s->flags & SLAB_POISON)) + return 1; + + p = page_address(page); + length = s->objects * s->size; + remainder = (PAGE_SIZE << s->order) - length; + if (!remainder) + return 1; + + if (!check_bytes(p + length, POISON_INUSE, remainder)) { + printk(KERN_ERR "SLUB: %s slab 0x%p: Padding fails check\n", + s->name, p); + dump_stack(); + restore_bytes(s, "slab padding", POISON_INUSE, p + length, + p + length + remainder); + return 0; + } + return 1; +} + +static int check_object(struct kmem_cache *s, struct page *page, + void *object, int active) +{ + u8 *p = object; + u8 *endobject = object + s->objsize; + + if (s->flags & SLAB_RED_ZONE) { + unsigned int red = + active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE; + + if (!check_bytes(endobject, red, s->inuse - s->objsize)) { + object_err(s, page, object, + active ? 
"Redzone Active" : "Redzone Inactive"); + restore_bytes(s, "redzone", red, + endobject, object + s->inuse); + return 0; + } + } else { + if ((s->flags & SLAB_POISON) && s->objsize < s->inuse && + !check_bytes(endobject, POISON_INUSE, + s->inuse - s->objsize)) { + object_err(s, page, p, "Alignment padding check fails"); + /* + * Fix it so that there will not be another report. + * + * Hmmm... We may be corrupting an object that now expects + * to be longer than allowed. + */ + restore_bytes(s, "alignment padding", POISON_INUSE, + endobject, object + s->inuse); + } + } + + if (s->flags & SLAB_POISON) { + if (!active && (s->flags & __OBJECT_POISON) && + (!check_bytes(p, POISON_FREE, s->objsize - 1) || + p[s->objsize - 1] != POISON_END)) { + + object_err(s, page, p, "Poison check failed"); + restore_bytes(s, "Poison", POISON_FREE, + p, p + s->objsize -1); + restore_bytes(s, "Poison", POISON_END, + p + s->objsize - 1, p + s->objsize); + return 0; + } + /* + * check_pad_bytes cleans up on its own. + */ + check_pad_bytes(s, page, p); + } + + if (!s->offset && active) + /* + * Object and freepointer overlap. Cannot check + * freepointer while object is allocated. + */ + return 1; + + /* Check free pointer validity */ + if (!check_valid_pointer(s, page, get_freepointer(s, p))) { + object_err(s, page, p, "Freepointer corrupt"); + /* + * No choice but to zap it and thus loose the remainder + * of the free objects in this slab. May cause + * another error because the object count maybe + * wrong now. + */ + set_freepointer(s, p, NULL); + return 0; + } + return 1; +} + +static int check_slab(struct kmem_cache *s, struct page *page) +{ + VM_BUG_ON(!irqs_disabled()); + + if (!PageSlab(page)) { + printk(KERN_ERR "SLUB: %s Not a valid slab page @0x%p " + "flags=%lx mapping=0x%p count=%d \n", + s->name, page, page->flags, page->mapping, + page_count(page)); + return 0; + } + if (page->offset * sizeof(void *) != s->offset) { + printk(KERN_ERR "SLUB: %s Corrupted offset %lu in slab @0x%p" + " flags=0x%lx mapping=0x%p count=%d\n", + s->name, + (unsigned long)(page->offset * sizeof(void *)), + page, + page->flags, + page->mapping, + page_count(page)); + dump_stack(); + return 0; + } + if (page->inuse > s->objects) { + printk(KERN_ERR "SLUB: %s Inuse %u > max %u in slab " + "page @0x%p flags=%lx mapping=0x%p count=%d\n", + s->name, page->inuse, s->objects, page, page->flags, + page->mapping, page_count(page)); + dump_stack(); + return 0; + } + /* Slab_pad_check fixes things up after itself */ + slab_pad_check(s, page); + return 1; +} + +/* + * Determine if a certain object on a page is on the freelist and + * therefore free. Must hold the slab lock for cpu slabs to + * guarantee that the chains are consistent. + */ +static int on_freelist(struct kmem_cache *s, struct page *page, void *search) +{ + int nr = 0; + void *fp = page->freelist; + void *object = NULL; + + while (fp && nr <= s->objects) { + if (fp == search) + return 1; + if (!check_valid_pointer(s, page, fp)) { + if (object) { + object_err(s, page, object, + "Freechain corrupt"); + set_freepointer(s, object, NULL); + break; + } else { + printk(KERN_ERR "SLUB: %s slab 0x%p " + "freepointer 0x%p corrupted.\n", + s->name, page, fp); + dump_stack(); + page->freelist = NULL; + page->inuse = s->objects; + return 0; + } + break; + } + object = fp; + fp = get_freepointer(s, object); + nr++; + } + + if (page->inuse != s->objects - nr) { + printk(KERN_ERR "slab %s: page 0x%p wrong object count." 
+ " counter is %d but counted were %d\n", + s->name, page, page->inuse, + s->objects - nr); + page->inuse = s->objects - nr; + } + return search == NULL; +} + +static int alloc_object_checks(struct kmem_cache *s, struct page *page, + void *object) +{ + if (!check_slab(s, page)) + goto bad; + + if (object && !on_freelist(s, page, object)) { + printk(KERN_ERR "SLUB: %s Object 0x%p@0x%p " + "already allocated.\n", + s->name, object, page); + goto dump; + } + + if (!check_valid_pointer(s, page, object)) { + object_err(s, page, object, "Freelist Pointer check fails"); + goto dump; + } + + if (!object) + return 1; + + if (!check_object(s, page, object, 0)) + goto bad; + init_object(s, object, 1); + + if (s->flags & SLAB_TRACE) { + printk(KERN_INFO "TRACE %s alloc 0x%p inuse=%d fp=0x%p\n", + s->name, object, page->inuse, + page->freelist); + dump_stack(); + } + return 1; +dump: + dump_stack(); +bad: + if (PageSlab(page)) { + /* + * If this is a slab page then lets do the best we can + * to avoid issues in the future. Marking all objects + * as used avoids touching the remainder. + */ + printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n", + s->name, page); + page->inuse = s->objects; + page->freelist = NULL; + /* Fix up fields that may be corrupted */ + page->offset = s->offset / sizeof(void *); + } + return 0; +} + +static int free_object_checks(struct kmem_cache *s, struct page *page, + void *object) +{ + if (!check_slab(s, page)) + goto fail; + + if (!check_valid_pointer(s, page, object)) { + printk(KERN_ERR "SLUB: %s slab 0x%p invalid " + "object pointer 0x%p\n", + s->name, page, object); + goto fail; + } + + if (on_freelist(s, page, object)) { + printk(KERN_ERR "SLUB: %s slab 0x%p object " + "0x%p already free.\n", s->name, page, object); + goto fail; + } + + if (!check_object(s, page, object, 1)) + return 0; + + if (unlikely(s != page->slab)) { + if (!PageSlab(page)) + printk(KERN_ERR "slab_free %s size %d: attempt to" + "free object(0x%p) outside of slab.\n", + s->name, s->size, object); + else + if (!page->slab) + printk(KERN_ERR + "slab_free : no slab(NULL) for object 0x%p.\n", + object); + else + printk(KERN_ERR "slab_free %s(%d): object at 0x%p" + " belongs to slab %s(%d)\n", + s->name, s->size, object, + page->slab->name, page->slab->size); + goto fail; + } + if (s->flags & SLAB_TRACE) { + printk(KERN_INFO "TRACE %s free 0x%p inuse=%d fp=0x%p\n", + s->name, object, page->inuse, + page->freelist); + print_section("Object", object, s->objsize); + dump_stack(); + } + init_object(s, object, 0); + return 1; +fail: + dump_stack(); + printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", + s->name, page, object); + return 0; +} + +/* + * Slab allocation and freeing + */ +static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) +{ + struct page * page; + int pages = 1 << s->order; + + if (s->order) + flags |= __GFP_COMP; + + if (s->flags & SLAB_CACHE_DMA) + flags |= SLUB_DMA; + + if (node == -1) + page = alloc_pages(flags, s->order); + else + page = alloc_pages_node(node, flags, s->order); + + if (!page) + return NULL; + + mod_zone_page_state(page_zone(page), + (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
+ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, + pages); + + return page; +} + +static void setup_object(struct kmem_cache *s, struct page *page, + void *object) +{ + if (PageError(page)) { + init_object(s, object, 0); + init_tracking(s, object); + } + + if (unlikely(s->ctor)) { + int mode = SLAB_CTOR_CONSTRUCTOR; + + if (!(s->flags & __GFP_WAIT)) + mode |= SLAB_CTOR_ATOMIC; + + s->ctor(object, s, mode); + } +} + +static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) +{ + struct page *page; + struct kmem_cache_node *n; + void *start; + void *end; + void *last; + void *p; + + if (flags & __GFP_NO_GROW) + return NULL; + + BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); + + if (flags & __GFP_WAIT) + local_irq_enable(); + + page = allocate_slab(s, flags & GFP_LEVEL_MASK, node); + if (!page) + goto out; + + n = get_node(s, page_to_nid(page)); + if (n) + atomic_long_inc(&n->nr_slabs); + page->offset = s->offset / sizeof(void *); + page->slab = s; + page->flags |= 1 << PG_slab; + if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | + SLAB_STORE_USER | SLAB_TRACE)) + page->flags |= 1 << PG_error; + + start = page_address(page); + end = start + s->objects * s->size; + + if (unlikely(s->flags & SLAB_POISON)) + memset(start, POISON_INUSE, PAGE_SIZE << s->order); + + last = start; + for (p = start + s->size; p < end; p += s->size) { + setup_object(s, page, last); + set_freepointer(s, last, p); + last = p; + } + setup_object(s, page, last); + set_freepointer(s, last, NULL); + + page->freelist = start; + page->inuse = 0; +out: + if (flags & __GFP_WAIT) + local_irq_disable(); + return page; +} + +static void __free_slab(struct kmem_cache *s, struct page *page) +{ + int pages = 1 << s->order; + + if (unlikely(PageError(page) || s->dtor)) { + void *start = page_address(page); + void *end = start + (pages << PAGE_SHIFT); + void *p; + + slab_pad_check(s, page); + for (p = start; p <= end - s->size; p += s->size) { + if (s->dtor) + s->dtor(p, s, 0); + check_object(s, page, p, 0); + } + } + + mod_zone_page_state(page_zone(page), + (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
+ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, + - pages); + + page->mapping = NULL; + __free_pages(page, s->order); +} + +static void rcu_free_slab(struct rcu_head *h) +{ + struct page *page; + + page = container_of((struct list_head *)h, struct page, lru); + __free_slab(page->slab, page); +} + +static void free_slab(struct kmem_cache *s, struct page *page) +{ + if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { + /* + * RCU free overloads the RCU head over the LRU + */ + struct rcu_head *head = (void *)&page->lru; + + call_rcu(head, rcu_free_slab); + } else + __free_slab(s, page); +} + +static void discard_slab(struct kmem_cache *s, struct page *page) +{ + struct kmem_cache_node *n = get_node(s, page_to_nid(page)); + + atomic_long_dec(&n->nr_slabs); + reset_page_mapcount(page); + page->flags &= ~(1 << PG_slab | 1 << PG_error); + free_slab(s, page); +} + +/* + * Per slab locking using the pagelock + */ +static __always_inline void slab_lock(struct page *page) +{ + bit_spin_lock(PG_locked, &page->flags); +} + +static __always_inline void slab_unlock(struct page *page) +{ + bit_spin_unlock(PG_locked, &page->flags); +} + +static __always_inline int slab_trylock(struct page *page) +{ + int rc = 1; + + rc = bit_spin_trylock(PG_locked, &page->flags); + return rc; +} + +/* + * Management of partially allocated slabs + */ +static void add_partial(struct kmem_cache *s, struct page *page) +{ + struct kmem_cache_node *n = get_node(s, page_to_nid(page)); + + spin_lock(&n->list_lock); + n->nr_partial++; + list_add(&page->lru, &n->partial); + spin_unlock(&n->list_lock); +} + +static void remove_partial(struct kmem_cache *s, + struct page *page) +{ + struct kmem_cache_node *n = get_node(s, page_to_nid(page)); + + spin_lock(&n->list_lock); + list_del(&page->lru); + n->nr_partial--; + spin_unlock(&n->list_lock); +} + +/* + * Lock page and remove it from the partial list + * + * Must hold list_lock + */ +static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page) +{ + if (slab_trylock(page)) { + list_del(&page->lru); + n->nr_partial--; + return 1; + } + return 0; +} + +/* + * Try to get a partial slab from a specific node + */ +static struct page *get_partial_node(struct kmem_cache_node *n) +{ + struct page *page; + + /* + * Racy check. If we mistakenly see no partial slabs then we + * just allocate an empty slab. If we mistakenly try to get a + * partial slab then get_partials() will return NULL. + */ + if (!n || !n->nr_partial) + return NULL; + + spin_lock(&n->list_lock); + list_for_each_entry(page, &n->partial, lru) + if (lock_and_del_slab(n, page)) + goto out; + page = NULL; +out: + spin_unlock(&n->list_lock); + return page; +} + +/* + * Get a page from somewhere. Search in increasing NUMA + * distances. + */ +static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) +{ +#ifdef CONFIG_NUMA + struct zonelist *zonelist; + struct zone **z; + struct page *page; + + /* + * The defrag ratio allows to configure the tradeoffs between + * inter node defragmentation and node local allocations. + * A lower defrag_ratio increases the tendency to do local + * allocations instead of scanning throught the partial + * lists on other nodes. + * + * If defrag_ratio is set to 0 then kmalloc() always + * returns node local objects. If its higher then kmalloc() + * may return off node objects in order to avoid fragmentation. + * + * A higher ratio means slabs may be taken from other nodes + * thus reducing the number of partial slabs on those nodes. 
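Earlier in this file, init_object() fills freed objects with POISON_FREE plus a distinct last byte, and check_object() verifies the pattern before the object is handed out again; that is how use-after-free writes are caught. A userspace sketch of that check, reusing the poison values from include/linux/poison.h but otherwise made up:

#include <stdio.h>
#include <string.h>

#define POISON_FREE 0x6b
#define POISON_END  0xa5

/* Fill a freed object with a known pattern; the last byte differs so an
 * off-by-one overwrite from the previous object is also detectable.
 */
static void poison_object(unsigned char *obj, size_t size)
{
	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

/* Return 1 if the poison is intact, i.e. nobody touched the freed object. */
static int poison_intact(const unsigned char *obj, size_t size)
{
	for (size_t i = 0; i < size - 1; i++)
		if (obj[i] != POISON_FREE)
			return 0;
	return obj[size - 1] == POISON_END;
}

int main(void)
{
	unsigned char obj[64];

	poison_object(obj, sizeof(obj));
	printf("freshly freed object: intact=%d\n",
	       poison_intact(obj, sizeof(obj)));

	obj[10] = 0x42;		/* simulate a use-after-free write */
	printf("after a stray write:  intact=%d\n",
	       poison_intact(obj, sizeof(obj)));
	return 0;
}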
+ * + * If /sys/slab/xx/defrag_ratio is set to 100 (which makes + * defrag_ratio = 1000) then every (well almost) allocation + * will first attempt to defrag slab caches on other nodes. This + * means scanning over all nodes to look for partial slabs which + * may be a bit expensive to do on every slab allocation. + */ + if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio) + return NULL; + + zonelist = &NODE_DATA(slab_node(current->mempolicy)) + ->node_zonelists[gfp_zone(flags)]; + for (z = zonelist->zones; *z; z++) { + struct kmem_cache_node *n; + + n = get_node(s, zone_to_nid(*z)); + + if (n && cpuset_zone_allowed_hardwall(*z, flags) && + n->nr_partial > 2) { + page = get_partial_node(n); + if (page) + return page; + } + } +#endif + return NULL; +} + +/* + * Get a partial page, lock it and return it. + */ +static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) +{ + struct page *page; + int searchnode = (node == -1) ? numa_node_id() : node; + + page = get_partial_node(get_node(s, searchnode)); + if (page || (flags & __GFP_THISNODE)) + return page; + + return get_any_partial(s, flags); +} + +/* + * Move a page back to the lists. + * + * Must be called with the slab lock held. + * + * On exit the slab lock will have been dropped. + */ +static void putback_slab(struct kmem_cache *s, struct page *page) +{ + if (page->inuse) { + if (page->freelist) + add_partial(s, page); + slab_unlock(page); + } else { + slab_unlock(page); + discard_slab(s, page); + } +} + +/* + * Remove the cpu slab + */ +static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu) +{ + s->cpu_slab[cpu] = NULL; + ClearPageActive(page); + + putback_slab(s, page); +} + +static void flush_slab(struct kmem_cache *s, struct page *page, int cpu) +{ + slab_lock(page); + deactivate_slab(s, page, cpu); +} + +/* + * Flush cpu slab. + * Called from IPI handler with interrupts disabled. + */ +static void __flush_cpu_slab(struct kmem_cache *s, int cpu) +{ + struct page *page = s->cpu_slab[cpu]; + + if (likely(page)) + flush_slab(s, page, cpu); +} + +static void flush_cpu_slab(void *d) +{ + struct kmem_cache *s = d; + int cpu = smp_processor_id(); + + __flush_cpu_slab(s, cpu); +} + +static void flush_all(struct kmem_cache *s) +{ +#ifdef CONFIG_SMP + on_each_cpu(flush_cpu_slab, s, 1, 1); +#else + unsigned long flags; + + local_irq_save(flags); + flush_cpu_slab(s); + local_irq_restore(flags); +#endif +} + +/* + * slab_alloc is optimized to only modify two cachelines on the fast path + * (aside from the stack): + * + * 1. The page struct + * 2. The first cacheline of the object to be allocated. + * + * The only cache lines that are read (apart from code) is the + * per cpu array in the kmem_cache struct. 
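The defrag_ratio test above is a cheap probabilistic gate: of the 1024 possible get_cycles() remainders, only values up to the ratio allow the remote-node scan, so roughly ratio/1024 of the eligible allocations pay for it. A quick userspace estimate of that fraction, with rand() standing in for get_cycles():

#include <stdio.h>
#include <stdlib.h>

/* Same shape as the check above: return 1 when remote partial lists
 * should be scanned for this allocation.
 */
static int consider_remote_nodes(int defrag_ratio)
{
	if (!defrag_ratio || rand() % 1024 > defrag_ratio)
		return 0;
	return 1;
}

int main(void)
{
	int ratios[] = { 0, 100, 512, 1000 };

	for (unsigned r = 0; r < sizeof(ratios) / sizeof(ratios[0]); r++) {
		int hits = 0;

		for (int i = 0; i < 100000; i++)
			hits += consider_remote_nodes(ratios[r]);
		printf("defrag_ratio=%4d -> remote scan on about %.1f%% of calls\n",
		       ratios[r], 100.0 * hits / 100000);
	}
	return 0;
}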
+ * + * Fastpath is not possible if we need to get a new slab or have + * debugging enabled (which means all slabs are marked with PageError) + */ +static __always_inline void *slab_alloc(struct kmem_cache *s, + gfp_t gfpflags, int node) +{ + struct page *page; + void **object; + unsigned long flags; + int cpu; + + local_irq_save(flags); + cpu = smp_processor_id(); + page = s->cpu_slab[cpu]; + if (!page) + goto new_slab; + + slab_lock(page); + if (unlikely(node != -1 && page_to_nid(page) != node)) + goto another_slab; +redo: + object = page->freelist; + if (unlikely(!object)) + goto another_slab; + if (unlikely(PageError(page))) + goto debug; + +have_object: + page->inuse++; + page->freelist = object[page->offset]; + slab_unlock(page); + local_irq_restore(flags); + return object; + +another_slab: + deactivate_slab(s, page, cpu); + +new_slab: + page = get_partial(s, gfpflags, node); + if (likely(page)) { +have_slab: + s->cpu_slab[cpu] = page; + SetPageActive(page); + goto redo; + } + + page = new_slab(s, gfpflags, node); + if (page) { + cpu = smp_processor_id(); + if (s->cpu_slab[cpu]) { + /* + * Someone else populated the cpu_slab while we enabled + * interrupts, or we have got scheduled on another cpu. + * The page may not be on the requested node. + */ + if (node == -1 || + page_to_nid(s->cpu_slab[cpu]) == node) { + /* + * Current cpuslab is acceptable and we + * want the current one since its cache hot + */ + discard_slab(s, page); + page = s->cpu_slab[cpu]; + slab_lock(page); + goto redo; + } + /* Dump the current slab */ + flush_slab(s, s->cpu_slab[cpu], cpu); + } + slab_lock(page); + goto have_slab; + } + local_irq_restore(flags); + return NULL; +debug: + if (!alloc_object_checks(s, page, object)) + goto another_slab; + if (s->flags & SLAB_STORE_USER) + set_tracking(s, object, TRACK_ALLOC); + goto have_object; +} + +void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) +{ + return slab_alloc(s, gfpflags, -1); +} +EXPORT_SYMBOL(kmem_cache_alloc); + +#ifdef CONFIG_NUMA +void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) +{ + return slab_alloc(s, gfpflags, node); +} +EXPORT_SYMBOL(kmem_cache_alloc_node); +#endif + +/* + * The fastpath only writes the cacheline of the page struct and the first + * cacheline of the object. + * + * No special cachelines need to be read + */ +static void slab_free(struct kmem_cache *s, struct page *page, void *x) +{ + void *prior; + void **object = (void *)x; + unsigned long flags; + + local_irq_save(flags); + slab_lock(page); + + if (unlikely(PageError(page))) + goto debug; +checks_ok: + prior = object[page->offset] = page->freelist; + page->freelist = object; + page->inuse--; + + if (unlikely(PageActive(page))) + /* + * Cpu slabs are never on partial lists and are + * never freed. + */ + goto out_unlock; + + if (unlikely(!page->inuse)) + goto slab_empty; + + /* + * Objects left in the slab. If it + * was not on the partial list before + * then add it. + */ + if (unlikely(!prior)) + add_partial(s, page); + +out_unlock: + slab_unlock(page); + local_irq_restore(flags); + return; + +slab_empty: + if (prior) + /* + * Partially used slab that is on the partial list. 
+ */ + remove_partial(s, page); + + slab_unlock(page); + discard_slab(s, page); + local_irq_restore(flags); + return; + +debug: + if (free_object_checks(s, page, x)) + goto checks_ok; + goto out_unlock; +} + +void kmem_cache_free(struct kmem_cache *s, void *x) +{ + struct page * page; + + page = virt_to_page(x); + + if (unlikely(PageCompound(page))) + page = page->first_page; + + + if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER))) + set_tracking(s, x, TRACK_FREE); + slab_free(s, page, x); +} +EXPORT_SYMBOL(kmem_cache_free); + +/* Figure out on which slab object the object resides */ +static struct page *get_object_page(const void *x) +{ + struct page *page = virt_to_page(x); + + if (unlikely(PageCompound(page))) + page = page->first_page; + + if (!PageSlab(page)) + return NULL; + + return page; +} + +/* + * kmem_cache_open produces objects aligned at "size" and the first object + * is placed at offset 0 in the slab (We have no metainformation on the + * slab, all slabs are in essence "off slab"). + * + * In order to get the desired alignment one just needs to align the + * size. + * + * Notice that the allocation order determines the sizes of the per cpu + * caches. Each processor has always one slab available for allocations. + * Increasing the allocation order reduces the number of times that slabs + * must be moved on and off the partial lists and therefore may influence + * locking overhead. + * + * The offset is used to relocate the free list link in each object. It is + * therefore possible to move the free list link behind the object. This + * is necessary for RCU to work properly and also useful for debugging. + */ + +/* + * Mininum / Maximum order of slab pages. This influences locking overhead + * and slab fragmentation. A higher order reduces the number of partial slabs + * and increases the number of allocations possible without having to + * take the list_lock. + */ +static int slub_min_order; +static int slub_max_order = DEFAULT_MAX_ORDER; + +/* + * Minimum number of objects per slab. This is necessary in order to + * reduce locking overhead. Similar to the queue size in SLAB. + */ +static int slub_min_objects = DEFAULT_MIN_OBJECTS; + +/* + * Merge control. If this is set then no merging of slab caches will occur. + */ +static int slub_nomerge; + +/* + * Debug settings: + */ +static int slub_debug; + +static char *slub_debug_slabs; + +/* + * Calculate the order of allocation given an slab object size. + * + * The order of allocation has significant impact on other elements + * of the system. Generally order 0 allocations should be preferred + * since they do not cause fragmentation in the page allocator. Larger + * objects may have problems with order 0 because there may be too much + * space left unused in a slab. We go to a higher order if more than 1/8th + * of the slab would be wasted. + * + * In order to reach satisfactory performance we must ensure that + * a minimum number of objects is in one slab. Otherwise we may + * generate too much activity on the partial lists. This is less a + * concern for large slabs though. slub_max_order specifies the order + * where we begin to stop considering the number of objects in a slab. + * + * Higher order allocations also allow the placement of more objects + * in a slab and thereby reduce object handling overhead. If the user + * has requested a higher mininum order then we start with that one + * instead of zero. 
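Taken together, the allocation and free paths above give each slab a simple lifecycle: it leaves every list while completely full, the first free back into it returns it to the partial list, and the last free hands the page back to the page allocator. A toy model of those transitions, with names invented for illustration:

#include <stdio.h>

enum toy_state { TOY_FULL, TOY_PARTIAL, TOY_EMPTY };

struct toy_slab {
	int objects;		/* capacity of the slab */
	int inuse;		/* objects currently handed out */
	enum toy_state state;
};

static void toy_alloc(struct toy_slab *s)
{
	s->inuse++;
	if (s->inuse == s->objects)
		s->state = TOY_FULL;	/* full slabs are not kept on any list */
}

static void toy_free(struct toy_slab *s)
{
	s->inuse--;
	if (s->inuse == 0)
		s->state = TOY_EMPTY;	/* discarded back to the page allocator */
	else if (s->state == TOY_FULL)
		s->state = TOY_PARTIAL;	/* first free puts it on the partial list */
}

int main(void)
{
	struct toy_slab s = { .objects = 2, .inuse = 0, .state = TOY_PARTIAL };
	static const char *names[] = { "full", "partial", "empty" };

	toy_alloc(&s); toy_alloc(&s);
	printf("after two allocs: inuse=%d state=%s\n", s.inuse, names[s.state]);
	toy_free(&s);
	printf("after one free:   inuse=%d state=%s\n", s.inuse, names[s.state]);
	toy_free(&s);
	printf("after two frees:  inuse=%d state=%s\n", s.inuse, names[s.state]);
	return 0;
}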
+ */ +static int calculate_order(int size) +{ + int order; + int rem; + + for (order = max(slub_min_order, fls(size - 1) - PAGE_SHIFT); + order < MAX_ORDER; order++) { + unsigned long slab_size = PAGE_SIZE << order; + + if (slub_max_order > order && + slab_size < slub_min_objects * size) + continue; + + if (slab_size < size) + continue; + + rem = slab_size % size; + + if (rem <= (PAGE_SIZE << order) / 8) + break; + + } + if (order >= MAX_ORDER) + return -E2BIG; + return order; +} + +/* + * Function to figure out which alignment to use from the + * various ways of specifying it. + */ +static unsigned long calculate_alignment(unsigned long flags, + unsigned long align, unsigned long size) +{ + /* + * If the user wants hardware cache aligned objects then + * follow that suggestion if the object is sufficiently + * large. + * + * The hardware cache alignment cannot override the + * specified alignment though. If that is greater + * then use it. + */ + if ((flags & (SLAB_MUST_HWCACHE_ALIGN | SLAB_HWCACHE_ALIGN)) && + size > L1_CACHE_BYTES / 2) + return max_t(unsigned long, align, L1_CACHE_BYTES); + + if (align < ARCH_SLAB_MINALIGN) + return ARCH_SLAB_MINALIGN; + + return ALIGN(align, sizeof(void *)); +} + +static void init_kmem_cache_node(struct kmem_cache_node *n) +{ + n->nr_partial = 0; + atomic_long_set(&n->nr_slabs, 0); + spin_lock_init(&n->list_lock); + INIT_LIST_HEAD(&n->partial); +} + +#ifdef CONFIG_NUMA +/* + * No kmalloc_node yet so do it by hand. We know that this is the first + * slab on the node for this slabcache. There are no concurrent accesses + * possible. + * + * Note that this function only works on the kmalloc_node_cache + * when allocating for the kmalloc_node_cache. + */ +static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflags, + int node) +{ + struct page *page; + struct kmem_cache_node *n; + + BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); + + page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node); + /* new_slab() disables interupts */ + local_irq_enable(); + + BUG_ON(!page); + n = page->freelist; + BUG_ON(!n); + page->freelist = get_freepointer(kmalloc_caches, n); + page->inuse++; + kmalloc_caches->node[node] = n; + init_object(kmalloc_caches, n, 1); + init_kmem_cache_node(n); + atomic_long_inc(&n->nr_slabs); + add_partial(kmalloc_caches, page); + return n; +} + +static void free_kmem_cache_nodes(struct kmem_cache *s) +{ + int node; + + for_each_online_node(node) { + struct kmem_cache_node *n = s->node[node]; + if (n && n != &s->local_node) + kmem_cache_free(kmalloc_caches, n); + s->node[node] = NULL; + } +} + +static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) +{ + int node; + int local_node; + + if (slab_state >= UP) + local_node = page_to_nid(virt_to_page(s)); + else + local_node = 0; + + for_each_online_node(node) { + struct kmem_cache_node *n; + + if (local_node == node) + n = &s->local_node; + else { + if (slab_state == DOWN) { + n = early_kmem_cache_node_alloc(gfpflags, + node); + continue; + } + n = kmem_cache_alloc_node(kmalloc_caches, + gfpflags, node); + + if (!n) { + free_kmem_cache_nodes(s); + return 0; + } + + } + s->node[node] = n; + init_kmem_cache_node(n); + } + return 1; +} +#else +static void free_kmem_cache_nodes(struct kmem_cache *s) +{ +} + +static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) +{ + init_kmem_cache_node(&s->local_node); + return 1; +} +#endif + +/* + * calculate_sizes() determines the order and the distribution of data within + * a slab object. 
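As a concrete illustration of the order search above, here is a simplified userspace transcription of the loop, assuming 4 KiB pages and the small-page defaults (minimum 4 objects per slab, preferred maximum order 1); the real function also honours the slub_min_order/slub_max_order/slub_min_objects boot options:

#include <stdio.h>

#define TOY_PAGE_SIZE   4096UL	/* assumption: 4 KiB pages */
#define TOY_MAX_ORDER   11
#define TOY_MIN_OBJECTS 4	/* DEFAULT_MIN_OBJECTS for small pages */
#define TOY_PREF_ORDER  1	/* DEFAULT_MAX_ORDER for small pages */

/* Pick the smallest order that wastes at most 1/8th of the slab, skipping
 * low orders that would hold fewer than TOY_MIN_OBJECTS objects.
 */
static int toy_calculate_order(unsigned long size)
{
	for (int order = 0; order < TOY_MAX_ORDER; order++) {
		unsigned long slab_size = TOY_PAGE_SIZE << order;

		if (TOY_PREF_ORDER > order && slab_size < TOY_MIN_OBJECTS * size)
			continue;
		if (slab_size < size)
			continue;
		if (slab_size % size <= slab_size / 8)
			return order;
	}
	return -1;
}

int main(void)
{
	unsigned long sizes[] = { 32, 192, 1000, 2048, 5000 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object size %5lu -> order %d\n",
		       sizes[i], toy_calculate_order(sizes[i]));
	return 0;
}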
+ */ +static int calculate_sizes(struct kmem_cache *s) +{ + unsigned long flags = s->flags; + unsigned long size = s->objsize; + unsigned long align = s->align; + + /* + * Determine if we can poison the object itself. If the user of + * the slab may touch the object after free or before allocation + * then we should never poison the object itself. + */ + if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && + !s->ctor && !s->dtor) + s->flags |= __OBJECT_POISON; + else + s->flags &= ~__OBJECT_POISON; + + /* + * Round up object size to the next word boundary. We can only + * place the free pointer at word boundaries and this determines + * the possible location of the free pointer. + */ + size = ALIGN(size, sizeof(void *)); + + /* + * If we are redzoning then check if there is some space between the + * end of the object and the free pointer. If not then add an + * additional word, so that we can establish a redzone between + * the object and the freepointer to be able to check for overwrites. + */ + if ((flags & SLAB_RED_ZONE) && size == s->objsize) + size += sizeof(void *); + + /* + * With that we have determined how much of the slab is in actual + * use by the object. This is the potential offset to the free + * pointer. + */ + s->inuse = size; + + if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || + s->ctor || s->dtor)) { + /* + * Relocate free pointer after the object if it is not + * permitted to overwrite the first word of the object on + * kmem_cache_free. + * + * This is the case if we do RCU, have a constructor or + * destructor or are poisoning the objects. + */ + s->offset = size; + size += sizeof(void *); + } + + if (flags & SLAB_STORE_USER) + /* + * Need to store information about allocs and frees after + * the object. + */ + size += 2 * sizeof(struct track); + + if (flags & DEBUG_DEFAULT_FLAGS) + /* + * Add some empty padding so that we can catch + * overwrites from earlier objects rather than let + * tracking information or the free pointer be + * corrupted if an user writes before the start + * of the object. + */ + size += sizeof(void *); + /* + * Determine the alignment based on various parameters that the + * user specified (this is unecessarily complex due to the attempt + * to be compatible with SLAB. Should be cleaned up some day). + */ + align = calculate_alignment(flags, align, s->objsize); + + /* + * SLUB stores one object immediately after another beginning from + * offset 0. In order to align the objects we have to simply size + * each object to conform to the alignment. + */ + size = ALIGN(size, align); + s->size = size; + + s->order = calculate_order(size); + if (s->order < 0) + return 0; + + /* + * Determine the number of objects per slab + */ + s->objects = (PAGE_SIZE << s->order) / size; + + /* + * Verify that the number of objects is within permitted limits. + * The page->inuse field is only 16 bit wide! So we cannot have + * more than 64k objects per slab. 
+ */ + if (!s->objects || s->objects > 65535) + return 0; + return 1; + +} + +static int __init finish_bootstrap(void) +{ + struct list_head *h; + int err; + + slab_state = SYSFS; + + list_for_each(h, &slab_caches) { + struct kmem_cache *s = + container_of(h, struct kmem_cache, list); + + err = sysfs_slab_add(s); + BUG_ON(err); + } + return 0; +} + +static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, + const char *name, size_t size, + size_t align, unsigned long flags, + void (*ctor)(void *, struct kmem_cache *, unsigned long), + void (*dtor)(void *, struct kmem_cache *, unsigned long)) +{ + memset(s, 0, kmem_size); + s->name = name; + s->ctor = ctor; + s->dtor = dtor; + s->objsize = size; + s->flags = flags; + s->align = align; + + BUG_ON(flags & SLUB_UNIMPLEMENTED); + + /* + * The page->offset field is only 16 bit wide. This is an offset + * in units of words from the beginning of an object. If the slab + * size is bigger then we cannot move the free pointer behind the + * object anymore. + * + * On 32 bit platforms the limit is 256k. On 64bit platforms + * the limit is 512k. + * + * Debugging or ctor/dtors may create a need to move the free + * pointer. Fail if this happens. + */ + if (s->size >= 65535 * sizeof(void *)) { + BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON | + SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); + BUG_ON(ctor || dtor); + } + else + /* + * Enable debugging if selected on the kernel commandline. + */ + if (slub_debug && (!slub_debug_slabs || + strncmp(slub_debug_slabs, name, + strlen(slub_debug_slabs)) == 0)) + s->flags |= slub_debug; + + if (!calculate_sizes(s)) + goto error; + + s->refcount = 1; +#ifdef CONFIG_NUMA + s->defrag_ratio = 100; +#endif + + if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) + return 1; +error: + if (flags & SLAB_PANIC) + panic("Cannot create slab %s size=%lu realsize=%u " + "order=%u offset=%u flags=%lx\n", + s->name, (unsigned long)size, s->size, s->order, + s->offset, flags); + return 0; +} +EXPORT_SYMBOL(kmem_cache_open); + +/* + * Check if a given pointer is valid + */ +int kmem_ptr_validate(struct kmem_cache *s, const void *object) +{ + struct page * page; + void *addr; + + page = get_object_page(object); + + if (!page || s != page->slab) + /* No slab or wrong slab */ + return 0; + + addr = page_address(page); + if (object < addr || object >= addr + s->objects * s->size) + /* Out of bounds */ + return 0; + + if ((object - addr) % s->size) + /* Improperly aligned */ + return 0; + + /* + * We could also check if the object is on the slabs freelist. + * But this would be too expensive and it seems that the main + * purpose of kmem_ptr_valid is to check if the object belongs + * to a certain slab. 
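To see where calculate_sizes() above puts things, the same bookkeeping can be run by hand for a debug-enabled cache: word-align the payload, optionally move the free pointer behind the object, then append the tracking records. A rough userspace sketch (the tracking-record size below is an approximation, not sizeof(struct track)):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

struct toy_layout {
	unsigned long objsize;	/* payload visible to the user */
	unsigned long inuse;	/* end of payload, start of metadata */
	unsigned long offset;	/* free pointer location, 0 = overlaps the object */
	unsigned long size;	/* full per-object footprint */
};

static struct toy_layout compute_layout(unsigned long objsize,
					int own_freepointer, int store_user)
{
	struct toy_layout l = { .objsize = objsize };
	unsigned long size = ALIGN_UP(objsize, sizeof(void *));

	l.inuse = size;
	if (own_freepointer) {		/* RCU, ctor/dtor or object poisoning */
		l.offset = size;
		size += sizeof(void *);
	}
	if (store_user)			/* two records: last alloc and last free */
		size += 2 * 4 * sizeof(unsigned long);
	l.size = ALIGN_UP(size, sizeof(void *));
	return l;
}

int main(void)
{
	struct toy_layout l = compute_layout(43, 1, 1);

	printf("objsize=%lu inuse=%lu freepointer@%lu total=%lu\n",
	       l.objsize, l.inuse, l.offset, l.size);
	return 0;
}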
+ */ + return 1; +} +EXPORT_SYMBOL(kmem_ptr_validate); + +/* + * Determine the size of a slab object + */ +unsigned int kmem_cache_size(struct kmem_cache *s) +{ + return s->objsize; +} +EXPORT_SYMBOL(kmem_cache_size); + +const char *kmem_cache_name(struct kmem_cache *s) +{ + return s->name; +} +EXPORT_SYMBOL(kmem_cache_name); + +/* + * Attempt to free all slabs on a node + */ +static int free_list(struct kmem_cache *s, struct kmem_cache_node *n, + struct list_head *list) +{ + int slabs_inuse = 0; + unsigned long flags; + struct page *page, *h; + + spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry_safe(page, h, list, lru) + if (!page->inuse) { + list_del(&page->lru); + discard_slab(s, page); + } else + slabs_inuse++; + spin_unlock_irqrestore(&n->list_lock, flags); + return slabs_inuse; +} + +/* + * Release all resources used by slab cache + */ +static int kmem_cache_close(struct kmem_cache *s) +{ + int node; + + flush_all(s); + + /* Attempt to free all objects */ + for_each_online_node(node) { + struct kmem_cache_node *n = get_node(s, node); + + free_list(s, n, &n->partial); + if (atomic_long_read(&n->nr_slabs)) + return 1; + } + free_kmem_cache_nodes(s); + return 0; +} + +/* + * Close a cache and release the kmem_cache structure + * (must be used for caches created using kmem_cache_create) + */ +void kmem_cache_destroy(struct kmem_cache *s) +{ + down_write(&slub_lock); + s->refcount--; + if (!s->refcount) { + list_del(&s->list); + if (kmem_cache_close(s)) + WARN_ON(1); + sysfs_slab_remove(s); + kfree(s); + } + up_write(&slub_lock); +} +EXPORT_SYMBOL(kmem_cache_destroy); + +/******************************************************************** + * Kmalloc subsystem + *******************************************************************/ + +struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned; +EXPORT_SYMBOL(kmalloc_caches); + +#ifdef CONFIG_ZONE_DMA +static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1]; +#endif + +static int __init setup_slub_min_order(char *str) +{ + get_option (&str, &slub_min_order); + + return 1; +} + +__setup("slub_min_order=", setup_slub_min_order); + +static int __init setup_slub_max_order(char *str) +{ + get_option (&str, &slub_max_order); + + return 1; +} + +__setup("slub_max_order=", setup_slub_max_order); + +static int __init setup_slub_min_objects(char *str) +{ + get_option (&str, &slub_min_objects); + + return 1; +} + +__setup("slub_min_objects=", setup_slub_min_objects); + +static int __init setup_slub_nomerge(char *str) +{ + slub_nomerge = 1; + return 1; +} + +__setup("slub_nomerge", setup_slub_nomerge); + +static int __init setup_slub_debug(char *str) +{ + if (!str || *str != '=') + slub_debug = DEBUG_DEFAULT_FLAGS; + else { + str++; + if (*str == 0 || *str == ',') + slub_debug = DEBUG_DEFAULT_FLAGS; + else + for( ;*str && *str != ','; str++) + switch (*str) { + case 'f' : case 'F' : + slub_debug |= SLAB_DEBUG_FREE; + break; + case 'z' : case 'Z' : + slub_debug |= SLAB_RED_ZONE; + break; + case 'p' : case 'P' : + slub_debug |= SLAB_POISON; + break; + case 'u' : case 'U' : + slub_debug |= SLAB_STORE_USER; + break; + case 't' : case 'T' : + slub_debug |= SLAB_TRACE; + break; + default: + printk(KERN_ERR "slub_debug option '%c' " + "unknown. 
skipped\n",*str); + } + } + + if (*str == ',') + slub_debug_slabs = str + 1; + return 1; +} + +__setup("slub_debug", setup_slub_debug); + +static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, + const char *name, int size, gfp_t gfp_flags) +{ + unsigned int flags = 0; + + if (gfp_flags & SLUB_DMA) + flags = SLAB_CACHE_DMA; + + down_write(&slub_lock); + if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, + flags, NULL, NULL)) + goto panic; + + list_add(&s->list, &slab_caches); + up_write(&slub_lock); + if (sysfs_slab_add(s)) + goto panic; + return s; + +panic: + panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); +} + +static struct kmem_cache *get_slab(size_t size, gfp_t flags) +{ + int index = kmalloc_index(size); + + if (!size) + return NULL; + + /* Allocation too large? */ + BUG_ON(index < 0); + +#ifdef CONFIG_ZONE_DMA + if ((flags & SLUB_DMA)) { + struct kmem_cache *s; + struct kmem_cache *x; + char *text; + size_t realsize; + + s = kmalloc_caches_dma[index]; + if (s) + return s; + + /* Dynamically create dma cache */ + x = kmalloc(kmem_size, flags & ~SLUB_DMA); + if (!x) + panic("Unable to allocate memory for dma cache\n"); + + if (index <= KMALLOC_SHIFT_HIGH) + realsize = 1 << index; + else { + if (index == 1) + realsize = 96; + else + realsize = 192; + } + + text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", + (unsigned int)realsize); + s = create_kmalloc_cache(x, text, realsize, flags); + kmalloc_caches_dma[index] = s; + return s; + } +#endif + return &kmalloc_caches[index]; +} + +void *__kmalloc(size_t size, gfp_t flags) +{ + struct kmem_cache *s = get_slab(size, flags); + + if (s) + return kmem_cache_alloc(s, flags); + return NULL; +} +EXPORT_SYMBOL(__kmalloc); + +#ifdef CONFIG_NUMA +void *__kmalloc_node(size_t size, gfp_t flags, int node) +{ + struct kmem_cache *s = get_slab(size, flags); + + if (s) + return kmem_cache_alloc_node(s, flags, node); + return NULL; +} +EXPORT_SYMBOL(__kmalloc_node); +#endif + +size_t ksize(const void *object) +{ + struct page *page = get_object_page(object); + struct kmem_cache *s; + + BUG_ON(!page); + s = page->slab; + BUG_ON(!s); + + /* + * Debugging requires use of the padding between object + * and whatever may come after it. + */ + if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) + return s->objsize; + + /* + * If we have the need to store the freelist pointer + * back there or track user information then we can + * only use the space before that information. + */ + if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) + return s->inuse; + + /* + * Else we can use all the padding etc for the allocation + */ + return s->size; +} +EXPORT_SYMBOL(ksize); + +void kfree(const void *x) +{ + struct kmem_cache *s; + struct page *page; + + if (!x) + return; + + page = virt_to_page(x); + + if (unlikely(PageCompound(page))) + page = page->first_page; + + s = page->slab; + + if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER))) + set_tracking(s, (void *)x, TRACK_FREE); + slab_free(s, page, (void *)x); +} +EXPORT_SYMBOL(kfree); + +/** + * krealloc - reallocate memory. The contents will remain unchanged. + * + * @p: object to reallocate memory for. + * @new_size: how many bytes of memory are required. + * @flags: the type of memory to allocate. + * + * The contents of the object pointed to are preserved up to the + * lesser of the new and old sizes. If @p is %NULL, krealloc() + * behaves exactly like kmalloc(). If @size is 0 and @p is not a + * %NULL pointer, the object pointed to is freed. 
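setup_slub_debug() above accepts forms such as slub_debug, slub_debug=FZPU and slub_debug=P,dentry, where the trailing name restricts debugging to caches whose name matches. A userspace sketch of that parsing shape, with placeholder flag values rather than the real SLAB_* constants:

#include <stdio.h>
#include <ctype.h>

#define DBG_FREE	0x01	/* F: consistency checks on free */
#define DBG_REDZONE	0x02	/* Z: red zoning */
#define DBG_POISON	0x04	/* P: object poisoning */
#define DBG_USER	0x08	/* U: track last alloc/free */
#define DBG_TRACE	0x10	/* T: trace every alloc/free */
#define DBG_DEFAULT	(DBG_FREE | DBG_REDZONE | DBG_POISON | DBG_USER)

/* Parse "FZP,dentry"-style strings: letters select checks, an optional
 * ",name" suffix limits them to caches whose name starts with "name".
 */
static unsigned parse_debug_options(const char *str, const char **slabs)
{
	unsigned flags = 0;

	*slabs = NULL;
	if (!str || !*str || *str == ',') {
		flags = DBG_DEFAULT;
	} else {
		for (; *str && *str != ','; str++)
			switch (toupper((unsigned char)*str)) {
			case 'F': flags |= DBG_FREE;	break;
			case 'Z': flags |= DBG_REDZONE;	break;
			case 'P': flags |= DBG_POISON;	break;
			case 'U': flags |= DBG_USER;	break;
			case 'T': flags |= DBG_TRACE;	break;
			default:
				printf("option '%c' unknown, skipped\n", *str);
			}
	}
	if (str && *str == ',')
		*slabs = str + 1;
	return flags;
}

int main(void)
{
	const char *slabs;
	unsigned flags = parse_debug_options("FZP,dentry", &slabs);

	printf("flags=0x%x, restricted to \"%s\"\n",
	       flags, slabs ? slabs : "(all caches)");
	return 0;
}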
+ */ +void *krealloc(const void *p, size_t new_size, gfp_t flags) +{ + struct kmem_cache *new_cache; + void *ret; + struct page *page; + + if (unlikely(!p)) + return kmalloc(new_size, flags); + + if (unlikely(!new_size)) { + kfree(p); + return NULL; + } + + page = virt_to_page(p); + + if (unlikely(PageCompound(page))) + page = page->first_page; + + new_cache = get_slab(new_size, flags); + + /* + * If new size fits in the current cache, bail out. + */ + if (likely(page->slab == new_cache)) + return (void *)p; + + ret = kmalloc(new_size, flags); + if (ret) { + memcpy(ret, p, min(new_size, ksize(p))); + kfree(p); + } + return ret; +} +EXPORT_SYMBOL(krealloc); + +/******************************************************************** + * Basic setup of slabs + *******************************************************************/ + +void __init kmem_cache_init(void) +{ + int i; + +#ifdef CONFIG_NUMA + /* + * Must first have the slab cache available for the allocations of the + * struct kmalloc_cache_node's. There is special bootstrap code in + * kmem_cache_open for slab_state == DOWN. + */ + create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", + sizeof(struct kmem_cache_node), GFP_KERNEL); +#endif + + /* Able to allocate the per node structures */ + slab_state = PARTIAL; + + /* Caches that are not of the two-to-the-power-of size */ + create_kmalloc_cache(&kmalloc_caches[1], + "kmalloc-96", 96, GFP_KERNEL); + create_kmalloc_cache(&kmalloc_caches[2], + "kmalloc-192", 192, GFP_KERNEL); + + for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) + create_kmalloc_cache(&kmalloc_caches[i], + "kmalloc", 1 << i, GFP_KERNEL); + + slab_state = UP; + + /* Provide the correct kmalloc names now that the caches are up */ + for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) + kmalloc_caches[i]. name = + kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); + +#ifdef CONFIG_SMP + register_cpu_notifier(&slab_notifier); +#endif + + if (nr_cpu_ids) /* Remove when nr_cpu_ids is fixed upstream ! */ + kmem_size = offsetof(struct kmem_cache, cpu_slab) + + nr_cpu_ids * sizeof(struct page *); + + printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," + " Processors=%d, Nodes=%d\n", + KMALLOC_SHIFT_HIGH, L1_CACHE_BYTES, + slub_min_order, slub_max_order, slub_min_objects, + nr_cpu_ids, nr_node_ids); +} + +/* + * Find a mergeable slab cache + */ +static int slab_unmergeable(struct kmem_cache *s) +{ + if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) + return 1; + + if (s->ctor || s->dtor) + return 1; + + return 0; +} + +static struct kmem_cache *find_mergeable(size_t size, + size_t align, unsigned long flags, + void (*ctor)(void *, struct kmem_cache *, unsigned long), + void (*dtor)(void *, struct kmem_cache *, unsigned long)) +{ + struct list_head *h; + + if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) + return NULL; + + if (ctor || dtor) + return NULL; + + size = ALIGN(size, sizeof(void *)); + align = calculate_alignment(flags, align, size); + size = ALIGN(size, align); + + list_for_each(h, &slab_caches) { + struct kmem_cache *s = + container_of(h, struct kmem_cache, list); + + if (slab_unmergeable(s)) + continue; + + if (size > s->size) + continue; + + if (((flags | slub_debug) & SLUB_MERGE_SAME) != + (s->flags & SLUB_MERGE_SAME)) + continue; + /* + * Check if alignment is compatible. 
+ * Courtesy of Adrian Drzewiecki + */ + if ((s->size & ~(align -1)) != s->size) + continue; + + if (s->size - size >= sizeof(void *)) + continue; + + return s; + } + return NULL; +} + +struct kmem_cache *kmem_cache_create(const char *name, size_t size, + size_t align, unsigned long flags, + void (*ctor)(void *, struct kmem_cache *, unsigned long), + void (*dtor)(void *, struct kmem_cache *, unsigned long)) +{ + struct kmem_cache *s; + + down_write(&slub_lock); + s = find_mergeable(size, align, flags, dtor, ctor); + if (s) { + s->refcount++; + /* + * Adjust the object sizes so that we clear + * the complete object on kzalloc. + */ + s->objsize = max(s->objsize, (int)size); + s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); + if (sysfs_slab_alias(s, name)) + goto err; + } else { + s = kmalloc(kmem_size, GFP_KERNEL); + if (s && kmem_cache_open(s, GFP_KERNEL, name, + size, align, flags, ctor, dtor)) { + if (sysfs_slab_add(s)) { + kfree(s); + goto err; + } + list_add(&s->list, &slab_caches); + } else + kfree(s); + } + up_write(&slub_lock); + return s; + +err: + up_write(&slub_lock); + if (flags & SLAB_PANIC) + panic("Cannot create slabcache %s\n", name); + else + s = NULL; + return s; +} +EXPORT_SYMBOL(kmem_cache_create); + +void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags) +{ + void *x; + + x = kmem_cache_alloc(s, flags); + if (x) + memset(x, 0, s->objsize); + return x; +} +EXPORT_SYMBOL(kmem_cache_zalloc); + +#ifdef CONFIG_SMP +static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu) +{ + struct list_head *h; + + down_read(&slub_lock); + list_for_each(h, &slab_caches) { + struct kmem_cache *s = + container_of(h, struct kmem_cache, list); + + func(s, cpu); + } + up_read(&slub_lock); +} + +/* + * Use the cpu notifier to insure that the slab are flushed + * when necessary. + */ +static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + long cpu = (long)hcpu; + + switch (action) { + case CPU_UP_CANCELED: + case CPU_DEAD: + for_all_slabs(__flush_cpu_slab, cpu); + break; + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata slab_notifier = + { &slab_cpuup_callback, NULL, 0 }; + +#endif + +/*************************************************************** + * Compatiblility definitions + **************************************************************/ + +int kmem_cache_shrink(struct kmem_cache *s) +{ + flush_all(s); + return 0; +} +EXPORT_SYMBOL(kmem_cache_shrink); + +#ifdef CONFIG_NUMA + +/***************************************************************** + * Generic reaper used to support the page allocator + * (the cpu slabs are reaped by a per slab workqueue). + * + * Maybe move this to the page allocator? 
+ ****************************************************************/ + +static DEFINE_PER_CPU(unsigned long, reap_node); + +static void init_reap_node(int cpu) +{ + int node; + + node = next_node(cpu_to_node(cpu), node_online_map); + if (node == MAX_NUMNODES) + node = first_node(node_online_map); + + __get_cpu_var(reap_node) = node; +} + +static void next_reap_node(void) +{ + int node = __get_cpu_var(reap_node); + + /* + * Also drain per cpu pages on remote zones + */ + if (node != numa_node_id()) + drain_node_pages(node); + + node = next_node(node, node_online_map); + if (unlikely(node >= MAX_NUMNODES)) + node = first_node(node_online_map); + __get_cpu_var(reap_node) = node; +} +#else +#define init_reap_node(cpu) do { } while (0) +#define next_reap_node(void) do { } while (0) +#endif + +#define REAPTIMEOUT_CPUC (2*HZ) + +#ifdef CONFIG_SMP +static DEFINE_PER_CPU(struct delayed_work, reap_work); + +static void cache_reap(struct work_struct *unused) +{ + next_reap_node(); + refresh_cpu_vm_stats(smp_processor_id()); + schedule_delayed_work(&__get_cpu_var(reap_work), + REAPTIMEOUT_CPUC); +} + +static void __devinit start_cpu_timer(int cpu) +{ + struct delayed_work *reap_work = &per_cpu(reap_work, cpu); + + /* + * When this gets called from do_initcalls via cpucache_init(), + * init_workqueues() has already run, so keventd will be setup + * at that time. + */ + if (keventd_up() && reap_work->work.func == NULL) { + init_reap_node(cpu); + INIT_DELAYED_WORK(reap_work, cache_reap); + schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); + } +} + +static int __init cpucache_init(void) +{ + int cpu; + + /* + * Register the timers that drain pcp pages and update vm statistics + */ + for_each_online_cpu(cpu) + start_cpu_timer(cpu); + return 0; +} +__initcall(cpucache_init); +#endif + +#ifdef SLUB_RESILIENCY_TEST +static unsigned long validate_slab_cache(struct kmem_cache *s); + +static void resiliency_test(void) +{ + u8 *p; + + printk(KERN_ERR "SLUB resiliency testing\n"); + printk(KERN_ERR "-----------------------\n"); + printk(KERN_ERR "A. Corruption after allocation\n"); + + p = kzalloc(16, GFP_KERNEL); + p[16] = 0x12; + printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" + " 0x12->0x%p\n\n", p + 16); + + validate_slab_cache(kmalloc_caches + 4); + + /* Hmmm... The next two are dangerous */ + p = kzalloc(32, GFP_KERNEL); + p[32 + sizeof(void *)] = 0x34; + printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" + " 0x34 -> -0x%p\n", p); + printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n"); + + validate_slab_cache(kmalloc_caches + 5); + p = kzalloc(64, GFP_KERNEL); + p += 64 + (get_cycles() & 0xff) * sizeof(void *); + *p = 0x56; + printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", + p); + printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n"); + validate_slab_cache(kmalloc_caches + 6); + + printk(KERN_ERR "\nB. Corruption after free\n"); + p = kzalloc(128, GFP_KERNEL); + kfree(p); + *p = 0x78; + printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); + validate_slab_cache(kmalloc_caches + 7); + + p = kzalloc(256, GFP_KERNEL); + kfree(p); + p[50] = 0x9a; + printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p); + validate_slab_cache(kmalloc_caches + 8); + + p = kzalloc(512, GFP_KERNEL); + kfree(p); + p[512] = 0xab; + printk(KERN_ERR "\n3. 
kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); + validate_slab_cache(kmalloc_caches + 9); +} +#else +static void resiliency_test(void) {}; +#endif + +/* + * These are not as efficient as kmalloc for the non debug case. + * We do not have the page struct available so we have to touch one + * cacheline in struct kmem_cache to check slab flags. + */ +void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) +{ + struct kmem_cache *s = get_slab(size, gfpflags); + void *object; + + if (!s) + return NULL; + + object = kmem_cache_alloc(s, gfpflags); + + if (object && (s->flags & SLAB_STORE_USER)) + set_track(s, object, TRACK_ALLOC, caller); + + return object; +} + +void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, + int node, void *caller) +{ + struct kmem_cache *s = get_slab(size, gfpflags); + void *object; + + if (!s) + return NULL; + + object = kmem_cache_alloc_node(s, gfpflags, node); + + if (object && (s->flags & SLAB_STORE_USER)) + set_track(s, object, TRACK_ALLOC, caller); + + return object; +} + +#ifdef CONFIG_SYSFS + +static unsigned long count_partial(struct kmem_cache_node *n) +{ + unsigned long flags; + unsigned long x = 0; + struct page *page; + + spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + x += page->inuse; + spin_unlock_irqrestore(&n->list_lock, flags); + return x; +} + +enum slab_stat_type { + SL_FULL, + SL_PARTIAL, + SL_CPU, + SL_OBJECTS +}; + +#define SO_FULL (1 << SL_FULL) +#define SO_PARTIAL (1 << SL_PARTIAL) +#define SO_CPU (1 << SL_CPU) +#define SO_OBJECTS (1 << SL_OBJECTS) + +static unsigned long slab_objects(struct kmem_cache *s, + char *buf, unsigned long flags) +{ + unsigned long total = 0; + int cpu; + int node; + int x; + unsigned long *nodes; + unsigned long *per_cpu; + + nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); + per_cpu = nodes + nr_node_ids; + + for_each_possible_cpu(cpu) { + struct page *page = s->cpu_slab[cpu]; + int node; + + if (page) { + node = page_to_nid(page); + if (flags & SO_CPU) { + int x = 0; + + if (flags & SO_OBJECTS) + x = page->inuse; + else + x = 1; + total += x; + nodes[node] += x; + } + per_cpu[node]++; + } + } + + for_each_online_node(node) { + struct kmem_cache_node *n = get_node(s, node); + + if (flags & SO_PARTIAL) { + if (flags & SO_OBJECTS) + x = count_partial(n); + else + x = n->nr_partial; + total += x; + nodes[node] += x; + } + + if (flags & SO_FULL) { + int full_slabs = atomic_read(&n->nr_slabs) + - per_cpu[node] + - n->nr_partial; + + if (flags & SO_OBJECTS) + x = full_slabs * s->objects; + else + x = full_slabs; + total += x; + nodes[node] += x; + } + } + + x = sprintf(buf, "%lu", total); +#ifdef CONFIG_NUMA + for_each_online_node(node) + if (nodes[node]) + x += sprintf(buf + x, " N%d=%lu", + node, nodes[node]); +#endif + kfree(nodes); + return x + sprintf(buf + x, "\n"); +} + +static int any_slab_objects(struct kmem_cache *s) +{ + int node; + int cpu; + + for_each_possible_cpu(cpu) + if (s->cpu_slab[cpu]) + return 1; + + for_each_node(node) { + struct kmem_cache_node *n = get_node(s, node); + + if (n->nr_partial || atomic_read(&n->nr_slabs)) + return 1; + } + return 0; +} + +#define to_slab_attr(n) container_of(n, struct slab_attribute, attr) +#define to_slab(n) container_of(n, struct kmem_cache, kobj); + +struct slab_attribute { + struct attribute attr; + ssize_t (*show)(struct kmem_cache *s, char *buf); + ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); +}; + +#define SLAB_ATTR_RO(_name) \ + static struct 
slab_attribute _name##_attr = __ATTR_RO(_name) + +#define SLAB_ATTR(_name) \ + static struct slab_attribute _name##_attr = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + + +static ssize_t slab_size_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", s->size); +} +SLAB_ATTR_RO(slab_size); + +static ssize_t align_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", s->align); +} +SLAB_ATTR_RO(align); + +static ssize_t object_size_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", s->objsize); +} +SLAB_ATTR_RO(object_size); + +static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", s->objects); +} +SLAB_ATTR_RO(objs_per_slab); + +static ssize_t order_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", s->order); +} +SLAB_ATTR_RO(order); + +static ssize_t ctor_show(struct kmem_cache *s, char *buf) +{ + if (s->ctor) { + int n = sprint_symbol(buf, (unsigned long)s->ctor); + + return n + sprintf(buf + n, "\n"); + } + return 0; +} +SLAB_ATTR_RO(ctor); + +static ssize_t dtor_show(struct kmem_cache *s, char *buf) +{ + if (s->dtor) { + int n = sprint_symbol(buf, (unsigned long)s->dtor); + + return n + sprintf(buf + n, "\n"); + } + return 0; +} +SLAB_ATTR_RO(dtor); + +static ssize_t aliases_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", s->refcount - 1); +} +SLAB_ATTR_RO(aliases); + +static ssize_t slabs_show(struct kmem_cache *s, char *buf) +{ + return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU); +} +SLAB_ATTR_RO(slabs); + +static ssize_t partial_show(struct kmem_cache *s, char *buf) +{ + return slab_objects(s, buf, SO_PARTIAL); +} +SLAB_ATTR_RO(partial); + +static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) +{ + return slab_objects(s, buf, SO_CPU); +} +SLAB_ATTR_RO(cpu_slabs); + +static ssize_t objects_show(struct kmem_cache *s, char *buf) +{ + return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS); +} +SLAB_ATTR_RO(objects); + +static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); +} + +static ssize_t sanity_checks_store(struct kmem_cache *s, + const char *buf, size_t length) +{ + s->flags &= ~SLAB_DEBUG_FREE; + if (buf[0] == '1') + s->flags |= SLAB_DEBUG_FREE; + return length; +} +SLAB_ATTR(sanity_checks); + +static ssize_t trace_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); +} + +static ssize_t trace_store(struct kmem_cache *s, const char *buf, + size_t length) +{ + s->flags &= ~SLAB_TRACE; + if (buf[0] == '1') + s->flags |= SLAB_TRACE; + return length; +} +SLAB_ATTR(trace); + +static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); +} + +static ssize_t reclaim_account_store(struct kmem_cache *s, + const char *buf, size_t length) +{ + s->flags &= ~SLAB_RECLAIM_ACCOUNT; + if (buf[0] == '1') + s->flags |= SLAB_RECLAIM_ACCOUNT; + return length; +} +SLAB_ATTR(reclaim_account); + +static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", !!(s->flags & + (SLAB_HWCACHE_ALIGN|SLAB_MUST_HWCACHE_ALIGN))); +} +SLAB_ATTR_RO(hwcache_align); + +#ifdef CONFIG_ZONE_DMA +static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); +} +SLAB_ATTR_RO(cache_dma); +#endif + +static ssize_t destroy_by_rcu_show(struct 
kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); +} +SLAB_ATTR_RO(destroy_by_rcu); + +static ssize_t red_zone_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); +} + +static ssize_t red_zone_store(struct kmem_cache *s, + const char *buf, size_t length) +{ + if (any_slab_objects(s)) + return -EBUSY; + + s->flags &= ~SLAB_RED_ZONE; + if (buf[0] == '1') + s->flags |= SLAB_RED_ZONE; + calculate_sizes(s); + return length; +} +SLAB_ATTR(red_zone); + +static ssize_t poison_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); +} + +static ssize_t poison_store(struct kmem_cache *s, + const char *buf, size_t length) +{ + if (any_slab_objects(s)) + return -EBUSY; + + s->flags &= ~SLAB_POISON; + if (buf[0] == '1') + s->flags |= SLAB_POISON; + calculate_sizes(s); + return length; +} +SLAB_ATTR(poison); + +static ssize_t store_user_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); +} + +static ssize_t store_user_store(struct kmem_cache *s, + const char *buf, size_t length) +{ + if (any_slab_objects(s)) + return -EBUSY; + + s->flags &= ~SLAB_STORE_USER; + if (buf[0] == '1') + s->flags |= SLAB_STORE_USER; + calculate_sizes(s); + return length; +} +SLAB_ATTR(store_user); + +#ifdef CONFIG_NUMA +static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", s->defrag_ratio / 10); +} + +static ssize_t defrag_ratio_store(struct kmem_cache *s, + const char *buf, size_t length) +{ + int n = simple_strtoul(buf, NULL, 10); + + if (n < 100) + s->defrag_ratio = n * 10; + return length; +} +SLAB_ATTR(defrag_ratio); +#endif + +static struct attribute * slab_attrs[] = { + &slab_size_attr.attr, + &object_size_attr.attr, + &objs_per_slab_attr.attr, + &order_attr.attr, + &objects_attr.attr, + &slabs_attr.attr, + &partial_attr.attr, + &cpu_slabs_attr.attr, + &ctor_attr.attr, + &dtor_attr.attr, + &aliases_attr.attr, + &align_attr.attr, + &sanity_checks_attr.attr, + &trace_attr.attr, + &hwcache_align_attr.attr, + &reclaim_account_attr.attr, + &destroy_by_rcu_attr.attr, + &red_zone_attr.attr, + &poison_attr.attr, + &store_user_attr.attr, +#ifdef CONFIG_ZONE_DMA + &cache_dma_attr.attr, +#endif +#ifdef CONFIG_NUMA + &defrag_ratio_attr.attr, +#endif + NULL +}; + +static struct attribute_group slab_attr_group = { + .attrs = slab_attrs, +}; + +static ssize_t slab_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct slab_attribute *attribute; + struct kmem_cache *s; + int err; + + attribute = to_slab_attr(attr); + s = to_slab(kobj); + + if (!attribute->show) + return -EIO; + + err = attribute->show(s, buf); + + return err; +} + +static ssize_t slab_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t len) +{ + struct slab_attribute *attribute; + struct kmem_cache *s; + int err; + + attribute = to_slab_attr(attr); + s = to_slab(kobj); + + if (!attribute->store) + return -EIO; + + err = attribute->store(s, buf, len); + + return err; +} + +static struct sysfs_ops slab_sysfs_ops = { + .show = slab_attr_show, + .store = slab_attr_store, +}; + +static struct kobj_type slab_ktype = { + .sysfs_ops = &slab_sysfs_ops, +}; + +static int uevent_filter(struct kset *kset, struct kobject *kobj) +{ + struct kobj_type *ktype = get_ktype(kobj); + + if (ktype == &slab_ktype) + return 1; + return 0; +} + +static struct kset_uevent_ops slab_uevent_ops = { + 
.filter = uevent_filter, +}; + +decl_subsys(slab, &slab_ktype, &slab_uevent_ops); + +#define ID_STR_LENGTH 64 + +/* Create a unique string id for a slab cache: + * format + * :[flags-]size:[memory address of kmemcache] + */ +static char *create_unique_id(struct kmem_cache *s) +{ + char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); + char *p = name; + + BUG_ON(!name); + + *p++ = ':'; + /* + * First flags affecting slabcache operations. We will only + * get here for aliasable slabs so we do not need to support + * too many flags. The flags here must cover all flags that + * are matched during merging to guarantee that the id is + * unique. + */ + if (s->flags & SLAB_CACHE_DMA) + *p++ = 'd'; + if (s->flags & SLAB_RECLAIM_ACCOUNT) + *p++ = 'a'; + if (s->flags & SLAB_DEBUG_FREE) + *p++ = 'F'; + if (p != name + 1) + *p++ = '-'; + p += sprintf(p, "%07d", s->size); + BUG_ON(p > name + ID_STR_LENGTH - 1); + return name; +} + +static int sysfs_slab_add(struct kmem_cache *s) +{ + int err; + const char *name; + int unmergeable; + + if (slab_state < SYSFS) + /* Defer until later */ + return 0; + + unmergeable = slab_unmergeable(s); + if (unmergeable) { + /* + * Slabcache can never be merged so we can use the name proper. + * This is typically the case for debug situations. In that + * case we can catch duplicate names easily. + */ + sysfs_remove_link(&slab_subsys.kset.kobj, s->name); + name = s->name; + } else { + /* + * Create a unique name for the slab as a target + * for the symlinks. + */ + name = create_unique_id(s); + } + + kobj_set_kset_s(s, slab_subsys); + kobject_set_name(&s->kobj, name); + kobject_init(&s->kobj); + err = kobject_add(&s->kobj); + if (err) + return err; + + err = sysfs_create_group(&s->kobj, &slab_attr_group); + if (err) + return err; + kobject_uevent(&s->kobj, KOBJ_ADD); + if (!unmergeable) { + /* Setup first alias */ + sysfs_slab_alias(s, s->name); + kfree(name); + } + return 0; +} + +static void sysfs_slab_remove(struct kmem_cache *s) +{ + kobject_uevent(&s->kobj, KOBJ_REMOVE); + kobject_del(&s->kobj); +} + +/* + * Need to buffer aliases during bootup until sysfs becomes + * available lest we loose that information. + */ +struct saved_alias { + struct kmem_cache *s; + const char *name; + struct saved_alias *next; +}; + +struct saved_alias *alias_list; + +static int sysfs_slab_alias(struct kmem_cache *s, const char *name) +{ + struct saved_alias *al; + + if (slab_state == SYSFS) { + /* + * If we have a leftover link then remove it. 
+ */ + sysfs_remove_link(&slab_subsys.kset.kobj, name); + return sysfs_create_link(&slab_subsys.kset.kobj, + &s->kobj, name); + } + + al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); + if (!al) + return -ENOMEM; + + al->s = s; + al->name = name; + al->next = alias_list; + alias_list = al; + return 0; +} + +static int __init slab_sysfs_init(void) +{ + int err; + + err = subsystem_register(&slab_subsys); + if (err) { + printk(KERN_ERR "Cannot register slab subsystem.\n"); + return -ENOSYS; + } + + finish_bootstrap(); + + while (alias_list) { + struct saved_alias *al = alias_list; + + alias_list = alias_list->next; + err = sysfs_slab_alias(al->s, al->name); + BUG_ON(err); + kfree(al); + } + + resiliency_test(); + return 0; +} + +__initcall(slab_sysfs_init); +#else +__initcall(finish_bootstrap); +#endif -- cgit v1.2.3 From 614410d5892af5f86d0ec14e28f9f6d5f4ac9e9b Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:49:38 -0700 Subject: SLUB: allocate smallest object size if the user asks for 0 bytes Makes SLUB behave like SLAB in this area to avoid issues.... Throw a stack dump to alert people. At some point the behavior should be switched back. NULL is no memory as far as I can tell and if the use asked for 0 bytes then he need to get no memory. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slub_def.h | 8 ++++++-- mm/slub.c | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 30b154ce7289..f8e0c86c48a9 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -80,8 +80,12 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; */ static inline int kmalloc_index(int size) { - if (size == 0) - return 0; + /* + * We should return 0 if size == 0 but we use the smallest object + * here for SLAB legacy reasons. + */ + WARN_ON_ONCE(size == 0); + if (size > 64 && size <= 96) return 1; if (size > 128 && size <= 192) diff --git a/mm/slub.c b/mm/slub.c index 4a8585befd84..9d52cce7c999 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1979,7 +1979,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags) { int index = kmalloc_index(size); - if (!size) + if (!index) return NULL; /* Allocation too large? */ -- cgit v1.2.3 From d85f33855c303acfa87fa457157cef755b6087df Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:49:39 -0700 Subject: Make page->private usable in compound pages If we add a new flag so that we can distinguish between the first page and the tail pages then we can avoid to use page->private in the first page. page->private == page for the first page, so there is no real information in there. Freeing up page->private makes the use of compound pages more transparent. They become more usable like real pages. Right now we have to be careful f.e. if we are going beyond PAGE_SIZE allocations in the slab on i386 because we can then no longer use the private field. This is one of the issues that cause us not to support debugging for page size slabs in SLAB. Having page->private available for SLUB would allow more meta information in the page struct. I can probably avoid the 16 bit ints that I have in there right now. Also if page->private is available then a compound page may be equipped with buffer heads. This may free up the way for filesystems to support larger blocks than page size. We add PageTail as an alias of PageReclaim. 
Compound pages cannot currently be reclaimed. Because of the alias one needs to check PageCompound first. The RFC for the this approach was discussed at http://marc.info/?t=117574302800001&r=1&w=2 [nacc@us.ibm.com: fix hugetlbfs] Signed-off-by: Christoph Lameter Signed-off-by: Nishanth Aravamudan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/mm/init.c | 2 +- fs/hugetlbfs/inode.c | 2 +- include/linux/mm.h | 33 ++++++++++++++++++++++++++++----- include/linux/page-flags.h | 14 ++++++++++++++ mm/internal.h | 2 +- mm/page_alloc.c | 29 ++++++++++++++++++++--------- mm/slab.c | 6 ++---- mm/slub.c | 19 ++++--------------- mm/swap.c | 2 +- 9 files changed, 72 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 4f36987eea72..2da841110727 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -121,7 +121,7 @@ lazy_mmu_prot_update (pte_t pte) return; /* i-cache is already coherent with d-cache */ if (PageCompound(page)) { - order = (unsigned long) (page[1].lru.prev); + order = compound_order(page); flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT)); } else diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 9ba71b252f3e..8e1b7825e2f3 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -450,7 +450,7 @@ static int hugetlbfs_symlink(struct inode *dir, */ static int hugetlbfs_set_page_dirty(struct page *page) { - struct page *head = (struct page *)page_private(page); + struct page *head = compound_head(page); SetPageDirty(head); return 0; diff --git a/include/linux/mm.h b/include/linux/mm.h index c95d96ebd5ad..8c149fa4491d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -267,17 +267,28 @@ static inline int get_page_unless_zero(struct page *page) return atomic_inc_not_zero(&page->_count); } +static inline struct page *compound_head(struct page *page) +{ + /* + * We could avoid the PageCompound(page) check if + * we would not overload PageTail(). + * + * This check has to be done in several performance critical + * paths of the slab etc. IMHO PageTail deserves its own flag. + */ + if (unlikely(PageCompound(page) && PageTail(page))) + return page->first_page; + return page; +} + static inline int page_count(struct page *page) { - if (unlikely(PageCompound(page))) - page = (struct page *)page_private(page); - return atomic_read(&page->_count); + return atomic_read(&compound_head(page)->_count); } static inline void get_page(struct page *page) { - if (unlikely(PageCompound(page))) - page = (struct page *)page_private(page); + page = compound_head(page); VM_BUG_ON(atomic_read(&page->_count) == 0); atomic_inc(&page->_count); } @@ -314,6 +325,18 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page) return (compound_page_dtor *)page[1].lru.next; } +static inline int compound_order(struct page *page) +{ + if (!PageCompound(page) || PageTail(page)) + return 0; + return (unsigned long)page[1].lru.prev; +} + +static inline void set_compound_order(struct page *page, unsigned long order) +{ + page[1].lru.prev = (void *)order; +} + /* * Multiple processes may "see" the same page. E.g. 
for untouched * mappings of /dev/null, all processes see the same page full of diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 96326594e55d..a1e143634946 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -94,6 +94,12 @@ /* PG_owner_priv_1 users should have descriptive aliases */ #define PG_checked PG_owner_priv_1 /* Used by some filesystems */ +/* + * Marks tail portion of a compound page. We currently do not reclaim + * compound pages so we can reuse a flag only used for reclaim here. + */ +#define PG_tail PG_reclaim + #if (BITS_PER_LONG > 32) /* * 64-bit-only flags build down from bit 31 @@ -241,6 +247,14 @@ static inline void SetPageUptodate(struct page *page) #define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags) #define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags) +/* + * Note: PG_tail is an alias of another page flag. The result of PageTail() + * is only valid if PageCompound(page) is true. + */ +#define PageTail(page) test_bit(PG_tail, &(page)->flags) +#define __SetPageTail(page) __set_bit(PG_tail, &(page)->flags) +#define __ClearPageTail(page) __clear_bit(PG_tail, &(page)->flags) + #ifdef CONFIG_SWAP #define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags) #define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags) diff --git a/mm/internal.h b/mm/internal.h index d527b80b292f..a3110c02aea7 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -24,7 +24,7 @@ static inline void set_page_count(struct page *page, int v) */ static inline void set_page_refcounted(struct page *page) { - VM_BUG_ON(PageCompound(page) && page_private(page) != (unsigned long)page); + VM_BUG_ON(PageCompound(page) && PageTail(page)); VM_BUG_ON(atomic_read(&page->_count)); set_page_count(page, 1); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 542fc088ff50..fc241fe295ab 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -225,7 +225,7 @@ static void bad_page(struct page *page) static void free_compound_page(struct page *page) { - __free_pages_ok(page, (unsigned long)page[1].lru.prev); + __free_pages_ok(page, compound_order(page)); } static void prep_compound_page(struct page *page, unsigned long order) @@ -234,12 +234,14 @@ static void prep_compound_page(struct page *page, unsigned long order) int nr_pages = 1 << order; set_compound_page_dtor(page, free_compound_page); - page[1].lru.prev = (void *)order; - for (i = 0; i < nr_pages; i++) { + set_compound_order(page, order); + __SetPageCompound(page); + for (i = 1; i < nr_pages; i++) { struct page *p = page + i; + __SetPageTail(p); __SetPageCompound(p); - set_page_private(p, (unsigned long)page); + p->first_page = page; } } @@ -248,15 +250,19 @@ static void destroy_compound_page(struct page *page, unsigned long order) int i; int nr_pages = 1 << order; - if (unlikely((unsigned long)page[1].lru.prev != order)) + if (unlikely(compound_order(page) != order)) bad_page(page); - for (i = 0; i < nr_pages; i++) { + if (unlikely(!PageCompound(page))) + bad_page(page); + __ClearPageCompound(page); + for (i = 1; i < nr_pages; i++) { struct page *p = page + i; - if (unlikely(!PageCompound(p) | - (page_private(p) != (unsigned long)page))) + if (unlikely(!PageCompound(p) | !PageTail(p) | + (p->first_page != page))) bad_page(page); + __ClearPageTail(p); __ClearPageCompound(p); } } @@ -429,13 +435,18 @@ static inline int free_pages_check(struct page *page) 1 << PG_private | 1 << PG_locked | 1 << PG_active | - 1 << PG_reclaim | 1 << PG_slab | 1 << PG_swapcache | 1 << 
PG_writeback | 1 << PG_reserved | 1 << PG_buddy )))) bad_page(page); + /* + * PageReclaim == PageTail. It is only an error + * for PageReclaim to be set if PageCompound is clear. + */ + if (unlikely(!PageCompound(page) && PageReclaim(page))) + bad_page(page); if (PageDirty(page)) __ClearPageDirty(page); /* diff --git a/mm/slab.c b/mm/slab.c index 9cd01fa60004..f4b2e22b5c61 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -602,8 +602,7 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache) static inline struct kmem_cache *page_get_cache(struct page *page) { - if (unlikely(PageCompound(page))) - page = (struct page *)page_private(page); + page = compound_head(page); BUG_ON(!PageSlab(page)); return (struct kmem_cache *)page->lru.next; } @@ -615,8 +614,7 @@ static inline void page_set_slab(struct page *page, struct slab *slab) static inline struct slab *page_get_slab(struct page *page) { - if (unlikely(PageCompound(page))) - page = (struct page *)page_private(page); + page = compound_head(page); BUG_ON(!PageSlab(page)); return (struct slab *)page->lru.prev; } diff --git a/mm/slub.c b/mm/slub.c index 9d52cce7c999..8fa1c6e937f5 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1325,9 +1325,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x) page = virt_to_page(x); - if (unlikely(PageCompound(page))) - page = page->first_page; - + page = compound_head(page); if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER))) set_tracking(s, x, TRACK_FREE); @@ -1338,10 +1336,7 @@ EXPORT_SYMBOL(kmem_cache_free); /* Figure out on which slab object the object resides */ static struct page *get_object_page(const void *x) { - struct page *page = virt_to_page(x); - - if (unlikely(PageCompound(page))) - page = page->first_page; + struct page *page = compound_head(virt_to_page(x)); if (!PageSlab(page)) return NULL; @@ -2081,10 +2076,7 @@ void kfree(const void *x) if (!x) return; - page = virt_to_page(x); - - if (unlikely(PageCompound(page))) - page = page->first_page; + page = compound_head(virt_to_page(x)); s = page->slab; @@ -2120,10 +2112,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) return NULL; } - page = virt_to_page(p); - - if (unlikely(PageCompound(page))) - page = page->first_page; + page = compound_head(virt_to_page(p)); new_cache = get_slab(new_size, flags); diff --git a/mm/swap.c b/mm/swap.c index 2ed7be39795e..218c52a24a21 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -55,7 +55,7 @@ static void fastcall __page_cache_release(struct page *page) static void put_compound_page(struct page *page) { - page = (struct page *)page_private(page); + page = compound_head(page); if (put_page_testzero(page)) { compound_page_dtor *dtor; -- cgit v1.2.3 From 6d7779538f765963ced45a3fa4bed7ba8d2c277d Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:49:40 -0700 Subject: mm: optimize compound_head() by avoiding a shared page flag The patch adds PageTail(page) and PageHead(page) to check if a page is the head or the tail of a compound page. This is done by masking the two bits describing the state of a compound page and then comparing them. So one comparision and a branch instead of two bit checks and two branches. 
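As a rough, standalone illustration of the trick (the PG_* bit positions below are invented for the example and this is not the kernel code itself; the real definitions are in the include/linux/page-flags.h hunk that follows), the head/tail test becomes a single mask-and-compare on page->flags:

    #include <stdio.h>

    /* Illustrative bit positions only. */
    #define PG_compound  14
    #define PG_reclaim   17

    /* Tail page: both bits set.  Head page: only PG_compound set. */
    #define HEAD_TAIL_MASK ((1UL << PG_compound) | (1UL << PG_reclaim))
    #define is_tail(flags) (((flags) & HEAD_TAIL_MASK) == HEAD_TAIL_MASK)
    #define is_head(flags) (((flags) & HEAD_TAIL_MASK) == (1UL << PG_compound))

    int main(void)
    {
            unsigned long head = 1UL << PG_compound;
            unsigned long tail = (1UL << PG_compound) | (1UL << PG_reclaim);

            printf("head: is_head=%d is_tail=%d\n", is_head(head), is_tail(head));
            printf("tail: is_head=%d is_tail=%d\n", is_head(tail), is_tail(tail));
            return 0;
    }

Either predicate then costs one AND, one compare and one branch, instead of testing PG_compound and PG_reclaim separately.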
Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 11 ++--------- include/linux/page-flags.h | 37 ++++++++++++++++++++++++++----------- mm/page_alloc.c | 10 ++++------ 3 files changed, 32 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 8c149fa4491d..695b90437bbc 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -269,14 +269,7 @@ static inline int get_page_unless_zero(struct page *page) static inline struct page *compound_head(struct page *page) { - /* - * We could avoid the PageCompound(page) check if - * we would not overload PageTail(). - * - * This check has to be done in several performance critical - * paths of the slab etc. IMHO PageTail deserves its own flag. - */ - if (unlikely(PageCompound(page) && PageTail(page))) + if (unlikely(PageTail(page))) return page->first_page; return page; } @@ -327,7 +320,7 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page) static inline int compound_order(struct page *page) { - if (!PageCompound(page) || PageTail(page)) + if (!PageHead(page)) return 0; return (unsigned long)page[1].lru.prev; } diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index a1e143634946..a3c8b60a9c3a 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -6,6 +6,7 @@ #define PAGE_FLAGS_H #include +#include /* * Various page->flags bits: @@ -94,12 +95,6 @@ /* PG_owner_priv_1 users should have descriptive aliases */ #define PG_checked PG_owner_priv_1 /* Used by some filesystems */ -/* - * Marks tail portion of a compound page. We currently do not reclaim - * compound pages so we can reuse a flag only used for reclaim here. - */ -#define PG_tail PG_reclaim - #if (BITS_PER_LONG > 32) /* * 64-bit-only flags build down from bit 31 @@ -248,12 +243,32 @@ static inline void SetPageUptodate(struct page *page) #define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags) /* - * Note: PG_tail is an alias of another page flag. The result of PageTail() - * is only valid if PageCompound(page) is true. 
+ * PG_reclaim is used in combination with PG_compound to mark the + * head and tail of a compound page + * + * PG_compound & PG_reclaim => Tail page + * PG_compound & ~PG_reclaim => Head page */ -#define PageTail(page) test_bit(PG_tail, &(page)->flags) -#define __SetPageTail(page) __set_bit(PG_tail, &(page)->flags) -#define __ClearPageTail(page) __clear_bit(PG_tail, &(page)->flags) + +#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) + +#define PageTail(page) ((page->flags & PG_head_tail_mask) \ + == PG_head_tail_mask) + +static inline void __SetPageTail(struct page *page) +{ + page->flags |= PG_head_tail_mask; +} + +static inline void __ClearPageTail(struct page *page) +{ + page->flags &= ~PG_head_tail_mask; +} + +#define PageHead(page) ((page->flags & PG_head_tail_mask) \ + == (1L << PG_compound)) +#define __SetPageHead(page) __SetPageCompound(page) +#define __ClearPageHead(page) __ClearPageCompound(page) #ifdef CONFIG_SWAP #define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fc241fe295ab..36d713e216e8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -235,12 +235,11 @@ static void prep_compound_page(struct page *page, unsigned long order) set_compound_page_dtor(page, free_compound_page); set_compound_order(page, order); - __SetPageCompound(page); + __SetPageHead(page); for (i = 1; i < nr_pages; i++) { struct page *p = page + i; __SetPageTail(p); - __SetPageCompound(p); p->first_page = page; } } @@ -253,17 +252,16 @@ static void destroy_compound_page(struct page *page, unsigned long order) if (unlikely(compound_order(page) != order)) bad_page(page); - if (unlikely(!PageCompound(page))) + if (unlikely(!PageHead(page))) bad_page(page); - __ClearPageCompound(page); + __ClearPageHead(page); for (i = 1; i < nr_pages; i++) { struct page *p = page + i; - if (unlikely(!PageCompound(p) | !PageTail(p) | + if (unlikely(!PageTail(p) | (p->first_page != page))) bad_page(page); __ClearPageTail(p); - __ClearPageCompound(p); } } -- cgit v1.2.3 From b49af68ff9fc5d6e0d96704a1843968b91cc73c6 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:49:41 -0700 Subject: Add virt_to_head_page and consolidate code in slab and slub Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 6 ++++++ mm/slab.c | 9 ++++----- mm/slub.c | 10 ++++------ 3 files changed, 14 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 695b90437bbc..4670ebd1f622 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -286,6 +286,12 @@ static inline void get_page(struct page *page) atomic_inc(&page->_count); } +static inline struct page *virt_to_head_page(const void *x) +{ + struct page *page = virt_to_page(x); + return compound_head(page); +} + /* * Setup the page count before being freed into the page allocator for * the first time (boot or memory hotplug) diff --git a/mm/slab.c b/mm/slab.c index f4b2e22b5c61..3e984afc199c 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -614,20 +614,19 @@ static inline void page_set_slab(struct page *page, struct slab *slab) static inline struct slab *page_get_slab(struct page *page) { - page = compound_head(page); BUG_ON(!PageSlab(page)); return (struct slab *)page->lru.prev; } static inline struct kmem_cache *virt_to_cache(const void *obj) { - struct page *page = virt_to_page(obj); + struct page *page = virt_to_head_page(obj); return page_get_cache(page); } static 
inline struct slab *virt_to_slab(const void *obj) { - struct page *page = virt_to_page(obj); + struct page *page = virt_to_head_page(obj); return page_get_slab(page); } @@ -2876,7 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, objp -= obj_offset(cachep); kfree_debugcheck(objp); - page = virt_to_page(objp); + page = virt_to_head_page(objp); slabp = page_get_slab(page); @@ -3100,7 +3099,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, struct slab *slabp; unsigned objnr; - slabp = page_get_slab(virt_to_page(objp)); + slabp = page_get_slab(virt_to_head_page(objp)); objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; } diff --git a/mm/slub.c b/mm/slub.c index 8fa1c6e937f5..347c11e80d8e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1323,9 +1323,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x) { struct page * page; - page = virt_to_page(x); - - page = compound_head(page); + page = virt_to_head_page(x); if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER))) set_tracking(s, x, TRACK_FREE); @@ -1336,7 +1334,7 @@ EXPORT_SYMBOL(kmem_cache_free); /* Figure out on which slab object the object resides */ static struct page *get_object_page(const void *x) { - struct page *page = compound_head(virt_to_page(x)); + struct page *page = virt_to_head_page(x); if (!PageSlab(page)) return NULL; @@ -2076,7 +2074,7 @@ void kfree(const void *x) if (!x) return; - page = compound_head(virt_to_page(x)); + page = virt_to_head_page(x); s = page->slab; @@ -2112,7 +2110,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) return NULL; } - page = compound_head(virt_to_page(p)); + page = virt_to_head_page(p); new_cache = get_slab(new_size, flags); -- cgit v1.2.3 From 643b113849d8faa68c9f01c3c9d929bfbffd50bd Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:49:42 -0700 Subject: slub: enable tracking of full slabs If slab tracking is on then build a list of full slabs so that we can verify the integrity of all slabs and are also able to built list of alloc/free callers. 
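As a loose sketch of why the extra list helps (a toy userspace model, not the SLUB code; locking, struct page and the partial-list handling are omitted), fully allocated slabs stay on a per-node "full" list so a validation or caller-tracking pass can still reach them:

    #include <stdio.h>

    /* Toy model: a slab moves to the node's "full" list once every object
     * in it is allocated, instead of disappearing from the bookkeeping. */
    struct slab {
            int inuse;               /* objects handed out */
            int objects;             /* capacity */
            struct slab *next;       /* singly linked, for the sketch only */
    };

    struct node {
            struct slab *partial;
            struct slab *full;
    };

    static void push(struct slab **list, struct slab *s)
    {
            s->next = *list;
            *list = s;
    }

    static int count(const struct slab *list)
    {
            int c = 0;

            for (; list; list = list->next)
                    c++;
            return c;
    }

    int main(void)
    {
            struct node n = { NULL, NULL };
            struct slab a = { .inuse = 3, .objects = 8 };
            struct slab b = { .inuse = 8, .objects = 8 };

            push(&n.partial, &a);   /* still has free objects */
            push(&n.full, &b);      /* fully allocated, but still visible */

            printf("reachable slabs: partial=%d full=%d\n",
                   count(n.partial), count(n.full));
            return 0;
    }

In the actual patch the list lives in struct kmem_cache_node, additions and removals are guarded by the node's list_lock, and the tracking is only done when SLAB_STORE_USER is set.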
Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slub_def.h | 1 + mm/slub.c | 41 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index f8e0c86c48a9..ea27065e80e6 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -16,6 +16,7 @@ struct kmem_cache_node { unsigned long nr_partial; atomic_long_t nr_slabs; struct list_head partial; + struct list_head full; }; /* diff --git a/mm/slub.c b/mm/slub.c index cfc5301afe42..c4f40d373d1e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -661,6 +661,40 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search) return search == NULL; } +/* + * Tracking of fully allocated slabs for debugging + */ +static void add_full(struct kmem_cache *s, struct page *page) +{ + struct kmem_cache_node *n; + + VM_BUG_ON(!irqs_disabled()); + + VM_BUG_ON(!irqs_disabled()); + + if (!(s->flags & SLAB_STORE_USER)) + return; + + n = get_node(s, page_to_nid(page)); + spin_lock(&n->list_lock); + list_add(&page->lru, &n->full); + spin_unlock(&n->list_lock); +} + +static void remove_full(struct kmem_cache *s, struct page *page) +{ + struct kmem_cache_node *n; + + if (!(s->flags & SLAB_STORE_USER)) + return; + + n = get_node(s, page_to_nid(page)); + + spin_lock(&n->list_lock); + list_del(&page->lru); + spin_unlock(&n->list_lock); +} + static int alloc_object_checks(struct kmem_cache *s, struct page *page, void *object) { @@ -1090,6 +1124,8 @@ static void putback_slab(struct kmem_cache *s, struct page *page) if (page->inuse) { if (page->freelist) add_partial(s, page); + else if (PageError(page)) + add_full(s, page); slab_unlock(page); } else { slab_unlock(page); @@ -1302,7 +1338,7 @@ out_unlock: slab_empty: if (prior) /* - * Partially used slab that is on the partial list. + * Slab on the partial list. */ remove_partial(s, page); @@ -1314,6 +1350,8 @@ slab_empty: debug: if (!free_object_checks(s, page, x)) goto out_unlock; + if (!PageActive(page) && !page->freelist) + remove_full(s, page); if (s->flags & SLAB_STORE_USER) set_track(s, x, TRACK_FREE, addr); goto checks_ok; @@ -1466,6 +1504,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n) atomic_long_set(&n->nr_slabs, 0); spin_lock_init(&n->list_lock); INIT_LIST_HEAD(&n->partial); + INIT_LIST_HEAD(&n->full); } #ifdef CONFIG_NUMA -- cgit v1.2.3 From 6225e93735acaa09865bce746958f1046c2e0bc3 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:49:50 -0700 Subject: Quicklists for page table pages On x86_64 this cuts allocation overhead for page table pages down to a fraction (kernel compile / editing load. TSC based measurement of times spend in each function): no quicklist pte_alloc 1569048 4.3s(401ns/2.7us/179.7us) pmd_alloc 780988 2.1s(337ns/2.7us/86.1us) pud_alloc 780072 2.2s(424ns/2.8us/300.6us) pgd_alloc 260022 1s(920ns/4us/263.1us) quicklist: pte_alloc 452436 573.4ms(8ns/1.3us/121.1us) pmd_alloc 196204 174.5ms(7ns/889ns/46.1us) pud_alloc 195688 172.4ms(7ns/881ns/151.3us) pgd_alloc 65228 9.8ms(8ns/150ns/6.1us) pgd allocations are the most complex and there we see the most dramatic improvement (may be we can cut down the amount of pgds cached somewhat?). But even the pte allocations still see a doubling of performance. 1. Proven code from the IA64 arch. The method used here has been fine tuned for years and is NUMA aware. 
It is based on the knowledge that accesses to page table pages are sparse in nature. Taking a page off the freelists instead of allocating a zeroed pages allows a reduction of number of cachelines touched in addition to getting rid of the slab overhead. So performance improves. This is particularly useful if pgds contain standard mappings. We can save on the teardown and setup of such a page if we have some on the quicklists. This includes avoiding lists operations that are otherwise necessary on alloc and free to track pgds. 2. Light weight alternative to use slab to manage page size pages Slab overhead is significant and even page allocator use is pretty heavy weight. The use of a per cpu quicklist means that we touch only two cachelines for an allocation. There is no need to access the page_struct (unless arch code needs to fiddle around with it). So the fast past just means bringing in one cacheline at the beginning of the page. That same cacheline may then be used to store the page table entry. Or a second cacheline may be used if the page table entry is not in the first cacheline of the page. The current code will zero the page which means touching 32 cachelines (assuming 128 byte). We get down from 32 to 2 cachelines in the fast path. 3. x86_64 gets lightweight page table page management. This will allow x86_64 arch code to faster repopulate pgds and other page table entries. The list operations for pgds are reduced in the same way as for i386 to the point where a pgd is allocated from the page allocator and when it is freed back to the page allocator. A pgd can pass through the quicklists without having to be reinitialized. 64 Consolidation of code from multiple arches So far arches have their own implementation of quicklist management. This patch moves that feature into the core allowing an easier maintenance and consistent management of quicklists. Page table pages have the characteristics that they are typically zero or in a known state when they are freed. This is usually the exactly same state as needed after allocation. So it makes sense to build a list of freed page table pages and then consume the pages already in use first. Those pages have already been initialized correctly (thus no need to zero them) and are likely already cached in such a way that the MMU can use them most effectively. Page table pages are used in a sparse way so zeroing them on allocation is not too useful. Such an implementation already exits for ia64. Howver, that implementation did not support constructors and destructors as needed by i386 / x86_64. It also only supported a single quicklist. The implementation here has constructor and destructor support as well as the ability for an arch to specify how many quicklists are needed. Quicklists are defined by an arch defining CONFIG_QUICKLIST. If more than one quicklist is necessary then we can define NR_QUICK for additional lists. F.e. i386 needs two and thus has config NR_QUICK int default 2 If an arch has requested quicklist support then pages can be allocated from the quicklist (or from the page allocator if the quicklist is empty) via: quicklist_alloc(, , ) Page table pages can be freed using: quicklist_free(, , ) Pages must have a definite state after allocation and before they are freed. If no constructor is specified then pages will be zeroed on allocation and must be zeroed before they are freed. If a constructor is used then the constructor will establish a definite page state. F.e. 
the i386 and x86_64 pgd constructors establish certain mappings. Constructors and destructors can also be used to track the pages. i386 and x86_64 use a list of pgds in order to be able to dynamically update standard mappings. Signed-off-by: Christoph Lameter Cc: "David S. Miller" Cc: Andi Kleen Cc: "Luck, Tony" Cc: William Lee Irwin III Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/quicklist.h | 94 +++++++++++++++++++++++++++++++++++++++++++++++ mm/Kconfig | 5 +++ mm/Makefile | 2 + mm/quicklist.c | 88 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 189 insertions(+) create mode 100644 include/linux/quicklist.h create mode 100644 mm/quicklist.c (limited to 'include/linux') diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h new file mode 100644 index 000000000000..9371c6116df3 --- /dev/null +++ b/include/linux/quicklist.h @@ -0,0 +1,94 @@ +#ifndef LINUX_QUICKLIST_H +#define LINUX_QUICKLIST_H +/* + * Fast allocations and disposal of pages. Pages must be in the condition + * as needed after allocation when they are freed. Per cpu lists of pages + * are kept that only contain node local pages. + * + * (C) 2007, SGI. Christoph Lameter + */ +#include +#include +#include + +#ifdef CONFIG_QUICKLIST + +struct quicklist { + void *page; + int nr_pages; +}; + +DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; + +/* + * The two key functions quicklist_alloc and quicklist_free are inline so + * that they may be custom compiled for the platform. + * Specifying a NULL ctor can remove constructor support. Specifying + * a constant quicklist allows the determination of the exact address + * in the per cpu area. + * + * The fast patch in quicklist_alloc touched only a per cpu cacheline and + * the first cacheline of the page itself. There is minmal overhead involved. 
+ */ +static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) +{ + struct quicklist *q; + void **p = NULL; + + q =&get_cpu_var(quicklist)[nr]; + p = q->page; + if (likely(p)) { + q->page = p[0]; + p[0] = NULL; + q->nr_pages--; + } + put_cpu_var(quicklist); + if (likely(p)) + return p; + + p = (void *)__get_free_page(flags | __GFP_ZERO); + if (ctor && p) + ctor(p); + return p; +} + +static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p, + struct page *page) +{ + struct quicklist *q; + int nid = page_to_nid(page); + + if (unlikely(nid != numa_node_id())) { + if (dtor) + dtor(p); + __free_page(page); + return; + } + + q = &get_cpu_var(quicklist)[nr]; + *(void **)p = q->page; + q->page = p; + q->nr_pages++; + put_cpu_var(quicklist); +} + +static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp) +{ + __quicklist_free(nr, dtor, pp, virt_to_page(pp)); +} + +static inline void quicklist_free_page(int nr, void (*dtor)(void *), + struct page *page) +{ + __quicklist_free(nr, dtor, page_address(page), page); +} + +void quicklist_trim(int nr, void (*dtor)(void *), + unsigned long min_pages, unsigned long max_free); + +unsigned long quicklist_total_size(void); + +#endif + +#endif /* LINUX_QUICKLIST_H */ + diff --git a/mm/Kconfig b/mm/Kconfig index 7942b333e46c..1ac718f636ec 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -163,3 +163,8 @@ config ZONE_DMA_FLAG default "0" if !ZONE_DMA default "1" +config NR_QUICK + int + depends on QUICKLIST + default "1" + diff --git a/mm/Makefile b/mm/Makefile index 1887148e44e7..a9148ea329aa 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -30,3 +30,5 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_SMP) += allocpercpu.o +obj-$(CONFIG_QUICKLIST) += quicklist.o + diff --git a/mm/quicklist.c b/mm/quicklist.c new file mode 100644 index 000000000000..ae8189c2799e --- /dev/null +++ b/mm/quicklist.c @@ -0,0 +1,88 @@ +/* + * Quicklist support. + * + * Quicklists are light weight lists of pages that have a defined state + * on alloc and free. Pages must be in the quicklist specific defined state + * (zero by default) when the page is freed. It seems that the initial idea + * for such lists first came from Dave Miller and then various other people + * improved on it. + * + * Copyright (C) 2007 SGI, + * Christoph Lameter + * Generalized, added support for multiple lists and + * constructors / destructors. 
+ */ +#include + +#include +#include +#include +#include + +DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; + +#define FRACTION_OF_NODE_MEM 16 + +static unsigned long max_pages(unsigned long min_pages) +{ + unsigned long node_free_pages, max; + + node_free_pages = node_page_state(numa_node_id(), + NR_FREE_PAGES); + max = node_free_pages / FRACTION_OF_NODE_MEM; + return max(max, min_pages); +} + +static long min_pages_to_free(struct quicklist *q, + unsigned long min_pages, long max_free) +{ + long pages_to_free; + + pages_to_free = q->nr_pages - max_pages(min_pages); + + return min(pages_to_free, max_free); +} + +/* + * Trim down the number of pages in the quicklist + */ +void quicklist_trim(int nr, void (*dtor)(void *), + unsigned long min_pages, unsigned long max_free) +{ + long pages_to_free; + struct quicklist *q; + + q = &get_cpu_var(quicklist)[nr]; + if (q->nr_pages > min_pages) { + pages_to_free = min_pages_to_free(q, min_pages, max_free); + + while (pages_to_free > 0) { + /* + * We pass a gfp_t of 0 to quicklist_alloc here + * because we will never call into the page allocator. + */ + void *p = quicklist_alloc(nr, 0, NULL); + + if (dtor) + dtor(p); + free_page((unsigned long)p); + pages_to_free--; + } + } + put_cpu_var(quicklist); +} + +unsigned long quicklist_total_size(void) +{ + unsigned long count = 0; + int cpu; + struct quicklist *ql, *q; + + for_each_online_cpu(cpu) { + ql = per_cpu(quicklist, cpu); + for (q = ql; q < ql + CONFIG_NR_QUICK; q++) + count += q->nr_pages; + } + return count; +} + -- cgit v1.2.3 From f98393a64ca1392130724c3acb4e3f325801d2b6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 6 May 2007 14:49:54 -0700 Subject: mm: remove destroy_dirty_buffers from invalidate_bdev() Remove the destroy_dirty_buffers argument from invalidate_bdev(), it hasn't been used in 6 years (so akpm says). 
find * -name \*.[ch] | xargs grep -l invalidate_bdev | while read file; do quilt add $file; sed -ie 's/invalidate_bdev(\([^,]*\),[^)]*)/invalidate_bdev(\1)/g' $file; done Signed-off-by: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- block/ioctl.c | 4 ++-- drivers/block/amiflop.c | 2 +- drivers/block/loop.c | 4 ++-- drivers/block/rd.c | 2 +- drivers/cdrom/cdrom.c | 2 +- drivers/md/md.c | 2 +- fs/block_dev.c | 4 ++-- fs/buffer.c | 7 +------ fs/dquot.c | 4 ++-- fs/ext3/super.c | 4 ++-- fs/ext4/super.c | 4 ++-- fs/partitions/acorn.c | 2 +- include/linux/buffer_head.h | 4 ++-- 13 files changed, 20 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/block/ioctl.c b/block/ioctl.c index e06dbe9bc858..f7e3e8abf887 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -80,7 +80,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user } /* all seems OK */ fsync_bdev(bdevp); - invalidate_bdev(bdevp, 0); + invalidate_bdev(bdevp); mutex_lock_nested(&bdev->bd_mutex, 1); delete_partition(disk, part); @@ -236,7 +236,7 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd, lock_kernel(); fsync_bdev(bdev); - invalidate_bdev(bdev, 0); + invalidate_bdev(bdev); unlock_kernel(); return 0; diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 5d6562171533..27a139025ced 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1480,7 +1480,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp, break; case FDFMTEND: floppy_off(drive); - invalidate_bdev(inode->i_bdev, 0); + invalidate_bdev(inode->i_bdev); break; case FDGETPRM: memset((void *)&getprm, 0, sizeof (getprm)); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 6b5b64207407..0d4ccd4a0957 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -833,7 +833,7 @@ out_clr: lo->lo_backing_file = NULL; lo->lo_flags = 0; set_capacity(disks[lo->lo_number], 0); - invalidate_bdev(bdev, 0); + invalidate_bdev(bdev); bd_set_size(bdev, 0); mapping_set_gfp_mask(mapping, lo->old_gfp_mask); lo->lo_state = Lo_unbound; @@ -917,7 +917,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); - invalidate_bdev(bdev, 0); + invalidate_bdev(bdev); set_capacity(disks[lo->lo_number], 0); bd_set_size(bdev, 0); mapping_set_gfp_mask(filp->f_mapping, gfp); diff --git a/drivers/block/rd.c b/drivers/block/rd.c index 485aa87e9bcd..43d4ebcb3b44 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c @@ -403,7 +403,7 @@ static void __exit rd_cleanup(void) struct block_device *bdev = rd_bdev[i]; rd_bdev[i] = NULL; if (bdev) { - invalidate_bdev(bdev, 1); + invalidate_bdev(bdev); blkdev_put(bdev); } del_gendisk(rd_disks[i]); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index b36f44d4d1bf..3625a05bc3d3 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2384,7 +2384,7 @@ static int cdrom_ioctl_reset(struct cdrom_device_info *cdi, return -EACCES; if (!CDROM_CAN(CDC_RESET)) return -ENOSYS; - invalidate_bdev(bdev, 0); + invalidate_bdev(bdev); return cdi->ops->reset(cdi); } diff --git a/drivers/md/md.c b/drivers/md/md.c index 509171ca7fa8..2b4315d7e5d6 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3080,7 +3080,7 @@ static int do_md_run(mddev_t * mddev) if (test_bit(Faulty, &rdev->flags)) continue; sync_blockdev(rdev->bdev); - invalidate_bdev(rdev->bdev, 0); + 
invalidate_bdev(rdev->bdev); } md_probe(mddev->unit, NULL, NULL); diff --git a/fs/block_dev.c b/fs/block_dev.c index 575076c018f4..21e59acbcfdf 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -58,7 +58,7 @@ static sector_t max_block(struct block_device *bdev) /* Kill _all_ buffers, dirty or not.. */ static void kill_bdev(struct block_device *bdev) { - invalidate_bdev(bdev, 1); + invalidate_bdev(bdev); truncate_inode_pages(bdev->bd_inode->i_mapping, 0); } @@ -1478,7 +1478,7 @@ int __invalidate_device(struct block_device *bdev) res = invalidate_inodes(sb); drop_super(sb); } - invalidate_bdev(bdev, 0); + invalidate_bdev(bdev); return res; } EXPORT_SYMBOL(__invalidate_device); diff --git a/fs/buffer.c b/fs/buffer.c index dcc5faa573ba..630df3e6fe0c 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -333,7 +333,7 @@ out: we think the disk contains more recent information than the buffercache. The update == 1 pass marks the buffers we need to update, the update == 2 pass does the actual I/O. */ -void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) +void invalidate_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_inode->i_mapping; @@ -341,11 +341,6 @@ void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) return; invalidate_bh_lrus(); - /* - * FIXME: what about destroy_dirty_buffers? - * We really want to use invalidate_inode_pages2() for - * that, but not until that's cleaned up. - */ invalidate_mapping_pages(mapping, 0, -1); } diff --git a/fs/dquot.c b/fs/dquot.c index b16f991662c1..0a5febc159f2 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -1432,7 +1432,7 @@ int vfs_quota_off(struct super_block *sb, int type) mutex_unlock(&dqopt->dqonoff_mutex); } if (sb->s_bdev) - invalidate_bdev(sb->s_bdev, 0); + invalidate_bdev(sb->s_bdev); return 0; } @@ -1468,7 +1468,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) * we see all the changes from userspace... */ write_inode_now(inode, 1); /* And now flush the block cache so that kernel sees the changes */ - invalidate_bdev(sb->s_bdev, 0); + invalidate_bdev(sb->s_bdev); mutex_lock(&inode->i_mutex); mutex_lock(&dqopt->dqonoff_mutex); if (sb_has_quota_enabled(sb, type)) { diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 4a4fcd6868c7..4266b708ca01 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -420,7 +420,7 @@ static void ext3_put_super (struct super_block * sb) dump_orphan_list(sb, sbi); J_ASSERT(list_empty(&sbi->s_orphan)); - invalidate_bdev(sb->s_bdev, 0); + invalidate_bdev(sb->s_bdev); if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. We don't want them @@ -428,7 +428,7 @@ static void ext3_put_super (struct super_block * sb) * hotswapped, and it breaks the `ro-after' testing code. */ sync_blockdev(sbi->journal_bdev); - invalidate_bdev(sbi->journal_bdev, 0); + invalidate_bdev(sbi->journal_bdev); ext3_blkdev_remove(sbi); } sb->s_fs_info = NULL; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 61c4718e4a53..25e8d0096176 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -470,7 +470,7 @@ static void ext4_put_super (struct super_block * sb) dump_orphan_list(sb, sbi); J_ASSERT(list_empty(&sbi->s_orphan)); - invalidate_bdev(sb->s_bdev, 0); + invalidate_bdev(sb->s_bdev); if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. 
We don't want them @@ -478,7 +478,7 @@ static void ext4_put_super (struct super_block * sb) * hotswapped, and it breaks the `ro-after' testing code. */ sync_blockdev(sbi->journal_bdev); - invalidate_bdev(sbi->journal_bdev, 0); + invalidate_bdev(sbi->journal_bdev); ext4_blkdev_remove(sbi); } sb->s_fs_info = NULL; diff --git a/fs/partitions/acorn.c b/fs/partitions/acorn.c index 1bc9f372c7d4..9a6826c63818 100644 --- a/fs/partitions/acorn.c +++ b/fs/partitions/acorn.c @@ -271,7 +271,7 @@ adfspart_check_ADFS(struct parsed_partitions *state, struct block_device *bdev) extern void xd_set_geometry(struct block_device *, unsigned char, unsigned char, unsigned int); xd_set_geometry(bdev, dr->secspertrack, heads, 1); - invalidate_bdev(bdev, 1); + invalidate_bdev(bdev); truncate_inode_pages(bdev->bd_inode->i_mapping, 0); } #endif diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index dd27b1c7227f..632c50b21386 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -165,7 +165,7 @@ int sync_mapping_buffers(struct address_space *mapping); void unmap_underlying_metadata(struct block_device *bdev, sector_t block); void mark_buffer_async_write(struct buffer_head *bh); -void invalidate_bdev(struct block_device *, int); +void invalidate_bdev(struct block_device *); int sync_blockdev(struct block_device *bdev); void __wait_on_buffer(struct buffer_head *); wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); @@ -319,7 +319,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } -static inline void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) {} +static inline void invalidate_bdev(struct block_device *bdev) {} #endif /* CONFIG_BLOCK */ -- cgit v1.2.3 From f9a14399aea13830d8af6798a53207bb0a900945 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 6 May 2007 14:49:55 -0700 Subject: mm: optimize kill_bdev() Remove duplicate work in kill_bdev(). It currently invalidates and then truncates the bdev's mapping. invalidate_mapping_pages() will opportunistically remove pages from the mapping. And truncate_inode_pages() will forcefully remove all pages. The only thing truncate doesn't do is flush the bh lrus. So do that explicitly. This avoids (very unlikely) but possible invalid lookup results if the same bdev is quickly re-issued. It also will prevent extreme kernel latencies which are observed when blockdevs which have a large amount of pagecache are unmounted, by avoiding invalidate_mapping_pages() on that path. invalidate_mapping_pages() has no cond_resched (it can be called under spinlock), whereas truncate_inode_pages() has one. [akpm@linux-foundation.org: restore nrpages==0 optimisation] Signed-off-by: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/block_dev.c | 6 ++++-- fs/buffer.c | 3 +-- include/linux/buffer_head.h | 1 + 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/block_dev.c b/fs/block_dev.c index 21e59acbcfdf..6fe49b9349ea 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -55,10 +55,12 @@ static sector_t max_block(struct block_device *bdev) return retval; } -/* Kill _all_ buffers, dirty or not.. */ +/* Kill _all_ buffers and pagecache , dirty or not.. 
*/ static void kill_bdev(struct block_device *bdev) { - invalidate_bdev(bdev); + if (bdev->bd_inode->i_mapping->nrpages == 0) + return; + invalidate_bh_lrus(); truncate_inode_pages(bdev->bd_inode->i_mapping, 0); } diff --git a/fs/buffer.c b/fs/buffer.c index 630df3e6fe0c..80291aad6de6 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -44,7 +44,6 @@ #include static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); -static void invalidate_bh_lrus(void); #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers) @@ -1403,7 +1402,7 @@ static void invalidate_bh_lru(void *arg) put_cpu_var(bh_lrus); } -static void invalidate_bh_lrus(void) +void invalidate_bh_lrus(void) { on_each_cpu(invalidate_bh_lru, NULL, 1, 1); } diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 632c50b21386..5c6e12853a9b 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -182,6 +182,7 @@ void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); void __breadahead(struct block_device *, sector_t block, unsigned int size); struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size); +void invalidate_bh_lrus(void); struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); void FASTCALL(unlock_buffer(struct buffer_head *bh)); -- cgit v1.2.3 From 5af60839909b8e3b28ca7cd7912fa0b23475617f Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:49:56 -0700 Subject: slab allocators: Remove obsolete SLAB_MUST_HWCACHE_ALIGN This patch was recently posted to lkml and acked by Pekka. The flag SLAB_MUST_HWCACHE_ALIGN is 1. Never checked by SLAB at all. 2. A duplicate of SLAB_HWCACHE_ALIGN for SLUB 3. Fulfills the role of SLAB_HWCACHE_ALIGN for SLOB. The only remaining use is in sparc64 and ppc64 and their use there reflects some earlier role that the slab flag once may have had. If its specified then SLAB_HWCACHE_ALIGN is also specified. The flag is confusing, inconsistent and has no purpose. Remove it. Acked-by: Pekka Enberg Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/mm/hugetlbpage.c | 3 +-- arch/powerpc/mm/init_64.c | 3 +-- arch/sparc64/mm/tsb.c | 3 +-- include/linux/slab.h | 1 - mm/slab.c | 4 ++-- mm/slob.c | 4 ++-- mm/slub.c | 5 ++--- 7 files changed, 9 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 8508f973d9cc..c8814177b716 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -1057,8 +1057,7 @@ static int __init hugetlbpage_init(void) huge_pgtable_cache = kmem_cache_create("hugepte_cache", HUGEPTE_TABLE_SIZE, HUGEPTE_TABLE_SIZE, - SLAB_HWCACHE_ALIGN | - SLAB_MUST_HWCACHE_ALIGN, + SLAB_HWCACHE_ALIGN, zero_ctor, NULL); if (! huge_pgtable_cache) panic("hugetlbpage_init(): could not create hugepte cache\n"); diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index d12a87ec5ae9..5a7750147b7d 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -183,8 +183,7 @@ void pgtable_cache_init(void) "for size: %08x...\n", name, i, size); pgtable_cache[i] = kmem_cache_create(name, size, size, - SLAB_HWCACHE_ALIGN | - SLAB_MUST_HWCACHE_ALIGN, + SLAB_HWCACHE_ALIGN, zero_ctor, NULL); if (! 
pgtable_cache[i]) diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 57eb3025537a..4be378d9a382 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -262,8 +262,7 @@ void __init pgtable_cache_init(void) tsb_caches[i] = kmem_cache_create(name, size, size, - SLAB_HWCACHE_ALIGN | - SLAB_MUST_HWCACHE_ALIGN, + SLAB_HWCACHE_ALIGN, NULL, NULL); if (!tsb_caches[i]) { prom_printf("Could not create %s cache\n", name); diff --git a/include/linux/slab.h b/include/linux/slab.h index 67425c277e12..a9befa50d3e3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -26,7 +26,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated; #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ -#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* Force alignment even if debuggin is active */ #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ diff --git a/mm/slab.c b/mm/slab.c index 997c3b2f50c9..583644f6ae11 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -175,12 +175,12 @@ # define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \ SLAB_POISON | SLAB_HWCACHE_ALIGN | \ SLAB_CACHE_DMA | \ - SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \ + SLAB_STORE_USER | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) #else # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ - SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \ + SLAB_CACHE_DMA | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) #endif diff --git a/mm/slob.c b/mm/slob.c index 77786be032e0..c9401a7eaa5f 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -21,7 +21,7 @@ * * SLAB is emulated on top of SLOB by simply calling constructors and * destructors for every SLAB allocation. Objects are returned with - * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is + * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is * set, in which case the low-level allocator will fragment blocks to * create the proper alignment. Again, objects of page-size or greater * are allocated by calling __get_free_pages. As SLAB objects know @@ -295,7 +295,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, c->ctor = ctor; c->dtor = dtor; /* ignore alignment unless it's forced */ - c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; + c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; if (c->align < align) c->align = align; } else if (flags & SLAB_PANIC) diff --git a/mm/slub.c b/mm/slub.c index 3904002bdb35..79940e98e5e6 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1496,7 +1496,7 @@ static unsigned long calculate_alignment(unsigned long flags, * specified alignment though. If that is greater * then use it. 
*/ - if ((flags & (SLAB_MUST_HWCACHE_ALIGN | SLAB_HWCACHE_ALIGN)) && + if ((flags & SLAB_HWCACHE_ALIGN) && size > L1_CACHE_BYTES / 2) return max_t(unsigned long, align, L1_CACHE_BYTES); @@ -3142,8 +3142,7 @@ SLAB_ATTR(reclaim_account); static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) { - return sprintf(buf, "%d\n", !!(s->flags & - (SLAB_HWCACHE_ALIGN|SLAB_MUST_HWCACHE_ALIGN))); + return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); } SLAB_ATTR_RO(hwcache_align); -- cgit v1.2.3 From 0a31bd5f2bbb6473ef9d24f0063ca91cfa678b64 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:49:57 -0700 Subject: KMEM_CACHE(): simplify slab cache creation This patch provides a new macro KMEM_CACHE(, ) to simplify slab creation. KMEM_CACHE creates a slab with the name of the struct, with the size of the struct and with the alignment of the struct. Additional slab flags may be specified if necessary. Example struct test_slab { int a,b,c; struct list_head; } __cacheline_aligned_in_smp; test_slab_cache = KMEM_CACHE(test_slab, SLAB_PANIC) will create a new slab named "test_slab" of the size sizeof(struct test_slab) and aligned to the alignment of test slab. If it fails then we panic. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- block/cfq-iosched.c | 6 ++---- fs/aio.c | 6 ++---- fs/bio.c | 3 +-- fs/dcache.c | 8 ++------ include/linux/slab.h | 12 ++++++++++++ kernel/delayacct.c | 6 +----- kernel/pid.c | 4 +--- kernel/signal.c | 6 +----- kernel/taskstats.c | 4 +--- 9 files changed, 23 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 64df3fa303b0..baef5fc7cff8 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2090,13 +2090,11 @@ static void cfq_slab_kill(void) static int __init cfq_slab_setup(void) { - cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0, - NULL, NULL); + cfq_pool = KMEM_CACHE(cfq_queue, 0); if (!cfq_pool) goto fail; - cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool", - sizeof(struct cfq_io_context), 0, 0, NULL, NULL); + cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0); if (!cfq_ioc_pool) goto fail; diff --git a/fs/aio.c b/fs/aio.c index e4598d6d49dd..b97ab8028b6d 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -68,10 +68,8 @@ static void aio_queue_work(struct kioctx *); */ static int __init aio_setup(void) { - kiocb_cachep = kmem_cache_create("kiocb", sizeof(struct kiocb), - 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); - kioctx_cachep = kmem_cache_create("kioctx", sizeof(struct kioctx), - 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); + kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); aio_wq = create_workqueue("aio"); diff --git a/fs/bio.c b/fs/bio.c index 693940da4090..093345f00128 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -1193,8 +1193,7 @@ static void __init biovec_init_slabs(void) static int __init init_bio(void) { - bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC); biovec_init_slabs(); diff --git a/fs/dcache.c b/fs/dcache.c index d68631f18df1..d1bf5d8aeb5a 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2052,12 +2052,8 @@ static void __init dcache_init(unsigned long mempages) * but it is probably not worth it because of the cache nature * of the dcache. 
*/ - dentry_cache = kmem_cache_create("dentry_cache", - sizeof(struct dentry), - 0, - (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| - SLAB_MEM_SPREAD), - NULL, NULL); + dentry_cache = KMEM_CACHE(dentry, + SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory); diff --git a/include/linux/slab.h b/include/linux/slab.h index a9befa50d3e3..e14b4c338b89 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -57,6 +57,18 @@ unsigned int kmem_cache_size(struct kmem_cache *); const char *kmem_cache_name(struct kmem_cache *); int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); +/* + * Please use this macro to create slab caches. Simply specify the + * name of the structure and maybe some flags that are listed above. + * + * The alignment of the struct determines object alignment. If you + * f.e. add ____cacheline_aligned_in_smp to the struct declaration + * then the objects will be properly aligned in SMP configurations. + */ +#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ + sizeof(struct __struct), __alignof__(struct __struct),\ + (__flags), NULL, NULL) + #ifdef CONFIG_NUMA extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); #else diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 766d5912b26a..c0148ae992c4 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -31,11 +31,7 @@ __setup("nodelayacct", delayacct_setup_disable); void delayacct_init(void) { - delayacct_cache = kmem_cache_create("delayacct_cache", - sizeof(struct task_delay_info), - 0, - SLAB_PANIC, - NULL, NULL); + delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC); delayacct_tsk_init(&init_task); } diff --git a/kernel/pid.c b/kernel/pid.c index 78f2aee90f54..9c80bc23d6b8 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -412,7 +412,5 @@ void __init pidmap_init(void) set_bit(0, init_pid_ns.pidmap[0].page); atomic_dec(&init_pid_ns.pidmap[0].nr_free); - pid_cachep = kmem_cache_create("pid", sizeof(struct pid), - __alignof__(struct pid), - SLAB_PANIC, NULL, NULL); + pid_cachep = KMEM_CACHE(pid, SLAB_PANIC); } diff --git a/kernel/signal.c b/kernel/signal.c index 3670225ecbc0..2b4087d545a3 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2636,9 +2636,5 @@ __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) void __init signals_init(void) { - sigqueue_cachep = - kmem_cache_create("sigqueue", - sizeof(struct sigqueue), - __alignof__(struct sigqueue), - SLAB_PANIC, NULL, NULL); + sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); } diff --git a/kernel/taskstats.c b/kernel/taskstats.c index ad7d2392cb0e..906cae771585 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -524,9 +524,7 @@ void __init taskstats_init_early(void) { unsigned int i; - taskstats_cache = kmem_cache_create("taskstats_cache", - sizeof(struct taskstats), - 0, SLAB_PANIC, NULL, NULL); + taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC); for_each_possible_cpu(i) { INIT_LIST_HEAD(&(per_cpu(listener_array, i).list)); init_rwsem(&(per_cpu(listener_array, i).sem)); -- cgit v1.2.3 From 50953fe9e00ebbeffa032a565ab2f08312d51a87 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:50:16 -0700 Subject: slab allocators: Remove SLAB_DEBUG_INITIAL flag I have never seen a use of SLAB_DEBUG_INITIAL. It is only supported by SLAB. I think its purpose was to have a callback after an object has been freed to verify that the state is the constructor state again? 
The callback is performed before each freeing of an object. I would think that it is much easier to check the object state manually before the free. That also places the check near the code object manipulation of the object. Also the SLAB_DEBUG_INITIAL callback is only performed if the kernel was compiled with SLAB debugging on. If there would be code in a constructor handling SLAB_DEBUG_INITIAL then it would have to be conditional on SLAB_DEBUG otherwise it would just be dead code. But there is no such code in the kernel. I think SLUB_DEBUG_INITIAL is too problematic to make real use of, difficult to understand and there are easier ways to accomplish the same effect (i.e. add debug code before kfree). There is a related flag SLAB_CTOR_VERIFY that is frequently checked to be clear in fs inode caches. Remove the pointless checks (they would even be pointless without removeal of SLAB_DEBUG_INITIAL) from the fs constructors. This is the last slab flag that SLUB did not support. Remove the check for unimplemented flags from SLUB. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/platforms/cell/spufs/inode.c | 3 +-- drivers/mtd/ubi/eba.c | 3 +-- fs/adfs/super.c | 3 +-- fs/affs/super.c | 3 +-- fs/afs/super.c | 3 +-- fs/befs/linuxvfs.c | 3 +-- fs/bfs/inode.c | 3 +-- fs/block_dev.c | 4 +--- fs/buffer.c | 3 +-- fs/cifs/cifsfs.c | 3 +-- fs/coda/inode.c | 3 +-- fs/ecryptfs/main.c | 3 +-- fs/efs/super.c | 3 +-- fs/ext2/super.c | 3 +-- fs/ext3/super.c | 3 +-- fs/ext4/super.c | 3 +-- fs/fat/cache.c | 3 +-- fs/fat/inode.c | 3 +-- fs/fuse/inode.c | 3 +-- fs/gfs2/main.c | 6 ++---- fs/hfs/super.c | 2 +- fs/hfsplus/super.c | 2 +- fs/hpfs/super.c | 3 +-- fs/hugetlbfs/inode.c | 3 +-- fs/inode.c | 3 +-- fs/isofs/inode.c | 3 +-- fs/jffs2/super.c | 3 +-- fs/jfs/jfs_metapage.c | 3 +-- fs/jfs/super.c | 3 +-- fs/locks.c | 3 +-- fs/minix/inode.c | 3 +-- fs/ncpfs/inode.c | 3 +-- fs/nfs/inode.c | 3 +-- fs/ntfs/super.c | 3 +-- fs/ocfs2/dlm/dlmfs.c | 3 +-- fs/ocfs2/super.c | 3 +-- fs/openpromfs/inode.c | 3 +-- fs/proc/inode.c | 3 +-- fs/qnx4/inode.c | 3 +-- fs/reiserfs/super.c | 3 +-- fs/romfs/inode.c | 3 +-- fs/smbfs/inode.c | 3 +-- fs/sysv/inode.c | 3 +-- fs/udf/super.c | 4 +--- fs/ufs/super.c | 3 +-- fs/xfs/linux-2.6/xfs_super.c | 3 +-- include/linux/slab.h | 2 -- ipc/mqueue.c | 3 +-- kernel/fork.c | 3 +-- mm/rmap.c | 3 +-- mm/shmem.c | 3 +-- mm/slab.c | 20 ++------------------ mm/slub.c | 10 ---------- net/socket.c | 3 +-- net/sunrpc/rpc_pipe.c | 3 +-- 55 files changed, 55 insertions(+), 136 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 13e4f70ec8c0..a93f328a7317 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -71,8 +71,7 @@ spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags) { struct spufs_inode_info *ei = p; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { inode_init_once(&ei->vfs_inode); } } diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index d847ee1da3d9..3dba5733ed1f 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -940,8 +940,7 @@ static void ltree_entry_ctor(void *obj, struct kmem_cache *cache, { struct ltree_entry *le = obj; - if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) != - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) return; le->users = 0; 
diff --git a/fs/adfs/super.c b/fs/adfs/super.c index 2e5f2c8371ee..30c296508497 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -232,8 +232,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/affs/super.c b/fs/affs/super.c index c3986a1911b0..beff7d21e6e2 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -87,8 +87,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct affs_inode_info *ei = (struct affs_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { init_MUTEX(&ei->i_link_lock); init_MUTEX(&ei->i_ext_lock); inode_init_once(&ei->vfs_inode); diff --git a/fs/afs/super.c b/fs/afs/super.c index 41173f81ac47..7030d76155fc 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -453,8 +453,7 @@ static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep, { struct afs_vnode *vnode = _vnode; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { memset(vnode, 0, sizeof(*vnode)); inode_init_once(&vnode->vfs_inode); init_waitqueue_head(&vnode->update_waitq); diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index cc6cc8ed2e39..fe96108a788d 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -293,8 +293,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct befs_inode_info *bi = (struct befs_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { inode_init_once(&bi->vfs_inode); } } diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index 93d6219243ad..edc08d89aabc 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -248,8 +248,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct bfs_inode_info *bi = foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&bi->vfs_inode); } diff --git a/fs/block_dev.c b/fs/block_dev.c index 6fe49b9349ea..f02b7bdd9864 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -457,9 +457,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag struct bdev_inode *ei = (struct bdev_inode *) foo; struct block_device *bdev = &ei->bdev; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) - { + if (flags & SLAB_CTOR_CONSTRUCTOR) { memset(bdev, 0, sizeof(*bdev)); mutex_init(&bdev->bd_mutex); sema_init(&bdev->bd_mount_sem, 1); diff --git a/fs/buffer.c b/fs/buffer.c index 80291aad6de6..7db24b9e5449 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2953,8 +2953,7 @@ EXPORT_SYMBOL(free_buffer_head); static void init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags) { - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { struct buffer_head * bh = (struct buffer_head *)data; memset(bh, 0, sizeof(*bh)); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 5036dae09cd7..8568e100953c 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -701,8 +701,7 @@ cifs_init_once(void *inode, struct 
kmem_cache * cachep, unsigned long flags) { struct cifsInodeInfo *cifsi = inode; - if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { inode_init_once(&cifsi->vfs_inode); INIT_LIST_HEAD(&cifsi->lockList); } diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 614175a3b02e..0aaff3651d14 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -62,8 +62,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct coda_inode_info *ei = (struct coda_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 6acc8f4fc588..8cbf3f69ebe5 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -583,8 +583,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags) { struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; - if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/efs/super.c b/fs/efs/super.c index c2235e46edcd..ba7a8b9da0c1 100644 --- a/fs/efs/super.c +++ b/fs/efs/super.c @@ -72,8 +72,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct efs_inode_info *ei = (struct efs_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/ext2/super.c b/fs/ext2/super.c index a046a419d8af..685a1c287177 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -160,8 +160,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { rwlock_init(&ei->i_meta_lock); #ifdef CONFIG_EXT2_FS_XATTR init_rwsem(&ei->xattr_sem); diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 4266b708ca01..54d3c9041259 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -466,8 +466,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { INIT_LIST_HEAD(&ei->i_orphan); #ifdef CONFIG_EXT3_FS_XATTR init_rwsem(&ei->xattr_sem); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 25e8d0096176..719126932354 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -517,8 +517,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { INIT_LIST_HEAD(&ei->i_orphan); #ifdef CONFIG_EXT4DEV_FS_XATTR init_rwsem(&ei->xattr_sem); diff --git a/fs/fat/cache.c b/fs/fat/cache.c index 05c2941c74f2..1959143c1d27 100644 --- a/fs/fat/cache.c +++ b/fs/fat/cache.c @@ -40,8 +40,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct fat_cache *cache = (struct fat_cache *)foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & 
SLAB_CTOR_CONSTRUCTOR) INIT_LIST_HEAD(&cache->cache_list); } diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 9bfe607c892e..65cb54bde481 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -499,8 +499,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { spin_lock_init(&ei->cache_lru_lock); ei->nr_caches = 0; ei->cache_valid_id = FAT_CACHE_VALID + 1; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index bdffe0cfe09a..d8003be56e05 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -685,8 +685,7 @@ static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep, { struct inode * inode = foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(inode); } diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index 6e8a59809abf..e2bffae683cc 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -27,8 +27,7 @@ static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct gfs2_inode *ip = foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { inode_init_once(&ip->i_inode); spin_lock_init(&ip->i_spin); init_rwsem(&ip->i_rw_mutex); @@ -39,8 +38,7 @@ static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct gfs2_glock *gl = foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { INIT_HLIST_NODE(&gl->gl_list); spin_lock_init(&gl->gl_spin); INIT_LIST_HEAD(&gl->gl_holders); diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 623f509f1d47..4f1888f16cf0 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -434,7 +434,7 @@ static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flag { struct hfs_inode_info *i = p; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&i->vfs_inode); } diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 1a97f9293447..37afbec8a761 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -470,7 +470,7 @@ static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long { struct hfsplus_inode_info *i = p; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&i->vfs_inode); } diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index e0174e338526..1b95f39fbc37 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -176,8 +176,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { mutex_init(&ei->i_mutex); mutex_init(&ei->i_parent_mutex); inode_init_once(&ei->vfs_inode); diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index fe625cd1719a..842a4ed4052d 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -556,8 +556,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct hugetlbfs_inode_info *ei = 
(struct hugetlbfs_inode_info *)foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/inode.c b/fs/inode.c index 5abb097ab1b0..b4296bf62739 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -213,8 +213,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct inode * inode = (struct inode *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(inode); } diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 64a96cdfe3a4..e99f7ff4ecb4 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -77,8 +77,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags { struct iso_inode_info *ei = foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index e51164a8a8d4..45368f8bbe72 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -47,8 +47,7 @@ static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned l { struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { init_MUTEX(&ei->sem); inode_init_once(&ei->vfs_inode); } diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 58deae007507..6b3acb0b5781 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -184,8 +184,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct metapage *mp = (struct metapage *)foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { mp->lid = 0; mp->lsn = 0; mp->flag = 0; diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 52d73d54a931..ea9dc3e65dcf 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -752,8 +752,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags { struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); INIT_LIST_HEAD(&jfs_ip->anon_inode_list); init_rwsem(&jfs_ip->rdwrlock); diff --git a/fs/locks.c b/fs/locks.c index 52a81005dab4..325578074742 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -203,8 +203,7 @@ static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags) { struct file_lock *lock = (struct file_lock *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) != - SLAB_CTOR_CONSTRUCTOR) + if (!(flags & SLAB_CTOR_CONSTRUCTOR)) return; locks_init_lock(lock); diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 92e383af3709..2f4d43a2a310 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -73,8 +73,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct minix_inode_info *ei = (struct minix_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 7285c94956c4..c29f00ad495d 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -60,8 +60,7 @@ 
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { mutex_init(&ei->open_mutex); inode_init_once(&ei->vfs_inode); } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 44aa9b726573..1e9a915d1fea 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1167,8 +1167,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct nfs_inode *nfsi = (struct nfs_inode *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { inode_init_once(&nfsi->vfs_inode); spin_lock_init(&nfsi->req_lock); INIT_LIST_HEAD(&nfsi->dirty); diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 2ddde534db0a..21d834e5ed73 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -3085,8 +3085,7 @@ static void ntfs_big_inode_init_once(void *foo, struct kmem_cache *cachep, { ntfs_inode *ni = (ntfs_inode *)foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(VFS_I(ni)); } diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c index de952eba29a9..d4e46d067edd 100644 --- a/fs/ocfs2/dlm/dlmfs.c +++ b/fs/ocfs2/dlm/dlmfs.c @@ -263,8 +263,7 @@ static void dlmfs_init_once(void *foo, struct dlmfs_inode_private *ip = (struct dlmfs_inode_private *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { ip->ip_dlm = NULL; ip->ip_parent = NULL; diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index f5493540d94f..7c5e3f5d6634 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -937,8 +937,7 @@ static void ocfs2_inode_init_once(void *data, { struct ocfs2_inode_info *oi = data; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { oi->ip_flags = 0; oi->ip_open_count = 0; spin_lock_init(&oi->ip_lock); diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index bde1c164417d..731a90e9f0cd 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c @@ -419,8 +419,7 @@ static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned { struct op_inode_info *oi = (struct op_inode_info *) data; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&oi->vfs_inode); } diff --git a/fs/proc/inode.c b/fs/proc/inode.c index c372eb151a3a..22b1158389ae 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -109,8 +109,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct proc_inode *ei = (struct proc_inode *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 83bc8e7824cd..75fc8498f2e2 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -536,8 +536,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, { struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 
f13a7f164dc6..7054aaef0493 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -511,8 +511,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags { struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; - if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { INIT_LIST_HEAD(&ei->i_prealloc_list); inode_init_once(&ei->vfs_inode); #ifdef CONFIG_REISERFS_FS_POSIX_ACL diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c index fd601014813e..804285190271 100644 --- a/fs/romfs/inode.c +++ b/fs/romfs/inode.c @@ -570,8 +570,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct romfs_inode_info *ei = (struct romfs_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c index 5faba4f1c9ab..424a3ddf86dd 100644 --- a/fs/smbfs/inode.c +++ b/fs/smbfs/inode.c @@ -69,9 +69,8 @@ static void smb_destroy_inode(struct inode *inode) static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct smb_inode_info *ei = (struct smb_inode_info *) foo; - unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR; - if ((flags & flagmask) == SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 9311cac186fe..3152d7415606 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c @@ -322,8 +322,7 @@ static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags) { struct sysv_inode_info *si = (struct sysv_inode_info *)p; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&si->vfs_inode); } diff --git a/fs/udf/super.c b/fs/udf/super.c index 8672b88f7ff2..023b304fdd99 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -134,9 +134,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct udf_inode_info *ei = (struct udf_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) - { + if (flags & SLAB_CTOR_CONSTRUCTOR) { ei->i_ext.i_data = NULL; inode_init_once(&ei->vfs_inode); } diff --git a/fs/ufs/super.c b/fs/ufs/super.c index b5a6461ec66b..be7c48c5f203 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -1237,8 +1237,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag { struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 2f2c40db562e..14e2cbe5a8d5 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -360,8 +360,7 @@ xfs_fs_inode_init_once( kmem_zone_t *zonep, unsigned long flags) { - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); } diff --git a/include/linux/slab.h b/include/linux/slab.h index e14b4c338b89..1ffe0a959cd4 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -21,7 +21,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated; * The ones marked DEBUG 
are only valid if CONFIG_SLAB_DEBUG is set. */ #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ -#define SLAB_DEBUG_INITIAL 0x00000200UL /* DEBUG: Call constructor (as verifier) */ #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ @@ -36,7 +35,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated; /* Flags passed to a constructor functions */ #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */ #define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */ -#define SLAB_CTOR_VERIFY 0x004UL /* Tell constructor it's a verify call */ /* * struct kmem_cache related prototypes diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 554ac368be79..d17821d3f483 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -215,8 +215,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&p->vfs_inode); } diff --git a/kernel/fork.c b/kernel/fork.c index ffccefb28b6a..b7d169def942 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1425,8 +1425,7 @@ static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long fl { struct sighand_struct *sighand = data; - if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) spin_lock_init(&sighand->siglock); } diff --git a/mm/rmap.c b/mm/rmap.c index 59da5b734c80..75a32be64a21 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -162,8 +162,7 @@ void anon_vma_unlink(struct vm_area_struct *vma) static void anon_vma_ctor(void *data, struct kmem_cache *cachep, unsigned long flags) { - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { struct anon_vma *anon_vma = data; spin_lock_init(&anon_vma->lock); diff --git a/mm/shmem.c b/mm/shmem.c index b2a35ebf071a..f01e8deed645 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2358,8 +2358,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, { struct shmem_inode_info *p = (struct shmem_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { inode_init_once(&p->vfs_inode); #ifdef CONFIG_TMPFS_POSIX_ACL p->i_acl = NULL; diff --git a/mm/slab.c b/mm/slab.c index 2a3cbd6e675d..a877d6f3d687 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -116,8 +116,7 @@ #include /* - * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL, - * SLAB_RED_ZONE & SLAB_POISON. + * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. * 0 for faster, smaller code (especially in the critical paths). * * STATS - 1 to collect stats for /proc/slabinfo. @@ -172,7 +171,7 @@ /* Legal flag mask for kmem_cache_create(). 
*/ #if DEBUG -# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \ +# define CREATE_MASK (SLAB_RED_ZONE | \ SLAB_POISON | SLAB_HWCACHE_ALIGN | \ SLAB_CACHE_DMA | \ SLAB_STORE_USER | \ @@ -2184,12 +2183,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, #if DEBUG WARN_ON(strchr(name, ' ')); /* It confuses parsers */ - if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { - /* No constructor, but inital state check requested */ - printk(KERN_ERR "%s: No con, but init state check " - "requested - %s\n", __FUNCTION__, name); - flags &= ~SLAB_DEBUG_INITIAL; - } #if FORCED_DEBUG /* * Enable redzoning and last user accounting, except for caches with @@ -2895,15 +2888,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, BUG_ON(objnr >= cachep->num); BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); - if (cachep->flags & SLAB_DEBUG_INITIAL) { - /* - * Need to call the slab's constructor so the caller can - * perform a verify of its state (debugging). Called without - * the cache-lock held. - */ - cachep->ctor(objp + obj_offset(cachep), - cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY); - } if (cachep->flags & SLAB_POISON && cachep->dtor) { /* we want to cache poison the object, * call the destruction callback diff --git a/mm/slub.c b/mm/slub.c index 79940e98e5e6..bd86182e595e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -97,9 +97,6 @@ * * - Support PAGE_ALLOC_DEBUG. Should be easy to do. * - * - SLAB_DEBUG_INITIAL is not supported but I have never seen a use of - * it. - * * - Variable sizing of the per node arrays */ @@ -125,11 +122,6 @@ #endif -/* - * Flags from the regular SLAB that SLUB does not support: - */ -#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL) - /* * Mininum number of partial slabs. These will be left on the partial * lists even if they are empty. kmem_cache_shrink may reclaim them. @@ -1748,8 +1740,6 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, s->flags = flags; s->align = align; - BUG_ON(flags & SLUB_UNIMPLEMENTED); - /* * The page->offset field is only 16 bit wide. This is an offset * in units of words from the beginning of an object. If the slab diff --git a/net/socket.c b/net/socket.c index 1ad62c08377b..759825b7ca26 100644 --- a/net/socket.c +++ b/net/socket.c @@ -261,8 +261,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct socket_alloc *ei = (struct socket_alloc *)foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) - == SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) inode_init_once(&ei->vfs_inode); } diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 9b9ea5045569..ad39b47e05bc 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -828,8 +828,7 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct rpc_inode *rpci = (struct rpc_inode *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CTOR_CONSTRUCTOR) { inode_init_once(&rpci->vfs_inode); rpci->private = NULL; rpci->nreaders = 0; -- cgit v1.2.3 From 4f104934591ed98534b3a4c3d17d972b790e9c42 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:50:17 -0700 Subject: slab allocators: Remove SLAB_CTOR_ATOMIC SLAB_CTOR atomic is never used which is no surprise since I cannot imagine that one would want to do something serious in a constructor or destructor. In particular given that the slab allocators run with interrupts disabled. 
Actions in constructors and destructors are by their nature very limited and usually do not go beyond initializing variables and list operations. (The i386 pgd ctor and dtors do take a spinlock in constructor and destructor..... I think that is the furthest we go at this point.) There is no flag passed to the destructor so removing SLAB_CTOR_ATOMIC also establishes a certain symmetry. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 1 - mm/slab.c | 17 ++--------------- mm/slub.c | 10 ++-------- 3 files changed, 4 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 1ffe0a959cd4..71829efc40ba 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -34,7 +34,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated; /* Flags passed to a constructor functions */ #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */ -#define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */ /* * struct kmem_cache related prototypes diff --git a/mm/slab.c b/mm/slab.c index a877d6f3d687..52ecf7599a7b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2752,13 +2752,6 @@ static int cache_grow(struct kmem_cache *cachep, ctor_flags = SLAB_CTOR_CONSTRUCTOR; local_flags = (flags & GFP_LEVEL_MASK); - if (!(local_flags & __GFP_WAIT)) - /* - * Not allowed to sleep. Need to tell a constructor about - * this - it might need to know... - */ - ctor_flags |= SLAB_CTOR_ATOMIC; - /* Take the l3 list lock to change the colour_next on this node */ check_irq_off(); l3 = cachep->nodelists[nodeid]; @@ -3092,14 +3085,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, } #endif objp += obj_offset(cachep); - if (cachep->ctor && cachep->flags & SLAB_POISON) { - unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; - - if (!(flags & __GFP_WAIT)) - ctor_flags |= SLAB_CTOR_ATOMIC; - - cachep->ctor(objp, cachep, ctor_flags); - } + if (cachep->ctor && cachep->flags & SLAB_POISON) + cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR); #if ARCH_SLAB_MINALIGN if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", diff --git a/mm/slub.c b/mm/slub.c index bd86182e595e..347e44821bcb 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -802,14 +802,8 @@ static void setup_object(struct kmem_cache *s, struct page *page, init_tracking(s, object); } - if (unlikely(s->ctor)) { - int mode = SLAB_CTOR_CONSTRUCTOR; - - if (!(s->flags & __GFP_WAIT)) - mode |= SLAB_CTOR_ATOMIC; - - s->ctor(object, s, mode); - } + if (unlikely(s->ctor)) + s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR); } static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) -- cgit v1.2.3 From cfce66047f1893cb7d3abb0d53e65cbbd8d605f0 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:50:17 -0700 Subject: Slab allocators: remove useless __GFP_NO_GROW flag There is no user remaining and I have never seen any use of that flag. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 3 +-- mm/slab.c | 6 ++---- mm/slub.c | 3 --- 3 files changed, 3 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 2a7d15bcde46..97a36c3d96e2 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -40,7 +40,6 @@ struct vm_area_struct; #define __GFP_REPEAT ((__force gfp_t)0x400u) /* Retry the allocation. 
Might fail */ #define __GFP_NOFAIL ((__force gfp_t)0x800u) /* Retry for ever. Cannot fail */ #define __GFP_NORETRY ((__force gfp_t)0x1000u)/* Do not retry. Might fail */ -#define __GFP_NO_GROW ((__force gfp_t)0x2000u)/* Slab internal usage */ #define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */ #define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */ #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ @@ -53,7 +52,7 @@ struct vm_area_struct; /* if you forget to add the bitmask here kernel will crash, period */ #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \ - __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \ + __GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \ __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE) /* This equals 0, but use constants in case they ever change */ diff --git a/mm/slab.c b/mm/slab.c index 52ecf7599a7b..5920a412b377 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2746,9 +2746,7 @@ static int cache_grow(struct kmem_cache *cachep, * Be lazy and only check for valid flags here, keeping it out of the * critical path in kmem_cache_alloc(). */ - BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW)); - if (flags & __GFP_NO_GROW) - return 0; + BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); ctor_flags = SLAB_CTOR_CONSTRUCTOR; local_flags = (flags & GFP_LEVEL_MASK); @@ -3252,7 +3250,7 @@ retry: flags | GFP_THISNODE, nid); } - if (!obj && !(flags & __GFP_NO_GROW)) { + if (!obj) { /* * This allocation will be performed within the constraints * of the current cpuset / memory policy requirements. diff --git a/mm/slub.c b/mm/slub.c index 347e44821bcb..a6323484dd3e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -815,9 +815,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) void *last; void *p; - if (flags & __GFP_NO_GROW) - return NULL; - BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); if (flags & __GFP_WAIT) -- cgit v1.2.3 From 906e0be197232c219197d058ef5095baa7764cd4 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sun, 6 May 2007 14:50:20 -0700 Subject: page migration: Only migrate pages if allocation in the highest zone is possible Address spaces contain an allocation flag that specifies restriction on the zone for pages placed in the mapping. I.e. some device may require pages to be allocated from a DMA zone. Block devices may not be able to use pages from HIGHMEM. Memory policies and the common use of page migration works only on the highest zone. If the address space does not allow allocation from the highest zone then the pages in the address space are not migratable simply because we can only allocate memory for a specified node if we allow allocation for the highest zone on each node. 
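Reduced to code, the restriction above is a single comparison between the zones the mapping may allocate from and policy_zone. The helper below is a sketch for illustration only; mapping_allows_migration() does not exist in the patch (the real change folds the test straight into vma_migratable(), as the diff that follows shows), and the header choices are approximate:

    #include <linux/gfp.h>
    #include <linux/mempolicy.h>    /* policy_zone */
    #include <linux/pagemap.h>      /* mapping_gfp_mask() */

    /*
     * Migration allocates its target pages in the highest zone.  A mapping
     * whose gfp mask excludes that zone (say, a block device limited to
     * DMA or NORMAL memory) therefore cannot have its pages migrated from
     * node to node.
     */
    static inline int mapping_allows_migration(struct address_space *mapping)
    {
            return gfp_zone(mapping_gfp_mask(mapping)) >= policy_zone;
    }

For a file-backed vma the patch applies exactly this test to vma->vm_file->f_mapping and has vma_migratable() return 0 when it fails.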
Acked-by: Hugh Dickins Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 75e55dcdeb18..e10a90a93b5d 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -2,18 +2,29 @@ #define _LINUX_MIGRATE_H #include +#include +#include typedef struct page *new_page_t(struct page *, unsigned long private, int **); +#ifdef CONFIG_MIGRATION /* Check if a vma is migratable */ static inline int vma_migratable(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) return 0; + /* + * Migration allocates pages in the highest zone. If we cannot + * do so then migration (at least from node to node) is not + * possible. + */ + if (vma->vm_file && + gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) + < policy_zone) + return 0; return 1; } -#ifdef CONFIG_MIGRATION extern int isolate_lru_page(struct page *p, struct list_head *pagelist); extern int putback_lru_pages(struct list_head *l); extern int migrate_page(struct address_space *, @@ -28,6 +39,8 @@ extern int migrate_vmas(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, unsigned long flags); #else +static inline int vma_migratable(struct vm_area_struct *vma) + { return 0; } static inline int isolate_lru_page(struct page *p, struct list_head *list) { return -ENOSYS; } -- cgit v1.2.3 From 1394f03221790a988afc3e4b3cb79f2e477246a9 Mon Sep 17 00:00:00 2001 From: Bryan Wu Date: Sun, 6 May 2007 14:50:22 -0700 Subject: blackfin architecture This adds support for the Analog Devices Blackfin processor architecture, and currently supports the BF533, BF532, BF531, BF537, BF536, BF534, and BF561 (Dual Core) devices, with a variety of development platforms including those avaliable from Analog Devices (BF533-EZKit, BF533-STAMP, BF537-STAMP, BF561-EZKIT), and Bluetechnix! Tinyboards. The Blackfin architecture was jointly developed by Intel and Analog Devices Inc. (ADI) as the Micro Signal Architecture (MSA) core and introduced it in December of 2000. Since then ADI has put this core into its Blackfin processor family of devices. The Blackfin core has the advantages of a clean, orthogonal,RISC-like microprocessor instruction set. It combines a dual-MAC (Multiply/Accumulate), state-of-the-art signal processing engine and single-instruction, multiple-data (SIMD) multimedia capabilities into a single instruction-set architecture. 
The Blackfin architecture, including the instruction set, is described by the ADSP-BF53x/BF56x Blackfin Processor Programming Reference http://blackfin.uclinux.org/gf/download/frsrelease/29/2549/Blackfin_PRM.pdf The Blackfin processor is already supported by major releases of gcc, and there are binary and source rpms/tarballs for many architectures at: http://blackfin.uclinux.org/gf/project/toolchain/frs There is complete documentation, including "getting started" guides available at: http://docs.blackfin.uclinux.org/ which provides links to the sources and patches you will need in order to set up a cross-compiling environment for bfin-linux-uclibc This patch, as well as the other patches (toolchain, distribution, uClibc) are actively supported by Analog Devices Inc, at: http://blackfin.uclinux.org/ We have tested this on LTP, and our test plan (including pass/fails) can be found at: http://docs.blackfin.uclinux.org/doku.php?id=testing_the_linux_kernel [m.kozlowski@tuxland.pl: balance parenthesis in blackfin header files] Signed-off-by: Bryan Wu Signed-off-by: Mariusz Kozlowski Signed-off-by: Aubrey Li Signed-off-by: Jie Zhang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/blackfin/00-INDEX | 11 + Documentation/blackfin/Filesystems | 169 ++ Documentation/blackfin/cache-lock.txt | 48 + Documentation/blackfin/cachefeatures.txt | 65 + MAINTAINERS | 38 + arch/blackfin/Kconfig | 989 ++++++++ arch/blackfin/Makefile | 80 + arch/blackfin/boot/Makefile | 27 + arch/blackfin/defconfig | 1314 ++++++++++ arch/blackfin/kernel/Makefile | 14 + arch/blackfin/kernel/asm-offsets.c | 136 ++ arch/blackfin/kernel/bfin_dma_5xx.c | 742 ++++++ arch/blackfin/kernel/bfin_gpio.c | 637 +++++ arch/blackfin/kernel/bfin_ksyms.c | 119 + arch/blackfin/kernel/dma-mapping.c | 183 ++ arch/blackfin/kernel/dualcore_test.c | 49 + arch/blackfin/kernel/entry.S | 94 + arch/blackfin/kernel/flat.c | 101 + arch/blackfin/kernel/init_task.c | 60 + arch/blackfin/kernel/irqchip.c | 147 ++ arch/blackfin/kernel/module.c | 429 ++++ arch/blackfin/kernel/process.c | 394 +++ arch/blackfin/kernel/ptrace.c | 430 ++++ arch/blackfin/kernel/setup.c | 902 +++++++ arch/blackfin/kernel/signal.c | 356 +++ arch/blackfin/kernel/sys_bfin.c | 115 + arch/blackfin/kernel/time.c | 326 +++ arch/blackfin/kernel/traps.c | 649 +++++ arch/blackfin/kernel/vmlinux.lds.S | 228 ++ arch/blackfin/lib/Makefile | 11 + arch/blackfin/lib/ashldi3.c | 58 + arch/blackfin/lib/ashrdi3.c | 59 + arch/blackfin/lib/checksum.c | 140 ++ arch/blackfin/lib/divsi3.S | 216 ++ arch/blackfin/lib/gcclib.h | 47 + arch/blackfin/lib/ins.S | 69 + arch/blackfin/lib/lshrdi3.c | 72 + arch/blackfin/lib/memchr.S | 70 + arch/blackfin/lib/memcmp.S | 110 + arch/blackfin/lib/memcpy.S | 142 ++ arch/blackfin/lib/memmove.S | 103 + arch/blackfin/lib/memset.S | 109 + arch/blackfin/lib/modsi3.S | 79 + arch/blackfin/lib/muldi3.c | 99 + arch/blackfin/lib/outs.S | 62 + arch/blackfin/lib/smulsi3_highpart.S | 30 + arch/blackfin/lib/strcmp.c | 11 + arch/blackfin/lib/strcpy.c | 11 + arch/blackfin/lib/strncmp.c | 11 + arch/blackfin/lib/strncpy.c | 11 + arch/blackfin/lib/udivsi3.S | 298 +++ arch/blackfin/lib/umodsi3.S | 66 + arch/blackfin/lib/umulsi3_highpart.S | 23 + arch/blackfin/mach-bf533/Kconfig | 92 + arch/blackfin/mach-bf533/Makefile | 9 + arch/blackfin/mach-bf533/boards/Makefile | 8 + arch/blackfin/mach-bf533/boards/cm_bf533.c | 267 +++ arch/blackfin/mach-bf533/boards/ezkit.c | 224 ++ arch/blackfin/mach-bf533/boards/generic_board.c | 95 + arch/blackfin/mach-bf533/boards/stamp.c | 
321 +++ arch/blackfin/mach-bf533/cpu.c | 161 ++ arch/blackfin/mach-bf533/head.S | 774 ++++++ arch/blackfin/mach-bf533/ints-priority.c | 65 + arch/blackfin/mach-bf537/Kconfig | 141 ++ arch/blackfin/mach-bf537/Makefile | 9 + arch/blackfin/mach-bf537/boards/Makefile | 9 + arch/blackfin/mach-bf537/boards/cm_bf537.c | 364 +++ arch/blackfin/mach-bf537/boards/eth_mac.c | 51 + arch/blackfin/mach-bf537/boards/generic_board.c | 445 ++++ arch/blackfin/mach-bf537/boards/led.S | 183 ++ arch/blackfin/mach-bf537/boards/pnav10.c | 523 ++++ arch/blackfin/mach-bf537/boards/stamp.c | 615 +++++ arch/blackfin/mach-bf537/cpu.c | 161 ++ arch/blackfin/mach-bf537/head.S | 602 +++++ arch/blackfin/mach-bf537/ints-priority.c | 74 + arch/blackfin/mach-bf561/Kconfig | 222 ++ arch/blackfin/mach-bf561/Makefile | 9 + arch/blackfin/mach-bf561/boards/Makefile | 7 + arch/blackfin/mach-bf561/boards/cm_bf561.c | 289 +++ arch/blackfin/mach-bf561/boards/ezkit.c | 147 ++ arch/blackfin/mach-bf561/boards/generic_board.c | 82 + arch/blackfin/mach-bf561/coreb.c | 402 ++++ arch/blackfin/mach-bf561/head.S | 512 ++++ arch/blackfin/mach-bf561/ints-priority.c | 108 + arch/blackfin/mach-common/Makefile | 12 + arch/blackfin/mach-common/cache.S | 253 ++ arch/blackfin/mach-common/cacheinit.S | 137 ++ arch/blackfin/mach-common/cplbhdlr.S | 130 + arch/blackfin/mach-common/cplbinfo.c | 211 ++ arch/blackfin/mach-common/cplbmgr.S | 607 +++++ arch/blackfin/mach-common/dpmc.S | 418 ++++ arch/blackfin/mach-common/entry.S | 1207 ++++++++++ arch/blackfin/mach-common/interrupt.S | 253 ++ arch/blackfin/mach-common/ints-priority-dc.c | 476 ++++ arch/blackfin/mach-common/ints-priority-sc.c | 577 +++++ arch/blackfin/mach-common/irqpanic.c | 194 ++ arch/blackfin/mach-common/lock.S | 204 ++ arch/blackfin/mach-common/pm.c | 181 ++ arch/blackfin/mm/Makefile | 5 + arch/blackfin/mm/blackfin_sram.c | 540 +++++ arch/blackfin/mm/blackfin_sram.h | 38 + arch/blackfin/mm/init.c | 208 ++ arch/blackfin/oprofile/Kconfig | 29 + arch/blackfin/oprofile/Makefile | 14 + arch/blackfin/oprofile/common.c | 168 ++ arch/blackfin/oprofile/op_blackfin.h | 98 + arch/blackfin/oprofile/op_model_bf533.c | 161 ++ arch/blackfin/oprofile/timer_int.c | 74 + fs/Kconfig.binfmt | 2 +- include/asm-blackfin/Kbuild | 1 + include/asm-blackfin/a.out.h | 25 + include/asm-blackfin/atomic.h | 144 ++ include/asm-blackfin/auxvec.h | 4 + include/asm-blackfin/bf5xx_timers.h | 209 ++ include/asm-blackfin/bfin-global.h | 120 + include/asm-blackfin/bfin5xx_spi.h | 170 ++ include/asm-blackfin/bfin_simple_timer.h | 13 + include/asm-blackfin/bfin_sport.h | 175 ++ include/asm-blackfin/bitops.h | 213 ++ include/asm-blackfin/blackfin.h | 81 + include/asm-blackfin/bug.h | 4 + include/asm-blackfin/bugs.h | 16 + include/asm-blackfin/byteorder.h | 48 + include/asm-blackfin/cache.h | 29 + include/asm-blackfin/cacheflush.h | 90 + include/asm-blackfin/checksum.h | 101 + include/asm-blackfin/cplb.h | 51 + include/asm-blackfin/cplbinit.h | 203 ++ include/asm-blackfin/cpumask.h | 6 + include/asm-blackfin/cputime.h | 6 + include/asm-blackfin/current.h | 23 + include/asm-blackfin/delay.h | 44 + include/asm-blackfin/device.h | 7 + include/asm-blackfin/div64.h | 1 + include/asm-blackfin/dma-mapping.h | 66 + include/asm-blackfin/dma.h | 188 ++ include/asm-blackfin/dpmc.h | 70 + include/asm-blackfin/elf.h | 127 + include/asm-blackfin/emergency-restart.h | 6 + include/asm-blackfin/entry.h | 56 + include/asm-blackfin/errno.h | 6 + include/asm-blackfin/fcntl.h | 13 + include/asm-blackfin/flat.h | 58 + include/asm-blackfin/futex.h | 6 
+ include/asm-blackfin/gpio.h | 367 +++ include/asm-blackfin/hardirq.h | 41 + include/asm-blackfin/hw_irq.h | 6 + include/asm-blackfin/ide.h | 32 + include/asm-blackfin/io.h | 207 ++ include/asm-blackfin/ioctl.h | 1 + include/asm-blackfin/ioctls.h | 82 + include/asm-blackfin/ipc.h | 1 + include/asm-blackfin/ipcbuf.h | 30 + include/asm-blackfin/irq.h | 70 + include/asm-blackfin/irq_handler.h | 22 + include/asm-blackfin/irq_regs.h | 1 + include/asm-blackfin/kdebug.h | 1 + include/asm-blackfin/kmap_types.h | 21 + include/asm-blackfin/l1layout.h | 31 + include/asm-blackfin/linkage.h | 7 + include/asm-blackfin/local.h | 6 + include/asm-blackfin/mach-bf533/anomaly.h | 175 ++ include/asm-blackfin/mach-bf533/bf533.h | 306 +++ include/asm-blackfin/mach-bf533/bfin_serial_5xx.h | 108 + include/asm-blackfin/mach-bf533/blackfin.h | 45 + include/asm-blackfin/mach-bf533/cdefBF532.h | 706 ++++++ include/asm-blackfin/mach-bf533/defBF532.h | 1175 +++++++++ include/asm-blackfin/mach-bf533/dma.h | 54 + include/asm-blackfin/mach-bf533/irq.h | 177 ++ include/asm-blackfin/mach-bf533/mem_init.h | 316 +++ include/asm-blackfin/mach-bf533/mem_map.h | 168 ++ include/asm-blackfin/mach-bf537/anomaly.h | 120 + include/asm-blackfin/mach-bf537/bf537.h | 287 +++ include/asm-blackfin/mach-bf537/bfin_serial_5xx.h | 147 ++ include/asm-blackfin/mach-bf537/blackfin.h | 430 ++++ include/asm-blackfin/mach-bf537/cdefBF534.h | 1823 ++++++++++++++ include/asm-blackfin/mach-bf537/cdefBF537.h | 209 ++ include/asm-blackfin/mach-bf537/defBF534.h | 2501 ++++++++++++++++++++ include/asm-blackfin/mach-bf537/defBF537.h | 404 ++++ include/asm-blackfin/mach-bf537/dma.h | 55 + include/asm-blackfin/mach-bf537/irq.h | 219 ++ include/asm-blackfin/mach-bf537/mem_init.h | 330 +++ include/asm-blackfin/mach-bf537/mem_map.h | 175 ++ include/asm-blackfin/mach-bf561/anomaly.h | 184 ++ include/asm-blackfin/mach-bf561/bf561.h | 408 ++++ include/asm-blackfin/mach-bf561/bfin_serial_5xx.h | 108 + include/asm-blackfin/mach-bf561/blackfin.h | 52 + include/asm-blackfin/mach-bf561/cdefBF561.h | 1543 ++++++++++++ include/asm-blackfin/mach-bf561/defBF561.h | 1717 ++++++++++++++ include/asm-blackfin/mach-bf561/dma.h | 35 + include/asm-blackfin/mach-bf561/irq.h | 450 ++++ include/asm-blackfin/mach-bf561/mem_init.h | 322 +++ include/asm-blackfin/mach-bf561/mem_map.h | 75 + include/asm-blackfin/mach-common/cdef_LPBlackfin.h | 471 ++++ include/asm-blackfin/mach-common/context.S | 350 +++ include/asm-blackfin/mach-common/def_LPBlackfin.h | 691 ++++++ include/asm-blackfin/macros.h | 95 + include/asm-blackfin/mem_map.h | 12 + include/asm-blackfin/mman.h | 45 + include/asm-blackfin/mmu.h | 30 + include/asm-blackfin/mmu_context.h | 129 + include/asm-blackfin/module.h | 19 + include/asm-blackfin/msgbuf.h | 31 + include/asm-blackfin/mutex.h | 9 + include/asm-blackfin/namei.h | 19 + include/asm-blackfin/page.h | 89 + include/asm-blackfin/page_offset.h | 6 + include/asm-blackfin/param.h | 22 + include/asm-blackfin/pci.h | 148 ++ include/asm-blackfin/percpu.h | 6 + include/asm-blackfin/pgalloc.h | 8 + include/asm-blackfin/pgtable.h | 96 + include/asm-blackfin/poll.h | 24 + include/asm-blackfin/posix_types.h | 65 + include/asm-blackfin/processor.h | 130 + include/asm-blackfin/ptrace.h | 166 ++ include/asm-blackfin/resource.h | 6 + include/asm-blackfin/scatterlist.h | 26 + include/asm-blackfin/sections.h | 7 + include/asm-blackfin/segment.h | 7 + include/asm-blackfin/semaphore-helper.h | 82 + include/asm-blackfin/semaphore.h | 106 + include/asm-blackfin/sembuf.h | 25 + 
include/asm-blackfin/setup.h | 17 + include/asm-blackfin/shmbuf.h | 42 + include/asm-blackfin/shmparam.h | 6 + include/asm-blackfin/sigcontext.h | 55 + include/asm-blackfin/siginfo.h | 35 + include/asm-blackfin/signal.h | 160 ++ include/asm-blackfin/socket.h | 53 + include/asm-blackfin/sockios.h | 13 + include/asm-blackfin/spinlock.h | 6 + include/asm-blackfin/stat.h | 63 + include/asm-blackfin/statfs.h | 6 + include/asm-blackfin/string.h | 104 + include/asm-blackfin/system.h | 250 ++ include/asm-blackfin/termbits.h | 184 ++ include/asm-blackfin/termios.h | 106 + include/asm-blackfin/thread_info.h | 143 ++ include/asm-blackfin/timex.h | 18 + include/asm-blackfin/tlb.h | 16 + include/asm-blackfin/tlbflush.h | 62 + include/asm-blackfin/topology.h | 6 + include/asm-blackfin/traps.h | 75 + include/asm-blackfin/types.h | 66 + include/asm-blackfin/uaccess.h | 271 +++ include/asm-blackfin/ucontext.h | 17 + include/asm-blackfin/unaligned.h | 6 + include/asm-blackfin/unistd.h | 382 +++ include/asm-blackfin/user.h | 89 + include/linux/elf-em.h | 1 + include/linux/spi/ad7877.h | 24 + include/linux/usb_sl811.h | 26 + init/Kconfig | 2 +- lib/Kconfig.debug | 4 +- scripts/genksyms/genksyms.c | 3 +- scripts/mod/mk_elfconfig.c | 3 +- 257 files changed, 47285 insertions(+), 6 deletions(-) create mode 100644 Documentation/blackfin/00-INDEX create mode 100644 Documentation/blackfin/Filesystems create mode 100644 Documentation/blackfin/cache-lock.txt create mode 100644 Documentation/blackfin/cachefeatures.txt create mode 100644 arch/blackfin/Kconfig create mode 100644 arch/blackfin/Makefile create mode 100644 arch/blackfin/boot/Makefile create mode 100644 arch/blackfin/defconfig create mode 100644 arch/blackfin/kernel/Makefile create mode 100644 arch/blackfin/kernel/asm-offsets.c create mode 100644 arch/blackfin/kernel/bfin_dma_5xx.c create mode 100644 arch/blackfin/kernel/bfin_gpio.c create mode 100644 arch/blackfin/kernel/bfin_ksyms.c create mode 100644 arch/blackfin/kernel/dma-mapping.c create mode 100644 arch/blackfin/kernel/dualcore_test.c create mode 100644 arch/blackfin/kernel/entry.S create mode 100644 arch/blackfin/kernel/flat.c create mode 100644 arch/blackfin/kernel/init_task.c create mode 100644 arch/blackfin/kernel/irqchip.c create mode 100644 arch/blackfin/kernel/module.c create mode 100644 arch/blackfin/kernel/process.c create mode 100644 arch/blackfin/kernel/ptrace.c create mode 100644 arch/blackfin/kernel/setup.c create mode 100644 arch/blackfin/kernel/signal.c create mode 100644 arch/blackfin/kernel/sys_bfin.c create mode 100644 arch/blackfin/kernel/time.c create mode 100644 arch/blackfin/kernel/traps.c create mode 100644 arch/blackfin/kernel/vmlinux.lds.S create mode 100644 arch/blackfin/lib/Makefile create mode 100644 arch/blackfin/lib/ashldi3.c create mode 100644 arch/blackfin/lib/ashrdi3.c create mode 100644 arch/blackfin/lib/checksum.c create mode 100644 arch/blackfin/lib/divsi3.S create mode 100644 arch/blackfin/lib/gcclib.h create mode 100644 arch/blackfin/lib/ins.S create mode 100644 arch/blackfin/lib/lshrdi3.c create mode 100644 arch/blackfin/lib/memchr.S create mode 100644 arch/blackfin/lib/memcmp.S create mode 100644 arch/blackfin/lib/memcpy.S create mode 100644 arch/blackfin/lib/memmove.S create mode 100644 arch/blackfin/lib/memset.S create mode 100644 arch/blackfin/lib/modsi3.S create mode 100644 arch/blackfin/lib/muldi3.c create mode 100644 arch/blackfin/lib/outs.S create mode 100644 arch/blackfin/lib/smulsi3_highpart.S create mode 100644 arch/blackfin/lib/strcmp.c create mode 
100644 arch/blackfin/lib/strcpy.c create mode 100644 arch/blackfin/lib/strncmp.c create mode 100644 arch/blackfin/lib/strncpy.c create mode 100644 arch/blackfin/lib/udivsi3.S create mode 100644 arch/blackfin/lib/umodsi3.S create mode 100644 arch/blackfin/lib/umulsi3_highpart.S create mode 100644 arch/blackfin/mach-bf533/Kconfig create mode 100644 arch/blackfin/mach-bf533/Makefile create mode 100644 arch/blackfin/mach-bf533/boards/Makefile create mode 100644 arch/blackfin/mach-bf533/boards/cm_bf533.c create mode 100644 arch/blackfin/mach-bf533/boards/ezkit.c create mode 100644 arch/blackfin/mach-bf533/boards/generic_board.c create mode 100644 arch/blackfin/mach-bf533/boards/stamp.c create mode 100644 arch/blackfin/mach-bf533/cpu.c create mode 100644 arch/blackfin/mach-bf533/head.S create mode 100644 arch/blackfin/mach-bf533/ints-priority.c create mode 100644 arch/blackfin/mach-bf537/Kconfig create mode 100644 arch/blackfin/mach-bf537/Makefile create mode 100644 arch/blackfin/mach-bf537/boards/Makefile create mode 100644 arch/blackfin/mach-bf537/boards/cm_bf537.c create mode 100644 arch/blackfin/mach-bf537/boards/eth_mac.c create mode 100644 arch/blackfin/mach-bf537/boards/generic_board.c create mode 100644 arch/blackfin/mach-bf537/boards/led.S create mode 100644 arch/blackfin/mach-bf537/boards/pnav10.c create mode 100644 arch/blackfin/mach-bf537/boards/stamp.c create mode 100644 arch/blackfin/mach-bf537/cpu.c create mode 100644 arch/blackfin/mach-bf537/head.S create mode 100644 arch/blackfin/mach-bf537/ints-priority.c create mode 100644 arch/blackfin/mach-bf561/Kconfig create mode 100644 arch/blackfin/mach-bf561/Makefile create mode 100644 arch/blackfin/mach-bf561/boards/Makefile create mode 100644 arch/blackfin/mach-bf561/boards/cm_bf561.c create mode 100644 arch/blackfin/mach-bf561/boards/ezkit.c create mode 100644 arch/blackfin/mach-bf561/boards/generic_board.c create mode 100644 arch/blackfin/mach-bf561/coreb.c create mode 100644 arch/blackfin/mach-bf561/head.S create mode 100644 arch/blackfin/mach-bf561/ints-priority.c create mode 100644 arch/blackfin/mach-common/Makefile create mode 100644 arch/blackfin/mach-common/cache.S create mode 100644 arch/blackfin/mach-common/cacheinit.S create mode 100644 arch/blackfin/mach-common/cplbhdlr.S create mode 100644 arch/blackfin/mach-common/cplbinfo.c create mode 100644 arch/blackfin/mach-common/cplbmgr.S create mode 100644 arch/blackfin/mach-common/dpmc.S create mode 100644 arch/blackfin/mach-common/entry.S create mode 100644 arch/blackfin/mach-common/interrupt.S create mode 100644 arch/blackfin/mach-common/ints-priority-dc.c create mode 100644 arch/blackfin/mach-common/ints-priority-sc.c create mode 100644 arch/blackfin/mach-common/irqpanic.c create mode 100644 arch/blackfin/mach-common/lock.S create mode 100644 arch/blackfin/mach-common/pm.c create mode 100644 arch/blackfin/mm/Makefile create mode 100644 arch/blackfin/mm/blackfin_sram.c create mode 100644 arch/blackfin/mm/blackfin_sram.h create mode 100644 arch/blackfin/mm/init.c create mode 100644 arch/blackfin/oprofile/Kconfig create mode 100644 arch/blackfin/oprofile/Makefile create mode 100644 arch/blackfin/oprofile/common.c create mode 100644 arch/blackfin/oprofile/op_blackfin.h create mode 100644 arch/blackfin/oprofile/op_model_bf533.c create mode 100644 arch/blackfin/oprofile/timer_int.c create mode 100644 include/asm-blackfin/Kbuild create mode 100644 include/asm-blackfin/a.out.h create mode 100644 include/asm-blackfin/atomic.h create mode 100644 include/asm-blackfin/auxvec.h create 
mode 100644 include/asm-blackfin/bf5xx_timers.h create mode 100644 include/asm-blackfin/bfin-global.h create mode 100644 include/asm-blackfin/bfin5xx_spi.h create mode 100644 include/asm-blackfin/bfin_simple_timer.h create mode 100644 include/asm-blackfin/bfin_sport.h create mode 100644 include/asm-blackfin/bitops.h create mode 100644 include/asm-blackfin/blackfin.h create mode 100644 include/asm-blackfin/bug.h create mode 100644 include/asm-blackfin/bugs.h create mode 100644 include/asm-blackfin/byteorder.h create mode 100644 include/asm-blackfin/cache.h create mode 100644 include/asm-blackfin/cacheflush.h create mode 100644 include/asm-blackfin/checksum.h create mode 100644 include/asm-blackfin/cplb.h create mode 100644 include/asm-blackfin/cplbinit.h create mode 100644 include/asm-blackfin/cpumask.h create mode 100644 include/asm-blackfin/cputime.h create mode 100644 include/asm-blackfin/current.h create mode 100644 include/asm-blackfin/delay.h create mode 100644 include/asm-blackfin/device.h create mode 100644 include/asm-blackfin/div64.h create mode 100644 include/asm-blackfin/dma-mapping.h create mode 100644 include/asm-blackfin/dma.h create mode 100644 include/asm-blackfin/dpmc.h create mode 100644 include/asm-blackfin/elf.h create mode 100644 include/asm-blackfin/emergency-restart.h create mode 100644 include/asm-blackfin/entry.h create mode 100644 include/asm-blackfin/errno.h create mode 100644 include/asm-blackfin/fcntl.h create mode 100644 include/asm-blackfin/flat.h create mode 100644 include/asm-blackfin/futex.h create mode 100644 include/asm-blackfin/gpio.h create mode 100644 include/asm-blackfin/hardirq.h create mode 100644 include/asm-blackfin/hw_irq.h create mode 100644 include/asm-blackfin/ide.h create mode 100644 include/asm-blackfin/io.h create mode 100644 include/asm-blackfin/ioctl.h create mode 100644 include/asm-blackfin/ioctls.h create mode 100644 include/asm-blackfin/ipc.h create mode 100644 include/asm-blackfin/ipcbuf.h create mode 100644 include/asm-blackfin/irq.h create mode 100644 include/asm-blackfin/irq_handler.h create mode 100644 include/asm-blackfin/irq_regs.h create mode 100644 include/asm-blackfin/kdebug.h create mode 100644 include/asm-blackfin/kmap_types.h create mode 100644 include/asm-blackfin/l1layout.h create mode 100644 include/asm-blackfin/linkage.h create mode 100644 include/asm-blackfin/local.h create mode 100644 include/asm-blackfin/mach-bf533/anomaly.h create mode 100644 include/asm-blackfin/mach-bf533/bf533.h create mode 100644 include/asm-blackfin/mach-bf533/bfin_serial_5xx.h create mode 100644 include/asm-blackfin/mach-bf533/blackfin.h create mode 100644 include/asm-blackfin/mach-bf533/cdefBF532.h create mode 100644 include/asm-blackfin/mach-bf533/defBF532.h create mode 100644 include/asm-blackfin/mach-bf533/dma.h create mode 100644 include/asm-blackfin/mach-bf533/irq.h create mode 100644 include/asm-blackfin/mach-bf533/mem_init.h create mode 100644 include/asm-blackfin/mach-bf533/mem_map.h create mode 100644 include/asm-blackfin/mach-bf537/anomaly.h create mode 100644 include/asm-blackfin/mach-bf537/bf537.h create mode 100644 include/asm-blackfin/mach-bf537/bfin_serial_5xx.h create mode 100644 include/asm-blackfin/mach-bf537/blackfin.h create mode 100644 include/asm-blackfin/mach-bf537/cdefBF534.h create mode 100644 include/asm-blackfin/mach-bf537/cdefBF537.h create mode 100644 include/asm-blackfin/mach-bf537/defBF534.h create mode 100644 include/asm-blackfin/mach-bf537/defBF537.h create mode 100644 include/asm-blackfin/mach-bf537/dma.h 
create mode 100644 include/asm-blackfin/mach-bf537/irq.h create mode 100644 include/asm-blackfin/mach-bf537/mem_init.h create mode 100644 include/asm-blackfin/mach-bf537/mem_map.h create mode 100644 include/asm-blackfin/mach-bf561/anomaly.h create mode 100644 include/asm-blackfin/mach-bf561/bf561.h create mode 100644 include/asm-blackfin/mach-bf561/bfin_serial_5xx.h create mode 100644 include/asm-blackfin/mach-bf561/blackfin.h create mode 100644 include/asm-blackfin/mach-bf561/cdefBF561.h create mode 100644 include/asm-blackfin/mach-bf561/defBF561.h create mode 100644 include/asm-blackfin/mach-bf561/dma.h create mode 100644 include/asm-blackfin/mach-bf561/irq.h create mode 100644 include/asm-blackfin/mach-bf561/mem_init.h create mode 100644 include/asm-blackfin/mach-bf561/mem_map.h create mode 100644 include/asm-blackfin/mach-common/cdef_LPBlackfin.h create mode 100644 include/asm-blackfin/mach-common/context.S create mode 100644 include/asm-blackfin/mach-common/def_LPBlackfin.h create mode 100644 include/asm-blackfin/macros.h create mode 100644 include/asm-blackfin/mem_map.h create mode 100644 include/asm-blackfin/mman.h create mode 100644 include/asm-blackfin/mmu.h create mode 100644 include/asm-blackfin/mmu_context.h create mode 100644 include/asm-blackfin/module.h create mode 100644 include/asm-blackfin/msgbuf.h create mode 100644 include/asm-blackfin/mutex.h create mode 100644 include/asm-blackfin/namei.h create mode 100644 include/asm-blackfin/page.h create mode 100644 include/asm-blackfin/page_offset.h create mode 100644 include/asm-blackfin/param.h create mode 100644 include/asm-blackfin/pci.h create mode 100644 include/asm-blackfin/percpu.h create mode 100644 include/asm-blackfin/pgalloc.h create mode 100644 include/asm-blackfin/pgtable.h create mode 100644 include/asm-blackfin/poll.h create mode 100644 include/asm-blackfin/posix_types.h create mode 100644 include/asm-blackfin/processor.h create mode 100644 include/asm-blackfin/ptrace.h create mode 100644 include/asm-blackfin/resource.h create mode 100644 include/asm-blackfin/scatterlist.h create mode 100644 include/asm-blackfin/sections.h create mode 100644 include/asm-blackfin/segment.h create mode 100644 include/asm-blackfin/semaphore-helper.h create mode 100644 include/asm-blackfin/semaphore.h create mode 100644 include/asm-blackfin/sembuf.h create mode 100644 include/asm-blackfin/setup.h create mode 100644 include/asm-blackfin/shmbuf.h create mode 100644 include/asm-blackfin/shmparam.h create mode 100644 include/asm-blackfin/sigcontext.h create mode 100644 include/asm-blackfin/siginfo.h create mode 100644 include/asm-blackfin/signal.h create mode 100644 include/asm-blackfin/socket.h create mode 100644 include/asm-blackfin/sockios.h create mode 100644 include/asm-blackfin/spinlock.h create mode 100644 include/asm-blackfin/stat.h create mode 100644 include/asm-blackfin/statfs.h create mode 100644 include/asm-blackfin/string.h create mode 100644 include/asm-blackfin/system.h create mode 100644 include/asm-blackfin/termbits.h create mode 100644 include/asm-blackfin/termios.h create mode 100644 include/asm-blackfin/thread_info.h create mode 100644 include/asm-blackfin/timex.h create mode 100644 include/asm-blackfin/tlb.h create mode 100644 include/asm-blackfin/tlbflush.h create mode 100644 include/asm-blackfin/topology.h create mode 100644 include/asm-blackfin/traps.h create mode 100644 include/asm-blackfin/types.h create mode 100644 include/asm-blackfin/uaccess.h create mode 100644 include/asm-blackfin/ucontext.h create mode 
100644 include/asm-blackfin/unaligned.h create mode 100644 include/asm-blackfin/unistd.h create mode 100644 include/asm-blackfin/user.h create mode 100644 include/linux/spi/ad7877.h create mode 100644 include/linux/usb_sl811.h (limited to 'include/linux') diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX new file mode 100644 index 000000000000..7cb3b356b249 --- /dev/null +++ b/Documentation/blackfin/00-INDEX @@ -0,0 +1,11 @@ +00-INDEX + - This file + +cache-lock.txt + - HOWTO for blackfin cache locking. + +cachefeatures.txt + - Supported cache features. + +Filesystems + - Requirements for mounting the root file system. diff --git a/Documentation/blackfin/Filesystems b/Documentation/blackfin/Filesystems new file mode 100644 index 000000000000..51260a1b8032 --- /dev/null +++ b/Documentation/blackfin/Filesystems @@ -0,0 +1,169 @@ +/* + * File: Documentation/blackfin/Filesystems + * Based on: + * Author: + * + * Created: + * Description: This file contains the simple DMA Implementation for Blackfin + * + * Rev: $Id: Filesystems 2384 2006-11-01 04:12:43Z magicyang $ + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + */ + + How to mount the root file system in uClinux/Blackfin + ----------------------------------------------------- + +1 Mounting EXT3 File system. + ------------------------ + + Creating an EXT3 File system for uClinux/Blackfin: + + +Please follow the steps to form the EXT3 File system and mount the same as root +file system. + +a Make an ext3 file system as large as you want the final root file + system. + + mkfs.ext3 /dev/ram0 + +b Mount this Empty file system on a free directory as: + + mount -t ext3 /dev/ram0 ./test + where ./test is the empty directory. + +c Copy your root fs directory that you have so carefully made over. + + cp -af /tmp/my_final_rootfs_files/* ./test + + (For ex: cp -af uClinux-dist/romfs/* ./test) + +d If you have done everything right till now you should be able to see + the required "root" dir's (that's etc, root, bin, lib, sbin...) + +e Now unmount the file system + + umount ./test + +f Create the root file system image. + + dd if=/dev/ram0 bs=1k count= \ + > ext3fs.img + + +Now you have to tell the kernel that will be mounting this file system as +rootfs. +So do a make menuconfig under kernel and select the Ext3 journaling file system +support under File system --> submenu. + + +2. Mounting EXT2 File system. + ------------------------- + +By default the ext2 file system image will be created if you invoke make from +the top uClinux-dist directory. + + +3. Mounting CRAMFS File System + ---------------------------- + +To create a CRAMFS file system image execute the command + + mkfs.cramfs ./test cramfs.img + + where ./test is the target directory. + + +4. Mounting ROMFS File System + -------------------------- + +To create a ROMFS file system image execute the command + + genromfs -v -V "ROMdisk" -f romfs.img -d ./test + + where ./test is the target directory + + +5. Mounting the JFFS2 Filesystem + ----------------------------- + +To create a compressed JFFS filesystem (JFFS2), please execute the command + + mkfs.jffs2 -d ./test -o jffs2.img + + where ./test is the target directory. + +However, please make sure the following is in your kernel config. 
+ +/* + * RAM/ROM/Flash chip drivers + */ +#define CONFIG_MTD_CFI 1 +#define CONFIG_MTD_ROM 1 +/* + * Mapping drivers for chip access + */ +#define CONFIG_MTD_COMPLEX_MAPPINGS 1 +#define CONFIG_MTD_BF533 1 +#undef CONFIG_MTD_UCLINUX + +Through the u-boot boot loader, use the jffs2.img in the corresponding +partition made in linux-2.6.x/drivers/mtd/maps/bf533_flash.c. + +NOTE - Currently the Flash driver is available only for EZKIT. Watch out for a + STAMP driver soon. + + +6. Mounting the NFS File system + ----------------------------- + + For mounting the NFS please do the following in the kernel config. + + In Networking Support --> Networking options --> TCP/IP networking --> + IP: kernel level autoconfiguration + + Enable BOOTP Support. + + In Kernel hacking --> Compiled-in kernel boot parameter add the following + + root=/dev/nfs rw ip=bootp + + In File system --> Network File system, Enable + + NFS file system support --> NFSv3 client support + Root File system on NFS + + in uClibc menuconfig, do the following + In Networking Support + enable Remote Procedure Call (RPC) support + Full RPC Support + + On the Host side, ensure that /etc/dhcpd.conf looks something like this + + ddns-update-style ad-hoc; + allow bootp; + subnet 10.100.4.0 netmask 255.255.255.0 { + default-lease-time 122209600; + max-lease-time 31557600; + group { + host bf533 { + hardware ethernet 00:CF:52:49:C3:01; + fixed-address 10.100.4.50; + option root-path "/home/nfsmount"; + } + } + + ensure that /etc/exports looks something like this + /home/nfsmount *(rw,no_root_squash,no_all_squash) + + run the following commands as root (may differ depending on your + distribution) : + - service nfs start + - service portmap start + - service dhcpd start + - /usr/sbin/exportfs diff --git a/Documentation/blackfin/cache-lock.txt b/Documentation/blackfin/cache-lock.txt new file mode 100644 index 000000000000..88ba1e6c31c3 --- /dev/null +++ b/Documentation/blackfin/cache-lock.txt @@ -0,0 +1,48 @@ +/* + * File: Documentation/blackfin/cache-lock.txt + * Based on: + * Author: + * + * Created: + * Description: This file contains the simple DMA Implementation for Blackfin + * + * Rev: $Id: cache-lock.txt 2384 2006-11-01 04:12:43Z magicyang $ + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + */ + +How to lock your code in cache in uClinux/blackfin +-------------------------------------------------- + +There are only a few steps required to lock your code into the cache. +Currently you can lock the code by Way. + +Below are the interface provided for locking the cache. + + +1. cache_grab_lock(int Ways); + +This function grab the lock for locking your code into the cache specified +by Ways. + + +2. cache_lock(int Ways); + +This function should be called after your critical code has been executed. +Once the critical code exits, the code is now loaded into the cache. This +function locks the code into the cache. + + +So, the example sequence will be: + + cache_grab_lock(WAY0_L); /* Grab the lock */ + + critical_code(); /* Execute the code of interest */ + + cache_lock(WAY0_L); /* Lock the cache */ + +Where WAY0_L signifies WAY0 locking. 
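As a compact restatement of the sequence documented in cache-lock.txt above, the whole locking dance fits in one small function. This is a sketch under stated assumptions: the call names and the WAY0_L constant come from cache-lock.txt and the Blackfin headers added by this patch, while the return types, the extern declarations and the critical_code() body are placeholders rather than definitions taken from those headers:

    /* Assumed prototypes; cache-lock.txt gives only the call names.
     * WAY0_L is expected to come from the Blackfin headers in this patch. */
    extern void cache_grab_lock(int ways);
    extern void cache_lock(int ways);

    /* Placeholder for whatever needs to stay resident in L1 icache. */
    static void critical_code(void)
    {
    }

    static void pin_critical_code(void)
    {
            cache_grab_lock(WAY0_L);   /* grab the lock for Way 0 */
            critical_code();           /* run once so the code is fetched into the cache */
            cache_lock(WAY0_L);        /* lock Way 0 with the code now resident */
    }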
diff --git a/Documentation/blackfin/cachefeatures.txt b/Documentation/blackfin/cachefeatures.txt new file mode 100644 index 000000000000..0fbec23becb5 --- /dev/null +++ b/Documentation/blackfin/cachefeatures.txt @@ -0,0 +1,65 @@ +/* + * File: Documentation/blackfin/cachefeatures.txt + * Based on: + * Author: + * + * Created: + * Description: This file contains the simple DMA Implementation for Blackfin + * + * Rev: $Id: cachefeatures.txt 2384 2006-11-01 04:12:43Z magicyang $ + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + */ + + - Instruction and Data cache initialization. + icache_init(); + dcache_init(); + + - Instruction and Data cache Invalidation Routines, when flushing the + same is not required. + _icache_invalidate(); + _dcache_invalidate(); + + Also, for invalidating the entire instruction and data cache, the below + routines are provided (another method for invalidation, refer page no 267 and 287 of + ADSP-BF533 Hardware Reference manual) + + invalidate_entire_dcache(); + invalidate_entire_icache(); + + -External Flushing of Instruction and data cache routines. + + flush_instruction_cache(); + flush_data_cache(); + + - Internal Flushing of Instruction and Data Cache. + + icplb_flush(); + dcplb_flush(); + + - Locking the cache. + + cache_grab_lock(); + cache_lock(); + + Please refer linux-2.6.x/Documentation/blackfin/cache-lock.txt for how to + lock the cache. + + Locking the cache is optional feature. + + - Miscellaneous cache functions. + + flush_cache_all(); + flush_cache_mm(); + invalidate_dcache_range(); + flush_dcache_range(); + flush_dcache_page(); + flush_cache_range(); + flush_cache_page(); + invalidate_dcache_range(); + flush_page_to_ram(); + diff --git a/MAINTAINERS b/MAINTAINERS index 0492dd88e12a..6d665ac13f99 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -700,6 +700,44 @@ P: Richard Purdie M: rpurdie@rpsys.net S: Maintained +BLACKFIN ARCHITECTURE +P: Aubrey Li +M: aubrey.li@analog.com +P: Bernd Schmidt +M: bernd.schmidt@analog.com +P: Bryan Wu +M: bryan.wu@analog.com +P: Grace Pan +M: grace.pan@analog.com +P: Michael Hennerich +M: michael.hennerich@analog.com +P: Mike Frysinger +M: michael.frysinger@analog.com +P: Jane Lv +M: jane.lv@analog.com +P: Jerry Zeng +M: jerry.zeng@analog.com +P: Jie Zhang +M: jie.zhang@analog.com +P: Robin Getz +M: robin.getz@analog.com +P: Roy Huang +M: roy.huang@analog.com +P: Sonic Zhang +M: sonic.zhang@analog.com +P: Yi Li +M: yi.li@analog.com +L: uclinux-dist-devel@blackfin.uclinux.org +W: http://blackfin.uclinux.org +S: Supported + +BLACKFIN SERIAL DRIVER +P: Aubrey Li +M: aubrey.li@analog.com +L: uclinux-dist-devel@blackfin.uclinux.org +W: http://blackfin.uclinux.org +S: Supported + BAYCOM/HDLCDRV DRIVERS FOR AX.25 P: Thomas Sailer M: t.sailer@alumni.ethz.ch diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig new file mode 100644 index 000000000000..1a4930509325 --- /dev/null +++ b/arch/blackfin/Kconfig @@ -0,0 +1,989 @@ +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. 
+# + +mainmenu "uClinux/Blackfin (w/o MMU) Kernel Configuration" + +config MMU + bool + default n + +config FPU + bool + default n + +config RWSEM_GENERIC_SPINLOCK + bool + default y + +config RWSEM_XCHGADD_ALGORITHM + bool + default n + +config BLACKFIN + bool + default y + +config BFIN + bool + default y + +config SEMAPHORE_SLEEPERS + bool + default y + +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + +config GENERIC_HARDIRQS + bool + default y + +config GENERIC_IRQ_PROBE + bool + default y + +config GENERIC_TIME + bool + default n + +config GENERIC_CALIBRATE_DELAY + bool + default y + +config FORCE_MAX_ZONEORDER + int + default "14" + +config GENERIC_CALIBRATE_DELAY + bool + default y + +config IRQCHIP_DEMUX_GPIO + bool + default y + +source "init/Kconfig" +source "kernel/Kconfig.preempt" + +menu "Blackfin Processor Options" + +comment "Processor and Board Settings" + +choice + prompt "CPU" + default BF533 + +config BF531 + bool "BF531" + help + BF531 Processor Support. + +config BF532 + bool "BF532" + help + BF532 Processor Support. + +config BF533 + bool "BF533" + help + BF533 Processor Support. + +config BF534 + bool "BF534" + help + BF534 Processor Support. + +config BF536 + bool "BF536" + help + BF536 Processor Support. + +config BF537 + bool "BF537" + help + BF537 Processor Support. + +config BF561 + bool "BF561" + help + Not Supported Yet - Work in progress - BF561 Processor Support. + +endchoice + +choice + prompt "Silicon Rev" + default BF_REV_0_2 if BF537 + default BF_REV_0_3 if BF533 + +config BF_REV_0_2 + bool "0.2" + depends on (BF537 || BF536 || BF534) + +config BF_REV_0_3 + bool "0.3" + depends on (BF561 || BF537 || BF536 || BF534 || BF533 || BF532 || BF531) + +config BF_REV_0_4 + bool "0.4" + depends on (BF561 || BF533 || BF532 || BF531) + +config BF_REV_0_5 + bool "0.5" + depends on (BF561 || BF533 || BF532 || BF531) + +endchoice + +config BFIN_DUAL_CORE + bool + depends on (BF561) + default y + +config BFIN_SINGLE_CORE + bool + depends on !BFIN_DUAL_CORE + default y + +choice + prompt "System type" + default BFIN533_STAMP + help + Do NOT change the board here. Please use the top level + configuration to ensure that all the other settings are + correct. + +config BFIN533_EZKIT + bool "BF533-EZKIT" + depends on (BF533 || BF532 || BF531) + help + BF533-EZKIT-LITE board Support. + +config BFIN533_STAMP + bool "BF533-STAMP" + depends on (BF533 || BF532 || BF531) + help + BF533-STAMP board Support. + +config BFIN537_STAMP + bool "BF537-STAMP" + depends on (BF537 || BF536 || BF534) + help + BF537-STAMP board Support. + +config BFIN533_BLUETECHNIX_CM + bool "Bluetechnix CM-BF533" + depends on (BF533) + help + CM-BF533 support for EVAL- and DEV-Board. + +config BFIN537_BLUETECHNIX_CM + bool "Bluetechnix CM-BF537" + depends on (BF537) + help + CM-BF537 support for EVAL- and DEV-Board. + +config BFIN561_BLUETECHNIX_CM + bool "BF561-CM" + depends on (BF561) + help + CM-BF561 support for EVAL- and DEV-Board. + +config BFIN561_EZKIT + bool "BF561-EZKIT" + depends on (BF561) + help + BF561-EZKIT-LITE board Support. + +config PNAV10 + bool "PNAV 1.0 board" + depends on (BF537) + help + PNAV 1.0 board Support. + +config GENERIC_BOARD + bool "Custom" + depends on (BF537 || BF536 \ + || BF534 || BF561 || BF535 || BF533 || BF532 || BF531) + help + GENERIC or Custom board Support. 
+ +endchoice + +config MEM_GENERIC_BOARD + bool + depends on GENERIC_BOARD + default y + +config MEM_MT48LC64M4A2FB_7E + bool + depends on (BFIN533_STAMP) + default y + +config MEM_MT48LC16M16A2TG_75 + bool + depends on (BFIN533_EZKIT || BFIN561_EZKIT \ + || BFIN533_BLUETECHNIX_CM || BFIN537_BLUETECHNIX_CM) + default y + +config MEM_MT48LC32M8A2_75 + bool + depends on (BFIN537_STAMP || PNAV10) + default y + +config MEM_MT48LC8M32B2B5_7 + bool + depends on (BFIN561_BLUETECHNIX_CM) + default y + +config BFIN_SHARED_FLASH_ENET + bool + depends on (BFIN533_STAMP) + default y + +source "arch/blackfin/mach-bf533/Kconfig" +source "arch/blackfin/mach-bf561/Kconfig" +source "arch/blackfin/mach-bf537/Kconfig" + +menu "Board customizations" + +config CMDLINE_BOOL + bool "Default bootloader kernel arguments" + +config CMDLINE + string "Initial kernel command string" + depends on CMDLINE_BOOL + default "console=ttyBF0,57600" + help + If you don't have a boot loader capable of passing a command line string + to the kernel, you may specify one here. As a minimum, you should specify + the memory size and the root device (e.g., mem=8M, root=/dev/nfs). + +comment "Board Setup" + +config CLKIN_HZ + int "Crystal Frequency in Hz" + default "11059200" if BFIN533_STAMP + default "27000000" if BFIN533_EZKIT + default "25000000" if BFIN537_STAMP + default "30000000" if BFIN561_EZKIT + default "24576000" if PNAV10 + help + The frequency of CLKIN crystal oscillator on the board in Hz. + +config MEM_SIZE + int "SDRAM Memory Size in MBytes" + default 32 if BFIN533_EZKIT + default 64 if BFIN537_STAMP + default 64 if BFIN561_EZKIT + default 128 if BFIN533_STAMP + default 64 if PNAV10 + +config MEM_ADD_WIDTH + int "SDRAM Memory Address Width" + default 9 if BFIN533_EZKIT + default 9 if BFIN561_EZKIT + default 10 if BFIN537_STAMP + default 11 if BFIN533_STAMP + default 10 if PNAV10 + +config ENET_FLASH_PIN + int "PF port/pin used for flash and ethernet sharing" + depends on (BFIN533_STAMP) + default 0 + help + PF port/pin used for flash and ethernet sharing to allow other PF + pins to be used on other platforms without having to touch common + code. + For example: PF0 --> 0,PF1 --> 1,PF2 --> 2, etc. + +config BOOT_LOAD + hex "Kernel load address for booting" + default "0x1000" + help + This option allows you to set the load address of the kernel. + This can be useful if you are on a board which has a small amount + of memory or you wish to reserve some memory at the beginning of + the address space. + + Note that you generally want to keep this value at or above 4k + (0x1000) as this will allow the kernel to capture NULL pointer + references. + +comment "LED Status Indicators" + depends on (BFIN533_STAMP || BFIN533_BLUETECHNIX_CM) + +config BFIN_ALIVE_LED + bool "Enable Board Alive" + depends on (BFIN533_STAMP || BFIN533_BLUETECHNIX_CM) + default n + help + Blink the LEDs you select when the kernel is running. Helps detect + a hung kernel. + +config BFIN_ALIVE_LED_NUM + int "LED" + depends on BFIN_ALIVE_LED + range 1 3 if BFIN533_STAMP + default "3" if BFIN533_STAMP + help + Select the LED (marked on the board) for you to blink. + +config BFIN_IDLE_LED + bool "Enable System Load/Idle LED" + depends on (BFIN533_STAMP || BFIN533_BLUETECHNIX_CM) + default n + help + Blinks the LED you select when to determine kernel load. + +config BFIN_IDLE_LED_NUM + int "LED" + depends on BFIN_IDLE_LED + range 1 3 if BFIN533_STAMP + default "2" if BFIN533_STAMP + help + Select the LED (marked on the board) for you to blink. 
+ +# +# Sorry - but you need to put the hex address here - +# + +# Flag Data register +config BFIN_ALIVE_LED_PORT + hex + default 0xFFC00700 if (BFIN533_STAMP) + +# Peripheral Flag Direction Register +config BFIN_ALIVE_LED_DPORT + hex + default 0xFFC00730 if (BFIN533_STAMP) + +config BFIN_ALIVE_LED_PIN + hex + default 0x04 if (BFIN533_STAMP && BFIN_ALIVE_LED_NUM = 1) + default 0x08 if (BFIN533_STAMP && BFIN_ALIVE_LED_NUM = 2) + default 0x10 if (BFIN533_STAMP && BFIN_ALIVE_LED_NUM = 3) + +config BFIN_IDLE_LED_PORT + hex + default 0xFFC00700 if (BFIN533_STAMP) + +# Peripheral Flag Direction Register +config BFIN_IDLE_LED_DPORT + hex + default 0xFFC00730 if (BFIN533_STAMP) + +config BFIN_IDLE_LED_PIN + hex + default 0x04 if (BFIN533_STAMP && BFIN_IDLE_LED_NUM = 1) + default 0x08 if (BFIN533_STAMP && BFIN_IDLE_LED_NUM = 2) + default 0x10 if (BFIN533_STAMP && BFIN_IDLE_LED_NUM = 3) + +comment "Console UART Setup" + +choice + prompt "Baud Rate" + default BAUD_57600 +config BAUD_9600 + bool "9600" +config BAUD_19200 + bool "19200" +config BAUD_38400 + bool "38400" +config BAUD_57600 + bool "57600" +config BAUD_115200 + bool "115200" +endchoice + +choice + prompt "Parity" + default BAUD_NO_PARITY +config BAUD_NO_PARITY + bool "No Parity" +config BAUD_PARITY + bool "Parity" +endchoice + +choice + prompt "Stop Bits" + default BAUD_1_STOPBIT +config BAUD_1_STOPBIT + bool "1" +config BAUD_2_STOPBIT + bool "2" +endchoice + +endmenu + + +menu "Blackfin Kernel Optimizations" + +comment "Timer Tick" + +source kernel/Kconfig.hz + +comment "Memory Optimizations" + +config I_ENTRY_L1 + bool "Locate interrupt entry code in L1 Memory" + default y + help + If enabled interrupt entry code (STORE/RESTORE CONTEXT) is linked + into L1 instruction memory.(less latency) + +config EXCPT_IRQ_SYSC_L1 + bool "Locate entire ASM lowlevel excepetion / interrupt - Syscall and CPLB handler code in L1 Memory" + default y + help + If enabled entire ASM lowlevel exception and interrupt entry code (STORE/RESTORE CONTEXT) is linked + into L1 instruction memory.(less latency) + +config DO_IRQ_L1 + bool "Locate frequently called do_irq dispatcher function in L1 Memory" + default y + help + If enabled frequently called do_irq dispatcher function is linked + into L1 instruction memory.(less latency) + +config CORE_TIMER_IRQ_L1 + bool "Locate frequently called timer_interrupt() function in L1 Memory" + default y + help + If enabled frequently called timer_interrupt() function is linked + into L1 instruction memory.(less latency) + +config IDLE_L1 + bool "Locate frequently idle function in L1 Memory" + default y + help + If enabled frequently called idle function is linked + into L1 instruction memory.(less latency) + +config SCHEDULE_L1 + bool "Locate kernel schedule function in L1 Memory" + default y + help + If enabled frequently called kernel schedule is linked + into L1 instruction memory.(less latency) + +config ARITHMETIC_OPS_L1 + bool "Locate kernel owned arithmetic functions in L1 Memory" + default y + help + If enabled arithmetic functions are linked + into L1 instruction memory.(less latency) + +config ACCESS_OK_L1 + bool "Locate access_ok function in L1 Memory" + default y + help + If enabled access_ok function is linked + into L1 instruction memory.(less latency) + +config MEMSET_L1 + bool "Locate memset function in L1 Memory" + default y + help + If enabled memset function is linked + into L1 instruction memory.(less latency) + +config MEMCPY_L1 + bool "Locate memcpy function in L1 Memory" + default y + help + If enabled 
memcpy function is linked + into L1 instruction memory.(less latency) + +config SYS_BFIN_SPINLOCK_L1 + bool "Locate sys_bfin_spinlock function in L1 Memory" + default y + help + If enabled sys_bfin_spinlock function is linked + into L1 instruction memory.(less latency) + +config IP_CHECKSUM_L1 + bool "Locate IP Checksum function in L1 Memory" + default n + help + If enabled IP Checksum function is linked + into L1 instruction memory.(less latency) + +config CACHELINE_ALIGNED_L1 + bool "Locate cacheline_aligned data to L1 Data Memory" + default y + depends on !BF531 + help + If enabled cacheline_anligned data is linked + into L1 data memory.(less latency) + +config SYSCALL_TAB_L1 + bool "Locate Syscall Table L1 Data Memory" + default n + depends on !BF531 + help + If enabled the Syscall LUT is linked + into L1 data memory.(less latency) + +config CPLB_SWITCH_TAB_L1 + bool "Locate CPLB Switch Tables L1 Data Memory" + default n + depends on !BF531 + help + If enabled the CPLB Switch Tables are linked + into L1 data memory.(less latency) + +endmenu + + +choice + prompt "Kernel executes from" + help + Choose the memory type that the kernel will be running in. + +config RAMKERNEL + bool "RAM" + help + The kernel will be resident in RAM when running. + +config ROMKERNEL + bool "ROM" + help + The kernel will be resident in FLASH/ROM when running. + +endchoice + +source "mm/Kconfig" + +config LARGE_ALLOCS + bool "Allow allocating large blocks (> 1MB) of memory" + help + Allow the slab memory allocator to keep chains for very large + memory sizes - upto 32MB. You may need this if your system has + a lot of RAM, and you need to able to allocate very large + contiguous chunks. If unsure, say N. + +config BFIN_DMA_5XX + bool "Enable DMA Support" + depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561) + default y + help + DMA driver for BF5xx. + +choice + prompt "Uncached SDRAM region" + default DMA_UNCACHED_1M + depends BFIN_DMA_5XX +config DMA_UNCACHED_2M + bool "Enable 2M DMA region" +config DMA_UNCACHED_1M + bool "Enable 1M DMA region" +config DMA_UNCACHED_NONE + bool "Disable DMA region" +endchoice + + +comment "Cache Support" +config BLKFIN_CACHE + bool "Enable ICACHE" +config BLKFIN_DCACHE + bool "Enable DCACHE" +config BLKFIN_DCACHE_BANKA + bool "Enable only 16k BankA DCACHE - BankB is SRAM" + depends on BLKFIN_DCACHE && !BF531 + default n +config BLKFIN_CACHE_LOCK + bool "Enable Cache Locking" + +choice + prompt "Policy" + depends on BLKFIN_DCACHE + default BLKFIN_WB +config BLKFIN_WB + bool "Write back" + help + Write Back Policy: + Cached data will be written back to SDRAM only when needed. + This can give a nice increase in performance, but beware of + broken drivers that do not properly invalidate/flush their + cache. + + Write Through Policy: + Cached data will always be written back to SDRAM when the + cache is updated. This is a completely safe setting, but + performance is worse than Write Back. + + If you are unsure of the options and you want to be safe, + then go with Write Through. + +config BLKFIN_WT + bool "Write through" + help + Write Back Policy: + Cached data will be written back to SDRAM only when needed. + This can give a nice increase in performance, but beware of + broken drivers that do not properly invalidate/flush their + cache. + + Write Through Policy: + Cached data will always be written back to SDRAM when the + cache is updated. This is a completely safe setting, but + performance is worse than Write Back. 
+ + If you are unsure of the options and you want to be safe, + then go with Write Through. + +endchoice + +config L1_MAX_PIECE + int "Set the max L1 SRAM pieces" + default 16 + help + Set the max memory pieces for the L1 SRAM allocation algorithm. + Min value is 16. Max value is 1024. + +menu "Clock Settings" + + +config BFIN_KERNEL_CLOCK + bool "Re-program Clocks while Kernel boots?" + default n + help + This option decides if kernel clocks are re-programed from the + bootloader settings. If the clocks are not set, the SDRAM settings + are also not changed, and the Bootloader does 100% of the hardware + configuration. + +config VCO_MULT + int "VCO Multiplier" + depends on BFIN_KERNEL_CLOCK + default "22" if BFIN533_EZKIT + default "45" if BFIN533_STAMP + default "20" if BFIN537_STAMP + default "22" if BFIN533_BLUETECHNIX_CM + default "20" if BFIN537_BLUETECHNIX_CM + default "20" if BFIN561_BLUETECHNIX_CM + default "20" if BFIN561_EZKIT + +config CCLK_DIV + int "Core Clock Divider" + depends on BFIN_KERNEL_CLOCK + default 1 if BFIN533_EZKIT + default 1 if BFIN533_STAMP + default 1 if BFIN537_STAMP + default 1 if BFIN533_BLUETECHNIX_CM + default 1 if BFIN537_BLUETECHNIX_CM + default 1 if BFIN561_BLUETECHNIX_CM + default 1 if BFIN561_EZKIT + +config SCLK_DIV + int "System Clock Divider" + depends on BFIN_KERNEL_CLOCK + default 5 if BFIN533_EZKIT + default 5 if BFIN533_STAMP + default 4 if BFIN537_STAMP + default 5 if BFIN533_BLUETECHNIX_CM + default 4 if BFIN537_BLUETECHNIX_CM + default 4 if BFIN561_BLUETECHNIX_CM + default 5 if BFIN561_EZKIT + +config CLKIN_HALF + bool "Half ClockIn" + depends on BFIN_KERNEL_CLOCK + default n + +config PLL_BYPASS + bool "Bypass PLL" + depends on BFIN_KERNEL_CLOCK + default n + +endmenu + +comment "Asynchonous Memory Configuration" + +menu "EBIU_AMBCTL Global Control" +config C_AMCKEN + bool "Enable CLKOUT" + default y + +config C_CDPRIO + bool "DMA has priority over core for ext. accesses" + default n + +config C_B0PEN + depends on BF561 + bool "Bank 0 16 bit packing enable" + default y + +config C_B1PEN + depends on BF561 + bool "Bank 1 16 bit packing enable" + default y + +config C_B2PEN + depends on BF561 + bool "Bank 2 16 bit packing enable" + default y + +config C_B3PEN + depends on BF561 + bool "Bank 3 16 bit packing enable" + default n + +choice + prompt"Enable Asynchonous Memory Banks" + default C_AMBEN_ALL + +config C_AMBEN + bool "Disable All Banks" + +config C_AMBEN_B0 + bool "Enable Bank 0" + +config C_AMBEN_B0_B1 + bool "Enable Bank 0 & 1" + +config C_AMBEN_B0_B1_B2 + bool "Enable Bank 0 & 1 & 2" + +config C_AMBEN_ALL + bool "Enable All Banks" +endchoice +endmenu + +menu "EBIU_AMBCTL Control" +config BANK_0 + hex "Bank 0" + default 0x7BB0 + +config BANK_1 + hex "Bank 1" + default 0x7BB0 + +config BANK_2 + hex "Bank 2" + default 0x7BB0 + +config BANK_3 + hex "Bank 3" + default 0x99B3 +endmenu + +endmenu + +############################################################################# +menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" + +config PCI + bool "PCI support" + help + Support for PCI bus. + +source "drivers/pci/Kconfig" + +config HOTPLUG + bool "Support for hot-pluggable device" + help + Say Y here if you want to plug devices into your computer while + the system is running, and be able to use them quickly. In many + cases, the devices can likewise be unplugged at any time too. 
+
+comment "Asynchronous Memory Configuration"
+
+menu "EBIU_AMBCTL Global Control"
+config C_AMCKEN
+    bool "Enable CLKOUT"
+    default y
+
+config C_CDPRIO
+    bool "DMA has priority over core for ext. accesses"
+    default n
+
+config C_B0PEN
+    depends on BF561
+    bool "Bank 0 16 bit packing enable"
+    default y
+
+config C_B1PEN
+    depends on BF561
+    bool "Bank 1 16 bit packing enable"
+    default y
+
+config C_B2PEN
+    depends on BF561
+    bool "Bank 2 16 bit packing enable"
+    default y
+
+config C_B3PEN
+    depends on BF561
+    bool "Bank 3 16 bit packing enable"
+    default n
+
+choice
+    prompt "Enable Asynchronous Memory Banks"
+    default C_AMBEN_ALL
+
+config C_AMBEN
+    bool "Disable All Banks"
+
+config C_AMBEN_B0
+    bool "Enable Bank 0"
+
+config C_AMBEN_B0_B1
+    bool "Enable Bank 0 & 1"
+
+config C_AMBEN_B0_B1_B2
+    bool "Enable Bank 0 & 1 & 2"
+
+config C_AMBEN_ALL
+    bool "Enable All Banks"
+endchoice
+endmenu
+
+menu "EBIU_AMBCTL Control"
+config BANK_0
+    hex "Bank 0"
+    default 0x7BB0
+
+config BANK_1
+    hex "Bank 1"
+    default 0x7BB0
+
+config BANK_2
+    hex "Bank 2"
+    default 0x7BB0
+
+config BANK_3
+    hex "Bank 3"
+    default 0x99B3
+endmenu
+
+endmenu
+
+#############################################################################
+menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
+
+config PCI
+    bool "PCI support"
+    help
+      Support for PCI bus.
+
+source "drivers/pci/Kconfig"
+
+config HOTPLUG
+    bool "Support for hot-pluggable devices"
+    help
+      Say Y here if you want to plug devices into your computer while
+      the system is running, and be able to use them quickly. In many
+      cases, the devices can likewise be unplugged at any time too.
+
+      One well-known example of this is PCMCIA- or PC-cards, credit-card
+      size devices such as network cards, modems or hard drives which are
+      plugged into slots found on all modern laptop computers. Another
+      example, used on modern desktops as well as laptops, is USB.
+
+      Enable HOTPLUG and KMOD, and build a modular kernel. Get agent
+      software (at ) and install it.
+      Then your kernel will automatically call out to a user mode "policy
+      agent" (/sbin/hotplug) to load modules and set up software needed
+      to use devices as you hotplug them.
+
+source "drivers/pcmcia/Kconfig"
+
+source "drivers/pci/hotplug/Kconfig"
+
+endmenu
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+menu "Power management options"
+source "kernel/power/Kconfig"
+
+choice
+    prompt "Select PM Wakeup Event Source"
+    default PM_WAKEUP_GPIO_BY_SIC_IWR
+    depends on PM
+    help
+      If you have a GPIO already configured as input with the corresponding
+      PORTx_MASK bit set, choose "Specify Wakeup Event by SIC_IWR value".
+
+config PM_WAKEUP_GPIO_BY_SIC_IWR
+    bool "Specify Wakeup Event by SIC_IWR value"
+config PM_WAKEUP_BY_GPIO
+    bool "Cause Wakeup Event by GPIO"
+config PM_WAKEUP_GPIO_API
+    bool "Configure Wakeup Event by PM GPIO API"
+
+endchoice
+
+config PM_WAKEUP_SIC_IWR
+    hex "Wakeup Events (SIC_IWR)"
+    depends on PM_WAKEUP_GPIO_BY_SIC_IWR
+    default 0x80000000 if (BF537 || BF536 || BF534)
+    default 0x100000 if (BF533 || BF532 || BF531)
+
+config PM_WAKEUP_GPIO_NUMBER
+    int "Wakeup GPIO number"
+    range 0 47
+    depends on PM_WAKEUP_BY_GPIO
+    default 2 if BFIN537_STAMP
+
+choice
+    prompt "GPIO Polarity"
+    depends on PM_WAKEUP_BY_GPIO
+    default PM_WAKEUP_GPIO_POLAR_H
+config PM_WAKEUP_GPIO_POLAR_H
+    bool "Active High"
+config PM_WAKEUP_GPIO_POLAR_L
+    bool "Active Low"
+config PM_WAKEUP_GPIO_POLAR_EDGE_F
+    bool "Falling EDGE"
+config PM_WAKEUP_GPIO_POLAR_EDGE_R
+    bool "Rising EDGE"
+config PM_WAKEUP_GPIO_POLAR_EDGE_B
+    bool "Both EDGE"
+endchoice
+
+endmenu
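PM_WAKEUP_SIC_IWR above is a raw bit mask for the SIC interrupt wakeup register: each set bit allows the corresponding system interrupt to bring the part out of sleep. The BF537 default of 0x80000000 is simply bit 31 set; which peripheral event that bit selects is chip specific and not restated here. A hedged sketch of composing such a mask:

    /* Illustrative only: build a SIC_IWR wakeup mask from bit positions.
     * The event-to-bit mapping is chip specific; consult the SIC tables.
     */
    #define IWR_BIT(n)      (1UL << (n))

    static unsigned long wakeup_mask = IWR_BIT(31);  /* == 0x80000000, the BF537 default above */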
+
+if (BF537 || BF533)
+
+menu "CPU Frequency scaling"
+
+source "drivers/cpufreq/Kconfig"
+
+config CPU_FREQ
+    bool
+    default n
+    help
+      If you want to enable this option, you should select the
+      DPMC driver from Character Devices.
+endmenu
+
+endif
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/blackfin/oprofile/Kconfig"
+
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config DEBUG_HWERR
+    bool "Hardware error interrupt debugging"
+    depends on DEBUG_KERNEL
+    help
+      When enabled, the hardware error interrupt is never disabled, and
+      will happen immediately when an error condition occurs. This comes
+      at a slight cost in code size, but is necessary if you are getting
+      hardware error interrupts and need to know where they are coming
+      from.
+
+config DEBUG_ICACHE_CHECK
+    bool "Check Instruction cache coherency"
+    depends on DEBUG_KERNEL
+    depends on DEBUG_HWERR
+    help
+      Say Y here if you are getting weird unexplained errors. This will
+      ensure that icache is what SDRAM says it should be, by doing a
+      byte-wise comparison between SDRAM and instruction cache. This
+      also relocates the irq_panic() function to L1 memory (which is
+      un-cached).
+
+config DEBUG_KERNEL_START
+    bool "Debug Kernel Startup"
+    depends on DEBUG_KERNEL
+    help
+      Say Y here to put in a mini-exception handler before the kernel
+      replaces the bootloader exception handler. This will stop kernels
+      from dying at startup with no visible error messages.
+
+config DEBUG_SERIAL_EARLY_INIT
+    bool "Initialize serial driver early"
+    default n
+    depends on SERIAL_BFIN
+    help
+      Say Y here if you want to get kernel output early when the kernel
+      crashes before the normal console initialization. If this option
+      is enabled, console output will always go to ttyBF0, no matter
+      what kernel boot parameters you set.
+
+config DEBUG_HUNT_FOR_ZERO
+    bool "Catch NULL pointer reads/writes"
+    default y
+    help
+      Say Y here to catch reads/writes to anywhere in the memory range
+      from 0x0000 - 0x0FFF (the first 4k) of memory. This is useful in
+      catching common programming errors such as NULL pointer dereferences.
+
+      Misbehaving applications will be killed (they get a SEGV), while the
+      kernel will trigger a panic.
+
+      Enabling this option will take up an extra entry in the CPLB table.
+      Otherwise, there is no extra overhead.
+
+config DEBUG_BFIN_NO_KERN_HWTRACE
+    bool "Trace user apps (turn off hwtrace in kernel)"
+    default n
+    help
+      Some pieces of the kernel contain a lot of flow changes which can
+      quickly fill up the hardware trace buffer. When debugging crashes,
+      the hardware trace may indicate that the problem lies in kernel
+      space when in reality an application is buggy.
+
+      Say Y here to disable hardware tracing in some known "jumpy" pieces
+      of code so that the trace buffer will extend further back.
+
+config DUAL_CORE_TEST_MODULE
+    tristate "Dual Core Test Module"
+    depends on (BF561)
+    default n
+    help
+      Say Y here to build in the dual core test module for dual core testing.
+
+config CPLB_INFO
+    bool "Display the CPLB information"
+    help
+      Display the CPLB information.
+
+config ACCESS_CHECK
+    bool "Check the user pointer address"
+    default y
+    help
+      Usually the pointer transferred from user space is checked to see if its
+      address is in the kernel space.
+
+      Say N here to disable that check to improve the performance.
+
+endmenu
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
new file mode 100644
index 000000000000..52d4dbdb2b1a
--- /dev/null
+++ b/arch/blackfin/Makefile
@@ -0,0 +1,80 @@
+#
+# arch/blackfin/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+
+CROSS_COMPILE ?= bfin-uclinux-
+LDFLAGS_vmlinux := -X
+OBJCOPYFLAGS := -O binary -R .note -R .comment -S
+GZFLAGS := -9
+
+CFLAGS_MODULE += -mlong-calls
+KALLSYMS += --symbol-prefix=_
+
+
+# setup the machine name and the machine dependent settings
+machine-$(CONFIG_BF531) := bf533
+machine-$(CONFIG_BF532) := bf533
+machine-$(CONFIG_BF533) := bf533
+machine-$(CONFIG_BF534) := bf537
+machine-$(CONFIG_BF536) := bf537
+machine-$(CONFIG_BF537) := bf537
+machine-$(CONFIG_BF561) := bf561
+MACHINE := $(machine-y)
+export MACHINE
+
+
+head-y := arch/$(ARCH)/mach-$(MACHINE)/head.o arch/$(ARCH)/kernel/init_task.o
+
+core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/ arch/$(ARCH)/mach-common/
+
+# If we have a machine-specific directory, then include it in the build.
+ifneq ($(machine-y),)
+core-y += arch/$(ARCH)/mach-$(MACHINE)/
+core-y += arch/$(ARCH)/mach-$(MACHINE)/boards/
+endif
+
+libs-y += arch/$(ARCH)/lib/
+
+drivers-$(CONFIG_OPROFILE) += arch/$(ARCH)/oprofile/
+
+
+
+# Update machine arch symlinks if something which affects
+# them changed. We use .mach to indicate when they were updated
+# last, otherwise make uses the target directory mtime.
+ +include/asm-blackfin/.mach: $(wildcard include/config/arch/*.h) include/config/auto.conf + @echo ' SYMLINK include/asm-$(ARCH)/mach-$(MACHINE) -> include/asm-$(ARCH)/mach' +ifneq ($(KBUILD_SRC),) + $(Q)mkdir -p include/asm-$(ARCH) + $(Q)ln -fsn $(srctree)/include/asm-$(ARCH)/mach-$(MACHINE) include/asm-$(ARCH)/mach +else + $(Q)ln -fsn mach-$(MACHINE) include/asm-$(ARCH)/mach +endif + @touch $@ + +CLEAN_FILES += \ + include/asm-$(ARCH)/asm-offsets.h \ + arch/$(ARCH)/kernel/asm-offsets.s \ + include/asm-$(ARCH)/mach \ + include/asm-$(ARCH)/.mach + +archprepare: include/asm-blackfin/.mach +archclean: + $(Q)$(MAKE) $(clean)=$(boot) + + +all: vmImage +boot := arch/$(ARCH)/boot +BOOT_TARGETS = vmImage +.PHONY: $(BOOT_TARGETS) +$(BOOT_TARGETS): vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ +define archhelp + echo '* vmImage - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage)' +endef diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile new file mode 100644 index 000000000000..49e8098d4c21 --- /dev/null +++ b/arch/blackfin/boot/Makefile @@ -0,0 +1,27 @@ +# +# arch/blackfin/boot/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# + +MKIMAGE := $(srctree)/scripts/mkuboot.sh + +targets := vmImage +extra-y += vmlinux.bin vmlinux.gz + +quiet_cmd_uimage = UIMAGE $@ + cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \ + -C gzip -a $(CONFIG_BOOT_LOAD) -e $(CONFIG_BOOT_LOAD) -n 'Linux-$(KERNELRELEASE)' \ + -d $< $@ + +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) + +$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) + +$(obj)/vmImage: $(obj)/vmlinux.gz + $(call if_changed,uimage) + @echo 'Kernel: $@ is ready' diff --git a/arch/blackfin/defconfig b/arch/blackfin/defconfig new file mode 100644 index 000000000000..d5904ca994cf --- /dev/null +++ b/arch/blackfin/defconfig @@ -0,0 +1,1314 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.20 +# +# CONFIG_MMU is not set +# CONFIG_FPU is not set +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +CONFIG_BFIN=y +CONFIG_SEMAPHORE_SLEEPERS=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_UCLINUX=y +CONFIG_FORCE_MAX_ZONEORDER=14 +CONFIG_IRQCHIP_DEMUX_GPIO=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 + +# +# General setup +# +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SYSVIPC=y +# CONFIG_IPC_NS is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_UTS_NS is not set +# CONFIG_AUDIT is not set +# CONFIG_IKCONFIG is not set +CONFIG_SYSFS_DEPRECATED=y +# CONFIG_RELAY is not set +CONFIG_INITRAMFS_SOURCE="" +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +# CONFIG_LIMIT_PAGECACHE is not set +CONFIG_BUDDY=y +# CONFIG_NP2 is not set +CONFIG_SLAB=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_RT_MUTEXES=y +CONFIG_TINY_SHMEM=y 
+CONFIG_BASE_SMALL=0 +# CONFIG_SLOB is not set + +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y + +# +# Block layer +# +CONFIG_BLOCK=y +# CONFIG_LBD is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_LSF is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_CFQ=y +CONFIG_DEFAULT_AS=y +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="anticipatory" + +# +# Blackfin Processor Options +# + +# +# Processor and Board Settings +# +# CONFIG_BF531 is not set +# CONFIG_BF532 is not set +# CONFIG_BF533 is not set +# CONFIG_BF534 is not set +# CONFIG_BF535 is not set +# CONFIG_BF536 is not set +CONFIG_BF537=y +# CONFIG_BF561 is not set +CONFIG_BF_REV_0_2=y +# CONFIG_BF_REV_0_3 is not set +# CONFIG_BF_REV_0_4 is not set +# CONFIG_BF_REV_0_5 is not set +CONFIG_BLACKFIN=y +CONFIG_BFIN_SINGLE_CORE=y +# CONFIG_BFIN533_EZKIT is not set +# CONFIG_BFIN533_STAMP is not set +CONFIG_BFIN537_STAMP=y +# CONFIG_BFIN533_BLUETECHNIX_CM is not set +# CONFIG_BFIN537_BLUETECHNIX_CM is not set +# CONFIG_BFIN561_BLUETECHNIX_CM is not set +# CONFIG_BFIN561_EZKIT is not set +# CONFIG_PNAV10 is not set +# CONFIG_GENERIC_BOARD is not set +CONFIG_MEM_MT48LC32M8A2_75=y +CONFIG_IRQ_PLL_WAKEUP=7 + +# +# BF537 Specific Configuration +# + +# +# PORT F/G Selection +# +CONFIG_BF537_PORT_F=y +# CONFIG_BF537_PORT_G is not set +# CONFIG_BF537_PORT_H is not set + +# +# Interrupt Priority Assignment +# + +# +# Priority +# +CONFIG_IRQ_DMA_ERROR=7 +CONFIG_IRQ_ERROR=7 +CONFIG_IRQ_RTC=8 +CONFIG_IRQ_PPI=8 +CONFIG_IRQ_SPORT0_RX=9 +CONFIG_IRQ_SPORT0_TX=9 +CONFIG_IRQ_SPORT1_RX=9 +CONFIG_IRQ_SPORT1_TX=9 +CONFIG_IRQ_TWI=10 +CONFIG_IRQ_SPI=10 +CONFIG_IRQ_UART0_RX=10 +CONFIG_IRQ_UART0_TX=10 +CONFIG_IRQ_UART1_RX=10 +CONFIG_IRQ_UART1_TX=10 +CONFIG_IRQ_CAN_RX=11 +CONFIG_IRQ_CAN_TX=11 +CONFIG_IRQ_MAC_RX=11 +CONFIG_IRQ_MAC_TX=11 +CONFIG_IRQ_TMR0=12 +CONFIG_IRQ_TMR1=12 +CONFIG_IRQ_TMR2=12 +CONFIG_IRQ_TMR3=12 +CONFIG_IRQ_TMR4=12 +CONFIG_IRQ_TMR5=12 +CONFIG_IRQ_TMR6=12 +CONFIG_IRQ_TMR7=12 +CONFIG_IRQ_PROG_INTA=12 +CONFIG_IRQ_PORTG_INTB=12 +CONFIG_IRQ_MEM_DMA0=13 +CONFIG_IRQ_MEM_DMA1=13 +CONFIG_IRQ_WATCH=13 + +# +# Board customizations +# + +# +# Board Setup +# +CONFIG_CLKIN_HZ=25000000 +CONFIG_MEM_SIZE=64 +CONFIG_MEM_ADD_WIDTH=10 +CONFIG_BOOT_LOAD=0x1000 + +# +# Console UART Setup +# +# CONFIG_BAUD_9600 is not set +# CONFIG_BAUD_19200 is not set +# CONFIG_BAUD_38400 is not set +CONFIG_BAUD_57600=y +# CONFIG_BAUD_115200 is not set +CONFIG_BAUD_NO_PARITY=y +# CONFIG_BAUD_PARITY is not set +CONFIG_BAUD_1_STOPBIT=y +# CONFIG_BAUD_2_STOPBIT is not set + +# +# Blackfin Kernel Optimizations +# + +# +# Timer Tick +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 + +# +# Memory Optimizations +# +CONFIG_I_ENTRY_L1=y +CONFIG_RAMKERNEL=y +# CONFIG_ROMKERNEL is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_RESOURCES_64BIT is not set +CONFIG_LARGE_ALLOCS=y +CONFIG_BFIN_DMA_5XX=y +# CONFIG_DMA_UNCACHED_2M is not set +CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_NONE 
is not set + +# +# Cache Support +# +CONFIG_BLKFIN_CACHE=y +CONFIG_BLKFIN_DCACHE=y +# CONFIG_BLKFIN_CACHE_LOCK is not set +# CONFIG_BLKFIN_WB is not set +CONFIG_BLKFIN_WT=y +CONFIG_L1_MAX_PIECE=16 + +# +# Clock Settings +# +# CONFIG_BFIN_KERNEL_CLOCK is not set + +# +# Asynchonous Memory Configuration +# + +# +# EBIU_AMBCTL Global Control +# +CONFIG_C_AMCKEN=y +CONFIG_C_CDPRIO=y +# CONFIG_C_AMBEN is not set +# CONFIG_C_AMBEN_B0 is not set +# CONFIG_C_AMBEN_B0_B1 is not set +# CONFIG_C_AMBEN_B0_B1_B2 is not set +CONFIG_C_AMBEN_ALL=y + +# +# EBIU_AMBCTL Control +# +CONFIG_BANK_0=0x7BB0 +CONFIG_BANK_1=0x7BB0 +CONFIG_BANK_2=0x7BB0 +CONFIG_BANK_3=0x99B3 + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +# CONFIG_PCI is not set + +# +# PCCARD (PCMCIA/CardBus) support +# +# CONFIG_PCCARD is not set + +# +# PCI Hotplug Support +# + +# +# Executable file formats +# +CONFIG_BINFMT_ELF_FDPIC=y +CONFIG_BINFMT_FLAT=y +CONFIG_BINFMT_ZFLAT=y +# CONFIG_BINFMT_SHARED_FLAT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +CONFIG_PM_LEGACY=y +# CONFIG_PM_DEBUG is not set +# CONFIG_PM_SYSFS_DEPRECATED is not set +CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR=y +# CONFIG_PM_WAKEUP_BY_GPIO is not set +# CONFIG_PM_WAKEUP_GPIO_API is not set +CONFIG_PM_WAKEUP_SIC_IWR=0x80000000 + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# Networking +# +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_NETDEBUG is not set +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_INET6_XFRM_TUNNEL is not set +# CONFIG_INET6_TUNNEL is not set +# CONFIG_NETLABEL is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set + +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set + +# +# TIPC Configuration (EXPERIMENTAL) +# +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +CONFIG_IRDA=m + +# +# IrDA protocols +# +CONFIG_IRLAN=m +CONFIG_IRCOMM=m +# CONFIG_IRDA_ULTRA is not set + +# +# IrDA options +# +CONFIG_IRDA_CACHE_LAST_LSAP=y +# CONFIG_IRDA_FAST_RR is not set +# CONFIG_IRDA_DEBUG is not set + +# +# Infrared-port device drivers 
+# + +# +# SIR device drivers +# +CONFIG_IRTTY_SIR=m + +# +# Dongle support +# +# CONFIG_DONGLE is not set + +# +# Old SIR device drivers +# +# CONFIG_IRPORT_SIR is not set + +# +# Old Serial dongle support +# + +# +# FIR device drivers +# +# CONFIG_BT is not set +# CONFIG_IEEE80211 is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_FW_LOADER is not set +# CONFIG_SYS_HYPERVISOR is not set + +# +# Connector - unified userspace <-> kernelspace linker +# +# CONFIG_CONNECTOR is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=m +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_CFI_INTELEXT is not set +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_MW320D=m +CONFIG_MTD_RAM=y +CONFIG_MTD_ROM=m +# CONFIG_MTD_ABSENT is not set +# CONFIG_MTD_OBSOLETE_CHIPS is not set + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +# CONFIG_MTD_PHYSMAP is not set +CONFIG_MTD_BF5xx=m +CONFIG_BFIN_FLASH_SIZE=0x400000 +CONFIG_EBIU_FLASH_BASE=0x20000000 + +# +# FLASH_EBIU_AMBCTL Control +# +CONFIG_BFIN_FLASH_BANK_0=0x7BB0 +CONFIG_BFIN_FLASH_BANK_1=0x7BB0 +CONFIG_BFIN_FLASH_BANK_2=0x7BB0 +CONFIG_BFIN_FLASH_BANK_3=0x7BB0 +CONFIG_MTD_UCLINUX=y +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +CONFIG_MTD_NAND=m +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +# CONFIG_MTD_NAND_ECC_SMC is not set +CONFIG_MTD_NAND_BFIN=m +CONFIG_BFIN_NAND_BASE=0x20212000 +CONFIG_BFIN_NAND_CLE=2 +CONFIG_BFIN_NAND_ALE=1 +CONFIG_BFIN_NAND_READY=3 +CONFIG_MTD_NAND_IDS=m +# CONFIG_MTD_NAND_DISKONCHIP is not set +# CONFIG_MTD_NAND_NANDSIM is not set + +# +# OneNAND Flash Device Drivers +# +# CONFIG_MTD_ONENAND is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 +# CONFIG_BLK_DEV_INITRD is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set + +# +# Misc devices +# +# CONFIG_TIFM_CORE is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# 
CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_NETLINK is not set + +# +# Serial ATA (prod) and Parallel ATA (experimental) drivers +# +# CONFIG_ATA is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# + +# +# I2O device support +# + +# +# Network device support +# +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# PHY device support +# +# CONFIG_PHYLIB is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# CONFIG_SMC91X is not set +CONFIG_BFIN_MAC=y +CONFIG_BFIN_MAC_USE_L1=y +CONFIG_BFIN_TX_DESC_NUM=10 +CONFIG_BFIN_RX_DESC_NUM=20 +# CONFIG_BFIN_MAC_RMII is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_UINPUT is not set +# CONFIG_BF53X_PFBUTTONS is not set +CONFIG_TWI_KEYPAD=m +CONFIG_BFIN_TWIKEYPAD_IRQ_PFX=72 + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_AD9960 is not set +# CONFIG_SPI_ADC_BF533 is not set +# CONFIG_BF533_PFLAGS is not set +# CONFIG_BF5xx_PPIFCD is not set +# CONFIG_BF5xx_TIMERS is not set +# CONFIG_BF5xx_PPI is not set +CONFIG_BFIN_SPORT=y +# CONFIG_BFIN_TIMER_LATENCY is not set +CONFIG_TWI_LCD=m +CONFIG_TWI_LCD_SLAVE_ADDR=34 +# CONFIG_AD5304 is not set +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_BFIN=y +CONFIG_SERIAL_BFIN_CONSOLE=y +CONFIG_SERIAL_BFIN_DMA=y +# CONFIG_SERIAL_BFIN_PIO is not set +CONFIG_SERIAL_BFIN_UART0=y +# CONFIG_BFIN_UART0_CTSRTS is not set +# CONFIG_SERIAL_BFIN_UART1 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_BFIN_SPORT is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set + +# +# CAN, the car bus and industrial fieldbus +# +CONFIG_CAN4LINUX=y + +# +# linux embedded drivers +# +# CONFIG_CAN_MCF5282 is not set +# CONFIG_CAN_UNCTWINCAN is not set +CONFIG_CAN_BLACKFIN=m + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +CONFIG_HW_RANDOM=y +# CONFIG_GEN_RTC is not set +CONFIG_BLACKFIN_DPMC=y +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set + +# +# TPM devices +# +# CONFIG_TCG_TPM is not set + +# +# I2C support +# +CONFIG_I2C=m 
+CONFIG_I2C_CHARDEV=m + +# +# I2C Algorithms +# +# CONFIG_I2C_ALGOBIT is not set +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set + +# +# I2C Hardware Bus support +# +# CONFIG_I2C_BFIN_GPIO is not set +CONFIG_I2C_BFIN_TWI=m +CONFIG_TWICLK_KHZ=50 +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_PCA_ISA is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_SENSORS_DS1337 is not set +# CONFIG_SENSORS_DS1374 is not set +CONFIG_SENSORS_AD5252=m +# CONFIG_SENSORS_EEPROM is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_SENSORS_PCF8575 is not set +# CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_MAX6875 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set + +# +# SPI support +# +CONFIG_SPI=y +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_BITBANG is not set +CONFIG_SPI_BFIN=y + +# +# SPI Protocol Masters +# + +# +# Dallas's 1-wire bus +# +# CONFIG_W1 is not set + +# +# Hardware Monitoring support +# +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_FSCHER is not set +# CONFIG_SENSORS_FSCPOS is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +CONFIG_FIRMWARE_EDID=y +CONFIG_FB=m +CONFIG_FB_CFB_FILLRECT=m +CONFIG_FB_CFB_COPYAREA=m +CONFIG_FB_CFB_IMAGEBLIT=m +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set +CONFIG_FB_BFIN_7171=m +CONFIG_FB_BFIN_7393=m +CONFIG_NTSC=y +# CONFIG_PAL is not set +# CONFIG_NTSC_640x480 is not set +# CONFIG_PAL_640x480 is not set +# CONFIG_NTSC_YCBCR is not set +# CONFIG_PAL_YCBCR is not set +CONFIG_ADV7393_1XMEM=y +# CONFIG_ADV7393_2XMEM is not set +CONFIG_FB_BF537_LQ035=m +CONFIG_LQ035_SLAVE_ADDR=0x58 +# CONFIG_FB_BFIN_LANDSCAPE is not set +# CONFIG_FB_BFIN_BGR is not set +# CONFIG_FB_S1D13XXX 
is not set +# CONFIG_FB_VIRTUAL is not set + +# +# Logo configuration +# +# CONFIG_LOGO is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=m +CONFIG_BACKLIGHT_DEVICE=y +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_DEVICE=y + +# +# Sound +# +CONFIG_SOUND=m + +# +# Advanced Linux Sound Architecture +# +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +# CONFIG_SND_SEQUENCER is not set +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +# CONFIG_SND_DYNAMIC_MINORS is not set +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set + +# +# Generic devices +# +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set + +# +# Open Sound System +# +# CONFIG_SOUND_PRIME is not set + +# +# HID Devices +# +CONFIG_HID=y + +# +# USB support +# +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +# CONFIG_USB_ARCH_HAS_EHCI is not set +# CONFIG_USB is not set + +# +# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# MMC/SD Card support +# +# CONFIG_SPI_MMC is not set +# CONFIG_MMC is not set + +# +# LED devices +# +# CONFIG_NEW_LEDS is not set + +# +# LED drivers +# + +# +# LED Triggers +# + +# +# InfiniBand support +# + +# +# EDAC - error detection and reporting (RAS) (EXPERIMENTAL) +# + +# +# Real Time Clock +# +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set + +# +# RTC drivers +# +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_TEST is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_V3020 is not set +CONFIG_RTC_DRV_BFIN=y + +# +# DMA Engine support +# +# CONFIG_DMA_ENGINE is not set + +# +# DMA Clients +# + +# +# DMA Devices +# + +# +# Virtualization +# + +# +# PBX support +# +# CONFIG_PBX is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +# CONFIG_EXT2_FS_POSIX_ACL is not set +# CONFIG_EXT2_FS_SECURITY is not set +# CONFIG_EXT3_FS is not set +# CONFIG_EXT4DEV_FS is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +CONFIG_DNOTIFY=y +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_SYSFS=y +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y +# 
CONFIG_CONFIGFS_FS is not set + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=m +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set +CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS=10 +# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_SUMMARY is not set +# CONFIG_JFFS2_FS_XATTR is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=m +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +CONFIG_SMB_FS=m +# CONFIG_SMB_NLS_DEFAULT is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +CONFIG_NLS=m +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# Distributed Lock Manager +# +# CONFIG_DLM is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_MUST_CHECK=y +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_DEBUG_FS is not set +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_KERNEL is not set +CONFIG_LOG_BUF_SHIFT=14 
+# CONFIG_DEBUG_BUGVERBOSE is not set +CONFIG_DEBUG_HUNT_FOR_ZERO=y +# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set +# CONFIG_BOOTPARAM is not set +# CONFIG_NO_KERNEL_MSG is not set +CONFIG_CPLB_INFO=y +# CONFIG_NO_ACCESS_CHECK is not set + +# +# Security options +# +# CONFIG_KEYS is not set +CONFIG_SECURITY=y +# CONFIG_SECURITY_NETWORK is not set +CONFIG_SECURITY_CAPABILITIES=y + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_CRC_CCITT=m +# CONFIG_CRC16 is not set +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=m +CONFIG_PLIST=y +CONFIG_IOMAP_COPY=y diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile new file mode 100644 index 000000000000..f3b7d2f9d49c --- /dev/null +++ b/arch/blackfin/kernel/Makefile @@ -0,0 +1,14 @@ +# +# arch/blackfin/kernel/Makefile +# + +extra-y := init_task.o vmlinux.lds + +obj-y := \ + entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \ + sys_bfin.o time.o traps.o irqchip.o dma-mapping.o bfin_gpio.o \ + flat.o + +obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_BFIN_DMA_5XX) += bfin_dma_5xx.o +obj-$(CONFIG_DUAL_CORE_TEST_MODULE) += dualcore_test.o diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c new file mode 100644 index 000000000000..41d9a9f89700 --- /dev/null +++ b/arch/blackfin/kernel/asm-offsets.c @@ -0,0 +1,136 @@ +/* + * File: arch/blackfin/kernel/asm-offsets.c + * Based on: + * Author: + * + * Created: + * Description: generate definitions needed by assembly language modules. + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +int main(void) +{ + /* offsets into the task struct */ + DEFINE(TASK_STATE, offsetof(struct task_struct, state)); + DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); + DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); + DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); + DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); + DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info)); + DEFINE(TASK_MM, offsetof(struct task_struct, mm)); + DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending)); + + /* offsets into the irq_cpustat_t struct */ + DEFINE(CPUSTAT_SOFTIRQ_PENDING, + offsetof(irq_cpustat_t, __softirq_pending)); + + /* offsets into the thread struct */ + DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); + DEFINE(THREAD_USP, offsetof(struct thread_struct, usp)); + DEFINE(THREAD_SR, offsetof(struct thread_struct, seqstat)); + DEFINE(PT_SR, offsetof(struct thread_struct, seqstat)); + DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0)); + DEFINE(THREAD_PC, offsetof(struct thread_struct, pc)); + DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE); + + /* offsets into the pt_regs */ + DEFINE(PT_ORIG_P0, offsetof(struct pt_regs, orig_p0)); + DEFINE(PT_ORIG_PC, offsetof(struct pt_regs, orig_pc)); + DEFINE(PT_R0, offsetof(struct pt_regs, r0)); + DEFINE(PT_R1, offsetof(struct pt_regs, r1)); + DEFINE(PT_R2, offsetof(struct pt_regs, r2)); + DEFINE(PT_R3, offsetof(struct pt_regs, r3)); + DEFINE(PT_R4, offsetof(struct pt_regs, r4)); + DEFINE(PT_R5, offsetof(struct pt_regs, r5)); + DEFINE(PT_R6, offsetof(struct pt_regs, r6)); + DEFINE(PT_R7, offsetof(struct pt_regs, r7)); + + DEFINE(PT_P0, offsetof(struct pt_regs, p0)); + DEFINE(PT_P1, offsetof(struct pt_regs, p1)); + DEFINE(PT_P2, offsetof(struct pt_regs, p2)); + DEFINE(PT_P3, offsetof(struct pt_regs, p3)); + DEFINE(PT_P4, offsetof(struct pt_regs, p4)); + DEFINE(PT_P5, offsetof(struct pt_regs, p5)); + + DEFINE(PT_FP, offsetof(struct pt_regs, fp)); + DEFINE(PT_USP, offsetof(struct pt_regs, usp)); + DEFINE(PT_I0, offsetof(struct pt_regs, i0)); + DEFINE(PT_I1, offsetof(struct pt_regs, i1)); + DEFINE(PT_I2, offsetof(struct pt_regs, i2)); + DEFINE(PT_I3, offsetof(struct pt_regs, i3)); + DEFINE(PT_M0, offsetof(struct pt_regs, m0)); + DEFINE(PT_M1, offsetof(struct pt_regs, m1)); + DEFINE(PT_M2, offsetof(struct pt_regs, m2)); + DEFINE(PT_M3, offsetof(struct pt_regs, m3)); + DEFINE(PT_L0, offsetof(struct pt_regs, l0)); + DEFINE(PT_L1, offsetof(struct pt_regs, l1)); + DEFINE(PT_L2, offsetof(struct pt_regs, l2)); + DEFINE(PT_L3, offsetof(struct pt_regs, l3)); + DEFINE(PT_B0, offsetof(struct pt_regs, b0)); + DEFINE(PT_B1, offsetof(struct pt_regs, b1)); + DEFINE(PT_B2, offsetof(struct pt_regs, b2)); + DEFINE(PT_B3, offsetof(struct pt_regs, b3)); + DEFINE(PT_A0X, offsetof(struct pt_regs, a0x)); + DEFINE(PT_A0W, offsetof(struct pt_regs, a0w)); + DEFINE(PT_A1X, offsetof(struct pt_regs, a1x)); + DEFINE(PT_A1W, offsetof(struct pt_regs, a1w)); + DEFINE(PT_LC0, offsetof(struct pt_regs, lc0)); + DEFINE(PT_LC1, offsetof(struct pt_regs, lc1)); + 
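+	/*
+	 * Illustrative note (not part of the original file; the numeric
+	 * value shown is made up): each DEFINE() above emits a marker into
+	 * the generated assembly.  For example
+	 *
+	 *     DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+	 *
+	 * compiles, via the asm volatile statement, into a line such as
+	 *
+	 *     ->TASK_STATE 0 offsetof(struct task_struct, state)
+	 *
+	 * in asm-offsets.s, and the Kbuild rule rewrites every "->" marker
+	 * into a plain
+	 *
+	 *     #define TASK_STATE 0
+	 *
+	 * in the generated include/asm-blackfin/asm-offsets.h (the header
+	 * listed in CLEAN_FILES in the arch Makefile above), so the assembly
+	 * entry code can use C structure offsets without hand-kept numbers.
+	 */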
DEFINE(PT_LT0, offsetof(struct pt_regs, lt0)); + DEFINE(PT_LT1, offsetof(struct pt_regs, lt1)); + DEFINE(PT_LB0, offsetof(struct pt_regs, lb0)); + DEFINE(PT_LB1, offsetof(struct pt_regs, lb1)); + DEFINE(PT_ASTAT, offsetof(struct pt_regs, astat)); + DEFINE(PT_RESERVED, offsetof(struct pt_regs, reserved)); + DEFINE(PT_RETS, offsetof(struct pt_regs, rets)); + DEFINE(PT_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_RETX, offsetof(struct pt_regs, retx)); + DEFINE(PT_RETN, offsetof(struct pt_regs, retn)); + DEFINE(PT_RETE, offsetof(struct pt_regs, rete)); + DEFINE(PT_SEQSTAT, offsetof(struct pt_regs, seqstat)); + DEFINE(PT_SYSCFG, offsetof(struct pt_regs, syscfg)); + DEFINE(PT_IPEND, offsetof(struct pt_regs, ipend)); + DEFINE(SIZEOF_PTREGS, sizeof(struct pt_regs)); + DEFINE(PT_TEXT_ADDR, sizeof(struct pt_regs)); /* Needed by gdb */ + DEFINE(PT_TEXT_END_ADDR, 4 + sizeof(struct pt_regs));/* Needed by gdb */ + DEFINE(PT_DATA_ADDR, 8 + sizeof(struct pt_regs)); /* Needed by gdb */ + DEFINE(PT_FDPIC_EXEC, 12 + sizeof(struct pt_regs)); /* Needed by gdb */ + DEFINE(PT_FDPIC_INTERP, 16 + sizeof(struct pt_regs));/* Needed by gdb */ + + /* signal defines */ + DEFINE(SIGSEGV, SIGSEGV); + DEFINE(SIGTRAP, SIGTRAP); + + return 0; +} diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c new file mode 100644 index 000000000000..8ea079ebecb5 --- /dev/null +++ b/arch/blackfin/kernel/bfin_dma_5xx.c @@ -0,0 +1,742 @@ +/* + * File: arch/blackfin/kernel/bfin_dma_5xx.c + * Based on: + * Author: + * + * Created: + * Description: This file contains the simple DMA Implementation for Blackfin + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +/* Remove unused code not exported by symbol or internally called */ +#define REMOVE_DEAD_CODE + +/************************************************************************** + * Global Variables +***************************************************************************/ + +static struct dma_channel dma_ch[MAX_BLACKFIN_DMA_CHANNEL]; +#if defined (CONFIG_BF561) +static struct dma_register *base_addr[MAX_BLACKFIN_DMA_CHANNEL] = { + (struct dma_register *) DMA1_0_NEXT_DESC_PTR, + (struct dma_register *) DMA1_1_NEXT_DESC_PTR, + (struct dma_register *) DMA1_2_NEXT_DESC_PTR, + (struct dma_register *) DMA1_3_NEXT_DESC_PTR, + (struct dma_register *) DMA1_4_NEXT_DESC_PTR, + (struct dma_register *) DMA1_5_NEXT_DESC_PTR, + (struct dma_register *) DMA1_6_NEXT_DESC_PTR, + (struct dma_register *) DMA1_7_NEXT_DESC_PTR, + (struct dma_register *) DMA1_8_NEXT_DESC_PTR, + (struct dma_register *) DMA1_9_NEXT_DESC_PTR, + (struct dma_register *) DMA1_10_NEXT_DESC_PTR, + (struct dma_register *) DMA1_11_NEXT_DESC_PTR, + (struct dma_register *) DMA2_0_NEXT_DESC_PTR, + (struct dma_register *) DMA2_1_NEXT_DESC_PTR, + (struct dma_register *) DMA2_2_NEXT_DESC_PTR, + (struct dma_register *) DMA2_3_NEXT_DESC_PTR, + (struct dma_register *) DMA2_4_NEXT_DESC_PTR, + (struct dma_register *) DMA2_5_NEXT_DESC_PTR, + (struct dma_register *) DMA2_6_NEXT_DESC_PTR, + (struct dma_register *) DMA2_7_NEXT_DESC_PTR, + (struct dma_register *) DMA2_8_NEXT_DESC_PTR, + (struct dma_register *) DMA2_9_NEXT_DESC_PTR, + (struct dma_register *) DMA2_10_NEXT_DESC_PTR, + (struct dma_register *) DMA2_11_NEXT_DESC_PTR, + (struct dma_register *) MDMA1_D0_NEXT_DESC_PTR, + (struct dma_register *) MDMA1_S0_NEXT_DESC_PTR, + (struct dma_register *) MDMA1_D1_NEXT_DESC_PTR, + (struct dma_register *) MDMA1_S1_NEXT_DESC_PTR, + (struct dma_register *) MDMA2_D0_NEXT_DESC_PTR, + (struct dma_register *) MDMA2_S0_NEXT_DESC_PTR, + (struct dma_register *) MDMA2_D1_NEXT_DESC_PTR, + (struct dma_register *) MDMA2_S1_NEXT_DESC_PTR, + (struct dma_register *) IMDMA_D0_NEXT_DESC_PTR, + (struct dma_register *) IMDMA_S0_NEXT_DESC_PTR, + (struct dma_register *) IMDMA_D1_NEXT_DESC_PTR, + (struct dma_register *) IMDMA_S1_NEXT_DESC_PTR, +}; +#else +static struct dma_register *base_addr[MAX_BLACKFIN_DMA_CHANNEL] = { + (struct dma_register *) DMA0_NEXT_DESC_PTR, + (struct dma_register *) DMA1_NEXT_DESC_PTR, + (struct dma_register *) DMA2_NEXT_DESC_PTR, + (struct dma_register *) DMA3_NEXT_DESC_PTR, + (struct dma_register *) DMA4_NEXT_DESC_PTR, + (struct dma_register *) DMA5_NEXT_DESC_PTR, + (struct dma_register *) DMA6_NEXT_DESC_PTR, + (struct dma_register *) DMA7_NEXT_DESC_PTR, +#if (defined(CONFIG_BF537) || defined(CONFIG_BF534) || defined(CONFIG_BF536)) + (struct dma_register *) DMA8_NEXT_DESC_PTR, + (struct dma_register *) DMA9_NEXT_DESC_PTR, + (struct dma_register *) DMA10_NEXT_DESC_PTR, + (struct dma_register *) DMA11_NEXT_DESC_PTR, +#endif + (struct dma_register *) MDMA_D0_NEXT_DESC_PTR, + (struct dma_register *) MDMA_S0_NEXT_DESC_PTR, + (struct dma_register *) MDMA_D1_NEXT_DESC_PTR, + (struct dma_register *) MDMA_S1_NEXT_DESC_PTR, +}; +#endif + +/*------------------------------------------------------------------------------ + * Set the 
Buffer Clear bit in the Configuration register of specific DMA + * channel. This will stop the descriptor based DMA operation. + *-----------------------------------------------------------------------------*/ +static void clear_dma_buffer(unsigned int channel) +{ + dma_ch[channel].regs->cfg |= RESTART; + SSYNC(); + dma_ch[channel].regs->cfg &= ~RESTART; + SSYNC(); +} + +int __init blackfin_dma_init(void) +{ + int i; + + printk(KERN_INFO "Blackfin DMA Controller\n"); + + for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) { + dma_ch[i].chan_status = DMA_CHANNEL_FREE; + dma_ch[i].regs = base_addr[i]; + mutex_init(&(dma_ch[i].dmalock)); + } + + return 0; +} + +arch_initcall(blackfin_dma_init); + +/* + * Form the channel find the irq number for that channel. + */ +#if !defined(CONFIG_BF561) + +static int bf533_channel2irq(unsigned int channel) +{ + int ret_irq = -1; + + switch (channel) { + case CH_PPI: + ret_irq = IRQ_PPI; + break; + +#if (defined(CONFIG_BF537) || defined(CONFIG_BF534) || defined(CONFIG_BF536)) + case CH_EMAC_RX: + ret_irq = IRQ_MAC_RX; + break; + + case CH_EMAC_TX: + ret_irq = IRQ_MAC_TX; + break; + + case CH_UART1_RX: + ret_irq = IRQ_UART1_RX; + break; + + case CH_UART1_TX: + ret_irq = IRQ_UART1_TX; + break; +#endif + + case CH_SPORT0_RX: + ret_irq = IRQ_SPORT0_RX; + break; + + case CH_SPORT0_TX: + ret_irq = IRQ_SPORT0_TX; + break; + + case CH_SPORT1_RX: + ret_irq = IRQ_SPORT1_RX; + break; + + case CH_SPORT1_TX: + ret_irq = IRQ_SPORT1_TX; + break; + + case CH_SPI: + ret_irq = IRQ_SPI; + break; + + case CH_UART_RX: + ret_irq = IRQ_UART_RX; + break; + + case CH_UART_TX: + ret_irq = IRQ_UART_TX; + break; + + case CH_MEM_STREAM0_SRC: + case CH_MEM_STREAM0_DEST: + ret_irq = IRQ_MEM_DMA0; + break; + + case CH_MEM_STREAM1_SRC: + case CH_MEM_STREAM1_DEST: + ret_irq = IRQ_MEM_DMA1; + break; + } + return ret_irq; +} + +# define channel2irq(channel) bf533_channel2irq(channel) + +#else + +static int bf561_channel2irq(unsigned int channel) +{ + int ret_irq = -1; + + switch (channel) { + case CH_PPI0: + ret_irq = IRQ_PPI0; + break; + case CH_PPI1: + ret_irq = IRQ_PPI1; + break; + case CH_SPORT0_RX: + ret_irq = IRQ_SPORT0_RX; + break; + case CH_SPORT0_TX: + ret_irq = IRQ_SPORT0_TX; + break; + case CH_SPORT1_RX: + ret_irq = IRQ_SPORT1_RX; + break; + case CH_SPORT1_TX: + ret_irq = IRQ_SPORT1_TX; + break; + case CH_SPI: + ret_irq = IRQ_SPI; + break; + case CH_UART_RX: + ret_irq = IRQ_UART_RX; + break; + case CH_UART_TX: + ret_irq = IRQ_UART_TX; + break; + + case CH_MEM_STREAM0_SRC: + case CH_MEM_STREAM0_DEST: + ret_irq = IRQ_MEM_DMA0; + break; + case CH_MEM_STREAM1_SRC: + case CH_MEM_STREAM1_DEST: + ret_irq = IRQ_MEM_DMA1; + break; + case CH_MEM_STREAM2_SRC: + case CH_MEM_STREAM2_DEST: + ret_irq = IRQ_MEM_DMA2; + break; + case CH_MEM_STREAM3_SRC: + case CH_MEM_STREAM3_DEST: + ret_irq = IRQ_MEM_DMA3; + break; + + case CH_IMEM_STREAM0_SRC: + case CH_IMEM_STREAM0_DEST: + ret_irq = IRQ_IMEM_DMA0; + break; + case CH_IMEM_STREAM1_SRC: + case CH_IMEM_STREAM1_DEST: + ret_irq = IRQ_IMEM_DMA1; + break; + } + return ret_irq; +} + +# define channel2irq(channel) bf561_channel2irq(channel) + +#endif + +/*------------------------------------------------------------------------------ + * Request the specific DMA channel from the system. 
+ *-----------------------------------------------------------------------------*/ +int request_dma(unsigned int channel, char *device_id) +{ + + pr_debug("request_dma() : BEGIN \n"); + mutex_lock(&(dma_ch[channel].dmalock)); + + if ((dma_ch[channel].chan_status == DMA_CHANNEL_REQUESTED) + || (dma_ch[channel].chan_status == DMA_CHANNEL_ENABLED)) { + mutex_unlock(&(dma_ch[channel].dmalock)); + pr_debug("DMA CHANNEL IN USE \n"); + return -EBUSY; + } else { + dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED; + pr_debug("DMA CHANNEL IS ALLOCATED \n"); + } + + mutex_unlock(&(dma_ch[channel].dmalock)); + + dma_ch[channel].device_id = device_id; + dma_ch[channel].irq_callback = NULL; + + /* This is to be enabled by putting a restriction - + * you have to request DMA, before doing any operations on + * descriptor/channel + */ + pr_debug("request_dma() : END \n"); + return channel; +} +EXPORT_SYMBOL(request_dma); + +int set_dma_callback(unsigned int channel, dma_interrupt_t callback, void *data) +{ + int ret_irq = 0; + + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + if (callback != NULL) { + int ret_val; + ret_irq = channel2irq(channel); + + dma_ch[channel].data = data; + + ret_val = + request_irq(ret_irq, (void *)callback, IRQF_DISABLED, + dma_ch[channel].device_id, data); + if (ret_val) { + printk(KERN_NOTICE + "Request irq in DMA engine failed.\n"); + return -EPERM; + } + dma_ch[channel].irq_callback = callback; + } + return 0; +} +EXPORT_SYMBOL(set_dma_callback); + +void free_dma(unsigned int channel) +{ + int ret_irq; + + pr_debug("freedma() : BEGIN \n"); + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + /* Halt the DMA */ + disable_dma(channel); + clear_dma_buffer(channel); + + if (dma_ch[channel].irq_callback != NULL) { + ret_irq = channel2irq(channel); + free_irq(ret_irq, dma_ch[channel].data); + } + + /* Clear the DMA Variable in the Channel */ + mutex_lock(&(dma_ch[channel].dmalock)); + dma_ch[channel].chan_status = DMA_CHANNEL_FREE; + mutex_unlock(&(dma_ch[channel].dmalock)); + + pr_debug("freedma() : END \n"); +} +EXPORT_SYMBOL(free_dma); + +void dma_enable_irq(unsigned int channel) +{ + int ret_irq; + + pr_debug("dma_enable_irq() : BEGIN \n"); + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + ret_irq = channel2irq(channel); + enable_irq(ret_irq); +} +EXPORT_SYMBOL(dma_enable_irq); + +void dma_disable_irq(unsigned int channel) +{ + int ret_irq; + + pr_debug("dma_disable_irq() : BEGIN \n"); + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + ret_irq = channel2irq(channel); + disable_irq(ret_irq); +} +EXPORT_SYMBOL(dma_disable_irq); + +int dma_channel_active(unsigned int channel) +{ + if (dma_ch[channel].chan_status == DMA_CHANNEL_FREE) { + return 0; + } else { + return 1; + } +} +EXPORT_SYMBOL(dma_channel_active); + +/*------------------------------------------------------------------------------ +* stop the specific DMA channel. 
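+/*
+ * Illustrative usage sketch, not part of this file: the intended life
+ * cycle of a channel using the helpers defined above.  The channel
+ * (CH_SPORT0_RX), device name and callback signature are assumptions
+ * made for the example; linux/interrupt.h types are assumed.
+ */
+static irqreturn_t example_dma_done(int irq, void *dev_id)
+{
+	clear_dma_irqstat(CH_SPORT0_RX);	/* ack DMA_DONE before returning */
+	return IRQ_HANDLED;
+}
+
+static int example_open(void)
+{
+	if (request_dma(CH_SPORT0_RX, "example") < 0)
+		return -EBUSY;			/* channel already taken */
+
+	set_dma_callback(CH_SPORT0_RX, example_dma_done, NULL);
+	return 0;
+}
+
+static void example_close(void)
+{
+	free_dma(CH_SPORT0_RX);		/* halts the channel and frees the irq */
+}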
+*-----------------------------------------------------------------------------*/ +void disable_dma(unsigned int channel) +{ + pr_debug("stop_dma() : BEGIN \n"); + + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + dma_ch[channel].regs->cfg &= ~DMAEN; /* Clean the enable bit */ + SSYNC(); + dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED; + /* Needs to be enabled Later */ + pr_debug("stop_dma() : END \n"); + return; +} +EXPORT_SYMBOL(disable_dma); + +void enable_dma(unsigned int channel) +{ + pr_debug("enable_dma() : BEGIN \n"); + + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + dma_ch[channel].chan_status = DMA_CHANNEL_ENABLED; + dma_ch[channel].regs->curr_x_count = 0; + dma_ch[channel].regs->curr_y_count = 0; + + dma_ch[channel].regs->cfg |= DMAEN; /* Set the enable bit */ + SSYNC(); + pr_debug("enable_dma() : END \n"); + return; +} +EXPORT_SYMBOL(enable_dma); + +/*------------------------------------------------------------------------------ +* Set the Start Address register for the specific DMA channel +* This function can be used for register based DMA, +* to setup the start address +* addr: Starting address of the DMA Data to be transferred. +*-----------------------------------------------------------------------------*/ +void set_dma_start_addr(unsigned int channel, unsigned long addr) +{ + pr_debug("set_dma_start_addr() : BEGIN \n"); + + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + dma_ch[channel].regs->start_addr = addr; + SSYNC(); + pr_debug("set_dma_start_addr() : END\n"); +} +EXPORT_SYMBOL(set_dma_start_addr); + +void set_dma_next_desc_addr(unsigned int channel, unsigned long addr) +{ + pr_debug("set_dma_next_desc_addr() : BEGIN \n"); + + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + dma_ch[channel].regs->next_desc_ptr = addr; + SSYNC(); + pr_debug("set_dma_start_addr() : END\n"); +} +EXPORT_SYMBOL(set_dma_next_desc_addr); + +void set_dma_x_count(unsigned int channel, unsigned short x_count) +{ + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + dma_ch[channel].regs->x_count = x_count; + SSYNC(); +} +EXPORT_SYMBOL(set_dma_x_count); + +void set_dma_y_count(unsigned int channel, unsigned short y_count) +{ + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + dma_ch[channel].regs->y_count = y_count; + SSYNC(); +} +EXPORT_SYMBOL(set_dma_y_count); + +void set_dma_x_modify(unsigned int channel, short x_modify) +{ + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + dma_ch[channel].regs->x_modify = x_modify; + SSYNC(); +} +EXPORT_SYMBOL(set_dma_x_modify); + +void set_dma_y_modify(unsigned int channel, short y_modify) +{ + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + dma_ch[channel].regs->y_modify = y_modify; + SSYNC(); +} +EXPORT_SYMBOL(set_dma_y_modify); + +void set_dma_config(unsigned int channel, unsigned short config) +{ + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + dma_ch[channel].regs->cfg = config; + SSYNC(); +} +EXPORT_SYMBOL(set_dma_config); + +unsigned short +set_bfin_dma_config(char direction, char flow_mode, + char intr_mode, char dma_mode, char width) +{ + unsigned 
short config; + + config = + ((direction << 1) | (width << 2) | (dma_mode << 4) | + (intr_mode << 6) | (flow_mode << 12) | RESTART); + return config; +} +EXPORT_SYMBOL(set_bfin_dma_config); + +void set_dma_sg(unsigned int channel, struct dmasg * sg, int nr_sg) +{ + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + dma_ch[channel].regs->cfg |= ((nr_sg & 0x0F) << 8); + + dma_ch[channel].regs->next_desc_ptr = (unsigned int)sg; + + SSYNC(); +} +EXPORT_SYMBOL(set_dma_sg); + +/*------------------------------------------------------------------------------ + * Get the DMA status of a specific DMA channel from the system. + *-----------------------------------------------------------------------------*/ +unsigned short get_dma_curr_irqstat(unsigned int channel) +{ + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + return dma_ch[channel].regs->irq_status; +} +EXPORT_SYMBOL(get_dma_curr_irqstat); + +/*------------------------------------------------------------------------------ + * Clear the DMA_DONE bit in DMA status. Stop the DMA completion interrupt. + *-----------------------------------------------------------------------------*/ +void clear_dma_irqstat(unsigned int channel) +{ + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + dma_ch[channel].regs->irq_status |= 3; +} +EXPORT_SYMBOL(clear_dma_irqstat); + +/*------------------------------------------------------------------------------ + * Get current DMA xcount of a specific DMA channel from the system. + *-----------------------------------------------------------------------------*/ +unsigned short get_dma_curr_xcount(unsigned int channel) +{ + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + return dma_ch[channel].regs->curr_x_count; +} +EXPORT_SYMBOL(get_dma_curr_xcount); + +/*------------------------------------------------------------------------------ + * Get current DMA ycount of a specific DMA channel from the system. 
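+ * (Returns the curr_y_count register; the Y dimension is only used by 2D (DMA2D) transfers.)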
+ *-----------------------------------------------------------------------------*/ +unsigned short get_dma_curr_ycount(unsigned int channel) +{ + BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE + && channel < MAX_BLACKFIN_DMA_CHANNEL)); + + return dma_ch[channel].regs->curr_y_count; +} +EXPORT_SYMBOL(get_dma_curr_ycount); + +void *dma_memcpy(void *dest, const void *src, size_t size) +{ + int direction; /* 1 - address decrease, 0 - address increase */ + int flag_align; /* 1 - address aligned, 0 - address unaligned */ + int flag_2D; /* 1 - 2D DMA needed, 0 - 1D DMA needed */ + + if (size <= 0) + return NULL; + + if ((unsigned long)src < memory_end) + blackfin_dcache_flush_range((unsigned int)src, + (unsigned int)(src + size)); + + bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); + + if ((unsigned long)src < (unsigned long)dest) + direction = 1; + else + direction = 0; + + if ((((unsigned long)dest % 2) == 0) && (((unsigned long)src % 2) == 0) + && ((size % 2) == 0)) + flag_align = 1; + else + flag_align = 0; + + if (size > 0x10000) /* size > 64K */ + flag_2D = 1; + else + flag_2D = 0; + + /* Setup destination and source start address */ + if (direction) { + if (flag_align) { + bfin_write_MDMA_D0_START_ADDR(dest + size - 2); + bfin_write_MDMA_S0_START_ADDR(src + size - 2); + } else { + bfin_write_MDMA_D0_START_ADDR(dest + size - 1); + bfin_write_MDMA_S0_START_ADDR(src + size - 1); + } + } else { + bfin_write_MDMA_D0_START_ADDR(dest); + bfin_write_MDMA_S0_START_ADDR(src); + } + + /* Setup destination and source xcount */ + if (flag_2D) { + if (flag_align) { + bfin_write_MDMA_D0_X_COUNT(1024 / 2); + bfin_write_MDMA_S0_X_COUNT(1024 / 2); + } else { + bfin_write_MDMA_D0_X_COUNT(1024); + bfin_write_MDMA_S0_X_COUNT(1024); + } + bfin_write_MDMA_D0_Y_COUNT(size >> 10); + bfin_write_MDMA_S0_Y_COUNT(size >> 10); + } else { + if (flag_align) { + bfin_write_MDMA_D0_X_COUNT(size / 2); + bfin_write_MDMA_S0_X_COUNT(size / 2); + } else { + bfin_write_MDMA_D0_X_COUNT(size); + bfin_write_MDMA_S0_X_COUNT(size); + } + } + + /* Setup destination and source xmodify and ymodify */ + if (direction) { + if (flag_align) { + bfin_write_MDMA_D0_X_MODIFY(-2); + bfin_write_MDMA_S0_X_MODIFY(-2); + if (flag_2D) { + bfin_write_MDMA_D0_Y_MODIFY(-2); + bfin_write_MDMA_S0_Y_MODIFY(-2); + } + } else { + bfin_write_MDMA_D0_X_MODIFY(-1); + bfin_write_MDMA_S0_X_MODIFY(-1); + if (flag_2D) { + bfin_write_MDMA_D0_Y_MODIFY(-1); + bfin_write_MDMA_S0_Y_MODIFY(-1); + } + } + } else { + if (flag_align) { + bfin_write_MDMA_D0_X_MODIFY(2); + bfin_write_MDMA_S0_X_MODIFY(2); + if (flag_2D) { + bfin_write_MDMA_D0_Y_MODIFY(2); + bfin_write_MDMA_S0_Y_MODIFY(2); + } + } else { + bfin_write_MDMA_D0_X_MODIFY(1); + bfin_write_MDMA_S0_X_MODIFY(1); + if (flag_2D) { + bfin_write_MDMA_D0_Y_MODIFY(1); + bfin_write_MDMA_S0_Y_MODIFY(1); + } + } + } + + /* Enable source DMA */ + if (flag_2D) { + if (flag_align) { + bfin_write_MDMA_S0_CONFIG(DMAEN | DMA2D | WDSIZE_16); + bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | DMA2D | WDSIZE_16); + } else { + bfin_write_MDMA_S0_CONFIG(DMAEN | DMA2D); + bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | DMA2D); + } + } else { + if (flag_align) { + bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16); + bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16); + } else { + bfin_write_MDMA_S0_CONFIG(DMAEN); + bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN); + } + } + + while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) + ; + + bfin_write_MDMA_D0_IRQ_STATUS(bfin_read_MDMA_D0_IRQ_STATUS() | + (DMA_DONE | DMA_ERR)); + + 
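+	/* The DMA_DONE poll above has completed: shut both MDMA channels off before returning */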
bfin_write_MDMA_S0_CONFIG(0); + bfin_write_MDMA_D0_CONFIG(0); + + if ((unsigned long)dest < memory_end) + blackfin_dcache_invalidate_range((unsigned int)dest, + (unsigned int)(dest + size)); + + return dest; +} +EXPORT_SYMBOL(dma_memcpy); + +void *safe_dma_memcpy(void *dest, const void *src, size_t size) +{ + int flags = 0; + void *addr; + local_irq_save(flags); + addr = dma_memcpy(dest, src, size); + local_irq_restore(flags); + return addr; +} +EXPORT_SYMBOL(safe_dma_memcpy); diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c new file mode 100644 index 000000000000..e9f24a9a46ba --- /dev/null +++ b/arch/blackfin/kernel/bfin_gpio.c @@ -0,0 +1,637 @@ +/* + * File: arch/blackfin/kernel/bfin_gpio.c + * Based on: + * Author: Michael Hennerich (hennerich@blackfin.uclinux.org) + * + * Created: + * Description: GPIO Abstraction Layer + * + * Modified: + * Copyright 2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* +* Number BF537/6/4 BF561 BF533/2/1 +* +* GPIO_0 PF0 PF0 PF0 +* GPIO_1 PF1 PF1 PF1 +* GPIO_2 PF2 PF2 PF2 +* GPIO_3 PF3 PF3 PF3 +* GPIO_4 PF4 PF4 PF4 +* GPIO_5 PF5 PF5 PF5 +* GPIO_6 PF6 PF6 PF6 +* GPIO_7 PF7 PF7 PF7 +* GPIO_8 PF8 PF8 PF8 +* GPIO_9 PF9 PF9 PF9 +* GPIO_10 PF10 PF10 PF10 +* GPIO_11 PF11 PF11 PF11 +* GPIO_12 PF12 PF12 PF12 +* GPIO_13 PF13 PF13 PF13 +* GPIO_14 PF14 PF14 PF14 +* GPIO_15 PF15 PF15 PF15 +* GPIO_16 PG0 PF16 +* GPIO_17 PG1 PF17 +* GPIO_18 PG2 PF18 +* GPIO_19 PG3 PF19 +* GPIO_20 PG4 PF20 +* GPIO_21 PG5 PF21 +* GPIO_22 PG6 PF22 +* GPIO_23 PG7 PF23 +* GPIO_24 PG8 PF24 +* GPIO_25 PG9 PF25 +* GPIO_26 PG10 PF26 +* GPIO_27 PG11 PF27 +* GPIO_28 PG12 PF28 +* GPIO_29 PG13 PF29 +* GPIO_30 PG14 PF30 +* GPIO_31 PG15 PF31 +* GPIO_32 PH0 PF32 +* GPIO_33 PH1 PF33 +* GPIO_34 PH2 PF34 +* GPIO_35 PH3 PF35 +* GPIO_36 PH4 PF36 +* GPIO_37 PH5 PF37 +* GPIO_38 PH6 PF38 +* GPIO_39 PH7 PF39 +* GPIO_40 PH8 PF40 +* GPIO_41 PH9 PF41 +* GPIO_42 PH10 PF42 +* GPIO_43 PH11 PF43 +* GPIO_44 PH12 PF44 +* GPIO_45 PH13 PF45 +* GPIO_46 PH14 PF46 +* GPIO_47 PH15 PF47 +*/ + +#include +#include +#include +#include +#include + +#ifdef BF533_FAMILY +static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = { + (struct gpio_port_t *) FIO_FLAG_D, +}; +#endif + +#ifdef BF537_FAMILY +static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = { + (struct gpio_port_t *) PORTFIO, + (struct gpio_port_t *) PORTGIO, + (struct gpio_port_t *) PORTHIO, +}; + +static unsigned short *port_fer[gpio_bank(MAX_BLACKFIN_GPIOS)] = { + (unsigned short *) PORTF_FER, + (unsigned short *) PORTG_FER, + (unsigned short *) PORTH_FER, +}; + +#endif + +#ifdef BF561_FAMILY +static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = { + (struct gpio_port_t *) FIO0_FLAG_D, + (struct 
gpio_port_t *) FIO1_FLAG_D, + (struct gpio_port_t *) FIO2_FLAG_D, +}; +#endif + +static unsigned short reserved_map[gpio_bank(MAX_BLACKFIN_GPIOS)]; + +#ifdef CONFIG_PM +static unsigned short wakeup_map[gpio_bank(MAX_BLACKFIN_GPIOS)]; +static unsigned char wakeup_flags_map[MAX_BLACKFIN_GPIOS]; +static struct gpio_port_s gpio_bank_saved[gpio_bank(MAX_BLACKFIN_GPIOS)]; + +#ifdef BF533_FAMILY +static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PROG_INTB}; +#endif + +#ifdef BF537_FAMILY +static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PROG_INTB, IRQ_PORTG_INTB, IRQ_MAC_TX}; +#endif + +#ifdef BF561_FAMILY +static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PROG0_INTB, IRQ_PROG1_INTB, IRQ_PROG2_INTB}; +#endif + +#endif /* CONFIG_PM */ + +inline int check_gpio(unsigned short gpio) +{ + if (gpio > MAX_BLACKFIN_GPIOS) + return -EINVAL; + return 0; +} + +#ifdef BF537_FAMILY +void port_setup(unsigned short gpio, unsigned short usage) +{ + if (usage == GPIO_USAGE) { + if (*port_fer[gpio_bank(gpio)] & gpio_bit(gpio)) + printk(KERN_WARNING "bfin-gpio: Possible Conflict with Peripheral " + "usage and GPIO %d detected!\n", gpio); + *port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio); + } else + *port_fer[gpio_bank(gpio)] |= gpio_bit(gpio); + SSYNC(); +} +#else +# define port_setup(...) do { } while (0) +#endif + + +void default_gpio(unsigned short gpio) +{ + unsigned short bank,bitmask; + + bank = gpio_bank(gpio); + bitmask = gpio_bit(gpio); + + gpio_bankb[bank]->maska_clear = bitmask; + gpio_bankb[bank]->maskb_clear = bitmask; + SSYNC(); + gpio_bankb[bank]->inen &= ~bitmask; + gpio_bankb[bank]->dir &= ~bitmask; + gpio_bankb[bank]->polar &= ~bitmask; + gpio_bankb[bank]->both &= ~bitmask; + gpio_bankb[bank]->edge &= ~bitmask; +} + + +int __init bfin_gpio_init(void) +{ + int i; + + printk(KERN_INFO "Blackfin GPIO Controller\n"); + + for (i = 0; i < MAX_BLACKFIN_GPIOS; i+=GPIO_BANKSIZE) + reserved_map[gpio_bank(i)] = 0; + +#if defined(BF537_FAMILY) && (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) +# if defined(CONFIG_BFIN_MAC_RMII) + reserved_map[PORT_H] = 0xC373; +# else + reserved_map[PORT_H] = 0xFFFF; +# endif +#endif + + return 0; +} + +arch_initcall(bfin_gpio_init); + + +/*********************************************************** +* +* FUNCTIONS: Blackfin General Purpose Ports Access Functions +* +* INPUTS/OUTPUTS: +* gpio - GPIO Number between 0 and MAX_BLACKFIN_GPIOS +* +* +* DESCRIPTION: These functions abstract direct register access +* to Blackfin processor General Purpose +* Ports Regsiters +* +* CAUTION: These functions do not belong to the GPIO Driver API +************************************************************* +* MODIFICATION HISTORY : +**************************************************************/ + +/* Set a specific bit */ + +#define SET_GPIO(name) \ +void set_gpio_ ## name(unsigned short gpio, unsigned short arg) \ +{ \ + unsigned long flags; \ + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); \ + local_irq_save(flags); \ + if (arg) \ + gpio_bankb[gpio_bank(gpio)]->name |= gpio_bit(gpio); \ + else \ + gpio_bankb[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \ + local_irq_restore(flags); \ +} \ +EXPORT_SYMBOL(set_gpio_ ## name); + +SET_GPIO(dir) +SET_GPIO(inen) +SET_GPIO(polar) +SET_GPIO(edge) +SET_GPIO(both) + + +#define SET_GPIO_SC(name) \ +void set_gpio_ ## name(unsigned short gpio, unsigned short arg) \ +{ \ + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); \ + if (arg) \ + 
gpio_bankb[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \ + else \ + gpio_bankb[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \ +} \ +EXPORT_SYMBOL(set_gpio_ ## name); + +SET_GPIO_SC(maska) +SET_GPIO_SC(maskb) + +#if defined(ANOMALY_05000311) +void set_gpio_data(unsigned short gpio, unsigned short arg) +{ + unsigned long flags; + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); + local_irq_save(flags); + if (arg) + gpio_bankb[gpio_bank(gpio)]->data_set = gpio_bit(gpio); + else + gpio_bankb[gpio_bank(gpio)]->data_clear = gpio_bit(gpio); + bfin_read_CHIPID(); + local_irq_restore(flags); +} +EXPORT_SYMBOL(set_gpio_data); +#else +SET_GPIO_SC(data) +#endif + + +#if defined(ANOMALY_05000311) +void set_gpio_toggle(unsigned short gpio) +{ + unsigned long flags; + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); + local_irq_save(flags); + gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio); + bfin_read_CHIPID(); + local_irq_restore(flags); +} +#else +void set_gpio_toggle(unsigned short gpio) +{ + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); + gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio); +} +#endif +EXPORT_SYMBOL(set_gpio_toggle); + + +/*Set current PORT date (16-bit word)*/ + +#define SET_GPIO_P(name) \ +void set_gpiop_ ## name(unsigned short gpio, unsigned short arg) \ +{ \ + gpio_bankb[gpio_bank(gpio)]->name = arg; \ +} \ +EXPORT_SYMBOL(set_gpiop_ ## name); + +SET_GPIO_P(dir) +SET_GPIO_P(inen) +SET_GPIO_P(polar) +SET_GPIO_P(edge) +SET_GPIO_P(both) +SET_GPIO_P(maska) +SET_GPIO_P(maskb) + + +#if defined(ANOMALY_05000311) +void set_gpiop_data(unsigned short gpio, unsigned short arg) +{ + unsigned long flags; + local_irq_save(flags); + gpio_bankb[gpio_bank(gpio)]->data = arg; + bfin_read_CHIPID(); + local_irq_restore(flags); +} +EXPORT_SYMBOL(set_gpiop_data); +#else +SET_GPIO_P(data) +#endif + + + +/* Get a specific bit */ + +#define GET_GPIO(name) \ +unsigned short get_gpio_ ## name(unsigned short gpio) \ +{ \ + return (0x01 & (gpio_bankb[gpio_bank(gpio)]->name >> gpio_sub_n(gpio))); \ +} \ +EXPORT_SYMBOL(get_gpio_ ## name); + +GET_GPIO(dir) +GET_GPIO(inen) +GET_GPIO(polar) +GET_GPIO(edge) +GET_GPIO(both) +GET_GPIO(maska) +GET_GPIO(maskb) + + +#if defined(ANOMALY_05000311) +unsigned short get_gpio_data(unsigned short gpio) +{ + unsigned long flags; + unsigned short ret; + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); + local_irq_save(flags); + ret = 0x01 & (gpio_bankb[gpio_bank(gpio)]->data >> gpio_sub_n(gpio)); + bfin_read_CHIPID(); + local_irq_restore(flags); + return ret; +} +EXPORT_SYMBOL(get_gpio_data); +#else +GET_GPIO(data) +#endif + +/*Get current PORT date (16-bit word)*/ + +#define GET_GPIO_P(name) \ +unsigned short get_gpiop_ ## name(unsigned short gpio) \ +{ \ + return (gpio_bankb[gpio_bank(gpio)]->name);\ +} \ +EXPORT_SYMBOL(get_gpiop_ ## name); + +GET_GPIO_P(dir) +GET_GPIO_P(inen) +GET_GPIO_P(polar) +GET_GPIO_P(edge) +GET_GPIO_P(both) +GET_GPIO_P(maska) +GET_GPIO_P(maskb) + +#if defined(ANOMALY_05000311) +unsigned short get_gpiop_data(unsigned short gpio) +{ + unsigned long flags; + unsigned short ret; + local_irq_save(flags); + ret = gpio_bankb[gpio_bank(gpio)]->data; + bfin_read_CHIPID(); + local_irq_restore(flags); + return ret; +} +EXPORT_SYMBOL(get_gpiop_data); +#else +GET_GPIO_P(data) +#endif + +#ifdef CONFIG_PM +/*********************************************************** +* +* FUNCTIONS: Blackfin PM Setup API +* +* INPUTS/OUTPUTS: +* gpio - GPIO Number between 0 and MAX_BLACKFIN_GPIOS +* type - +* PM_WAKE_RISING 
+* PM_WAKE_FALLING +* PM_WAKE_HIGH +* PM_WAKE_LOW +* PM_WAKE_BOTH_EDGES +* +* DESCRIPTION: Blackfin PM Driver API +* +* CAUTION: +************************************************************* +* MODIFICATION HISTORY : +**************************************************************/ +int gpio_pm_wakeup_request(unsigned short gpio, unsigned char type) +{ + unsigned long flags; + + if ((check_gpio(gpio) < 0) || !type) + return -EINVAL; + + local_irq_save(flags); + + wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio); + wakeup_flags_map[gpio] = type; + local_irq_restore(flags); + + return 0; +} +EXPORT_SYMBOL(gpio_pm_wakeup_request); + +void gpio_pm_wakeup_free(unsigned short gpio) +{ + unsigned long flags; + + if (check_gpio(gpio) < 0) + return; + + local_irq_save(flags); + + wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); + + local_irq_restore(flags); +} +EXPORT_SYMBOL(gpio_pm_wakeup_free); + +static int bfin_gpio_wakeup_type(unsigned short gpio, unsigned char type) +{ + port_setup(gpio, GPIO_USAGE); + set_gpio_dir(gpio, 0); + set_gpio_inen(gpio, 1); + + if (type & (PM_WAKE_RISING | PM_WAKE_FALLING)) + set_gpio_edge(gpio, 1); + else + set_gpio_edge(gpio, 0); + + if ((type & (PM_WAKE_BOTH_EDGES)) == (PM_WAKE_BOTH_EDGES)) + set_gpio_both(gpio, 1); + else + set_gpio_both(gpio, 0); + + if ((type & (PM_WAKE_FALLING | PM_WAKE_LOW))) + set_gpio_polar(gpio, 1); + else + set_gpio_polar(gpio, 0); + + SSYNC(); + + return 0; +} + +u32 gpio_pm_setup(void) +{ + u32 sic_iwr = 0; + u16 bank, mask, i, gpio; + + for (i = 0; i < MAX_BLACKFIN_GPIOS; i+=GPIO_BANKSIZE) { + mask = wakeup_map[gpio_bank(i)]; + bank = gpio_bank(i); + + gpio_bank_saved[bank].maskb = gpio_bankb[bank]->maskb; + gpio_bankb[bank]->maskb = 0; + + if (mask) { +#ifdef BF537_FAMILY + gpio_bank_saved[bank].fer = *port_fer[bank]; +#endif + gpio_bank_saved[bank].inen = gpio_bankb[bank]->inen; + gpio_bank_saved[bank].polar = gpio_bankb[bank]->polar; + gpio_bank_saved[bank].dir = gpio_bankb[bank]->dir; + gpio_bank_saved[bank].edge = gpio_bankb[bank]->edge; + gpio_bank_saved[bank].both = gpio_bankb[bank]->both; + + gpio = i; + + while (mask) { + if (mask & 1) { + bfin_gpio_wakeup_type(gpio, wakeup_flags_map[gpio]); + set_gpio_data(gpio, 0); /*Clear*/ + } + gpio++; + mask >>= 1; + } + + sic_iwr |= 1 << (sic_iwr_irqs[bank] - (IRQ_CORETMR + 1)); + gpio_bankb[bank]->maskb_set = wakeup_map[gpio_bank(i)]; + } + } + + if (sic_iwr) + return sic_iwr; + else + return IWR_ENABLE_ALL; +} + + +void gpio_pm_restore(void) +{ + u16 bank, mask, i; + + for (i = 0; i < MAX_BLACKFIN_GPIOS; i+=GPIO_BANKSIZE) { + mask = wakeup_map[gpio_bank(i)]; + bank = gpio_bank(i); + + if (mask) { +#ifdef BF537_FAMILY + *port_fer[bank] = gpio_bank_saved[bank].fer; +#endif + gpio_bankb[bank]->inen = gpio_bank_saved[bank].inen; + gpio_bankb[bank]->dir = gpio_bank_saved[bank].dir; + gpio_bankb[bank]->polar = gpio_bank_saved[bank].polar; + gpio_bankb[bank]->edge = gpio_bank_saved[bank].edge; + gpio_bankb[bank]->both = gpio_bank_saved[bank].both; + } + + gpio_bankb[bank]->maskb = gpio_bank_saved[bank].maskb; + } +} + +#endif + +/*********************************************************** +* +* FUNCTIONS: Blackfin GPIO Driver +* +* INPUTS/OUTPUTS: +* gpio - GPIO Number between 0 and MAX_BLACKFIN_GPIOS +* +* +* DESCRIPTION: Blackfin GPIO Driver API +* +* CAUTION: +************************************************************* +* MODIFICATION HISTORY : +**************************************************************/ + +int gpio_request(unsigned short gpio, const char *label) +{ + unsigned long 
flags; + + if (check_gpio(gpio) < 0) + return -EINVAL; + + local_irq_save(flags); + + if (unlikely(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))) { + printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved!\n", gpio); + dump_stack(); + local_irq_restore(flags); + return -EBUSY; + } + reserved_map[gpio_bank(gpio)] |= gpio_bit(gpio); + + local_irq_restore(flags); + + port_setup(gpio, GPIO_USAGE); + + return 0; +} +EXPORT_SYMBOL(gpio_request); + + +void gpio_free(unsigned short gpio) +{ + unsigned long flags; + + if (check_gpio(gpio) < 0) + return; + + local_irq_save(flags); + + if (unlikely(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio)))) { + printk(KERN_ERR "bfin-gpio: GPIO %d wasn't reserved!\n", gpio); + dump_stack(); + local_irq_restore(flags); + return; + } + + default_gpio(gpio); + + reserved_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); + + local_irq_restore(flags); +} +EXPORT_SYMBOL(gpio_free); + + +void gpio_direction_input(unsigned short gpio) +{ + unsigned long flags; + + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); + + local_irq_save(flags); + gpio_bankb[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio); + gpio_bankb[gpio_bank(gpio)]->inen |= gpio_bit(gpio); + local_irq_restore(flags); +} +EXPORT_SYMBOL(gpio_direction_input); + +void gpio_direction_output(unsigned short gpio) +{ + unsigned long flags; + + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); + + local_irq_save(flags); + gpio_bankb[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio); + gpio_bankb[gpio_bank(gpio)]->dir |= gpio_bit(gpio); + local_irq_restore(flags); +} +EXPORT_SYMBOL(gpio_direction_output); diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c new file mode 100644 index 000000000000..f64ecb638fab --- /dev/null +++ b/arch/blackfin/kernel/bfin_ksyms.c @@ -0,0 +1,119 @@ +/* + * File: arch/blackfin/kernel/bfin_ksyms.c + * Based on: none - original work + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +/* platform dependent support */ + +EXPORT_SYMBOL(__ioremap); +EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strncmp); +EXPORT_SYMBOL(dump_thread); + +EXPORT_SYMBOL(ip_fast_csum); + +EXPORT_SYMBOL(kernel_thread); + +EXPORT_SYMBOL(__up); +EXPORT_SYMBOL(__down); +EXPORT_SYMBOL(__down_trylock); +EXPORT_SYMBOL(__down_interruptible); + +EXPORT_SYMBOL(is_in_rom); + +/* Networking helper routines. */ +EXPORT_SYMBOL(csum_partial_copy); + +/* The following are special because they're not called + * explicitly (the C compiler generates them). Fortunately, + * their interface isn't gonna change any time soon now, so + * it's OK to leave it out of version control. 
+ */ +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(memchr); +EXPORT_SYMBOL(get_wchan); + +/* + * libgcc functions - functions that are used internally by the + * compiler... (prototypes are not correct though, but that + * doesn't really matter since they're not versioned). + */ +extern void __ashldi3(void); +extern void __ashrdi3(void); +extern void __smulsi3_highpart(void); +extern void __umulsi3_highpart(void); +extern void __divsi3(void); +extern void __lshrdi3(void); +extern void __modsi3(void); +extern void __muldi3(void); +extern void __udivsi3(void); +extern void __umodsi3(void); + +/* gcc lib functions */ +EXPORT_SYMBOL(__ashldi3); +EXPORT_SYMBOL(__ashrdi3); +EXPORT_SYMBOL(__umulsi3_highpart); +EXPORT_SYMBOL(__smulsi3_highpart); +EXPORT_SYMBOL(__divsi3); +EXPORT_SYMBOL(__lshrdi3); +EXPORT_SYMBOL(__modsi3); +EXPORT_SYMBOL(__muldi3); +EXPORT_SYMBOL(__udivsi3); +EXPORT_SYMBOL(__umodsi3); + +EXPORT_SYMBOL(outsb); +EXPORT_SYMBOL(insb); +EXPORT_SYMBOL(outsw); +EXPORT_SYMBOL(insw); +EXPORT_SYMBOL(outsl); +EXPORT_SYMBOL(insl); +EXPORT_SYMBOL(irq_flags); +EXPORT_SYMBOL(iounmap); +EXPORT_SYMBOL(blackfin_dcache_invalidate_range); +EXPORT_SYMBOL(blackfin_icache_dcache_flush_range); +EXPORT_SYMBOL(blackfin_icache_flush_range); +EXPORT_SYMBOL(blackfin_dcache_flush_range); +EXPORT_SYMBOL(blackfin_dflush_page); + +EXPORT_SYMBOL(csum_partial); +EXPORT_SYMBOL(__init_begin); +EXPORT_SYMBOL(__init_end); +EXPORT_SYMBOL(_ebss_l1); +EXPORT_SYMBOL(_stext_l1); +EXPORT_SYMBOL(_etext_l1); +EXPORT_SYMBOL(_sdata_l1); +EXPORT_SYMBOL(_ebss_b_l1); +EXPORT_SYMBOL(_sdata_b_l1); diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c new file mode 100644 index 000000000000..539eb24e062f --- /dev/null +++ b/arch/blackfin/kernel/dma-mapping.c @@ -0,0 +1,183 @@ +/* + * File: arch/blackfin/kernel/dma-mapping.c + * Based on: + * Author: + * + * Created: + * Description: Dynamic DMA mapping support. + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static spinlock_t dma_page_lock; +static unsigned int *dma_page; +static unsigned int dma_pages; +static unsigned long dma_base; +static unsigned long dma_size; +static unsigned int dma_initialized; + +void dma_alloc_init(unsigned long start, unsigned long end) +{ + spin_lock_init(&dma_page_lock); + dma_initialized = 0; + + dma_page = (unsigned int *)__get_free_page(GFP_KERNEL); + memset(dma_page, 0, PAGE_SIZE); + dma_base = PAGE_ALIGN(start); + dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start); + dma_pages = dma_size >> PAGE_SHIFT; + memset((void *)dma_base, 0, DMA_UNCACHED_REGION); + dma_initialized = 1; + + printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __FUNCTION__, + dma_page, dma_pages, dma_base); +} + +static inline unsigned int get_pages(size_t size) +{ + return ((size - 1) >> PAGE_SHIFT) + 1; +} + +static unsigned long __alloc_dma_pages(unsigned int pages) +{ + unsigned long ret = 0, flags; + int i, count = 0; + + if (dma_initialized == 0) + dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend); + + spin_lock_irqsave(&dma_page_lock, flags); + + for (i = 0; i < dma_pages;) { + if (dma_page[i++] == 0) { + if (++count == pages) { + while (count--) + dma_page[--i] = 1; + ret = dma_base + (i << PAGE_SHIFT); + break; + } + } else + count = 0; + } + spin_unlock_irqrestore(&dma_page_lock, flags); + return ret; +} + +static void __free_dma_pages(unsigned long addr, unsigned int pages) +{ + unsigned long page = (addr - dma_base) >> PAGE_SHIFT; + unsigned long flags; + int i; + + if ((page + pages) > dma_pages) { + printk(KERN_ERR "%s: freeing outside range.\n", __FUNCTION__); + BUG(); + } + + spin_lock_irqsave(&dma_page_lock, flags); + for (i = page; i < page + pages; i++) { + dma_page[i] = 0; + } + spin_unlock_irqrestore(&dma_page_lock, flags); +} + +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t * dma_handle, gfp_t gfp) +{ + void *ret; + + ret = (void *)__alloc_dma_pages(get_pages(size)); + + if (ret) { + memset(ret, 0, size); + *dma_handle = virt_to_phys(ret); + } + + return ret; +} +EXPORT_SYMBOL(dma_alloc_coherent); + +void +dma_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + __free_dma_pages((unsigned long)vaddr, get_pages(size)); +} +EXPORT_SYMBOL(dma_free_coherent); + +/* + * Dummy functions defined for some existing drivers + */ + +dma_addr_t +dma_map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + + invalidate_dcache_range((unsigned long)ptr, + (unsigned long)ptr + size); + + return (dma_addr_t) ptr; +} +EXPORT_SYMBOL(dma_map_single); + +int +dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + int i; + + BUG_ON(direction == DMA_NONE); + + for (i = 0; i < nents; i++) + invalidate_dcache_range(sg_dma_address(&sg[i]), + sg_dma_address(&sg[i]) + + sg_dma_len(&sg[i])); + + return nents; +} +EXPORT_SYMBOL(dma_map_sg); + +void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); +} +EXPORT_SYMBOL(dma_unmap_single); + +void 
dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nhwentries, enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); +} +EXPORT_SYMBOL(dma_unmap_sg); diff --git a/arch/blackfin/kernel/dualcore_test.c b/arch/blackfin/kernel/dualcore_test.c new file mode 100644 index 000000000000..8b89c99f9dfa --- /dev/null +++ b/arch/blackfin/kernel/dualcore_test.c @@ -0,0 +1,49 @@ +/* + * File: arch/blackfin/kernel/dualcore_test.c + * Based on: + * Author: + * + * Created: + * Description: Small test code for CoreB on a BF561 + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +static int *testarg = (int*)0xfeb00000; + +static int test_init(void) +{ + *testarg = 1; + printk("Dual core test module inserted: set testarg = [%d]\n @ [%p]\n", + *testarg, testarg); + return 0; +} + +static void test_exit(void) +{ + printk("Dual core test module removed: testarg = [%d]\n", *testarg); +} + +module_init(test_init); +module_exit(test_exit); diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S new file mode 100644 index 000000000000..5880b270bd50 --- /dev/null +++ b/arch/blackfin/kernel/entry.S @@ -0,0 +1,94 @@ +/* + * File: arch/blackfin/kernel/entry.S + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include + +#include + +#ifdef CONFIG_EXCPT_IRQ_SYSC_L1 +.section .l1.text +#else +.text +#endif + +ENTRY(_ret_from_fork) + SP += -12; + call _schedule_tail; + SP += 12; + r0 = [sp + PT_IPEND]; + cc = bittst(r0,1); + if cc jump .Lin_kernel; + RESTORE_CONTEXT + rti; +.Lin_kernel: + bitclr(r0,1); + [sp + PT_IPEND] = r0; + /* do a 'fake' RTI by jumping to [RETI] + * to avoid clearing supervisor mode in child + */ + RESTORE_ALL_SYS + p0 = reti; + jump (p0); + +ENTRY(_sys_fork) + r0 = -EINVAL; + rts; + +ENTRY(_sys_vfork) + r0 = sp; + r0 += 24; + [--sp] = rets; + SP += -12; + call _bfin_vfork; + SP += 12; + rets = [sp++]; + rts; + +ENTRY(_sys_clone) + r0 = sp; + r0 += 24; + [--sp] = rets; + SP += -12; + call _bfin_clone; + SP += 12; + rets = [sp++]; + rts; + +ENTRY(_sys_rt_sigreturn) + r0 = sp; + r0 += 24; + [--sp] = rets; + SP += -12; + call _do_rt_sigreturn; + SP += 12; + rets = [sp++]; + rts; diff --git a/arch/blackfin/kernel/flat.c b/arch/blackfin/kernel/flat.c new file mode 100644 index 000000000000..a92587b628b5 --- /dev/null +++ b/arch/blackfin/kernel/flat.c @@ -0,0 +1,101 @@ +/* + * arch/blackfin/kernel/flat.c + * + * Copyright (C) 2007 Analog Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include + +#define FLAT_BFIN_RELOC_TYPE_16_BIT 0 +#define FLAT_BFIN_RELOC_TYPE_16H_BIT 1 +#define FLAT_BFIN_RELOC_TYPE_32_BIT 2 + +unsigned long bfin_get_addr_from_rp(unsigned long *ptr, + unsigned long relval, + unsigned long flags, + unsigned long *persistent) +{ + unsigned short *usptr = (unsigned short *)ptr; + int type = (relval >> 26) & 7; + unsigned long val; + + switch (type) { + case FLAT_BFIN_RELOC_TYPE_16_BIT: + case FLAT_BFIN_RELOC_TYPE_16H_BIT: + usptr = (unsigned short *)ptr; + pr_debug("*usptr = %x", get_unaligned(usptr)); + val = get_unaligned(usptr); + val += *persistent; + break; + + case FLAT_BFIN_RELOC_TYPE_32_BIT: + pr_debug("*ptr = %lx", get_unaligned(ptr)); + val = get_unaligned(ptr); + break; + + default: + pr_debug("BINFMT_FLAT: Unknown relocation type %x\n", + type); + + return 0; + } + + /* + * Stack-relative relocs contain the offset into the stack, we + * have to add the stack's start address here and return 1 from + * flat_addr_absolute to prevent the normal address calculations + */ + if (relval & (1 << 29)) + return val + current->mm->context.end_brk; + + if ((flags & FLAT_FLAG_GOTPIC) == 0) + val = htonl(val); + return val; +} +EXPORT_SYMBOL(bfin_get_addr_from_rp); + +/* + * Insert the address ADDR into the symbol reference at RP; + * RELVAL is the raw relocation-table entry from which RP is derived + */ +void bfin_put_addr_at_rp(unsigned long *ptr, unsigned long addr, + unsigned long relval) +{ + unsigned short *usptr = (unsigned short *)ptr; + int type = (relval >> 26) & 7; + + switch (type) { + case FLAT_BFIN_RELOC_TYPE_16_BIT: + put_unaligned(addr, usptr); + pr_debug("new value %x at %p", get_unaligned(usptr), + usptr); + break; + + case FLAT_BFIN_RELOC_TYPE_16H_BIT: + put_unaligned(addr >> 16, usptr); + pr_debug("new value %x", get_unaligned(usptr)); + break; + + case FLAT_BFIN_RELOC_TYPE_32_BIT: + put_unaligned(addr, ptr); + pr_debug("new ptr =%lx", get_unaligned(ptr)); + break; + } +} +EXPORT_SYMBOL(bfin_put_addr_at_rp); diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c new file mode 100644 index 000000000000..b45188f8512e --- /dev/null +++ b/arch/blackfin/kernel/init_task.c @@ -0,0 +1,60 @@ +/* + * File: arch/blackfin/kernel/init_task.c + * Based on: + * Author: + * + * Created: + * Description: This file contains the simple DMA Implementation for Blackfin + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include + +static struct fs_struct init_fs = INIT_FS; +static struct files_struct init_files = INIT_FILES; +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); + +struct mm_struct init_mm = INIT_MM(init_mm); +EXPORT_SYMBOL(init_mm); + +/* + * Initial task structure. + * + * All other task structs will be allocated on slabs in fork.c + */ +struct task_struct init_task = INIT_TASK(init_task); +EXPORT_SYMBOL(init_task); + +/* + * Initial thread structure. + * + * We need to make sure that this is 8192-byte aligned due to the + * way process stacks are handled. This is done by having a special + * "init_task" linker map entry. + */ +union thread_union init_thread_union + __attribute__ ((__section__(".data.init_task"))) = { +INIT_THREAD_INFO(init_task)}; diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c new file mode 100644 index 000000000000..df5bf022cf79 --- /dev/null +++ b/arch/blackfin/kernel/irqchip.c @@ -0,0 +1,147 @@ +/* + * File: arch/blackfin/kernel/irqchip.c + * Based on: + * Author: + * + * Created: + * Description: This file contains the simple DMA Implementation for Blackfin + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include + +static unsigned long irq_err_count; +static spinlock_t irq_controller_lock; + +/* + * Dummy mask/unmask handler + */ +void dummy_mask_unmask_irq(unsigned int irq) +{ +} + +void ack_bad_irq(unsigned int irq) +{ + irq_err_count += 1; + printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq); +} +EXPORT_SYMBOL(ack_bad_irq); + +static struct irq_chip bad_chip = { + .ack = dummy_mask_unmask_irq, + .mask = dummy_mask_unmask_irq, + .unmask = dummy_mask_unmask_irq, +}; + +static struct irq_desc bad_irq_desc = { + .chip = &bad_chip, + .handle_irq = handle_bad_irq, + .depth = 1, +}; + +int show_interrupts(struct seq_file *p, void *v) +{ + int i = *(loff_t *) v; + struct irqaction *action; + unsigned long flags; + + if (i < NR_IRQS) { + spin_lock_irqsave(&irq_desc[i].lock, flags); + action = irq_desc[i].action; + if (!action) + goto unlock; + + seq_printf(p, "%3d: %10u ", i, kstat_irqs(i)); + seq_printf(p, " %s", action->name); + for (action = action->next; action; action = action->next) + seq_printf(p, ", %s", action->name); + + seq_putc(p, '\n'); + unlock: + spin_unlock_irqrestore(&irq_desc[i].lock, flags); + } else if (i == NR_IRQS) { + seq_printf(p, "Err: %10lu\n", irq_err_count); + } + return 0; +} + +/* + * do_IRQ handles all hardware IRQ's. Decoded IRQs should not + * come via this function. Instead, they should provide their + * own 'handler' + */ + +#ifdef CONFIG_DO_IRQ_L1 +asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)__attribute__((l1_text)); +#endif + +asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs) +{ + struct pt_regs *old_regs; + struct irq_desc *desc = irq_desc + irq; + unsigned short pending, other_ints; + + old_regs = set_irq_regs(regs); + + /* + * Some hardware gives randomly wrong interrupts. Rather + * than crashing, do something sensible. + */ + if (irq >= NR_IRQS) + desc = &bad_irq_desc; + + irq_enter(); + + generic_handle_irq(irq); + + /* If we're the only interrupt running (ignoring IRQ15 which is for + syscalls), lower our priority to IRQ14 so that softirqs run at + that level. If there's another, lower-level interrupt, irq_exit + will defer softirqs to that. */ + CSYNC(); + pending = bfin_read_IPEND() & ~0x8000; + other_ints = pending & (pending - 1); + if (other_ints == 0) + lower_to_irq14(); + irq_exit(); + + set_irq_regs(old_regs); +} + +void __init init_IRQ(void) +{ + struct irq_desc *desc; + int irq; + + spin_lock_init(&irq_controller_lock); + for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) { + *desc = bad_irq_desc; + } + + init_arch_irq(); +} diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c new file mode 100644 index 000000000000..372f756f1ad9 --- /dev/null +++ b/arch/blackfin/kernel/module.c @@ -0,0 +1,429 @@ +/* + * File: arch/blackfin/kernel/module.c + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * handle arithmetic relocations. + * See binutils/bfd/elf32-bfin.c for more details + */ +#define RELOC_STACK_SIZE 100 +static uint32_t reloc_stack[RELOC_STACK_SIZE]; +static unsigned int reloc_stack_tos; + +#define is_reloc_stack_empty() ((reloc_stack_tos > 0)?0:1) + +static void reloc_stack_push(uint32_t value) +{ + reloc_stack[reloc_stack_tos++] = value; +} + +static uint32_t reloc_stack_pop(void) +{ + return reloc_stack[--reloc_stack_tos]; +} + +static uint32_t reloc_stack_operate(unsigned int oper, struct module *mod) +{ + uint32_t value; + + switch (oper) { + case R_add: + value = reloc_stack[reloc_stack_tos - 2] + + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_sub: + value = reloc_stack[reloc_stack_tos - 2] - + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_mult: + value = reloc_stack[reloc_stack_tos - 2] * + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_div: + value = reloc_stack[reloc_stack_tos - 2] / + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_mod: + value = reloc_stack[reloc_stack_tos - 2] % + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_lshift: + value = reloc_stack[reloc_stack_tos - 2] << + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_rshift: + value = reloc_stack[reloc_stack_tos - 2] >> + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_and: + value = reloc_stack[reloc_stack_tos - 2] & + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_or: + value = reloc_stack[reloc_stack_tos - 2] | + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_xor: + value = reloc_stack[reloc_stack_tos - 2] ^ + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_land: + value = reloc_stack[reloc_stack_tos - 2] && + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_lor: + value = reloc_stack[reloc_stack_tos - 2] || + reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 2; + break; + case R_neg: + value = -reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos--; + break; + case R_comp: + value = ~reloc_stack[reloc_stack_tos - 1]; + reloc_stack_tos -= 1; + break; + default: + printk(KERN_WARNING "module %s: unhandled reloction\n", + mod->name); + return 0; + } + + /* now push the new value back on stack */ + reloc_stack_push(value); + + return value; +} + +void *module_alloc(unsigned long size) +{ + if (size == 0) + return NULL; + return vmalloc(size); +} + +/* Free memory returned from module_alloc 
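+   (the L1 SRAM copies made in module_frob_arch_sections() are released separately in module_arch_cleanup())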
*/ +void module_free(struct module *mod, void *module_region) +{ + vfree(module_region); +} + +/* Transfer the section to the L1 memory */ +int +module_frob_arch_sections(Elf_Ehdr * hdr, Elf_Shdr * sechdrs, + char *secstrings, struct module *mod) +{ + Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; + void *dest = NULL; + + for (s = sechdrs; s < sechdrs_end; ++s) { + if ((strcmp(".l1.text", secstrings + s->sh_name) == 0) || + ((strcmp(".text", secstrings + s->sh_name)==0) && + (hdr->e_flags & FLG_CODE_IN_L1) && (s->sh_size > 0))) { + mod->arch.text_l1 = s; + dest = l1_inst_sram_alloc(s->sh_size); + if (dest == NULL) { + printk(KERN_ERR + "module %s: L1 instruction memory allocation failed\n", + mod->name); + return -1; + } + dma_memcpy(dest, (void *)s->sh_addr, s->sh_size); + s->sh_flags &= ~SHF_ALLOC; + s->sh_addr = (unsigned long)dest; + } + if ((strcmp(".l1.data", secstrings + s->sh_name) == 0)|| + ((strcmp(".data", secstrings + s->sh_name)==0) && + (hdr->e_flags & FLG_DATA_IN_L1) && (s->sh_size > 0))) { + mod->arch.data_a_l1 = s; + dest = l1_data_sram_alloc(s->sh_size); + if (dest == NULL) { + printk(KERN_ERR + "module %s: L1 data memory allocation failed\n", + mod->name); + return -1; + } + memcpy(dest, (void *)s->sh_addr, s->sh_size); + s->sh_flags &= ~SHF_ALLOC; + s->sh_addr = (unsigned long)dest; + } + if (strcmp(".l1.bss", secstrings + s->sh_name) == 0 || + ((strcmp(".bss", secstrings + s->sh_name)==0) && + (hdr->e_flags & FLG_DATA_IN_L1) && (s->sh_size > 0))) { + mod->arch.bss_a_l1 = s; + dest = l1_data_sram_alloc(s->sh_size); + if (dest == NULL) { + printk(KERN_ERR + "module %s: L1 data memory allocation failed\n", + mod->name); + return -1; + } + memset(dest, 0, s->sh_size); + s->sh_flags &= ~SHF_ALLOC; + s->sh_addr = (unsigned long)dest; + } + if (strcmp(".l1.data.B", secstrings + s->sh_name) == 0) { + mod->arch.data_b_l1 = s; + dest = l1_data_B_sram_alloc(s->sh_size); + if (dest == NULL) { + printk(KERN_ERR + "module %s: L1 data memory allocation failed\n", + mod->name); + return -1; + } + memcpy(dest, (void *)s->sh_addr, s->sh_size); + s->sh_flags &= ~SHF_ALLOC; + s->sh_addr = (unsigned long)dest; + } + if (strcmp(".l1.bss.B", secstrings + s->sh_name) == 0) { + mod->arch.bss_b_l1 = s; + dest = l1_data_B_sram_alloc(s->sh_size); + if (dest == NULL) { + printk(KERN_ERR + "module %s: L1 data memory allocation failed\n", + mod->name); + return -1; + } + memset(dest, 0, s->sh_size); + s->sh_flags &= ~SHF_ALLOC; + s->sh_addr = (unsigned long)dest; + } + } + return 0; +} + +int +apply_relocate(Elf_Shdr * sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, struct module *me) +{ + printk(KERN_ERR "module %s: .rel unsupported\n", me->name); + return -ENOEXEC; +} + +/*************************************************************************/ +/* FUNCTION : apply_relocate_add */ +/* ABSTRACT : Blackfin specific relocation handling for the loadable */ +/* modules. Modules are expected to be .o files. */ +/* Arithmetic relocations are handled. */ +/* We do not expect LSETUP to be split and hence is not */ +/* handled. */ +/* R_byte and R_byte2 are also not handled as the gas */ +/* does not generate it. 
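+   Composite expressions are evaluated on the small RPN stack above:
+   R_push/R_const push operands and R_add..R_comp combine them via
+   reloc_stack_operate().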
*/ +/*************************************************************************/ +int +apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *mod) +{ + unsigned int i; + unsigned short tmp; + Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + uint32_t *location32; + uint16_t *location16; + uint32_t value; + + pr_debug("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ + location16 = + (uint16_t *) (sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset); + location32 = (uint32_t *) location16; + /* This is the symbol it is referring to. Note that all + undefined symbols have been resolved. */ + sym = (Elf32_Sym *) sechdrs[symindex].sh_addr + + ELF32_R_SYM(rel[i].r_info); + if (is_reloc_stack_empty()) { + value = sym->st_value; + } else { + value = reloc_stack_pop(); + } + value += rel[i].r_addend; + pr_debug("location is %x, value is %x type is %d \n", + (unsigned int) location32, value, + ELF32_R_TYPE(rel[i].r_info)); + + switch (ELF32_R_TYPE(rel[i].r_info)) { + + case R_pcrel24: + case R_pcrel24_jump_l: + /* Add the value, subtract its postition */ + location16 = + (uint16_t *) (sechdrs[sechdrs[relsec].sh_info]. + sh_addr + rel[i].r_offset - 2); + location32 = (uint32_t *) location16; + value -= (uint32_t) location32; + value >>= 1; + pr_debug("value is %x, before %x-%x after %x-%x\n", value, + *location16, *(location16 + 1), + (*location16 & 0xff00) | (value >> 16 & 0x00ff), + value & 0xffff); + *location16 = + (*location16 & 0xff00) | (value >> 16 & 0x00ff); + *(location16 + 1) = value & 0xffff; + break; + case R_pcrel12_jump: + case R_pcrel12_jump_s: + value -= (uint32_t) location32; + value >>= 1; + *location16 = (value & 0xfff); + break; + case R_pcrel10: + value -= (uint32_t) location32; + value >>= 1; + *location16 = (value & 0x3ff); + break; + case R_luimm16: + pr_debug("before %x after %x\n", *location16, + (value & 0xffff)); + tmp = (value & 0xffff); + if((unsigned long)location16 >= L1_CODE_START) { + dma_memcpy(location16, &tmp, 2); + } else + *location16 = tmp; + break; + case R_huimm16: + pr_debug("before %x after %x\n", *location16, + ((value >> 16) & 0xffff)); + tmp = ((value >> 16) & 0xffff); + if((unsigned long)location16 >= L1_CODE_START) { + dma_memcpy(location16, &tmp, 2); + } else + *location16 = tmp; + break; + case R_rimm16: + *location16 = (value & 0xffff); + break; + case R_byte4_data: + pr_debug("before %x after %x\n", *location32, value); + *location32 = value; + break; + case R_push: + reloc_stack_push(value); + break; + case R_const: + reloc_stack_push(rel[i].r_addend); + break; + case R_add: + case R_sub: + case R_mult: + case R_div: + case R_mod: + case R_lshift: + case R_rshift: + case R_and: + case R_or: + case R_xor: + case R_land: + case R_lor: + case R_neg: + case R_comp: + reloc_stack_operate(ELF32_R_TYPE(rel[i].r_info), mod); + break; + default: + printk(KERN_ERR "module %s: Unknown relocation: %u\n", + mod->name, ELF32_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } + } + return 0; +} + +int +module_finalize(const Elf_Ehdr * hdr, + const Elf_Shdr * sechdrs, struct module *mod) +{ + unsigned int i, strindex = 0, symindex = 0; + char *secstrings; + + secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (i = 1; i < hdr->e_shnum; i++) { + /* Internal symbols and strings. 
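+		   (record the indices of the symbol table and its string table for the relocation pass below)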
*/ + if (sechdrs[i].sh_type == SHT_SYMTAB) { + symindex = i; + strindex = sechdrs[i].sh_link; + } + } + + for (i = 1; i < hdr->e_shnum; i++) { + const char *strtab = (char *)sechdrs[strindex].sh_addr; + unsigned int info = sechdrs[i].sh_info; + + /* Not a valid relocation section? */ + if (info >= hdr->e_shnum) + continue; + + if ((sechdrs[i].sh_type == SHT_RELA) && + ((strcmp(".rela.l1.text", secstrings + sechdrs[i].sh_name) == 0)|| + ((strcmp(".rela.text", secstrings + sechdrs[i].sh_name) == 0) && + (hdr->e_flags & FLG_CODE_IN_L1)))) { + apply_relocate_add((Elf_Shdr *) sechdrs, strtab, + symindex, i, mod); + } + } + return 0; +} + +void module_arch_cleanup(struct module *mod) +{ + if ((mod->arch.text_l1) && (mod->arch.text_l1->sh_addr)) + l1_inst_sram_free((void*)mod->arch.text_l1->sh_addr); + if ((mod->arch.data_a_l1) && (mod->arch.data_a_l1->sh_addr)) + l1_data_sram_free((void*)mod->arch.data_a_l1->sh_addr); + if ((mod->arch.bss_a_l1) && (mod->arch.bss_a_l1->sh_addr)) + l1_data_sram_free((void*)mod->arch.bss_a_l1->sh_addr); + if ((mod->arch.data_b_l1) && (mod->arch.data_b_l1->sh_addr)) + l1_data_B_sram_free((void*)mod->arch.data_b_l1->sh_addr); + if ((mod->arch.bss_b_l1) && (mod->arch.bss_b_l1->sh_addr)) + l1_data_B_sram_free((void*)mod->arch.bss_b_l1->sh_addr); +} diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c new file mode 100644 index 000000000000..3eff7439d8d3 --- /dev/null +++ b/arch/blackfin/kernel/process.c @@ -0,0 +1,394 @@ +/* + * File: arch/blackfin/kernel/process.c + * Based on: + * Author: + * + * Created: + * Description: Blackfin architecture-dependent process handling. + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include +#include + +#define LED_ON 0 +#define LED_OFF 1 + +asmlinkage void ret_from_fork(void); + +/* Points to the SDRAM backup memory for the stack that is currently in + * L1 scratchpad memory. + */ +void *current_l1_stack_save; + +/* The number of tasks currently using a L1 stack area. The SRAM is + * allocated/deallocated whenever this changes from/to zero. + */ +int nr_l1stack_tasks; + +/* Start and length of the area in L1 scratchpad memory which we've allocated + * for process stacks. + */ +void *l1_stack_base; +unsigned long l1_stack_len; + +/* + * Powermanagement idle function, if any.. + */ +void (*pm_idle)(void) = NULL; +EXPORT_SYMBOL(pm_idle); + +void (*pm_power_off)(void) = NULL; +EXPORT_SYMBOL(pm_power_off); + +/* + * We are using a different LED from the one used to indicate timer interrupt. 
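+ * default_idle() turns this LED off while the core sits in idle and back
+ * on as soon as there is work to do.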
+ */ +#if defined(CONFIG_BFIN_IDLE_LED) +static inline void leds_switch(int flag) +{ + unsigned short tmp = 0; + + tmp = bfin_read_CONFIG_BFIN_IDLE_LED_PORT(); + SSYNC(); + + if (flag == LED_ON) + tmp &= ~CONFIG_BFIN_IDLE_LED_PIN; /* light on */ + else + tmp |= CONFIG_BFIN_IDLE_LED_PIN; /* light off */ + + bfin_write_CONFIG_BFIN_IDLE_LED_PORT(tmp); + SSYNC(); + +} +#else +static inline void leds_switch(int flag) +{ +} +#endif + +/* + * The idle loop on BFIN + */ +#ifdef CONFIG_IDLE_L1 +void default_idle(void)__attribute__((l1_text)); +void cpu_idle(void)__attribute__((l1_text)); +#endif + +void default_idle(void) +{ + while (!need_resched()) { + leds_switch(LED_OFF); + local_irq_disable(); + if (likely(!need_resched())) + idle_with_irq_disabled(); + local_irq_enable(); + leds_switch(LED_ON); + } +} + +void (*idle)(void) = default_idle; + +/* + * The idle thread. There's no useful work to be + * done, so just try to conserve power and have a + * low exit latency (ie sit in a loop waiting for + * somebody to say that they'd like to reschedule) + */ +void cpu_idle(void) +{ + /* endless idle loop with no priority at all */ + while (1) { + idle(); + preempt_enable_no_resched(); + schedule(); + preempt_disable(); + } +} + +void machine_restart(char *__unused) +{ +#if defined(CONFIG_BLKFIN_CACHE) + bfin_write_IMEM_CONTROL(0x01); + SSYNC(); +#endif + bfin_reset(); + /* Dont do anything till the reset occurs */ + while (1) { + SSYNC(); + } +} + +void machine_halt(void) +{ + for (;;) + asm volatile ("idle"); +} + +void machine_power_off(void) +{ + for (;;) + asm volatile ("idle"); +} + +void show_regs(struct pt_regs *regs) +{ + printk(KERN_NOTICE "\n"); + printk(KERN_NOTICE + "PC: %08lu Status: %04lu SysStatus: %04lu RETS: %08lu\n", + regs->pc, regs->astat, regs->seqstat, regs->rets); + printk(KERN_NOTICE + "A0.x: %08lx A0.w: %08lx A1.x: %08lx A1.w: %08lx\n", + regs->a0x, regs->a0w, regs->a1x, regs->a1w); + printk(KERN_NOTICE "P0: %08lx P1: %08lx P2: %08lx P3: %08lx\n", + regs->p0, regs->p1, regs->p2, regs->p3); + printk(KERN_NOTICE "P4: %08lx P5: %08lx\n", regs->p4, regs->p5); + printk(KERN_NOTICE "R0: %08lx R1: %08lx R2: %08lx R3: %08lx\n", + regs->r0, regs->r1, regs->r2, regs->r3); + printk(KERN_NOTICE "R4: %08lx R5: %08lx R6: %08lx R7: %08lx\n", + regs->r4, regs->r5, regs->r6, regs->r7); + + if (!(regs->ipend)) + printk("USP: %08lx\n", rdusp()); +} + +/* Fill in the fpu structure for a core dump. */ + +int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs) +{ + return 1; +} + +/* + * This gets run with P1 containing the + * function to call, and R1 containing + * the "args". Note P0 is clobbered on the way here. + */ +void kernel_thread_helper(void); +__asm__(".section .text\n" + ".align 4\n" + "_kernel_thread_helper:\n\t" + "\tsp += -12;\n\t" + "\tr0 = r1;\n\t" "\tcall (p1);\n\t" "\tcall _do_exit;\n" ".previous"); + +/* + * Create a kernel thread. + */ +pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags) +{ + struct pt_regs regs; + + memset(®s, 0, sizeof(regs)); + + regs.r1 = (unsigned long)arg; + regs.p1 = (unsigned long)fn; + regs.pc = (unsigned long)kernel_thread_helper; + regs.orig_p0 = -1; + /* Set bit 2 to tell ret_from_fork we should be returning to kernel + mode. 
*/ + regs.ipend = 0x8002; + __asm__ __volatile__("%0 = syscfg;":"=da"(regs.syscfg):); + return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, + NULL); +} + +void flush_thread(void) +{ +} + +asmlinkage int bfin_vfork(struct pt_regs *regs) +{ + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, + NULL); +} + +asmlinkage int bfin_clone(struct pt_regs *regs) +{ + unsigned long clone_flags; + unsigned long newsp; + + /* syscall2 puts clone_flags in r0 and usp in r1 */ + clone_flags = regs->r0; + newsp = regs->r1; + if (!newsp) + newsp = rdusp(); + else + newsp -= 12; + return do_fork(clone_flags, newsp, regs, 0, NULL, NULL); +} + +int +copy_thread(int nr, unsigned long clone_flags, + unsigned long usp, unsigned long topstk, + struct task_struct *p, struct pt_regs *regs) +{ + struct pt_regs *childregs; + + childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1; + *childregs = *regs; + childregs->r0 = 0; + + p->thread.usp = usp; + p->thread.ksp = (unsigned long)childregs; + p->thread.pc = (unsigned long)ret_from_fork; + + return 0; +} + +/* + * fill in the user structure for a core dump.. + */ +void dump_thread(struct pt_regs *regs, struct user *dump) +{ + dump->magic = CMAGIC; + dump->start_code = 0; + dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); + dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; + dump->u_dsize = ((unsigned long)(current->mm->brk + + (PAGE_SIZE - 1))) >> PAGE_SHIFT; + dump->u_dsize -= dump->u_tsize; + dump->u_ssize = 0; + + if (dump->start_stack < TASK_SIZE) + dump->u_ssize = + ((unsigned long)(TASK_SIZE - + dump->start_stack)) >> PAGE_SHIFT; + + dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump); + + dump->regs.r0 = regs->r0; + dump->regs.r1 = regs->r1; + dump->regs.r2 = regs->r2; + dump->regs.r3 = regs->r3; + dump->regs.r4 = regs->r4; + dump->regs.r5 = regs->r5; + dump->regs.r6 = regs->r6; + dump->regs.r7 = regs->r7; + dump->regs.p0 = regs->p0; + dump->regs.p1 = regs->p1; + dump->regs.p2 = regs->p2; + dump->regs.p3 = regs->p3; + dump->regs.p4 = regs->p4; + dump->regs.p5 = regs->p5; + dump->regs.orig_p0 = regs->orig_p0; + dump->regs.a0w = regs->a0w; + dump->regs.a1w = regs->a1w; + dump->regs.a0x = regs->a0x; + dump->regs.a1x = regs->a1x; + dump->regs.rets = regs->rets; + dump->regs.astat = regs->astat; + dump->regs.pc = regs->pc; +} + +/* + * sys_execve() executes a new program. 
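+ * The saved user pt_regs are recovered from the kernel stack relative to the
+ * address of the first argument (the ((&name) + 6) arithmetic below), so
+ * do_execve() can rewrite the saved context in place.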
+ */ + +asmlinkage int sys_execve(char *name, char **argv, char **envp) +{ + int error; + char *filename; + struct pt_regs *regs = (struct pt_regs *)((&name) + 6); + + lock_kernel(); + filename = getname(name); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + goto out; + error = do_execve(filename, argv, envp, regs); + putname(filename); + out: + unlock_kernel(); + return error; +} + +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long fp, pc; + unsigned long stack_page; + int count = 0; + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + stack_page = (unsigned long)p; + fp = p->thread.usp; + do { + if (fp < stack_page + sizeof(struct thread_info) || + fp >= 8184 + stack_page) + return 0; + pc = ((unsigned long *)fp)[1]; + if (!in_sched_functions(pc)) + return pc; + fp = *(unsigned long *)fp; + } + while (count++ < 16); + return 0; +} + +#if defined(CONFIG_ACCESS_CHECK) +int _access_ok(unsigned long addr, unsigned long size) +{ + + if (addr > (addr + size)) + return 0; + if (segment_eq(get_fs(),KERNEL_DS)) + return 1; +#ifdef CONFIG_MTD_UCLINUX + if (addr >= memory_start && (addr + size) <= memory_end) + return 1; + if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end) + return 1; +#else + if (addr >= memory_start && (addr + size) <= physical_mem_end) + return 1; +#endif + if (addr >= (unsigned long)__init_begin && + addr + size <= (unsigned long)__init_end) + return 1; + if (addr >= L1_SCRATCH_START + && addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH) + return 1; +#if L1_CODE_LENGTH != 0 + if (addr >= L1_CODE_START + (_etext_l1 - _stext_l1) + && addr + size <= L1_CODE_START + L1_CODE_LENGTH) + return 1; +#endif +#if L1_DATA_A_LENGTH != 0 + if (addr >= L1_DATA_A_START + (_ebss_l1 - _sdata_l1) + && addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH) + return 1; +#endif +#if L1_DATA_B_LENGTH != 0 + if (addr >= L1_DATA_B_START + && addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH) + return 1; +#endif + return 0; +} +EXPORT_SYMBOL(_access_ok); +#endif /* CONFIG_ACCESS_CHECK */ diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c new file mode 100644 index 000000000000..d7c8e514cb92 --- /dev/null +++ b/arch/blackfin/kernel/ptrace.c @@ -0,0 +1,430 @@ +/* + * File: arch/blackfin/kernel/ptrace.c + * Based on: Taken from linux/kernel/ptrace.c + * Author: linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds + * + * Created: 1/23/92 + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define MAX_SHARED_LIBS 3 +#define TEXT_OFFSET 0 +/* + * does not yet catch signals sent when the child dies. + * in exit.c or in signal.c. + */ + +/* determines which bits in the SYSCFG reg the user has access to. */ +/* 1 = access 0 = no access */ +#define SYSCFG_MASK 0x0007 /* SYSCFG reg */ +/* sets the trace bits. */ +#define TRACE_BITS 0x0001 + +/* Find the stack offset for a register, relative to thread.esp0. */ +#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg) + +/* + * Get the address of the live pt_regs for the specified task. + * These are saved onto the top kernel stack when the process + * is not running. + * + * Note: if a user thread is execve'd from kernel space, the + * kernel stack will not be empty on entry to the kernel, so + * ptracing these tasks will fail. + */ +static inline struct pt_regs *get_user_regs(struct task_struct *task) +{ + return (struct pt_regs *) + ((unsigned long)task->thread_info + + (THREAD_SIZE - sizeof(struct pt_regs))); +} + +/* + * Get all user integer registers. + */ +static inline int ptrace_getregs(struct task_struct *tsk, void __user * uregs) +{ + struct pt_regs *regs = get_user_regs(tsk); + return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; +} + +/* Mapping from PT_xxx to the stack offset at which the register is + * saved. Notice that usp has no stack-slot and needs to be treated + * specially (see get_reg/put_reg below). + */ + +/* + * Get contents of register REGNO in task TASK. + */ +static inline long get_reg(struct task_struct *task, int regno) +{ + unsigned char *reg_ptr; + + struct pt_regs *regs = + (struct pt_regs *)((unsigned long)task->thread_info + + (THREAD_SIZE - sizeof(struct pt_regs))); + reg_ptr = (char *)regs; + + switch (regno) { + case PT_USP: + return task->thread.usp; + default: + if (regno <= 216) + return *(long *)(reg_ptr + regno); + } + /* slight mystery ... never seems to come here but kernel misbehaves without this code! */ + + printk(KERN_WARNING "Request to get for unknown register %d\n", regno); + return 0; +} + +/* + * Write contents of register REGNO in task TASK. + */ +static inline int +put_reg(struct task_struct *task, int regno, unsigned long data) +{ + char * reg_ptr; + + struct pt_regs *regs = + (struct pt_regs *)((unsigned long)task->thread_info + + (THREAD_SIZE - sizeof(struct pt_regs))); + reg_ptr = (char *)regs; + + switch (regno) { + case PT_PC: + /*********************************************************************/ + /* At this point the kernel is most likely in exception. */ + /* The RETX register will be used to populate the pc of the process. 
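Both regs->retx and regs->pc are written below to keep the two views of the resume address consistent.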
*/ + /*********************************************************************/ + regs->retx = data; + regs->pc = data; + break; + case PT_RETX: + break; /* regs->retx = data; break; */ + case PT_USP: + regs->usp = data; + task->thread.usp = data; + break; + default: + if (regno <= 216) + *(long *)(reg_ptr + regno) = data; + } + return 0; +} + +/* + * check that an address falls within the bounds of the target process's memory mappings + */ +static inline int is_user_addr_valid(struct task_struct *child, + unsigned long start, unsigned long len) +{ + struct vm_list_struct *vml; + struct sram_list_struct *sraml; + + for (vml = child->mm->context.vmlist; vml; vml = vml->next) + if (start >= vml->vma->vm_start && start + len <= vml->vma->vm_end) + return 0; + + for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next) + if (start >= (unsigned long)sraml->addr + && start + len <= (unsigned long)sraml->addr + sraml->length) + return 0; + + return -EIO; +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure the single step bit is not set. + */ +void ptrace_disable(struct task_struct *child) +{ + unsigned long tmp; + /* make sure the single step bit is not set. */ + tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16); + put_reg(child, PT_SR, tmp); +} + +long arch_ptrace(struct task_struct *child, long request, long addr, long data) +{ + int ret; + int add = 0; + + switch (request) { + /* when I and D space are separate, these will need to be fixed. */ + case PTRACE_PEEKDATA: + pr_debug("ptrace: PEEKDATA\n"); + add = MAX_SHARED_LIBS * 4; /* space between text and data */ + /* fall through */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ + { + unsigned long tmp = 0; + int copied; + + ret = -EIO; + pr_debug("ptrace: PEEKTEXT at addr 0x%08lx + add %d %ld\n", addr, add, + sizeof(data)); + if (is_user_addr_valid(child, addr + add, sizeof(tmp)) < 0) + break; + pr_debug("ptrace: user address is valid\n"); + +#if L1_CODE_LENGTH != 0 + if (addr + add >= L1_CODE_START + && addr + add + sizeof(tmp) <= L1_CODE_START + L1_CODE_LENGTH) { + safe_dma_memcpy (&tmp, (const void *)(addr + add), sizeof(tmp)); + copied = sizeof(tmp); + } else +#endif + copied = + access_process_vm(child, addr + add, &tmp, + sizeof(tmp), 0); + pr_debug("ptrace: copied size %d [0x%08lx]\n", copied, tmp); + if (copied != sizeof(tmp)) + break; + ret = put_user(tmp, (unsigned long *)data); + break; + } + + /* read the word at location addr in the USER area. */ + case PTRACE_PEEKUSR: + { + unsigned long tmp; + ret = -EIO; + tmp = 0; + if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) { + printk(KERN_WARNING "ptrace error : PEEKUSR : temporarily returning " + "0 - %x sizeof(pt_regs) is %lx\n", + (int)addr, sizeof(struct pt_regs)); + break; + } + if (addr == sizeof(struct pt_regs)) { + /* PT_TEXT_ADDR */ + tmp = child->mm->start_code + TEXT_OFFSET; + } else if (addr == (sizeof(struct pt_regs) + 4)) { + /* PT_TEXT_END_ADDR */ + tmp = child->mm->end_code; + } else if (addr == (sizeof(struct pt_regs) + 8)) { + /* PT_DATA_ADDR */ + tmp = child->mm->start_data; +#ifdef CONFIG_BINFMT_ELF_FDPIC + } else if (addr == (sizeof(struct pt_regs) + 12)) { + tmp = child->mm->context.exec_fdpic_loadmap; + } else if (addr == (sizeof(struct pt_regs) + 16)) { + tmp = child->mm->context.interp_fdpic_loadmap; +#endif + } else { + tmp = get_reg(child, addr); + } + ret = put_user(tmp, (unsigned long *)data); + break; + } + + /* when I and D space are separate, this will have to be fixed. 
*/ + case PTRACE_POKEDATA: + printk(KERN_NOTICE "ptrace: PTRACE_PEEKDATA\n"); + /* fall through */ + case PTRACE_POKETEXT: /* write the word at location addr. */ + { + int copied; + + ret = -EIO; + pr_debug("ptrace: POKETEXT at addr 0x%08lx + add %d %ld bytes %lx\n", + addr, add, sizeof(data), data); + if (is_user_addr_valid(child, addr + add, sizeof(data)) < 0) + break; + pr_debug("ptrace: user address is valid\n"); + +#if L1_CODE_LENGTH != 0 + if (addr + add >= L1_CODE_START + && addr + add + sizeof(data) <= L1_CODE_START + L1_CODE_LENGTH) { + safe_dma_memcpy ((void *)(addr + add), &data, sizeof(data)); + copied = sizeof(data); + } else +#endif + copied = + access_process_vm(child, addr + add, &data, + sizeof(data), 1); + pr_debug("ptrace: copied size %d\n", copied); + if (copied != sizeof(data)) + break; + ret = 0; + break; + } + + case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ + ret = -EIO; + if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) { + printk(KERN_WARNING "ptrace error : POKEUSR: temporarily returning 0\n"); + break; + } + + if (addr >= (sizeof(struct pt_regs))) { + ret = 0; + break; + } + if (addr == PT_SYSCFG) { + data &= SYSCFG_MASK; + data |= get_reg(child, PT_SYSCFG); + } + ret = put_reg(child, addr, data); + break; + + case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ + case PTRACE_CONT: + { /* restart after signal. */ + long tmp; + + pr_debug("ptrace_cont\n"); + + ret = -EIO; + if (!valid_signal(data)) + break; + if (request == PTRACE_SYSCALL) + set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + else + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + + child->exit_code = data; + /* make sure the single step bit is not set. */ + tmp = get_reg(child, PT_SYSCFG) & ~(TRACE_BITS); + put_reg(child, PT_SYSCFG, tmp); + pr_debug("before wake_up_process\n"); + wake_up_process(child); + ret = 0; + break; + } + + /* + * make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. + */ + case PTRACE_KILL: + { + long tmp; + ret = 0; + if (child->exit_state == EXIT_ZOMBIE) /* already dead */ + break; + child->exit_code = SIGKILL; + /* make sure the single step bit is not set. */ + tmp = get_reg(child, PT_SYSCFG) & ~(TRACE_BITS); + put_reg(child, PT_SYSCFG, tmp); + wake_up_process(child); + break; + } + + case PTRACE_SINGLESTEP: + { /* set the trap flag. */ + long tmp; + + pr_debug("single step\n"); + ret = -EIO; + if (!valid_signal(data)) + break; + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + + tmp = get_reg(child, PT_SYSCFG) | (TRACE_BITS); + put_reg(child, PT_SYSCFG, tmp); + + child->exit_code = data; + /* give it a chance to run. */ + wake_up_process(child); + ret = 0; + break; + } + + case PTRACE_DETACH: + { /* detach a process that was attached. */ + ret = ptrace_detach(child, data); + break; + } + + case PTRACE_GETREGS: + { + + /* Get all gp regs from the child. */ + ret = ptrace_getregs(child, (void __user *)data); + break; + } + + case PTRACE_SETREGS: + { + printk(KERN_NOTICE + "ptrace: SETREGS: **** NOT IMPLEMENTED ***\n"); + /* Set all gp regs in the child. 
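Not implemented yet: the request is acknowledged (ret = 0 below) without modifying the child's registers.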
*/ + ret = 0; + break; + } + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +asmlinkage void syscall_trace(void) +{ + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + + if (!(current->ptrace & PT_PTRACED)) + return; + + /* the 0x80 provides a way for the tracing parent to distinguish + * between a syscall stop and SIGTRAP delivery + */ + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) + ? 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } +} diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c new file mode 100644 index 000000000000..342bb8dd56ac --- /dev/null +++ b/arch/blackfin/kernel/setup.c @@ -0,0 +1,902 @@ +/* + * File: arch/blackfin/kernel/setup.c + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +unsigned long memory_start, memory_end, physical_mem_end; +unsigned long reserved_mem_dcache_on; +unsigned long reserved_mem_icache_on; +EXPORT_SYMBOL(memory_start); +EXPORT_SYMBOL(memory_end); +EXPORT_SYMBOL(physical_mem_end); +EXPORT_SYMBOL(_ramend); + +#ifdef CONFIG_MTD_UCLINUX +unsigned long memory_mtd_end, memory_mtd_start, mtd_size; +unsigned long _ebss; +EXPORT_SYMBOL(memory_mtd_end); +EXPORT_SYMBOL(memory_mtd_start); +EXPORT_SYMBOL(mtd_size); +#endif + +char command_line[COMMAND_LINE_SIZE]; + +#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE) +static void generate_cpl_tables(void); +#endif + +void __init bf53x_cache_init(void) +{ +#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE) + generate_cpl_tables(); +#endif + +#ifdef CONFIG_BLKFIN_CACHE + bfin_icache_init(); + printk(KERN_INFO "Instruction Cache Enabled\n"); +#endif + +#ifdef CONFIG_BLKFIN_DCACHE + bfin_dcache_init(); + printk(KERN_INFO "Data Cache Enabled" +# if defined CONFIG_BLKFIN_WB + " (write-back)" +# elif defined CONFIG_BLKFIN_WT + " (write-through)" +# endif + "\n"); +#endif +} + +void bf53x_relocate_l1_mem(void) +{ + unsigned long l1_code_length; + unsigned long l1_data_a_length; + unsigned long l1_data_b_length; + + l1_code_length = _etext_l1 - _stext_l1; + if (l1_code_length > L1_CODE_LENGTH) + l1_code_length = L1_CODE_LENGTH; + /* cannot complain as printk is not available as yet. + * But we can continue booting and complain later! 
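+ * The section lengths are clamped to the physical bank sizes, so the
+ * dma_memcpy() calls themselves can never overflow the on-chip SRAM.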
+ */ + + /* Copy _stext_l1 to _etext_l1 to L1 instruction SRAM */ + dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length); + + l1_data_a_length = _ebss_l1 - _sdata_l1; + if (l1_data_a_length > L1_DATA_A_LENGTH) + l1_data_a_length = L1_DATA_A_LENGTH; + + /* Copy _sdata_l1 to _ebss_l1 to L1 data bank A SRAM */ + dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length); + + l1_data_b_length = _ebss_b_l1 - _sdata_b_l1; + if (l1_data_b_length > L1_DATA_B_LENGTH) + l1_data_b_length = L1_DATA_B_LENGTH; + + /* Copy _sdata_b_l1 to _ebss_b_l1 to L1 data bank B SRAM */ + dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length + + l1_data_a_length, l1_data_b_length); + +} + +/* + * Initial parsing of the command line. Currently, we support: + * - Controlling the linux memory size: mem=xxx[KMG] + * - Controlling the physical memory size: max_mem=xxx[KMG][$][#] + * $ -> reserved memory is dcacheable + * # -> reserved memory is icacheable + */ +static __init void parse_cmdline_early(char *cmdline_p) +{ + char c = ' ', *to = cmdline_p; + unsigned int memsize; + for (;;) { + if (c == ' ') { + + if (!memcmp(to, "mem=", 4)) { + to += 4; + memsize = memparse(to, &to); + if (memsize) + _ramend = memsize; + + } else if (!memcmp(to, "max_mem=", 8)) { + to += 8; + memsize = memparse(to, &to); + if (memsize) { + physical_mem_end = memsize; + if (*to != ' ') { + if (*to == '$' + || *(to + 1) == '$') + reserved_mem_dcache_on = + 1; + if (*to == '#' + || *(to + 1) == '#') + reserved_mem_icache_on = + 1; + } + } + } + + } + c = *(to++); + if (!c) + break; + } +} + +void __init setup_arch(char **cmdline_p) +{ + int bootmap_size; + unsigned long l1_length, sclk, cclk; +#ifdef CONFIG_MTD_UCLINUX + unsigned long mtd_phys = 0; +#endif + + cclk = get_cclk(); + sclk = get_sclk(); + +#if !defined(CONFIG_BFIN_KERNEL_CLOCK) && defined(ANOMALY_05000273) + if (cclk == sclk) + panic("ANOMALY 05000273, SCLK can not be same as CCLK"); +#endif + +#if defined(ANOMALY_05000266) + bfin_read_IMDMA_D0_IRQ_STATUS(); + bfin_read_IMDMA_D1_IRQ_STATUS(); +#endif + +#ifdef DEBUG_SERIAL_EARLY_INIT + bfin_console_init(); /* early console registration */ + /* this give a chance to get printk() working before crash. 
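The regular console only comes up much later in the boot, so without this any early crash output would be lost.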
*/ +#endif + +#if defined(CONFIG_CHR_DEV_FLASH) || defined(CONFIG_BLK_DEV_FLASH) + /* we need to initialize the Flashrom device here since we might + * do things with flash early on in the boot + */ + flash_probe(); +#endif + +#if defined(CONFIG_CMDLINE_BOOL) + memset(command_line, 0, sizeof(command_line)); + strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line)); + command_line[sizeof(command_line) - 1] = 0; +#endif + + /* Keep a copy of command line */ + *cmdline_p = &command_line[0]; + memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); + boot_command_line[COMMAND_LINE_SIZE - 1] = 0; + + /* setup memory defaults from the user config */ + physical_mem_end = 0; + _ramend = CONFIG_MEM_SIZE * 1024 * 1024; + + parse_cmdline_early(&command_line[0]); + + if (physical_mem_end == 0) + physical_mem_end = _ramend; + + /* by now the stack is part of the init task */ + memory_end = _ramend - DMA_UNCACHED_REGION; + + _ramstart = (unsigned long)__bss_stop; + memory_start = PAGE_ALIGN(_ramstart); + +#if defined(CONFIG_MTD_UCLINUX) + /* generic memory mapped MTD driver */ + memory_mtd_end = memory_end; + + mtd_phys = _ramstart; + mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8))); + +# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS) + if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC) + mtd_size = + PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10); +# endif + +# if defined(CONFIG_CRAMFS) + if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC) + mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4))); +# endif + +# if defined(CONFIG_ROMFS_FS) + if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0 + && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) + mtd_size = + PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2])); +# if (defined(CONFIG_BLKFIN_CACHE) && defined(ANOMALY_05000263)) + /* Due to a Hardware Anomaly we need to limit the size of usable + * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on + * 05000263 - Hardware loop corrupted when taking an ICPLB exception + */ +# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO)) + if (memory_end >= 56 * 1024 * 1024) + memory_end = 56 * 1024 * 1024; +# else + if (memory_end >= 60 * 1024 * 1024) + memory_end = 60 * 1024 * 1024; +# endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */ +# endif /* ANOMALY_05000263 */ +# endif /* CONFIG_ROMFS_FS */ + + memory_end -= mtd_size; + + if (mtd_size == 0) { + console_init(); + panic("Don't boot kernel without rootfs attached.\n"); + } + + /* Relocate MTD image to the top of memory after the uncached memory area */ + dma_memcpy((char *)memory_end, __bss_stop, mtd_size); + + memory_mtd_start = memory_end; + _ebss = memory_mtd_start; /* define _ebss for compatible */ +#endif /* CONFIG_MTD_UCLINUX */ + +#if (defined(CONFIG_BLKFIN_CACHE) && defined(ANOMALY_05000263)) + /* Due to a Hardware Anomaly we need to limit the size of usable + * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on + * 05000263 - Hardware loop corrupted when taking an ICPLB exception + */ +#if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO)) + if (memory_end >= 56 * 1024 * 1024) + memory_end = 56 * 1024 * 1024; +#else + if (memory_end >= 60 * 1024 * 1024) + memory_end = 60 * 1024 * 1024; +#endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */ + printk(KERN_NOTICE "Warning: limiting memory to %liMB due to hardware anomaly 05000263\n", memory_end >> 20); +#endif /* ANOMALY_05000263 */ + +#if !defined(CONFIG_MTD_UCLINUX) + memory_end -= SIZE_4K; /*In case there is no valid CPLB behind memory_end make sure we don't get to close*/ +#endif + 
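/* Record the kernel image layout in the init task's mm. */ +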
init_mm.start_code = (unsigned long)_stext; + init_mm.end_code = (unsigned long)_etext; + init_mm.end_data = (unsigned long)_edata; + init_mm.brk = (unsigned long)0; + + init_leds(); + + printk(KERN_INFO "Blackfin support (C) 2004-2007 Analog Devices, Inc.\n"); + printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid()); + if (bfin_revid() != bfin_compiled_revid()) + printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n", + bfin_compiled_revid(), bfin_revid()); + if (bfin_revid() < SUPPORTED_REVID) + printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n", + CPU, bfin_revid()); + printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n"); + + printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu Mhz System Clock\n", + cclk / 1000000, sclk / 1000000); + +#if defined(ANOMALY_05000273) + if ((cclk >> 1) <= sclk) + printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n"); +#endif + + printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20); + printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20); + + printk(KERN_INFO "Memory map:\n" + KERN_INFO " text = 0x%p-0x%p\n" + KERN_INFO " init = 0x%p-0x%p\n" + KERN_INFO " data = 0x%p-0x%p\n" + KERN_INFO " stack = 0x%p-0x%p\n" + KERN_INFO " bss = 0x%p-0x%p\n" + KERN_INFO " available = 0x%p-0x%p\n" +#ifdef CONFIG_MTD_UCLINUX + KERN_INFO " rootfs = 0x%p-0x%p\n" +#endif +#if DMA_UNCACHED_REGION > 0 + KERN_INFO " DMA Zone = 0x%p-0x%p\n" +#endif + , _stext, _etext, + __init_begin, __init_end, + _sdata, _edata, + (void*)&init_thread_union, (void*)((int)(&init_thread_union) + 0x2000), + __bss_start, __bss_stop, + (void*)_ramstart, (void*)memory_end +#ifdef CONFIG_MTD_UCLINUX + , (void*)memory_mtd_start, (void*)(memory_mtd_start + mtd_size) +#endif +#if DMA_UNCACHED_REGION > 0 + , (void*)(_ramend - DMA_UNCACHED_REGION), (void*)(_ramend) +#endif + ); + + /* + * give all the memory to the bootmap allocator, tell it to put the + * boot mem_map at the start of memory + */ + bootmap_size = init_bootmem_node(NODE_DATA(0), memory_start >> PAGE_SHIFT, /* map goes here */ + PAGE_OFFSET >> PAGE_SHIFT, + memory_end >> PAGE_SHIFT); + /* + * free the usable memory, we have to make sure we do not free + * the bootmem bitmap so we then reserve it after freeing it :-) + */ + free_bootmem(memory_start, memory_end - memory_start); + + reserve_bootmem(memory_start, bootmap_size); + /* + * get kmalloc into gear + */ + paging_init(); + + /* check the size of the l1 area */ + l1_length = _etext_l1 - _stext_l1; + if (l1_length > L1_CODE_LENGTH) + panic("L1 memory overflow\n"); + + l1_length = _ebss_l1 - _sdata_l1; + if (l1_length > L1_DATA_A_LENGTH) + panic("L1 memory overflow\n"); + + bf53x_cache_init(); + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +# if defined(CONFIG_BFIN_SHARED_FLASH_ENET) && defined(CONFIG_BFIN533_STAMP) + /* setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC */ + bfin_write_FIO_DIR(bfin_read_FIO_DIR() | (1 << CONFIG_ENET_FLASH_PIN)); + bfin_write_FIO_FLAG_S(1 << CONFIG_ENET_FLASH_PIN); + SSYNC(); +# endif +# if defined (CONFIG_BFIN561_EZKIT) + bfin_write_FIO0_DIR(bfin_read_FIO0_DIR() | (1 << 12)); + SSYNC(); +# endif /* defined (CONFIG_BFIN561_EZKIT) */ +#endif + + printk(KERN_INFO "Hardware Trace Enabled\n"); + bfin_write_TBUFCTL(0x03); +} + +#if defined(CONFIG_BF561) +static struct cpu cpu[2]; +#else +static struct cpu cpu[1]; +#endif +static int __init topology_init(void) +{ +#if defined (CONFIG_BF561) + 
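/* BF561 is the dual-core part, so register both cores. */ +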
register_cpu(&cpu[0], 0); + register_cpu(&cpu[1], 1); + return 0; +#else + return register_cpu(cpu, 0); +#endif +} + +subsys_initcall(topology_init); + +#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE) +u16 lock_kernel_check(u32 start, u32 end) +{ + if ((start <= (u32) _stext && end >= (u32) _end) + || (start >= (u32) _stext && end <= (u32) _end)) + return IN_KERNEL; + return 0; +} + +static unsigned short __init +fill_cplbtab(struct cplb_tab *table, + unsigned long start, unsigned long end, + unsigned long block_size, unsigned long cplb_data) +{ + int i; + + switch (block_size) { + case SIZE_4M: + i = 3; + break; + case SIZE_1M: + i = 2; + break; + case SIZE_4K: + i = 1; + break; + case SIZE_1K: + default: + i = 0; + break; + } + + cplb_data = (cplb_data & ~(3 << 16)) | (i << 16); + + while ((start < end) && (table->pos < table->size)) { + + table->tab[table->pos++] = start; + + if (lock_kernel_check(start, start + block_size) == IN_KERNEL) + table->tab[table->pos++] = + cplb_data | CPLB_LOCK | CPLB_DIRTY; + else + table->tab[table->pos++] = cplb_data; + + start += block_size; + } + return 0; +} + +static unsigned short __init +close_cplbtab(struct cplb_tab *table) +{ + + while (table->pos < table->size) { + + table->tab[table->pos++] = 0; + table->tab[table->pos++] = 0; /* !CPLB_VALID */ + } + return 0; +} + +static void __init generate_cpl_tables(void) +{ + + u16 i, j, process; + u32 a_start, a_end, as, ae, as_1m; + + struct cplb_tab *t_i = NULL; + struct cplb_tab *t_d = NULL; + struct s_cplb cplb; + + cplb.init_i.size = MAX_CPLBS; + cplb.init_d.size = MAX_CPLBS; + cplb.switch_i.size = MAX_SWITCH_I_CPLBS; + cplb.switch_d.size = MAX_SWITCH_D_CPLBS; + + cplb.init_i.pos = 0; + cplb.init_d.pos = 0; + cplb.switch_i.pos = 0; + cplb.switch_d.pos = 0; + + cplb.init_i.tab = icplb_table; + cplb.init_d.tab = dcplb_table; + cplb.switch_i.tab = ipdt_table; + cplb.switch_d.tab = dpdt_table; + + cplb_data[SDRAM_KERN].end = memory_end; + +#ifdef CONFIG_MTD_UCLINUX + cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start; + cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size; + cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0; +# if defined(CONFIG_ROMFS_FS) + cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB; + + /* + * The ROMFS_FS size is often not multiple of 1MB. + * This can cause multiple CPLB sets covering the same memory area. + * This will then cause multiple CPLB hit exceptions. + * Workaround: We ensure a contiguous memory area by extending the kernel + * memory section over the mtd section. + * For ROMFS_FS memory must be covered with ICPLBs anyways. + * So there is no difference between kernel and mtd memory setup. 
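+ * The kernel region is therefore extended below to cover the MTD image and
+ * the separate MTD CPLB entry is marked invalid.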
+ */ + + cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;; + cplb_data[SDRAM_RAM_MTD].valid = 0; + +# endif +#else + cplb_data[SDRAM_RAM_MTD].valid = 0; +#endif + + cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION; + cplb_data[SDRAM_DMAZ].end = _ramend; + + cplb_data[RES_MEM].start = _ramend; + cplb_data[RES_MEM].end = physical_mem_end; + + if (reserved_mem_dcache_on) + cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC; + else + cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL; + + if (reserved_mem_icache_on) + cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC; + else + cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL; + + for (i = ZERO_P; i <= L2_MEM; i++) { + + if (cplb_data[i].valid) { + + as_1m = cplb_data[i].start % SIZE_1M; + + /* We need to make sure all sections are properly 1M aligned + * However between Kernel Memory and the Kernel mtd section, depending on the + * rootfs size, there can be overlapping memory areas. + */ + + if (as_1m && i!=L1I_MEM && i!=L1D_MEM) { +#ifdef CONFIG_MTD_UCLINUX + if (i == SDRAM_RAM_MTD) { + if ((cplb_data[SDRAM_KERN].end + 1) > cplb_data[SDRAM_RAM_MTD].start) + cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)) + SIZE_1M; + else + cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)); + } else +#endif + printk(KERN_WARNING "Unaligned Start of %s at 0x%X\n", + cplb_data[i].name, cplb_data[i].start); + } + + as = cplb_data[i].start % SIZE_4M; + ae = cplb_data[i].end % SIZE_4M; + + if (as) + a_start = cplb_data[i].start + (SIZE_4M - (as)); + else + a_start = cplb_data[i].start; + + a_end = cplb_data[i].end - ae; + + for (j = INITIAL_T; j <= SWITCH_T; j++) { + + switch (j) { + case INITIAL_T: + if (cplb_data[i].attr & INITIAL_T) { + t_i = &cplb.init_i; + t_d = &cplb.init_d; + process = 1; + } else + process = 0; + break; + case SWITCH_T: + if (cplb_data[i].attr & SWITCH_T) { + t_i = &cplb.switch_i; + t_d = &cplb.switch_d; + process = 1; + } else + process = 0; + break; + default: + process = 0; + break; + } + + if (process) { + if (cplb_data[i].attr & I_CPLB) { + + if (cplb_data[i].psize) { + fill_cplbtab(t_i, + cplb_data[i].start, + cplb_data[i].end, + cplb_data[i].psize, + cplb_data[i].i_conf); + } else { + /*icplb_table */ +#if (defined(CONFIG_BLKFIN_CACHE) && defined(ANOMALY_05000263)) + if (i == SDRAM_KERN) { + fill_cplbtab(t_i, + cplb_data[i].start, + cplb_data[i].end, + SIZE_4M, + cplb_data[i].i_conf); + } else +#endif + { + fill_cplbtab(t_i, + cplb_data[i].start, + a_start, + SIZE_1M, + cplb_data[i].i_conf); + fill_cplbtab(t_i, + a_start, + a_end, + SIZE_4M, + cplb_data[i].i_conf); + fill_cplbtab(t_i, a_end, + cplb_data[i].end, + SIZE_1M, + cplb_data[i].i_conf); + } + } + + } + if (cplb_data[i].attr & D_CPLB) { + + if (cplb_data[i].psize) { + fill_cplbtab(t_d, + cplb_data[i].start, + cplb_data[i].end, + cplb_data[i].psize, + cplb_data[i].d_conf); + } else { +/*dcplb_table*/ + fill_cplbtab(t_d, + cplb_data[i].start, + a_start, SIZE_1M, + cplb_data[i].d_conf); + fill_cplbtab(t_d, a_start, + a_end, SIZE_4M, + cplb_data[i].d_conf); + fill_cplbtab(t_d, a_end, + cplb_data[i].end, + SIZE_1M, + cplb_data[i].d_conf); + + } + + } + } + } + + } + } + +/* close tables */ + + close_cplbtab(&cplb.init_i); + close_cplbtab(&cplb.init_d); + + cplb.init_i.tab[cplb.init_i.pos] = -1; + cplb.init_d.tab[cplb.init_d.pos] = -1; + cplb.switch_i.tab[cplb.switch_i.pos] = -1; + cplb.switch_d.tab[cplb.switch_d.pos] = -1; + +} + +#endif + +static inline u_long get_vco(void) +{ + u_long msel; + u_long vco; + + msel = (bfin_read_PLL_CTL() >> 9) & 
0x3F; + if (0 == msel) + msel = 64; + + vco = CONFIG_CLKIN_HZ; + vco >>= (1 & bfin_read_PLL_CTL()); /* DF bit */ + vco = msel * vco; + return vco; +} + +/*Get the Core clock*/ +u_long get_cclk(void) +{ + u_long csel, ssel; + if (bfin_read_PLL_STAT() & 0x1) + return CONFIG_CLKIN_HZ; + + ssel = bfin_read_PLL_DIV(); + csel = ((ssel >> 4) & 0x03); + ssel &= 0xf; + if (ssel && ssel < (1 << csel)) /* SCLK > CCLK */ + return get_vco() / ssel; + return get_vco() >> csel; +} + +EXPORT_SYMBOL(get_cclk); + +/* Get the System clock */ +u_long get_sclk(void) +{ + u_long ssel; + + if (bfin_read_PLL_STAT() & 0x1) + return CONFIG_CLKIN_HZ; + + ssel = (bfin_read_PLL_DIV() & 0xf); + if (0 == ssel) { + printk(KERN_WARNING "Invalid System Clock\n"); + ssel = 1; + } + + return get_vco() / ssel; +} + +EXPORT_SYMBOL(get_sclk); + +/* + * Get CPU information for use by the procfs. + */ +static int show_cpuinfo(struct seq_file *m, void *v) +{ + char *cpu, *mmu, *fpu, *name; + uint32_t revid; + + u_long cclk = 0, sclk = 0; + u_int dcache_size = 0, dsup_banks = 0; + + cpu = CPU; + mmu = "none"; + fpu = "none"; + revid = bfin_revid(); + name = bfin_board_name; + + cclk = get_cclk(); + sclk = get_sclk(); + + seq_printf(m, "CPU:\t\tADSP-%s Rev. 0.%d\n" + "MMU:\t\t%s\n" + "FPU:\t\t%s\n" + "Core Clock:\t%9lu Hz\n" + "System Clock:\t%9lu Hz\n" + "BogoMips:\t%lu.%02lu\n" + "Calibration:\t%lu loops\n", + cpu, revid, mmu, fpu, + cclk, + sclk, + (loops_per_jiffy * HZ) / 500000, + ((loops_per_jiffy * HZ) / 5000) % 100, + (loops_per_jiffy * HZ)); + seq_printf(m, "Board Name:\t%s\n", name); + seq_printf(m, "Board Memory:\t%ld MB\n", physical_mem_end >> 20); + seq_printf(m, "Kernel Memory:\t%ld MB\n", (unsigned long)_ramend >> 20); + if (bfin_read_IMEM_CONTROL() & (ENICPLB | IMC)) + seq_printf(m, "I-CACHE:\tON\n"); + else + seq_printf(m, "I-CACHE:\tOFF\n"); + if ((bfin_read_DMEM_CONTROL()) & (ENDCPLB | DMC_ENABLE)) + seq_printf(m, "D-CACHE:\tON" +#if defined CONFIG_BLKFIN_WB + " (write-back)" +#elif defined CONFIG_BLKFIN_WT + " (write-through)" +#endif + "\n"); + else + seq_printf(m, "D-CACHE:\tOFF\n"); + + + switch(bfin_read_DMEM_CONTROL() & (1 << DMC0_P | 1 << DMC1_P)) { + case ACACHE_BSRAM: + seq_printf(m, "DBANK-A:\tCACHE\n" "DBANK-B:\tSRAM\n"); + dcache_size = 16; + dsup_banks = 1; + break; + case ACACHE_BCACHE: + seq_printf(m, "DBANK-A:\tCACHE\n" "DBANK-B:\tCACHE\n"); + dcache_size = 32; + dsup_banks = 2; + break; + case ASRAM_BSRAM: + seq_printf(m, "DBANK-A:\tSRAM\n" "DBANK-B:\tSRAM\n"); + dcache_size = 0; + dsup_banks = 0; + break; + default: + break; + } + + + seq_printf(m, "I-CACHE Size:\t%dKB\n", BLKFIN_ICACHESIZE / 1024); + seq_printf(m, "D-CACHE Size:\t%dKB\n", dcache_size); + seq_printf(m, "I-CACHE Setup:\t%d Sub-banks/%d Ways, %d Lines/Way\n", + BLKFIN_ISUBBANKS, BLKFIN_IWAYS, BLKFIN_ILINES); + seq_printf(m, + "D-CACHE Setup:\t%d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n", + dsup_banks, BLKFIN_DSUBBANKS, BLKFIN_DWAYS, + BLKFIN_DLINES); +#ifdef CONFIG_BLKFIN_CACHE_LOCK + switch (read_iloc()) { + case WAY0_L: + seq_printf(m, "Way0 Locked-Down\n"); + break; + case WAY1_L: + seq_printf(m, "Way1 Locked-Down\n"); + break; + case WAY01_L: + seq_printf(m, "Way0,Way1 Locked-Down\n"); + break; + case WAY2_L: + seq_printf(m, "Way2 Locked-Down\n"); + break; + case WAY02_L: + seq_printf(m, "Way0,Way2 Locked-Down\n"); + break; + case WAY12_L: + seq_printf(m, "Way1,Way2 Locked-Down\n"); + break; + case WAY012_L: + seq_printf(m, "Way0,Way1 & Way2 Locked-Down\n"); + break; + case WAY3_L: + seq_printf(m, "Way3 Locked-Down\n"); 
+ break; + case WAY03_L: + seq_printf(m, "Way0,Way3 Locked-Down\n"); + break; + case WAY13_L: + seq_printf(m, "Way1,Way3 Locked-Down\n"); + break; + case WAY013_L: + seq_printf(m, "Way 0,Way1,Way3 Locked-Down\n"); + break; + case WAY32_L: + seq_printf(m, "Way3,Way2 Locked-Down\n"); + break; + case WAY320_L: + seq_printf(m, "Way3,Way2,Way0 Locked-Down\n"); + break; + case WAY321_L: + seq_printf(m, "Way3,Way2,Way1 Locked-Down\n"); + break; + case WAYALL_L: + seq_printf(m, "All Ways are locked\n"); + break; + default: + seq_printf(m, "No Ways are locked\n"); + } +#endif + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos < NR_CPUS ? ((void *)0x12345678) : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + +void cmdline_init(unsigned long r0) +{ + if (r0) + strncpy(command_line, (char *)r0, COMMAND_LINE_SIZE); +} diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c new file mode 100644 index 000000000000..316e65c3439d --- /dev/null +++ b/arch/blackfin/kernel/signal.c @@ -0,0 +1,356 @@ +/* + * File: arch/blackfin/kernel/signal.c + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +struct fdpic_func_descriptor { + unsigned long text; + unsigned long GOT; +}; + +struct rt_sigframe { + int sig; + struct siginfo *pinfo; + void *puc; + char retcode[8]; + struct siginfo info; + struct ucontext uc; +}; + +asmlinkage int sys_sigaltstack(const stack_t * uss, stack_t * uoss) +{ + return do_sigaltstack(uss, uoss, rdusp()); +} + +static inline int +rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *pr0) +{ + unsigned long usp = 0; + int err = 0; + +#define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x) + + /* restore passed registers */ + RESTORE(r0); RESTORE(r1); RESTORE(r2); RESTORE(r3); + RESTORE(r4); RESTORE(r5); RESTORE(r6); RESTORE(r7); + RESTORE(p0); RESTORE(p1); RESTORE(p2); RESTORE(p3); + RESTORE(p4); RESTORE(p5); + err |= __get_user(usp, &sc->sc_usp); + wrusp(usp); + RESTORE(a0w); RESTORE(a1w); + RESTORE(a0x); RESTORE(a1x); + RESTORE(astat); + RESTORE(rets); + RESTORE(pc); + RESTORE(retx); + RESTORE(fp); + RESTORE(i0); RESTORE(i1); RESTORE(i2); RESTORE(i3); + RESTORE(m0); RESTORE(m1); RESTORE(m2); RESTORE(m3); + RESTORE(l0); RESTORE(l1); RESTORE(l2); RESTORE(l3); + RESTORE(b0); RESTORE(b1); RESTORE(b2); RESTORE(b3); + RESTORE(lc0); RESTORE(lc1); + RESTORE(lt0); RESTORE(lt1); + RESTORE(lb0); RESTORE(lb1); + RESTORE(seqstat); + + regs->orig_p0 = -1; /* disable syscall checks */ + + *pr0 = regs->r0; + return err; +} + +asmlinkage int do_rt_sigreturn(unsigned long __unused) +{ + struct pt_regs *regs = (struct pt_regs *)__unused; + unsigned long usp = rdusp(); + struct rt_sigframe *frame = (struct rt_sigframe *)(usp); + sigset_t set; + int r0; + + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(¤t->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + if (rt_restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) + goto badframe; + + if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->usp) == -EFAULT) + goto badframe; + + return r0; + + badframe: + force_sig(SIGSEGV, current); + return 0; +} + +static inline int rt_setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs) +{ + int err = 0; + +#define SETUP(x) err |= __put_user(regs->x, &sc->sc_##x) + + SETUP(r0); SETUP(r1); SETUP(r2); SETUP(r3); + SETUP(r4); SETUP(r5); SETUP(r6); SETUP(r7); + SETUP(p0); SETUP(p1); SETUP(p2); SETUP(p3); + SETUP(p4); SETUP(p5); + err |= __put_user(rdusp(), &sc->sc_usp); + SETUP(a0w); SETUP(a1w); + SETUP(a0x); SETUP(a1x); + SETUP(astat); + SETUP(rets); + SETUP(pc); + SETUP(retx); + SETUP(fp); + SETUP(i0); SETUP(i1); SETUP(i2); SETUP(i3); + SETUP(m0); SETUP(m1); SETUP(m2); SETUP(m3); + SETUP(l0); SETUP(l1); SETUP(l2); SETUP(l3); + SETUP(b0); SETUP(b1); SETUP(b2); SETUP(b3); + SETUP(lc0); SETUP(lc1); + SETUP(lt0); SETUP(lt1); + SETUP(lb0); SETUP(lb1); + SETUP(seqstat); + + return err; +} + +static inline void push_cache(unsigned long vaddr, unsigned int len) +{ + flush_icache_range(vaddr, vaddr + len); +} + +static inline void 
*get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + size_t frame_size) +{ + unsigned long usp; + + /* Default to using normal stack. */ + usp = rdusp(); + + /* This is the X/Open sanctioned signal stack switching. */ + if (ka->sa.sa_flags & SA_ONSTACK) { + if (!on_sig_stack(usp)) + usp = current->sas_ss_sp + current->sas_ss_size; + } + return (void *)((usp - frame_size) & -8UL); +} + +static int +setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, + sigset_t * set, struct pt_regs *regs) +{ + struct rt_sigframe *frame; + int err = 0; + + frame = get_sigframe(ka, regs, sizeof(*frame)); + + err |= __put_user((current_thread_info()->exec_domain + && current_thread_info()->exec_domain->signal_invmap + && sig < 32 + ? current_thread_info()->exec_domain-> + signal_invmap[sig] : sig), &frame->sig); + + err |= __put_user(&frame->info, &frame->pinfo); + err |= __put_user(&frame->uc, &frame->puc); + err |= copy_siginfo_to_user(&frame->info, info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= + __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(rdusp()), &frame->uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= rt_setup_sigcontext(&frame->uc.uc_mcontext, regs); + err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + /* Set up to return from userspace. */ + err |= __put_user(0x28, &(frame->retcode[0])); + err |= __put_user(0xe1, &(frame->retcode[1])); + err |= __put_user(0xad, &(frame->retcode[2])); + err |= __put_user(0x00, &(frame->retcode[3])); + err |= __put_user(0xa0, &(frame->retcode[4])); + err |= __put_user(0x00, &(frame->retcode[5])); + + if (err) + goto give_sigsegv; + + push_cache((unsigned long)&frame->retcode, sizeof(frame->retcode)); + + /* Set up registers for signal handler */ + wrusp((unsigned long)frame); + if (get_personality & FDPIC_FUNCPTRS) { + struct fdpic_func_descriptor __user *funcptr = + (struct fdpic_func_descriptor *) ka->sa.sa_handler; + __get_user(regs->pc, &funcptr->text); + __get_user(regs->p3, &funcptr->GOT); + } else + regs->pc = (unsigned long)ka->sa.sa_handler; + regs->rets = (unsigned long)(frame->retcode); + + regs->r0 = frame->sig; + regs->r1 = (unsigned long)(&frame->info); + regs->r2 = (unsigned long)(&frame->uc); + + return 0; + + give_sigsegv: + if (sig == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); + return -EFAULT; +} + +static inline void +handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) +{ + switch (regs->r0) { + case -ERESTARTNOHAND: + if (!has_handler) + goto do_restart; + regs->r0 = -EINTR; + break; + + case -ERESTARTSYS: + if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) { + regs->r0 = -EINTR; + break; + } + /* fallthrough */ + case -ERESTARTNOINTR: + do_restart: + regs->p0 = regs->orig_p0; + regs->r0 = regs->orig_r0; + regs->pc -= 2; + break; + } +} + +/* + * OK, we're invoking a handler + */ +static int +handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, + sigset_t *oldset, struct pt_regs *regs) +{ + int ret; + + /* are we from a system call? to see pt_regs->orig_p0 */ + if (regs->orig_p0 >= 0) + /* If so, check system call restarting.. 
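Since a handler is present (has_handler is 1 here), -ERESTARTSYS is only restarted when the handler was installed with SA_RESTART.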
*/ + handle_restart(regs, ka, 1); + + /* set up the stack frame */ + ret = setup_rt_frame(sig, ka, info, oldset, regs); + + if (ret == 0) { + spin_lock_irq(¤t->sighand->siglock); + sigorsets(¤t->blocked, ¤t->blocked, + &ka->sa.sa_mask); + if (!(ka->sa.sa_flags & SA_NODEFER)) + sigaddset(¤t->blocked, sig); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + } + return ret; +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals + * that the kernel can handle, and then we build all the user-level signal + * handling stack-frames in one go after that. + */ +asmlinkage void do_signal(struct pt_regs *regs) +{ + siginfo_t info; + int signr; + struct k_sigaction ka; + sigset_t *oldset; + + current->thread.esp0 = (unsigned long)regs; + + if (try_to_freeze()) + goto no_signal; + + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + oldset = ¤t->saved_sigmask; + else + oldset = ¤t->blocked; + + signr = get_signal_to_deliver(&info, &ka, regs, NULL); + if (signr > 0) { + /* Whee! Actually deliver the signal. */ + if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { + /* a signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + } + + return; + } + +no_signal: + /* Did we come from a system call? */ + if (regs->orig_p0 >= 0) + /* Restart the system call - no handlers present */ + handle_restart(regs, NULL, 0); + + /* if there's no signal to deliver, we just put the saved sigmask + * back */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + } +} diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c new file mode 100644 index 000000000000..f436e6743f5a --- /dev/null +++ b/arch/blackfin/kernel/sys_bfin.c @@ -0,0 +1,115 @@ +/* + * File: arch/blackfin/kernel/sys_bfin.c + * Based on: + * Author: + * + * Created: + * Description: This file contains various random system calls that + * have a non-standard calling sequence on the Linux/bfin + * platform. + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* + * sys_pipe() is the normal C calling standard for creating + * a pipe. 
It's not the way unix traditionally does this, though. + */ +asmlinkage int sys_pipe(unsigned long *fildes) +{ + int fd[2]; + int error; + + error = do_pipe(fd); + if (!error) { + if (copy_to_user(fildes, fd, 2 * sizeof(int))) + error = -EFAULT; + } + return error; +} + +/* common code for old and new mmaps */ +static inline long +do_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) +{ + int error = -EBADF; + struct file *file = NULL; + + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + if (!(flags & MAP_ANONYMOUS)) { + file = fget(fd); + if (!file) + goto out; + } + + down_write(¤t->mm->mmap_sem); + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); + up_write(¤t->mm->mmap_sem); + + if (file) + fput(file); + out: + return error; +} + +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) +{ + return do_mmap2(addr, len, prot, flags, fd, pgoff); +} + +asmlinkage int sys_getpagesize(void) +{ + return PAGE_SIZE; +} + +asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags) +{ + return sram_alloc_with_lsl(size, flags); +} + +asmlinkage int sys_sram_free(const void *addr) +{ + return sram_free_with_lsl(addr); +} + +asmlinkage void *sys_dma_memcpy(void *dest, const void *src, size_t len) +{ + return safe_dma_memcpy(dest, src, len); +} diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c new file mode 100644 index 000000000000..f578176b6d92 --- /dev/null +++ b/arch/blackfin/kernel/time.c @@ -0,0 +1,326 @@ +/* + * File: arch/blackfin/kernel/time.c + * Based on: none - original work + * Author: + * + * Created: + * Description: This file contains the bfin-specific time handling details. + * Most of the stuff is located in the machine specific files. + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include + +/* This is an NTP setting */ +#define TICK_SIZE (tick_nsec / 1000) + +static void time_sched_init(irqreturn_t(*timer_routine) + (int, void *)); +static unsigned long gettimeoffset(void); +static inline void do_leds(void); + +#if (defined(CONFIG_BFIN_ALIVE_LED) || defined(CONFIG_BFIN_IDLE_LED)) +void __init init_leds(void) +{ + unsigned int tmp = 0; + +#if defined(CONFIG_BFIN_ALIVE_LED) + /* config pins as output. 
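The *_DPORT write below sets the pin direction; the port data register is then written to start with the LED off.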
*/ + tmp = bfin_read_CONFIG_BFIN_ALIVE_LED_DPORT(); + SSYNC(); + bfin_write_CONFIG_BFIN_ALIVE_LED_DPORT(tmp | CONFIG_BFIN_ALIVE_LED_PIN); + SSYNC(); + + /* First set led be off */ + tmp = bfin_read_CONFIG_BFIN_ALIVE_LED_PORT(); + SSYNC(); + bfin_write_CONFIG_BFIN_ALIVE_LED_PORT(tmp | CONFIG_BFIN_ALIVE_LED_PIN); /* light off */ + SSYNC(); +#endif + +#if defined(CONFIG_BFIN_IDLE_LED) + /* config pins as output. */ + tmp = bfin_read_CONFIG_BFIN_IDLE_LED_DPORT(); + SSYNC(); + bfin_write_CONFIG_BFIN_IDLE_LED_DPORT(tmp | CONFIG_BFIN_IDLE_LED_PIN); + SSYNC(); + + /* First set led be off */ + tmp = bfin_read_CONFIG_BFIN_IDLE_LED_PORT(); + SSYNC(); + bfin_write_CONFIG_BFIN_IDLE_LED_PORT(tmp | CONFIG_BFIN_IDLE_LED_PIN); /* light off */ + SSYNC(); +#endif +} +#else +void __init init_leds(void) +{ +} +#endif + +#if defined(CONFIG_BFIN_ALIVE_LED) +static inline void do_leds(void) +{ + static unsigned int count = 50; + static int flag = 0; + unsigned short tmp = 0; + + if (--count == 0) { + count = 50; + flag = ~flag; + } + tmp = bfin_read_CONFIG_BFIN_ALIVE_LED_PORT(); + SSYNC(); + + if (flag) + tmp &= ~CONFIG_BFIN_ALIVE_LED_PIN; /* light on */ + else + tmp |= CONFIG_BFIN_ALIVE_LED_PIN; /* light off */ + + bfin_write_CONFIG_BFIN_ALIVE_LED_PORT(tmp); + SSYNC(); + +} +#else +static inline void do_leds(void) +{ +} +#endif + +static struct irqaction bfin_timer_irq = { + .name = "BFIN Timer Tick", + .flags = IRQF_DISABLED +}; + +/* + * The way that the Blackfin core timer works is: + * - CCLK is divided by a programmable 8-bit pre-scaler (TSCALE) + * - Every time TSCALE ticks, a 32bit is counted down (TCOUNT) + * + * If you take the fastest clock (1ns, or 1GHz to make the math work easier) + * 10ms is 10,000,000 clock ticks, which fits easy into a 32-bit counter + * (32 bit counter is 4,294,967,296ns or 4.2 seconds) so, we don't need + * to use TSCALE, and program it to zero (which is pass CCLK through). + * If you feel like using it, try to keep HZ * TIMESCALE to some + * value that divides easy (like power of 2). + */ + +#define TIME_SCALE 1 + +static void +time_sched_init(irqreturn_t(*timer_routine) (int, void *)) +{ + u32 tcount; + + /* power up the timer, but don't enable it just yet */ + bfin_write_TCNTL(1); + CSYNC(); + + /* + * the TSCALE prescaler counter. 
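+ * TSCALE holds (divider - 1), so with TIME_SCALE == 1 a zero is written and
+ * CCLK is passed straight through, as described above.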
+ */ + bfin_write_TSCALE((TIME_SCALE - 1)); + + tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1); + bfin_write_TPERIOD(tcount); + bfin_write_TCOUNT(tcount); + + /* now enable the timer */ + CSYNC(); + + bfin_write_TCNTL(7); + + bfin_timer_irq.handler = (irq_handler_t)timer_routine; + /* call setup_irq instead of request_irq because request_irq calls + * kmalloc which has not been initialized yet + */ + setup_irq(IRQ_CORETMR, &bfin_timer_irq); +} + +/* + * Should return useconds since last timer tick + */ +static unsigned long gettimeoffset(void) +{ + unsigned long offset; + unsigned long clocks_per_jiffy; + + clocks_per_jiffy = bfin_read_TPERIOD(); + offset = + (clocks_per_jiffy - + bfin_read_TCOUNT()) / (((clocks_per_jiffy + 1) * HZ) / + USEC_PER_SEC); + + /* Check if we just wrapped the counters and maybe missed a tick */ + if ((bfin_read_ILAT() & (1 << IRQ_CORETMR)) + && (offset < (100000 / HZ / 2))) + offset += (USEC_PER_SEC / HZ); + + return offset; +} + +static inline int set_rtc_mmss(unsigned long nowtime) +{ + return 0; +} + +/* + * timer_interrupt() needs to keep up the real-time clock, + * as well as call the "do_timer()" routine every clocktick + */ +#ifdef CONFIG_CORE_TIMER_IRQ_L1 +irqreturn_t timer_interrupt(int irq, void *dummy)__attribute__((l1_text)); +#endif + +irqreturn_t timer_interrupt(int irq, void *dummy) +{ + /* last time the cmos clock got updated */ + static long last_rtc_update = 0; + + write_seqlock(&xtime_lock); + + do_timer(1); + do_leds(); + +#ifndef CONFIG_SMP + update_process_times(user_mode(get_irq_regs())); +#endif + profile_tick(CPU_PROFILING); + + /* + * If we have an externally synchronized Linux clock, then update + * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be + * called as close as possible to 500 ms before the new second starts. + */ + + if (ntp_synced() && + xtime.tv_sec > last_rtc_update + 660 && + (xtime.tv_nsec / NSEC_PER_USEC) >= + 500000 - ((unsigned)TICK_SIZE) / 2 + && (xtime.tv_nsec / NSEC_PER_USEC) <= + 500000 + ((unsigned)TICK_SIZE) / 2) { + if (set_rtc_mmss(xtime.tv_sec) == 0) + last_rtc_update = xtime.tv_sec; + else + /* Do it again in 60s. */ + last_rtc_update = xtime.tv_sec - 600; + } + write_sequnlock(&xtime_lock); + return IRQ_HANDLED; +} + +void __init time_init(void) +{ + time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */ + +#ifdef CONFIG_RTC_DRV_BFIN + /* [#2663] hack to filter junk RTC values that would cause + * userspace to have to deal with time values greater than + * 2^31 seconds (which uClibc cannot cope with yet) + */ + if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) { + printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n"); + bfin_write_RTC_STAT(0); + } +#endif + + /* Initialize xtime. 
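The epoch used is the fixed date computed above (1 Jan 2007) rather than anything read from an RTC.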
From now on, xtime is updated with timer interrupts */ + xtime.tv_sec = secs_since_1970; + xtime.tv_nsec = 0; + + wall_to_monotonic.tv_sec = -xtime.tv_sec; + + time_sched_init(timer_interrupt); +} + +#ifndef CONFIG_GENERIC_TIME +void do_gettimeofday(struct timeval *tv) +{ + unsigned long flags; + unsigned long seq; + unsigned long usec, sec; + + do { + seq = read_seqbegin_irqsave(&xtime_lock, flags); + usec = gettimeoffset(); + sec = xtime.tv_sec; + usec += (xtime.tv_nsec / NSEC_PER_USEC); + } + while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + + while (usec >= USEC_PER_SEC) { + usec -= USEC_PER_SEC; + sec++; + } + + tv->tv_sec = sec; + tv->tv_usec = usec; +} +EXPORT_SYMBOL(do_gettimeofday); + +int do_settimeofday(struct timespec *tv) +{ + time_t wtm_sec, sec = tv->tv_sec; + long wtm_nsec, nsec = tv->tv_nsec; + + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + + write_seqlock_irq(&xtime_lock); + /* + * This is revolting. We need to set the xtime.tv_usec + * correctly. However, the value in this location is + * is value at the last tick. + * Discover what correction gettimeofday + * would have done, and then undo it! + */ + nsec -= (gettimeoffset() * NSEC_PER_USEC); + + wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); + wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); + + set_normalized_timespec(&xtime, sec, nsec); + set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); + + ntp_clear(); + + write_sequnlock_irq(&xtime_lock); + clock_was_set(); + + return 0; +} +EXPORT_SYMBOL(do_settimeofday); +#endif /* !CONFIG_GENERIC_TIME */ + +/* + * Scheduler clock - returns current time in nanosec units. + */ +unsigned long long sched_clock(void) +{ + return (unsigned long long)jiffies *(NSEC_PER_SEC / HZ); +} diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c new file mode 100644 index 000000000000..9556b73de808 --- /dev/null +++ b/arch/blackfin/kernel/traps.c @@ -0,0 +1,649 @@ +/* + * File: arch/blackfin/kernel/traps.c + * Based on: + * Author: Hamish Macdonald + * + * Created: + * Description: uses S/W interrupt 15 for the system calls + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_KGDB +# include +# include +#endif + +/* Initiate the event table handler */ +void __init trap_init(void) +{ + CSYNC(); + bfin_write_EVT3(trap); + CSYNC(); +} + +asmlinkage void trap_c(struct pt_regs *fp); + +int kstack_depth_to_print = 48; + +static int printk_address(unsigned long address) +{ + struct vm_list_struct *vml; + struct task_struct *p; + struct mm_struct *mm; + +#ifdef CONFIG_KALLSYMS + unsigned long offset = 0, symsize; + const char *symname; + char *modname; + char *delim = ":"; + char namebuf[128]; + + /* look up the address and see if we are in kernel space */ + symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); + + if (symname) { + /* yeah! kernel space! */ + if (!modname) + modname = delim = ""; + return printk("<0x%p> { %s%s%s%s + 0x%lx }", + (void*)address, delim, modname, delim, symname, + (unsigned long)offset); + + } +#endif + + /* looks like we're off in user-land, so let's walk all the + * mappings of all our processes and see if we can't be a whee + * bit more specific + */ + write_lock_irq(&tasklist_lock); + for_each_process(p) { + mm = get_task_mm(p); + if (!mm) + continue; + + vml = mm->context.vmlist; + while (vml) { + struct vm_area_struct *vma = vml->vma; + + if (address >= vma->vm_start && address < vma->vm_end) { + char *name = p->comm; + struct file *file = vma->vm_file; + if (file) { + char _tmpbuf[256]; + name = d_path(file->f_dentry, + file->f_vfsmnt, + _tmpbuf, + sizeof(_tmpbuf)); + } + + write_unlock_irq(&tasklist_lock); + return printk("<0x%p> [ %s + 0x%lx ]", + (void*)address, name, + (unsigned long) + ((address - vma->vm_start) + + (vma->vm_pgoff << PAGE_SHIFT))); + } + + vml = vml->next; + } + } + write_unlock_irq(&tasklist_lock); + + /* we were unable to find this address anywhere */ + return printk("[<0x%p>]", (void*)address); +} + +#define trace_buffer_save(x) \ + do { \ + (x) = bfin_read_TBUFCTL(); \ + bfin_write_TBUFCTL((x) & ~TBUFEN); \ + } while (0) +#define trace_buffer_restore(x) \ + do { \ + bfin_write_TBUFCTL((x)); \ + } while (0) + +asmlinkage void trap_c(struct pt_regs *fp) +{ + int j, sig = 0; + siginfo_t info; + unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE; + +#ifdef CONFIG_KGDB +# define CHK_DEBUGGER_TRAP() do { CHK_DEBUGGER(trapnr, sig, info.si_code, fp,); } while (0) +# define CHK_DEBUGGER_TRAP_MAYBE() do { if (kgdb_connected) CHK_DEBUGGER_TRAP(); } while (0) +#else +# define CHK_DEBUGGER_TRAP() do { } while (0) +# define CHK_DEBUGGER_TRAP_MAYBE() do { } while (0) +#endif + + trace_buffer_save(j); + + /* trap_c() will be called for exceptions. During exceptions + * processing, the pc value should be set with retx value. 
+ * With this change we can cleanup some code in signal.c- TODO + */ + fp->orig_pc = fp->retx; + /* printk("exception: 0x%x, ipend=%x, reti=%x, retx=%x\n", + trapnr, fp->ipend, fp->pc, fp->retx); */ + + /* send the appropriate signal to the user program */ + switch (trapnr) { + + /* This table works in conjuction with the one in ./mach-common/entry.S + * Some exceptions are handled there (in assembly, in exception space) + * Some are handled here, (in C, in interrupt space) + * Some, like CPLB, are handled in both, where the normal path is + * handled in assembly/exception space, and the error path is handled + * here + */ + + /* 0x00 - Linux Syscall, getting here is an error */ + /* 0x01 - userspace gdb breakpoint, handled here */ + case VEC_EXCPT01: + info.si_code = TRAP_ILLTRAP; + sig = SIGTRAP; + CHK_DEBUGGER_TRAP_MAYBE(); + /* Check if this is a breakpoint in kernel space */ + if (fp->ipend & 0xffc0) + return; + else + break; +#ifdef CONFIG_KGDB + case VEC_EXCPT02 : /* gdb connection */ + info.si_code = TRAP_ILLTRAP; + sig = SIGTRAP; + CHK_DEBUGGER_TRAP(); + return; +#else + /* 0x02 - User Defined, Caught by default */ +#endif + /* 0x03 - Atomic test and set */ + case VEC_EXCPT03: + info.si_code = SEGV_STACKFLOW; + sig = SIGSEGV; + printk(KERN_EMERG EXC_0x03); + CHK_DEBUGGER_TRAP(); + break; + /* 0x04 - spinlock - handled by _ex_spinlock, + getting here is an error */ + /* 0x05 - User Defined, Caught by default */ + /* 0x06 - User Defined, Caught by default */ + /* 0x07 - User Defined, Caught by default */ + /* 0x08 - User Defined, Caught by default */ + /* 0x09 - User Defined, Caught by default */ + /* 0x0A - User Defined, Caught by default */ + /* 0x0B - User Defined, Caught by default */ + /* 0x0C - User Defined, Caught by default */ + /* 0x0D - User Defined, Caught by default */ + /* 0x0E - User Defined, Caught by default */ + /* 0x0F - User Defined, Caught by default */ + /* 0x10 HW Single step, handled here */ + case VEC_STEP: + info.si_code = TRAP_STEP; + sig = SIGTRAP; + CHK_DEBUGGER_TRAP_MAYBE(); + /* Check if this is a single step in kernel space */ + if (fp->ipend & 0xffc0) + return; + else + break; + /* 0x11 - Trace Buffer Full, handled here */ + case VEC_OVFLOW: + info.si_code = TRAP_TRACEFLOW; + sig = SIGTRAP; + printk(KERN_EMERG EXC_0x11); + CHK_DEBUGGER_TRAP(); + break; + /* 0x12 - Reserved, Caught by default */ + /* 0x13 - Reserved, Caught by default */ + /* 0x14 - Reserved, Caught by default */ + /* 0x15 - Reserved, Caught by default */ + /* 0x16 - Reserved, Caught by default */ + /* 0x17 - Reserved, Caught by default */ + /* 0x18 - Reserved, Caught by default */ + /* 0x19 - Reserved, Caught by default */ + /* 0x1A - Reserved, Caught by default */ + /* 0x1B - Reserved, Caught by default */ + /* 0x1C - Reserved, Caught by default */ + /* 0x1D - Reserved, Caught by default */ + /* 0x1E - Reserved, Caught by default */ + /* 0x1F - Reserved, Caught by default */ + /* 0x20 - Reserved, Caught by default */ + /* 0x21 - Undefined Instruction, handled here */ + case VEC_UNDEF_I: + info.si_code = ILL_ILLOPC; + sig = SIGILL; + printk(KERN_EMERG EXC_0x21); + CHK_DEBUGGER_TRAP(); + break; + /* 0x22 - Illegal Instruction Combination, handled here */ + case VEC_ILGAL_I: + info.si_code = ILL_ILLPARAOP; + sig = SIGILL; + printk(KERN_EMERG EXC_0x22); + CHK_DEBUGGER_TRAP(); + break; + /* 0x23 - Data CPLB Protection Violation, + normal case is handled in _cplb_hdr */ + case VEC_CPLB_VL: + info.si_code = ILL_CPLB_VI; + sig = SIGILL; + printk(KERN_EMERG EXC_0x23); + CHK_DEBUGGER_TRAP(); + 
break; + /* 0x24 - Data access misaligned, handled here */ + case VEC_MISALI_D: + info.si_code = BUS_ADRALN; + sig = SIGBUS; + printk(KERN_EMERG EXC_0x24); + CHK_DEBUGGER_TRAP(); + break; + /* 0x25 - Unrecoverable Event, handled here */ + case VEC_UNCOV: + info.si_code = ILL_ILLEXCPT; + sig = SIGILL; + printk(KERN_EMERG EXC_0x25); + CHK_DEBUGGER_TRAP(); + break; + /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr, + error case is handled here */ + case VEC_CPLB_M: + info.si_code = BUS_ADRALN; + sig = SIGBUS; + printk(KERN_EMERG EXC_0x26); + CHK_DEBUGGER_TRAP(); + break; + /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */ + case VEC_CPLB_MHIT: + info.si_code = ILL_CPLB_MULHIT; +#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO + sig = SIGSEGV; + printk(KERN_EMERG "\n\nNULL pointer access (probably)\n"); +#else + sig = SIGILL; + printk(KERN_EMERG EXC_0x27); +#endif + CHK_DEBUGGER_TRAP(); + break; + /* 0x28 - Emulation Watchpoint, handled here */ + case VEC_WATCH: + info.si_code = TRAP_WATCHPT; + sig = SIGTRAP; + pr_debug(EXC_0x28); + CHK_DEBUGGER_TRAP_MAYBE(); + /* Check if this is a watchpoint in kernel space */ + if (fp->ipend & 0xffc0) + return; + else + break; +#ifdef CONFIG_BF535 + /* 0x29 - Instruction fetch access error (535 only) */ + case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */ + info.si_code = BUS_OPFETCH; + sig = SIGBUS; + printk(KERN_EMERG "BF535: VEC_ISTRU_VL\n"); + CHK_DEBUGGER_TRAP(); + break; +#else + /* 0x29 - Reserved, Caught by default */ +#endif + /* 0x2A - Instruction fetch misaligned, handled here */ + case VEC_MISALI_I: + info.si_code = BUS_ADRALN; + sig = SIGBUS; + printk(KERN_EMERG EXC_0x2A); + CHK_DEBUGGER_TRAP(); + break; + /* 0x2B - Instruction CPLB protection Violation, + handled in _cplb_hdr */ + case VEC_CPLB_I_VL: + info.si_code = ILL_CPLB_VI; + sig = SIGILL; + printk(KERN_EMERG EXC_0x2B); + CHK_DEBUGGER_TRAP(); + break; + /* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */ + case VEC_CPLB_I_M: + info.si_code = ILL_CPLB_MISS; + sig = SIGBUS; + printk(KERN_EMERG EXC_0x2C); + CHK_DEBUGGER_TRAP(); + break; + /* 0x2D - Instruction CPLB Multiple Hits, handled here */ + case VEC_CPLB_I_MHIT: + info.si_code = ILL_CPLB_MULHIT; +#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO + sig = SIGSEGV; + printk(KERN_EMERG "\n\nJump to address 0 - 0x0fff\n"); +#else + sig = SIGILL; + printk(KERN_EMERG EXC_0x2D); +#endif + CHK_DEBUGGER_TRAP(); + break; + /* 0x2E - Illegal use of Supervisor Resource, handled here */ + case VEC_ILL_RES: + info.si_code = ILL_PRVOPC; + sig = SIGILL; + printk(KERN_EMERG EXC_0x2E); + CHK_DEBUGGER_TRAP(); + break; + /* 0x2F - Reserved, Caught by default */ + /* 0x30 - Reserved, Caught by default */ + /* 0x31 - Reserved, Caught by default */ + /* 0x32 - Reserved, Caught by default */ + /* 0x33 - Reserved, Caught by default */ + /* 0x34 - Reserved, Caught by default */ + /* 0x35 - Reserved, Caught by default */ + /* 0x36 - Reserved, Caught by default */ + /* 0x37 - Reserved, Caught by default */ + /* 0x38 - Reserved, Caught by default */ + /* 0x39 - Reserved, Caught by default */ + /* 0x3A - Reserved, Caught by default */ + /* 0x3B - Reserved, Caught by default */ + /* 0x3C - Reserved, Caught by default */ + /* 0x3D - Reserved, Caught by default */ + /* 0x3E - Reserved, Caught by default */ + /* 0x3F - Reserved, Caught by default */ + default: + info.si_code = TRAP_ILLTRAP; + sig = SIGTRAP; + printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n", + (fp->seqstat & SEQSTAT_EXCAUSE)); + CHK_DEBUGGER_TRAP(); + break; + } + + info.si_signo = 
sig; + info.si_errno = 0; + info.si_addr = (void *)fp->pc; + force_sig_info(sig, &info, current); + if (sig != 0 && sig != SIGTRAP) { + unsigned long stack; + dump_bfin_regs(fp, (void *)fp->retx); + dump_bfin_trace_buffer(); + show_stack(current, &stack); + if (current->mm == NULL) + panic("Kernel exception"); + } + + /* if the address that we are about to return to is not valid, set it + * to a valid address, if we have a current application or panic + */ + if (!(fp->pc <= physical_mem_end +#if L1_CODE_LENGTH != 0 + || (fp->pc >= L1_CODE_START && + fp->pc <= (L1_CODE_START + L1_CODE_LENGTH)) +#endif + )) { + if (current->mm) { + fp->pc = current->mm->start_code; + } else { + printk(KERN_EMERG "I can't return to memory that doesn't exist - bad things happen\n"); + panic("Help - I've fallen and can't get up\n"); + } + } + + trace_buffer_restore(j); + return; +} + +/* Typical exception handling routines */ + +void dump_bfin_trace_buffer(void) +{ + int tflags; + trace_buffer_save(tflags); + + if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) { + int i; + printk(KERN_EMERG "Hardware Trace:\n"); + for (i = 0; bfin_read_TBUFSTAT() & TBUFCNT; i++) { + printk(KERN_EMERG "%2i Target : ", i); + printk_address((unsigned long)bfin_read_TBUF()); + printk("\n" KERN_EMERG " Source : "); + printk_address((unsigned long)bfin_read_TBUF()); + printk("\n"); + } + } + + trace_buffer_restore(tflags); +} +EXPORT_SYMBOL(dump_bfin_trace_buffer); + +static void show_trace(struct task_struct *tsk, unsigned long *sp) +{ + unsigned long addr; + + printk("\nCall Trace:"); +#ifdef CONFIG_KALLSYMS + printk("\n"); +#endif + + while (!kstack_end(sp)) { + addr = *sp++; + /* + * If the address is either in the text segment of the + * kernel, or in the region which contains vmalloc'ed + * memory, it *may* be the address of a calling + * routine; if so, print it so that someone tracing + * down the cause of the crash will be able to figure + * out the call path that was taken. + */ + if (kernel_text_address(addr)) + print_ip_sym(addr); + } + + printk("\n"); +} + +void show_stack(struct task_struct *task, unsigned long *stack) +{ + unsigned long *endstack, addr; + int i; + + /* Cannot call dump_bfin_trace_buffer() here as show_stack() is + * called externally in some places in the kernel. 
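+ * Callers that also want the hardware trace have to dump it themselves,
+ * the way dump_stack() below does.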
+ */ + + if (!stack) { + if (task) + stack = (unsigned long *)task->thread.ksp; + else + stack = (unsigned long *)&stack; + } + + addr = (unsigned long)stack; + endstack = (unsigned long *)PAGE_ALIGN(addr); + + printk(KERN_EMERG "Stack from %08lx:", (unsigned long)stack); + for (i = 0; i < kstack_depth_to_print; i++) { + if (stack + 1 > endstack) + break; + if (i % 8 == 0) + printk("\n" KERN_EMERG " "); + printk(" %08lx", *stack++); + } + + show_trace(task, stack); +} + +void dump_stack(void) +{ + unsigned long stack; + int tflags; + trace_buffer_save(tflags); + dump_bfin_trace_buffer(); + show_stack(current, &stack); + trace_buffer_restore(tflags); +} + +EXPORT_SYMBOL(dump_stack); + +void dump_bfin_regs(struct pt_regs *fp, void *retaddr) +{ + if (current->pid) { + printk("\nCURRENT PROCESS:\n\n"); + printk("COMM=%s PID=%d\n", current->comm, current->pid); + } else { + printk + ("\nNo Valid pid - Either things are really messed up, or you are in the kernel\n"); + } + + if (current->mm) { + printk("TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n" + "BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n", + (void*)current->mm->start_code, + (void*)current->mm->end_code, + (void*)current->mm->start_data, + (void*)current->mm->end_data, + (void*)current->mm->end_data, + (void*)current->mm->brk, + (void*)current->mm->start_stack); + } + + printk("return address: 0x%p; contents of [PC-16...PC+8]:\n", retaddr); + if (retaddr != 0 && retaddr <= (void*)physical_mem_end +#if L1_CODE_LENGTH != 0 + /* FIXME: Copy the code out of L1 Instruction SRAM through dma + memcpy. */ + && !(retaddr >= (void*)L1_CODE_START + && retaddr < (void*)(L1_CODE_START + L1_CODE_LENGTH)) +#endif + ) { + int i = 0; + unsigned short x = 0; + for (i = -16; i < 8; i++) { + if (get_user(x, (unsigned short *)retaddr + i)) + break; +#ifndef CONFIG_DEBUG_HWERR + /* If one of the last few instructions was a STI + * it is likily that the error occured awhile ago + * and we just noticed + */ + if (x >= 0x0040 && x <= 0x0047 && i <= 0) + panic("\n\nWARNING : You should reconfigure the kernel to turn on\n" + " 'Hardware error interrupt debugging'\n" + " The rest of this error is meanless\n"); +#endif + + if (i == -8) + printk("\n"); + if (i == 0) + printk("X\n"); + printk("%04x ", x); + } + } else + printk("Cannot look at the [PC] for it is in unreadable L1 SRAM - sorry\n"); + + printk("\n\n"); + + printk("RETE: %08lx RETN: %08lx RETX: %08lx RETS: %08lx\n", + fp->rete, fp->retn, fp->retx, fp->rets); + printk("IPEND: %04lx SYSCFG: %04lx\n", fp->ipend, fp->syscfg); + printk("SEQSTAT: %08lx SP: %08lx\n", (long)fp->seqstat, (long)fp); + printk("R0: %08lx R1: %08lx R2: %08lx R3: %08lx\n", + fp->r0, fp->r1, fp->r2, fp->r3); + printk("R4: %08lx R5: %08lx R6: %08lx R7: %08lx\n", + fp->r4, fp->r5, fp->r6, fp->r7); + printk("P0: %08lx P1: %08lx P2: %08lx P3: %08lx\n", + fp->p0, fp->p1, fp->p2, fp->p3); + printk("P4: %08lx P5: %08lx FP: %08lx\n", fp->p4, fp->p5, fp->fp); + printk("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n", + fp->a0w, fp->a0x, fp->a1w, fp->a1x); + + printk("LB0: %08lx LT0: %08lx LC0: %08lx\n", fp->lb0, fp->lt0, + fp->lc0); + printk("LB1: %08lx LT1: %08lx LC1: %08lx\n", fp->lb1, fp->lt1, + fp->lc1); + printk("B0: %08lx L0: %08lx M0: %08lx I0: %08lx\n", fp->b0, fp->l0, + fp->m0, fp->i0); + printk("B1: %08lx L1: %08lx M1: %08lx I1: %08lx\n", fp->b1, fp->l1, + fp->m1, fp->i1); + printk("B2: %08lx L2: %08lx M2: %08lx I2: %08lx\n", fp->b2, fp->l2, + fp->m2, fp->i2); + printk("B3: %08lx L3: %08lx M3: %08lx I3: %08lx\n", fp->b3, fp->l3, + fp->m3, fp->i3); + 
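+ /* The DCPLB/ICPLB fault addresses below are only printed when SEQSTAT
+ * actually carries an exception cause.
+ */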
+ printk("\nUSP: %08lx ASTAT: %08lx\n", rdusp(), fp->astat); + if ((long)fp->seqstat & SEQSTAT_EXCAUSE) { + printk(KERN_EMERG "DCPLB_FAULT_ADDR=%p\n", (void*)bfin_read_DCPLB_FAULT_ADDR()); + printk(KERN_EMERG "ICPLB_FAULT_ADDR=%p\n", (void*)bfin_read_ICPLB_FAULT_ADDR()); + } + + printk("\n\n"); +} + +#ifdef CONFIG_SYS_BFIN_SPINLOCK_L1 +asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text)); +#endif + +asmlinkage int sys_bfin_spinlock(int *spinlock) +{ + int ret = 0; + int tmp = 0; + + local_irq_disable(); + ret = get_user(tmp, spinlock); + if (ret == 0) { + if (tmp) + ret = 1; + tmp = 1; + put_user(tmp, spinlock); + } + local_irq_enable(); + return ret; +} + +void panic_cplb_error(int cplb_panic, struct pt_regs *fp) +{ + switch (cplb_panic) { + case CPLB_NO_UNLOCKED: + printk(KERN_EMERG "All CPLBs are locked\n"); + break; + case CPLB_PROT_VIOL: + return; + case CPLB_NO_ADDR_MATCH: + return; + case CPLB_UNKNOWN_ERR: + printk(KERN_EMERG "Unknown CPLB Exception\n"); + break; + } + + printk(KERN_EMERG "DCPLB_FAULT_ADDR=%p\n", (void*)bfin_read_DCPLB_FAULT_ADDR()); + printk(KERN_EMERG "ICPLB_FAULT_ADDR=%p\n", (void*)bfin_read_ICPLB_FAULT_ADDR()); + dump_bfin_regs(fp, (void *)fp->retx); + dump_stack(); + panic("Unrecoverable event\n"); +} diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..6ae9ebbd8e58 --- /dev/null +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -0,0 +1,228 @@ +/* + * File: arch/blackfin/kernel/vmlinux.lds.S + * Based on: none - original work + * Author: + * + * Created: Tue Sep 21 2004 + * Description: Master linker script for blackfin architecture + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define VMLINUX_SYMBOL(_sym_) _##_sym_ + +#include +#include + + +OUTPUT_FORMAT("elf32-bfin") +ENTRY(__start) +_jiffies = _jiffies_64; + +MEMORY +{ + ram : ORIGIN = CONFIG_BOOT_LOAD, LENGTH = (CONFIG_MEM_SIZE * 1024 * 1024) - (CONFIG_BOOT_LOAD) + l1_data_a : ORIGIN = L1_DATA_A_START, LENGTH = L1_DATA_A_LENGTH + l1_data_b : ORIGIN = L1_DATA_B_START, LENGTH = L1_DATA_B_LENGTH + l1_code : ORIGIN = L1_CODE_START, LENGTH = L1_CODE_LENGTH + l1_scratch : ORIGIN = L1_SCRATCH_START, LENGTH = L1_SCRATCH_LENGTH +} + +SECTIONS +{ + . = CONFIG_BOOT_LOAD; + + .text : + { + _text = .; + __stext = .; + *(.text) + SCHED_TEXT + *(.text.lock) + . = ALIGN(16); + ___start___ex_table = .; + *(__ex_table) + ___stop___ex_table = .; + + *($code) + *(.rodata) + *(.rodata.*) + *(__vermagic) /* Kernel version magic */ + *(.rodata1) + *(.fixup) + *(.spinlock.text) + + /* Kernel symbol table: Normal symbols */ + . 
= ALIGN(4); + ___start___ksymtab = .; + *(__ksymtab) + ___stop___ksymtab = .; + + /* Kernel symbol table: GPL-only symbols */ + ___start___ksymtab_gpl = .; + *(__ksymtab_gpl) + ___stop___ksymtab_gpl = .; + + /* Kernel symbol table: Normal unused symbols */ \ + ___start___ksymtab_unused = .; + *(__ksymtab_unused) + ___stop___ksymtab_unused = .; + + /* Kernel symbol table: GPL-only unused symbols */ + ___start___ksymtab_unused_gpl = .; + *(__ksymtab_unused_gpl) + ___stop___ksymtab_unused_gpl = .; + + + /* Kernel symbol table: GPL-future symbols */ + ___start___ksymtab_gpl_future = .; + *(__ksymtab_gpl_future) + ___stop___ksymtab_gpl_future = .; + + /* Kernel symbol table: Normal symbols */ + ___start___kcrctab = .; + *(__kcrctab) + ___stop___kcrctab = .; + + /* Kernel symbol table: GPL-only symbols */ + ___start___kcrctab_gpl = .; + *(__kcrctab_gpl) + ___stop___kcrctab_gpl = .; + + /* Kernel symbol table: GPL-future symbols */ + ___start___kcrctab_gpl_future = .; + *(__kcrctab_gpl_future) + ___stop___kcrctab_gpl_future = .; + + /* Kernel symbol table: strings */ + *(__ksymtab_strings) + + . = ALIGN(4); + __etext = .; + } > ram + + .init : + { + . = ALIGN(4096); + ___init_begin = .; + __sinittext = .; + *(.init.text) + __einittext = .; + *(.init.data) + . = ALIGN(16); + ___setup_start = .; + *(.init.setup) + ___setup_end = .; + ___start___param = .; + *(__param) + ___stop___param = .; + ___initcall_start = .; + INITCALLS + ___initcall_end = .; + ___con_initcall_start = .; + *(.con_initcall.init) + ___con_initcall_end = .; + ___security_initcall_start = .; + *(.security_initcall.init) + ___security_initcall_end = .; + . = ALIGN(4); + ___initramfs_start = .; + *(.init.ramfs) + ___initramfs_end = .; + . = ALIGN(4); + ___init_end = .; + } > ram + + __l1_lma_start = .; + + .text_l1 : + { + . = ALIGN(4); + __stext_l1 = .; + *(.l1.text) + + . = ALIGN(4); + __etext_l1 = .; + } > l1_code AT > ram + + .data_l1 : + { + . = ALIGN(4); + __sdata_l1 = .; + *(.l1.data) + __edata_l1 = .; + + . = ALIGN(4); + __sbss_l1 = .; + *(.l1.bss) + + . = ALIGN(32); + *(.data_l1.cacheline_aligned) + + . = ALIGN(4); + __ebss_l1 = .; + } > l1_data_a AT > ram + .data_b_l1 : + { + . = ALIGN(4); + __sdata_b_l1 = .; + *(.l1.data.B) + __edata_b_l1 = .; + + . = ALIGN(4); + __sbss_b_l1 = .; + *(.l1.bss.B) + + . = ALIGN(4); + __ebss_b_l1 = .; + } > l1_data_b AT > ram + + .data : + { + __sdata = .; + . = ALIGN(0x2000); + *(.data.init_task) + *(.data) + + . = ALIGN(32); + *(.data.cacheline_aligned) + + . = ALIGN(0x2000); + __edata = .; + } > ram + + /DISCARD/ : { /* Exit code and data*/ + *(.exit.text) + *(.exit.data) + *(.exitcall.exit) + } > ram + + .bss : + { + . = ALIGN(4); + ___bss_start = .; + *(.bss) + *(COMMON) + . = ALIGN(4); + ___bss_stop = .; + __end = . 
; + } > ram +} diff --git a/arch/blackfin/lib/Makefile b/arch/blackfin/lib/Makefile new file mode 100644 index 000000000000..635288fc5f54 --- /dev/null +++ b/arch/blackfin/lib/Makefile @@ -0,0 +1,11 @@ +# +# arch/blackfin/lib/Makefile +# + +lib-y := \ + ashldi3.o ashrdi3.o lshrdi3.o \ + muldi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \ + checksum.o memcpy.o memset.o memcmp.o memchr.o memmove.o \ + strcmp.o strcpy.o strncmp.o strncpy.o \ + umulsi3_highpart.o smulsi3_highpart.o \ + ins.o outs.o diff --git a/arch/blackfin/lib/ashldi3.c b/arch/blackfin/lib/ashldi3.c new file mode 100644 index 000000000000..a8c279e9b192 --- /dev/null +++ b/arch/blackfin/lib/ashldi3.c @@ -0,0 +1,58 @@ +/* + * File: arch/blackfin/lib/ashldi3.c + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "gcclib.h" + +#ifdef CONFIG_ARITHMETIC_OPS_L1 +DItype __ashldi3(DItype u, word_type b)__attribute__((l1_text)); +#endif + +DItype __ashldi3(DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof(SItype) * BITS_PER_UNIT) - b; + if (bm <= 0) { + w.s.low = 0; + w.s.high = (USItype) uu.s.low << -bm; + } else { + USItype carries = (USItype) uu.s.low >> bm; + w.s.low = (USItype) uu.s.low << b; + w.s.high = ((USItype) uu.s.high << b) | carries; + } + + return w.ll; +} diff --git a/arch/blackfin/lib/ashrdi3.c b/arch/blackfin/lib/ashrdi3.c new file mode 100644 index 000000000000..a0d3419329ca --- /dev/null +++ b/arch/blackfin/lib/ashrdi3.c @@ -0,0 +1,59 @@ +/* + * File: arch/blackfin/lib/ashrdi3.c + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "gcclib.h" + +#ifdef CONFIG_ARITHMETIC_OPS_L1 +DItype __ashrdi3(DItype u, word_type b)__attribute__((l1_text)); +#endif + +DItype __ashrdi3(DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof(SItype) * BITS_PER_UNIT) - b; + if (bm <= 0) { + /* w.s.high = 1..1 or 0..0 */ + w.s.high = uu.s.high >> (sizeof(SItype) * BITS_PER_UNIT - 1); + w.s.low = uu.s.high >> -bm; + } else { + USItype carries = (USItype) uu.s.high << bm; + w.s.high = uu.s.high >> b; + w.s.low = ((USItype) uu.s.low >> b) | carries; + } + + return w.ll; +} diff --git a/arch/blackfin/lib/checksum.c b/arch/blackfin/lib/checksum.c new file mode 100644 index 000000000000..42768e0c80ca --- /dev/null +++ b/arch/blackfin/lib/checksum.c @@ -0,0 +1,140 @@ +/* + * File: arch/blackfin/lib/checksum.c + * Based on: none - original work + * Author: + * + * Created: + * Description: An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#ifdef CONFIG_IP_CHECKSUM_L1 +static unsigned short do_csum(const unsigned char *buff, int len)__attribute__((l1_text)); +#endif + +static unsigned short do_csum(const unsigned char *buff, int len) +{ + register unsigned long sum = 0; + int swappem = 0; + + if (1 & (unsigned long)buff) { + sum = *buff << 8; + buff++; + len--; + ++swappem; + } + + while (len > 1) { + sum += *(unsigned short *)buff; + buff += 2; + len -= 2; + } + + if (len > 0) + sum += *buff; + + /* Fold 32-bit sum to 16 bits */ + while (sum >> 16) + sum = (sum & 0xffff) + (sum >> 16); + + if (swappem) + sum = ((sum & 0xff00) >> 8) + ((sum & 0x00ff) << 8); + + return sum; + +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. 
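+ * (ihl is the header length in 32-bit words, so a minimal 20-byte IPv4
+ * header is summed with ihl == 5.)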
+ */ +unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl) +{ + return ~do_csum(iph, ihl * 4); +} + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum) +{ + /* + * Just in case we get nasty checksum data... + * Like 0xffff6ec3 in the case of our IPv6 multicast header. + * We fold to begin with, as well as at the end. + */ + sum = (sum & 0xffff) + (sum >> 16); + + sum += do_csum(buff, len); + + sum = (sum & 0xffff) + (sum >> 16); + + return sum; +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +unsigned short ip_compute_csum(const unsigned char *buff, int len) +{ + return ~do_csum(buff, len); +} + +/* + * copy from fs while checksumming, otherwise like csum_partial + */ + +unsigned int +csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, + int len, int sum, int *csum_err) +{ + if (csum_err) + *csum_err = 0; + memcpy(dst, src, len); + return csum_partial(dst, len, sum); +} + +/* + * copy from ds while checksumming, otherwise like csum_partial + */ + +unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst, + int len, int sum) +{ + memcpy(dst, src, len); + return csum_partial(dst, len, sum); +} diff --git a/arch/blackfin/lib/divsi3.S b/arch/blackfin/lib/divsi3.S new file mode 100644 index 000000000000..3e29861852b2 --- /dev/null +++ b/arch/blackfin/lib/divsi3.S @@ -0,0 +1,216 @@ +/* + * File: arch/blackfin/lib/divsi3.S + * Based on: + * Author: + * + * Created: + * Description: 16 / 32 bit signed division. + * Special cases : + * 1) If(numerator == 0) + * return 0 + * 2) If(denominator ==0) + * return positive max = 0x7fffffff + * 3) If(numerator == denominator) + * return 1 + * 4) If(denominator ==1) + * return numerator + * 5) If(denominator == -1) + * return -numerator + * + * Operand : R0 - Numerator (i) + * R1 - Denominator (i) + * R0 - Quotient (o) + * Registers Used : R2-R7,P0-P2 + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +.global ___divsi3; + +#ifdef CONFIG_ARITHMETIC_OPS_L1 +.section .l1.text +#else +.text +#endif + +.align 2; +___divsi3 : + + + R3 = R0 ^ R1; + R0 = ABS R0; + + CC = V; + + r3 = rot r3 by -1; + r1 = abs r1; /* now both positive, r3.30 means "negate result", + ** r3.31 means overflow, add one to result + */ + cc = r0 < r1; + if cc jump .Lret_zero; + r2 = r1 >> 15; + cc = r2; + if cc jump .Lidents; + r2 = r1 << 16; + cc = r2 <= r0; + if cc jump .Lidents; + + DIVS(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + DIVQ(R0, R1); + + R0 = R0.L (Z); + r1 = r3 >> 31; /* add overflow issue back in */ + r0 = r0 + r1; + r1 = -r0; + cc = bittst(r3, 30); + if cc r0 = r1; + RTS; + +/* Can't use the primitives. Test common identities. +** If the identity is true, return the value in R2. +*/ + +.Lidents: + CC = R1 == 0; /* check for divide by zero */ + IF CC JUMP .Lident_return; + + CC = R0 == 0; /* check for division of zero */ + IF CC JUMP .Lzero_return; + + CC = R0 == R1; /* check for identical operands */ + IF CC JUMP .Lident_return; + + CC = R1 == 1; /* check for divide by 1 */ + IF CC JUMP .Lident_return; + + R2.L = ONES R1; + R2 = R2.L (Z); + CC = R2 == 1; + IF CC JUMP .Lpower_of_two; + + /* Identities haven't helped either. + ** Perform the full division process. + */ + + P1 = 31; /* Set loop counter */ + + [--SP] = (R7:5); /* Push registers R5-R7 */ + R2 = -R1; + [--SP] = R2; + R2 = R0 << 1; /* R2 lsw of dividend */ + R6 = R0 ^ R1; /* Get sign */ + R5 = R6 >> 31; /* Shift sign to LSB */ + + R0 = 0 ; /* Clear msw partial remainder */ + R2 = R2 | R5; /* Shift quotient bit */ + R6 = R0 ^ R1; /* Get new quotient bit */ + + LSETUP(.Llst,.Llend) LC0 = P1; /* Setup loop */ +.Llst: R7 = R2 >> 31; /* record copy of carry from R2 */ + R2 = R2 << 1; /* Shift 64 bit dividend up by 1 bit */ + R0 = R0 << 1 || R5 = [SP]; + R0 = R0 | R7; /* and add carry */ + CC = R6 < 0; /* Check quotient(AQ) */ + /* we might be subtracting divisor (AQ==0) */ + IF CC R5 = R1; /* or we might be adding divisor (AQ==1)*/ + R0 = R0 + R5; /* do add or subtract, as indicated by AQ */ + R6 = R0 ^ R1; /* Generate next quotient bit */ + R5 = R6 >> 31; + /* Assume AQ==1, shift in zero */ + BITTGL(R5,0); /* tweak AQ to be what we want to shift in */ +.Llend: R2 = R2 + R5; /* and then set shifted-in value to + ** tweaked AQ. + */ + r1 = r3 >> 31; + r2 = r2 + r1; + cc = bittst(r3,30); + r0 = -r2; + if !cc r0 = r2; + SP += 4; + (R7:5)= [SP++]; /* Pop registers R6-R7 */ + RTS; + +.Lident_return: + CC = R1 == 0; /* check for divide by zero => 0x7fffffff */ + R2 = -1 (X); + R2 >>= 1; + IF CC JUMP .Ltrue_ident_return; + + CC = R0 == R1; /* check for identical operands => 1 */ + R2 = 1 (Z); + IF CC JUMP .Ltrue_ident_return; + + R2 = R0; /* assume divide by 1 => numerator */ + /*FALLTHRU*/ + +.Ltrue_ident_return: + R0 = R2; /* Return an identity value */ + R2 = -R2; + CC = bittst(R3,30); + IF CC R0 = R2; +.Lzero_return: + RTS; /* ...including zero */ + +.Lpower_of_two: + /* Y has a single bit set, which means it's a power of two. 
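+ ** (For instance, dividing by Y == 0x00010000 is the same as shifting
+ ** X right by 16.)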
+ ** That means we can perform the division just by shifting + ** X to the right the appropriate number of bits + */ + + /* signbits returns the number of sign bits, minus one. + ** 1=>30, 2=>29, ..., 0x40000000=>0. Which means we need + ** to shift right n-signbits spaces. It also means 0x80000000 + ** is a special case, because that *also* gives a signbits of 0 + */ + + R2 = R0 >> 31; + CC = R1 < 0; + IF CC JUMP .Ltrue_ident_return; + + R1.l = SIGNBITS R1; + R1 = R1.L (Z); + R1 += -30; + R0 = LSHIFT R0 by R1.L; + r1 = r3 >> 31; + r0 = r0 + r1; + R2 = -R0; // negate result if necessary + CC = bittst(R3,30); + IF CC R0 = R2; + RTS; + +.Lret_zero: + R0 = 0; + RTS; diff --git a/arch/blackfin/lib/gcclib.h b/arch/blackfin/lib/gcclib.h new file mode 100644 index 000000000000..9ccd39a135ee --- /dev/null +++ b/arch/blackfin/lib/gcclib.h @@ -0,0 +1,47 @@ +/* + * File: arch/blackfin/lib/gcclib.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define BITS_PER_UNIT 8 +#define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT) + +typedef unsigned int UQItype __attribute__ ((mode(QI))); +typedef int SItype __attribute__ ((mode(SI))); +typedef unsigned int USItype __attribute__ ((mode(SI))); +typedef int DItype __attribute__ ((mode(DI))); +typedef int word_type __attribute__ ((mode(__word__))); +typedef unsigned int UDItype __attribute__ ((mode(DI))); + +struct DIstruct { + SItype low, high; +}; + +typedef union { + struct DIstruct s; + DItype ll; +} DIunion; diff --git a/arch/blackfin/lib/ins.S b/arch/blackfin/lib/ins.S new file mode 100644 index 000000000000..730d2b427538 --- /dev/null +++ b/arch/blackfin/lib/ins.S @@ -0,0 +1,69 @@ +/* + * File: arch/blackfin/lib/ins.S + * Based on: + * Author: Bas Vermeulen + * + * Created: Tue Mar 22 15:27:24 CEST 2005 + * Description: Implementation of ins{bwl} for BlackFin processors using zero overhead loops. + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * Copyright (C) 2005 Bas Vermeulen, BuyWays BV + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +.align 2 + +ENTRY(_insl) + P0 = R0; /* P0 = port */ + cli R3; + P1 = R1; /* P1 = address */ + P2 = R2; /* P2 = count */ + SSYNC; + LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; +.Llong_loop_s: R0 = [P0]; +.Llong_loop_e: [P1++] = R0; + sti R3; + RTS; + +ENTRY(_insw) + P0 = R0; /* P0 = port */ + cli R3; + P1 = R1; /* P1 = address */ + P2 = R2; /* P2 = count */ + SSYNC; + LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; +.Lword_loop_s: R0 = W[P0]; +.Lword_loop_e: W[P1++] = R0; + sti R3; + RTS; + +ENTRY(_insb) + P0 = R0; /* P0 = port */ + cli R3; + P1 = R1; /* P1 = address */ + P2 = R2; /* P2 = count */ + SSYNC; + LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; +.Lbyte_loop_s: R0 = B[P0]; +.Lbyte_loop_e: B[P1++] = R0; + sti R3; + RTS; diff --git a/arch/blackfin/lib/lshrdi3.c b/arch/blackfin/lib/lshrdi3.c new file mode 100644 index 000000000000..84b9c5592220 --- /dev/null +++ b/arch/blackfin/lib/lshrdi3.c @@ -0,0 +1,72 @@ +/* + * File: arch/blackfin/lib/lshrdi3.c + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define BITS_PER_UNIT 8 + +typedef int SItype __attribute__ ((mode(SI))); +typedef unsigned int USItype __attribute__ ((mode(SI))); +typedef int DItype __attribute__ ((mode(DI))); +typedef int word_type __attribute__ ((mode(__word__))); + +struct DIstruct { + SItype high, low; +}; + +typedef union { + struct DIstruct s; + DItype ll; +} DIunion; + +#ifdef CONFIG_ARITHMETIC_OPS_L1 +DItype __lshrdi3(DItype u, word_type b)__attribute__((l1_text)); +#endif + +DItype __lshrdi3(DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof(SItype) * BITS_PER_UNIT) - b; + if (bm <= 0) { + w.s.high = 0; + w.s.low = (USItype) uu.s.high >> -bm; + } else { + USItype carries = (USItype) uu.s.high << bm; + w.s.high = (USItype) uu.s.high >> b; + w.s.low = ((USItype) uu.s.low >> b) | carries; + } + + return w.ll; +} diff --git a/arch/blackfin/lib/memchr.S b/arch/blackfin/lib/memchr.S new file mode 100644 index 000000000000..498122250d07 --- /dev/null +++ b/arch/blackfin/lib/memchr.S @@ -0,0 +1,70 @@ +/* + * File: arch/blackfin/lib/memchr.S + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +/* void *memchr(const void *s, int c, size_t n); + * R0 = address (s) + * R1 = sought byte (c) + * R2 = count (n) + * + * Returns pointer to located character. + */ + +.text + +.align 2 + +ENTRY(_memchr) + P0 = R0; /* P0 = address */ + P2 = R2; /* P2 = count */ + R1 = R1.B(Z); + CC = R2 == 0; + IF CC JUMP .Lfailed; + +.Lbytes: + LSETUP (.Lbyte_loop_s, .Lbyte_loop_e) LC0=P2; + +.Lbyte_loop_s: + R3 = B[P0++](Z); + CC = R3 == R1; + IF CC JUMP .Lfound; +.Lbyte_loop_e: + NOP; + +.Lfailed: + R0=0; + RTS; + +.Lfound: + R0 = P0; + R0 += -1; + RTS; + +.size _memchr,.-_memchr diff --git a/arch/blackfin/lib/memcmp.S b/arch/blackfin/lib/memcmp.S new file mode 100644 index 000000000000..5b9502368fc6 --- /dev/null +++ b/arch/blackfin/lib/memcmp.S @@ -0,0 +1,110 @@ +/* + * File: arch/blackfin/lib/memcmp.S + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +/* int memcmp(const void *s1, const void *s2, size_t n); + * R0 = First Address (s1) + * R1 = Second Address (s2) + * R2 = count (n) + * + * Favours word aligned data. + */ + +.text + +.align 2 + +ENTRY(_memcmp) + I1 = P3; + P0 = R0; /* P0 = s1 address */ + P3 = R1; /* P3 = s2 Address */ + P2 = R2 ; /* P2 = count */ + CC = R2 <= 7(IU); + IF CC JUMP .Ltoo_small; + I0 = R1; /* s2 */ + R1 = R1 | R0; /* OR addresses together */ + R1 <<= 30; /* check bottom two bits */ + CC = AZ; /* AZ set if zero. */ + IF !CC JUMP .Lbytes ; /* Jump if addrs not aligned. 
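+ (the byte loop also handles any odd remainder)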
*/ + + P1 = P2 >> 2; /* count = n/4 */ + R3 = 3; + R2 = R2 & R3; /* remainder */ + P2 = R2; /* set remainder */ + + LSETUP (.Lquad_loop_s, .Lquad_loop_e) LC0=P1; +.Lquad_loop_s: + MNOP || R0 = [P0++] || R1 = [I0++]; + CC = R0 == R1; + IF !CC JUMP .Lquad_different; +.Lquad_loop_e: + NOP; + + P3 = I0; /* s2 */ +.Ltoo_small: + CC = P2 == 0; /* Check zero count*/ + IF CC JUMP .Lfinished; /* very unlikely*/ + +.Lbytes: + LSETUP (.Lbyte_loop_s, .Lbyte_loop_e) LC0=P2; +.Lbyte_loop_s: + R1 = B[P3++](Z); /* *s2 */ + R0 = B[P0++](Z); /* *s1 */ + CC = R0 == R1; + IF !CC JUMP .Ldifferent; +.Lbyte_loop_e: + NOP; + +.Ldifferent: + R0 = R0 - R1; + P3 = I1; + RTS; + +.Lquad_different: + /* We've read two quads which don't match. + * Can't just compare them, because we're + * a little-endian machine, so the MSBs of + * the regs occur at later addresses in the + * string. + * Arrange to re-read those two quads again, + * byte-by-byte. + */ + P0 += -4; /* back up to the start of the */ + P3 = I0; /* quads, and increase the*/ + P2 += 4; /* remainder count*/ + P3 += -4; + JUMP .Lbytes; + +.Lfinished: + R0 = 0; + P3 = I1; + RTS; + +.size _memcmp,.-_memcmp diff --git a/arch/blackfin/lib/memcpy.S b/arch/blackfin/lib/memcpy.S new file mode 100644 index 000000000000..c1e00eff541c --- /dev/null +++ b/arch/blackfin/lib/memcpy.S @@ -0,0 +1,142 @@ +/* + * File: arch/blackfin/lib/memcpy.S + * Based on: + * Author: + * + * Created: + * Description: internal version of memcpy(), issued by the compiler + * to copy blocks of data around. + * This is really memmove() - it has to be able to deal with + * possible overlaps, because that ambiguity is when the compiler + * gives up and calls a function. We have our own, internal version + * so that we get something we trust, even if the user has redefined + * the normal symbol. + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +/* void *memcpy(void *dest, const void *src, size_t n); + * R0 = To Address (dest) (leave unchanged to form result) + * R1 = From Address (src) + * R2 = count + * + * Note: Favours word alignment + */ + +#ifdef CONFIG_MEMCPY_L1 +.section .l1.text +#else +.text +#endif + +.align 2 + +ENTRY(_memcpy) + CC = R2 <= 0; /* length not positive? */ + IF CC JUMP .L_P1L2147483647; /* Nothing to do */ + + P0 = R0 ; /* dst*/ + P1 = R1 ; /* src*/ + P2 = R2 ; /* length */ + + /* check for overlapping data */ + CC = R1 < R0; /* src < dst */ + IF !CC JUMP .Lno_overlap; + R3 = R1 + R2; + CC = R0 < R3; /* and dst < src+len */ + IF CC JUMP .Lhas_overlap; + +.Lno_overlap: + /* Check for aligned data.*/ + + R3 = R1 | R0; + R0 = 0x3; + R3 = R3 & R0; + CC = R3; /* low bits set on either address? 
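+ i.e. is either src or dst not 32-bit aligned?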
*/ + IF CC JUMP .Lnot_aligned; + + /* Both addresses are word-aligned, so we can copy + at least part of the data using word copies.*/ + P2 = P2 >> 2; + CC = P2 <= 2; + IF !CC JUMP .Lmore_than_seven; + /* less than eight bytes... */ + P2 = R2; + LSETUP(.Lthree_start, .Lthree_end) LC0=P2; + R0 = R1; /* setup src address for return */ +.Lthree_start: + R3 = B[P1++] (X); +.Lthree_end: + B[P0++] = R3; + + RTS; + +.Lmore_than_seven: + /* There's at least eight bytes to copy. */ + P2 += -1; /* because we unroll one iteration */ + LSETUP(.Lword_loop, .Lword_loop) LC0=P2; + R0 = R1; + I1 = P1; + R3 = [I1++]; +.Lword_loop: + MNOP || [P0++] = R3 || R3 = [I1++]; + + [P0++] = R3; + /* Any remaining bytes to copy? */ + R3 = 0x3; + R3 = R2 & R3; + CC = R3 == 0; + P1 = I1; /* in case there's something left, */ + IF !CC JUMP .Lbytes_left; + RTS; +.Lbytes_left: P2 = R3; +.Lnot_aligned: + /* From here, we're copying byte-by-byte. */ + LSETUP (.Lbyte_start, .Lbyte_end) LC0=P2; + R0 = R1; /* Save src address for return */ +.Lbyte_start: + R1 = B[P1++] (X); +.Lbyte_end: + B[P0++] = R1; + +.L_P1L2147483647: + RTS; + +.Lhas_overlap: + /* Need to reverse the copying, because the + * dst would clobber the src. + * Don't bother to work out alignment for + * the reverse case. + */ + R0 = R1; /* save src for later. */ + P0 = P0 + P2; + P0 += -1; + P1 = P1 + P2; + P1 += -1; + LSETUP(.Lover_start, .Lover_end) LC0=P2; +.Lover_start: + R1 = B[P1--] (X); +.Lover_end: + B[P0--] = R1; + + RTS; diff --git a/arch/blackfin/lib/memmove.S b/arch/blackfin/lib/memmove.S new file mode 100644 index 000000000000..2e5fb7f8df13 --- /dev/null +++ b/arch/blackfin/lib/memmove.S @@ -0,0 +1,103 @@ +/* + * File: arch/blackfin/lib/memmove.S + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +.align 2 + +/* + * C Library function MEMMOVE + * R0 = To Address (leave unchanged to form result) + * R1 = From Address + * R2 = count + * Data may overlap + */ + +ENTRY(_memmove) + I1 = P3; + P0 = R0; /* P0 = To address */ + P3 = R1; /* P3 = From Address */ + P2 = R2; /* P2 = count */ + CC = P2 == 0; /* Check zero count*/ + IF CC JUMP .Lfinished; /* very unlikely */ + + CC = R1 < R0 (IU); /* From < To */ + IF !CC JUMP .Lno_overlap; + R3 = R1 + R2; + CC = R0 <= R3 (IU); /* (From+len) >= To */ + IF CC JUMP .Loverlap; +.Lno_overlap: + R3 = 11; + CC = R2 <= R3; + IF CC JUMP .Lbytes; + R3 = R1 | R0; /* OR addresses together */ + R3 <<= 30; /* check bottom two bits */ + CC = AZ; /* AZ set if zero.*/ + IF !CC JUMP .Lbytes; /* Jump if addrs not aligned.*/ + + I0 = P3; + P1 = P2 >> 2; /* count = n/4 */ + P1 += -1; + R3 = 3; + R2 = R2 & R3; /* remainder */ + P2 = R2; /* set remainder */ + R1 = [I0++]; + + LSETUP (.Lquad_loop, .Lquad_loop) LC0=P1; +.Lquad_loop: MNOP || [P0++] = R1 || R1 = [I0++]; + [P0++] = R1; + + CC = P2 == 0; /* any remaining bytes? */ + P3 = I0; /* Ammend P3 to updated ptr. */ + IF !CC JUMP .Lbytes; + P3 = I1; + RTS; + +.Lbytes: LSETUP (.Lbyte2_s, .Lbyte2_e) LC0=P2; +.Lbyte2_s: R1 = B[P3++](Z); +.Lbyte2_e: B[P0++] = R1; + +.Lfinished: P3 = I1; + RTS; + +.Loverlap: + P2 += -1; + P0 = P0 + P2; + P3 = P3 + P2; + R1 = B[P3--] (Z); + CC = P2 == 0; + IF CC JUMP .Lno_loop; + LSETUP (.Lol_s, .Lol_e) LC0 = P2; +.Lol_s: B[P0--] = R1; +.Lol_e: R1 = B[P3--] (Z); +.Lno_loop: B[P0] = R1; + P3 = I1; + RTS; + +.size _memmove,.-_memmove diff --git a/arch/blackfin/lib/memset.S b/arch/blackfin/lib/memset.S new file mode 100644 index 000000000000..ba6d047568dd --- /dev/null +++ b/arch/blackfin/lib/memset.S @@ -0,0 +1,109 @@ +/* + * File: arch/blackfin/lib/memset.S + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +.align 2 + +#ifdef CONFIG_MEMSET_L1 +.section .l1.text +#else +.text +#endif + +/* + * C Library function MEMSET + * R0 = address (leave unchanged to form result) + * R1 = filler byte + * R2 = count + * Favours word aligned data. + */ + +ENTRY(_memset) + P0 = R0 ; /* P0 = address */ + P2 = R2 ; /* P2 = count */ + R3 = R0 + R2; /* end */ + CC = R2 <= 7(IU); + IF CC JUMP .Ltoo_small; + R1 = R1.B (Z); /* R1 = fill char */ + R2 = 3; + R2 = R0 & R2; /* addr bottom two bits */ + CC = R2 == 0; /* AZ set if zero. 
*/ + IF !CC JUMP .Lforce_align ; /* Jump if addr not aligned. */ + +.Laligned: + P1 = P2 >> 2; /* count = n/4 */ + R2 = R1 << 8; /* create quad filler */ + R2.L = R2.L + R1.L(NS); + R2.H = R2.L + R1.H(NS); + P2 = R3; + + LSETUP (.Lquad_loop , .Lquad_loop) LC0=P1; +.Lquad_loop: + [P0++] = R2; + + CC = P0 == P2; + IF !CC JUMP .Lbytes_left; + RTS; + +.Lbytes_left: + R2 = R3; /* end point */ + R3 = P0; /* current position */ + R2 = R2 - R3; /* bytes left */ + P2 = R2; + +.Ltoo_small: + CC = P2 == 0; /* Check zero count */ + IF CC JUMP .Lfinished; /* Unusual */ + +.Lbytes: + LSETUP (.Lbyte_loop , .Lbyte_loop) LC0=P2; +.Lbyte_loop: + B[P0++] = R1; + +.Lfinished: + RTS; + +.Lforce_align: + CC = BITTST (R0, 0); /* odd byte */ + R0 = 4; + R0 = R0 - R2; + P1 = R0; + R0 = P0; /* Recover return address */ + IF !CC JUMP .Lskip1; + B[P0++] = R1; +.Lskip1: + CC = R2 <= 2; /* 2 bytes */ + P2 -= P1; /* reduce count */ + IF !CC JUMP .Laligned; + B[P0++] = R1; + B[P0++] = R1; + JUMP .Laligned; + +.size _memset,.-_memset diff --git a/arch/blackfin/lib/modsi3.S b/arch/blackfin/lib/modsi3.S new file mode 100644 index 000000000000..528b8b1ccb34 --- /dev/null +++ b/arch/blackfin/lib/modsi3.S @@ -0,0 +1,79 @@ +/* + * File: arch/blackfin/lib/modsi3.S + * Based on: + * Author: + * + * Created: + * Description: This program computes 32 bit signed remainder. It calls div32 function + * for quotient estimation. + * + * Registers used : + * Numerator/ Denominator in R0, R1 + * R0 - returns remainder. + * R2-R7 + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +.global ___modsi3; +.type ___modsi3, STT_FUNC; +.extern ___divsi3; +.type ___divsi3, STT_FUNC; + +#ifdef CONFIG_ARITHMETIC_OPS_L1 +.section .l1.text +#else +.text +#endif + +___modsi3: + + CC=R0==0; + IF CC JUMP .LRETURN_R0; /* Return 0, if numerator == 0 */ + CC=R1==0; + IF CC JUMP .LRETURN_ZERO; /* Return 0, if denominator == 0 */ + CC=R0==R1; + IF CC JUMP .LRETURN_ZERO; /* Return 0, if numerator == denominator */ + CC = R1 == 1; + IF CC JUMP .LRETURN_ZERO; /* Return 0, if denominator == 1 */ + CC = R1 == -1; + IF CC JUMP .LRETURN_ZERO; /* Return 0, if denominator == -1 */ + + /* Valid input. Use __divsi3() to compute the quotient, and then + * derive the remainder from that. 
*/ + + [--SP] = (R7:6); /* Push R7 and R6 */ + [--SP] = RETS; /* and return address */ + R7 = R0; /* Copy of R0 */ + R6 = R1; /* Save for later */ + SP += -12; /* Should always provide this space */ + CALL ___divsi3; /* Compute signed quotient using ___divsi3()*/ + SP += 12; + R0 *= R6; /* Quotient * divisor */ + R0 = R7 - R0; /* Dividend - (quotient * divisor) */ + RETS = [SP++]; /* Get back return address */ + (R7:6) = [SP++]; /* Pop registers R7 and R4 */ + RTS; /* Store remainder */ + +.LRETURN_ZERO: + R0 = 0; +.LRETURN_R0: + RTS; diff --git a/arch/blackfin/lib/muldi3.c b/arch/blackfin/lib/muldi3.c new file mode 100644 index 000000000000..303d0c6a6dba --- /dev/null +++ b/arch/blackfin/lib/muldi3.c @@ -0,0 +1,99 @@ +/* + * File: arch/blackfin/lib/muldi3.c + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SI_TYPE_SIZE +#define SI_TYPE_SIZE 32 +#endif +#define __ll_b (1L << (SI_TYPE_SIZE / 2)) +#define __ll_lowpart(t) ((usitype) (t) % __ll_b) +#define __ll_highpart(t) ((usitype) (t) / __ll_b) +#define BITS_PER_UNIT 8 + +#if !defined(umul_ppmm) +#define umul_ppmm(w1, w0, u, v) \ + do { \ + usitype __x0, __x1, __x2, __x3; \ + usitype __ul, __vl, __uh, __vh; \ + \ + __ul = __ll_lowpart (u); \ + __uh = __ll_highpart (u); \ + __vl = __ll_lowpart (v); \ + __vh = __ll_highpart (v); \ + \ + __x0 = (usitype) __ul * __vl; \ + __x1 = (usitype) __ul * __vh; \ + __x2 = (usitype) __uh * __vl; \ + __x3 = (usitype) __uh * __vh; \ + \ + __x1 += __ll_highpart (__x0);/* this can't give carry */ \ + __x1 += __x2; /* but this indeed can */ \ + if (__x1 < __x2) /* did we get it? */ \ + __x3 += __ll_b; /* yes, add it in the proper pos. 
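+	   Worked example (illustrative): u = 0x00010002, v = 0x00030004	\
+	   give the partial products 8, 6, 4 and 3; combining them		\
+	   yields (w1, w0) = (0x3, 0x000A0008), which is u * v.		\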
*/ \ + \ + (w1) = __x3 + __ll_highpart (__x1); \ + (w0) = __ll_lowpart (__x1) * __ll_b + __ll_lowpart (__x0); \ + } while (0) +#endif + +#if !defined(__umulsidi3) +#define __umulsidi3(u, v) \ + ({diunion __w; \ + umul_ppmm (__w.s.high, __w.s.low, u, v); \ + __w.ll; }) +#endif + +typedef unsigned int usitype __attribute__ ((mode(SI))); +typedef int sitype __attribute__ ((mode(SI))); +typedef int ditype __attribute__ ((mode(DI))); +typedef int word_type __attribute__ ((mode(__word__))); + +struct distruct { + sitype low, high; +}; +typedef union { + struct distruct s; + ditype ll; +} diunion; + +#ifdef CONFIG_ARITHMETIC_OPS_L1 +ditype __muldi3(ditype u, ditype v)__attribute__((l1_text)); +#endif + +ditype __muldi3(ditype u, ditype v) +{ + diunion w; + diunion uu, vv; + + uu.ll = u, vv.ll = v; + w.ll = __umulsidi3(uu.s.low, vv.s.low); + w.s.high += ((usitype) uu.s.low * (usitype) vv.s.high + + (usitype) uu.s.high * (usitype) vv.s.low); + + return w.ll; +} diff --git a/arch/blackfin/lib/outs.S b/arch/blackfin/lib/outs.S new file mode 100644 index 000000000000..f8c876fe8930 --- /dev/null +++ b/arch/blackfin/lib/outs.S @@ -0,0 +1,62 @@ +/* + * File: arch/blackfin/lib/outs.S + * Based on: + * Author: Bas Vermeulen + * + * Created: Tue Mar 22 15:27:24 CEST 2005 + * Description: Implementation of outs{bwl} for BlackFin processors using zero overhead loops. + * + * Modified: Copyright (C) 2005 Bas Vermeulen, BuyWays BV + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +.align 2 + +ENTRY(_outsl) + P0 = R0; /* P0 = port */ + P1 = R1; /* P1 = address */ + P2 = R2; /* P2 = count */ + + LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; +.Llong_loop_s: R0 = [P1++]; +.Llong_loop_e: [P0] = R0; + RTS; + +ENTRY(_outsw) + P0 = R0; /* P0 = port */ + P1 = R1; /* P1 = address */ + P2 = R2; /* P2 = count */ + + LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; +.Lword_loop_s: R0 = W[P1++]; +.Lword_loop_e: W[P0] = R0; + RTS; + +ENTRY(_outsb) + P0 = R0; /* P0 = port */ + P1 = R1; /* P1 = address */ + P2 = R2; /* P2 = count */ + + LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; +.Lbyte_loop_s: R0 = B[P1++]; +.Lbyte_loop_e: B[P0] = R0; + RTS; diff --git a/arch/blackfin/lib/smulsi3_highpart.S b/arch/blackfin/lib/smulsi3_highpart.S new file mode 100644 index 000000000000..10b8f8da576f --- /dev/null +++ b/arch/blackfin/lib/smulsi3_highpart.S @@ -0,0 +1,30 @@ +.align 2 +.global ___smulsi3_highpart; +.type ___smulsi3_highpart, STT_FUNC; + +#ifdef CONFIG_ARITHMETIC_OPS_L1 +.section .l1.text +#else +.text +#endif + +___smulsi3_highpart: + R2 = R1.L * R0.L (FU); + R3 = R1.H * R0.L (IS,M); + R0 = R0.H * R1.H, R1 = R0.H * R1.L (IS,M); + + R1.L = R2.H + R1.L; + cc = ac0; + R2 = cc; + + R1.L = R1.L + R3.L; + cc = ac0; + R1 >>>= 16; + R3 >>>= 16; + R1 = R1 + R3; + R1 = R1 + R2; + R2 = cc; + R1 = R1 + R2; + + R0 = R0 + R1; + RTS; diff --git a/arch/blackfin/lib/strcmp.c b/arch/blackfin/lib/strcmp.c new file mode 100644 index 000000000000..2ad47c4254ba --- /dev/null +++ b/arch/blackfin/lib/strcmp.c @@ -0,0 +1,11 @@ +#include + +#define strcmp __inline_strcmp +#include +#undef strcmp + +int strcmp(const char *dest, const char *src) +{ + return __inline_strcmp(dest, src); +} + diff --git a/arch/blackfin/lib/strcpy.c b/arch/blackfin/lib/strcpy.c new file mode 100644 index 000000000000..4dc835a8a19b --- /dev/null +++ b/arch/blackfin/lib/strcpy.c @@ -0,0 +1,11 @@ +#include + +#define strcpy __inline_strcpy +#include +#undef strcpy + +char *strcpy(char *dest, const char *src) +{ + return __inline_strcpy(dest, src); +} + diff --git a/arch/blackfin/lib/strncmp.c b/arch/blackfin/lib/strncmp.c new file mode 100644 index 000000000000..947bcfe3f3bb --- /dev/null +++ b/arch/blackfin/lib/strncmp.c @@ -0,0 +1,11 @@ +#include + +#define strncmp __inline_strncmp +#include +#undef strncmp + +int strncmp(const char *cs, const char *ct, size_t count) +{ + return __inline_strncmp(cs, ct, count); +} + diff --git a/arch/blackfin/lib/strncpy.c b/arch/blackfin/lib/strncpy.c new file mode 100644 index 000000000000..77a9b2e95097 --- /dev/null +++ b/arch/blackfin/lib/strncpy.c @@ -0,0 +1,11 @@ +#include + +#define strncpy __inline_strncpy +#include +#undef strncpy + +char *strncpy(char *dest, const char *src, size_t n) +{ + return __inline_strncpy(dest, src, n); +} + diff --git a/arch/blackfin/lib/udivsi3.S b/arch/blackfin/lib/udivsi3.S new file mode 100644 index 000000000000..d39a12916259 --- /dev/null +++ b/arch/blackfin/lib/udivsi3.S @@ -0,0 +1,298 @@ +/* + * File: arch/blackfin/lib/udivsi3.S + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#define CARRY AC0 + +#ifdef CONFIG_ARITHMETIC_OPS_L1 +.section .l1.text +#else +.text +#endif + + +ENTRY(___udivsi3) + + CC = R0 < R1 (IU); /* If X < Y, always return 0 */ + IF CC JUMP .Lreturn_ident; + + R2 = R1 << 16; + CC = R2 <= R0 (IU); + IF CC JUMP .Lidents; + + R2 = R0 >> 31; /* if X is a 31-bit number */ + R3 = R1 >> 15; /* and Y is a 15-bit number */ + R2 = R2 | R3; /* then it's okay to use the DIVQ builtins (fallthrough to fast)*/ + CC = R2; + IF CC JUMP .Ly_16bit; + +/* METHOD 1: FAST DIVQ + We know we have a 31-bit dividend, and 15-bit divisor so we can use the + simple divq approach (first setting AQ to 0 - implying unsigned division, + then 16 DIVQ's). +*/ + + AQ = CC; /* Clear AQ (CC==0) */ + +/* ISR States: When dividing two integers (32.0/16.0) using divide primitives, + we need to shift the dividend one bit to the left. + We have already checked that we have a 31-bit number so we are safe to do + that. +*/ + R0 <<= 1; + DIVQ(R0, R1); // 1 + DIVQ(R0, R1); // 2 + DIVQ(R0, R1); // 3 + DIVQ(R0, R1); // 4 + DIVQ(R0, R1); // 5 + DIVQ(R0, R1); // 6 + DIVQ(R0, R1); // 7 + DIVQ(R0, R1); // 8 + DIVQ(R0, R1); // 9 + DIVQ(R0, R1); // 10 + DIVQ(R0, R1); // 11 + DIVQ(R0, R1); // 12 + DIVQ(R0, R1); // 13 + DIVQ(R0, R1); // 14 + DIVQ(R0, R1); // 15 + DIVQ(R0, R1); // 16 + R0 = R0.L (Z); + RTS; + +.Ly_16bit: + /* We know that the upper 17 bits of Y might have bits set, + ** or that the sign bit of X might have a bit. If Y is a + ** 16-bit number, but not bigger, then we can use the builtins + ** with a post-divide correction. + ** R3 currently holds Y>>15, which means R3's LSB is the + ** bit we're interested in. + */ + + /* According to the ISR, to use the Divide primitives for + ** unsigned integer divide, the useable range is 31 bits + */ + CC = ! BITTST(R0, 31); + + /* IF condition is true we can scale our inputs and use the divide primitives, + ** with some post-adjustment + */ + R3 += -1; /* if so, Y is 0x00008nnn */ + CC &= AZ; + + /* If condition is true we can scale our inputs and use the divide primitives, + ** with some post-adjustment + */ + R3 = R1 >> 1; /* Pre-scaled divisor for primitive case */ + R2 = R0 >> 16; + + R2 = R3 - R2; /* shifted divisor < upper 16 bits of dividend */ + CC &= CARRY; + IF CC JUMP .Lshift_and_correct; + + /* Fall through to the identities */ + +/* METHOD 2: identities and manual calculation + We are not able to use the divide primites, but may still catch some special + cases. +*/ +.Lidents: + /* Test for common identities. Value to be returned is placed in R2. 
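+	** Handled here: 0/Y -> 0, X/X -> 1, X/1 -> X, and a divisor with a
+	** single set bit (a power of two), which reduces to a right shift in
+	** .Lpower_of_two.  X/0 falls out of .Lreturn_ident as 0xFFFFFFFF.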
*/ + CC = R0 == 0; /* 0/Y => 0 */ + IF CC JUMP .Lreturn_r0; + CC = R0 == R1; /* X==Y => 1 */ + IF CC JUMP .Lreturn_ident; + CC = R1 == 1; /* X/1 => X */ + IF CC JUMP .Lreturn_ident; + + R2.L = ONES R1; + R2 = R2.L (Z); + CC = R2 == 1; + IF CC JUMP .Lpower_of_two; + + [--SP] = (R7:5); /* Push registers R5-R7 */ + + /* Idents don't match. Go for the full operation. */ + + + R6 = 2; /* assume we'll shift two */ + R3 = 1; + + P2 = R1; + /* If either R0 or R1 have sign set, */ + /* divide them by two, and note it's */ + /* been done. */ + CC = R1 < 0; + R2 = R1 >> 1; + IF CC R1 = R2; /* Possibly-shifted R1 */ + IF !CC R6 = R3; /* R1 doesn't, so at most 1 shifted */ + + P0 = 0; + R3 = -R1; + [--SP] = R3; + R2 = R0 >> 1; + R2 = R0 >> 1; + CC = R0 < 0; + IF CC P0 = R6; /* Number of values divided */ + IF !CC R2 = R0; /* Shifted R0 */ + + /* P0 is 0, 1 (NR/=2) or 2 (NR/=2, DR/=2) */ + + /* r2 holds Copy dividend */ + R3 = 0; /* Clear partial remainder */ + R7 = 0; /* Initialise quotient bit */ + + P1 = 32; /* Set loop counter */ + LSETUP(.Lulst, .Lulend) LC0 = P1; /* Set loop counter */ +.Lulst: R6 = R2 >> 31; /* R6 = sign bit of R2, for carry */ + R2 = R2 << 1; /* Shift 64 bit dividend up by 1 bit */ + R3 = R3 << 1 || R5 = [SP]; + R3 = R3 | R6; /* Include any carry */ + CC = R7 < 0; /* Check quotient(AQ) */ + /* If AQ==0, we'll sub divisor */ + IF CC R5 = R1; /* and if AQ==1, we'll add it. */ + R3 = R3 + R5; /* Add/sub divsor to partial remainder */ + R7 = R3 ^ R1; /* Generate next quotient bit */ + + R5 = R7 >> 31; /* Get AQ */ + BITTGL(R5, 0); /* Invert it, to get what we'll shift */ +.Lulend: R2 = R2 + R5; /* and "shift" it in. */ + + CC = P0 == 0; /* Check how many inputs we shifted */ + IF CC JUMP .Lno_mult; /* if none... */ + R6 = R2 << 1; + CC = P0 == 1; + IF CC R2 = R6; /* if 1, Q = Q*2 */ + IF !CC R1 = P2; /* if 2, restore stored divisor */ + + R3 = R2; /* Copy of R2 */ + R3 *= R1; /* Q * divisor */ + R5 = R0 - R3; /* Z = (dividend - Q * divisor) */ + CC = R1 <= R5 (IU); /* Check if divisor <= Z? */ + R6 = CC; /* if yes, R6 = 1 */ + R2 = R2 + R6; /* if yes, add one to quotient(Q) */ +.Lno_mult: + SP += 4; + (R7:5) = [SP++]; /* Pop registers R5-R7 */ + R0 = R2; /* Store quotient */ + RTS; + +.Lreturn_ident: + CC = R0 < R1 (IU); /* If X < Y, always return 0 */ + R2 = 0; + IF CC JUMP .Ltrue_return_ident; + R2 = -1 (X); /* X/0 => 0xFFFFFFFF */ + CC = R1 == 0; + IF CC JUMP .Ltrue_return_ident; + R2 = -R2; /* R2 now 1 */ + CC = R0 == R1; /* X==Y => 1 */ + IF CC JUMP .Ltrue_return_ident; + R2 = R0; /* X/1 => X */ + /*FALLTHRU*/ + +.Ltrue_return_ident: + R0 = R2; +.Lreturn_r0: + RTS; + +.Lpower_of_two: + /* Y has a single bit set, which means it's a power of two. + ** That means we can perform the division just by shifting + ** X to the right the appropriate number of bits + */ + + /* signbits returns the number of sign bits, minus one. + ** 1=>30, 2=>29, ..., 0x40000000=>0. Which means we need + ** to shift right n-signbits spaces. It also means 0x80000000 + ** is a special case, because that *also* gives a signbits of 0 + */ + + R2 = R0 >> 31; + CC = R1 < 0; + IF CC JUMP .Ltrue_return_ident; + + R1.l = SIGNBITS R1; + R1 = R1.L (Z); + R1 += -30; + R0 = LSHIFT R0 by R1.L; + RTS; + +/* METHOD 3: PRESCALE AND USE THE DIVIDE PRIMITIVES WITH SOME POST-CORRECTION + Two scaling operations are required to use the divide primitives with a + divisor > 0x7FFFF. + Firstly (as in method 1) we need to shift the dividend 1 to the left for + integer division. 
+ Secondly we need to shift both the divisor and dividend 1 to the right so + both are in range for the primitives. + The left/right shift of the dividend does nothing so we can skip it. +*/ +.Lshift_and_correct: + R2 = R0; + // R3 is already R1 >> 1 + CC=!CC; + AQ = CC; /* Clear AQ, got here with CC = 0 */ + DIVQ(R2, R3); // 1 + DIVQ(R2, R3); // 2 + DIVQ(R2, R3); // 3 + DIVQ(R2, R3); // 4 + DIVQ(R2, R3); // 5 + DIVQ(R2, R3); // 6 + DIVQ(R2, R3); // 7 + DIVQ(R2, R3); // 8 + DIVQ(R2, R3); // 9 + DIVQ(R2, R3); // 10 + DIVQ(R2, R3); // 11 + DIVQ(R2, R3); // 12 + DIVQ(R2, R3); // 13 + DIVQ(R2, R3); // 14 + DIVQ(R2, R3); // 15 + DIVQ(R2, R3); // 16 + + /* According to the Instruction Set Reference: + To divide by a divisor > 0x7FFF, + 1. prescale and perform divide to obtain quotient (Q) (done above), + 2. multiply quotient by unscaled divisor (result M) + 3. subtract the product from the divident to get an error (E = X - M) + 4. if E < divisor (Y) subtract 1, if E > divisor (Y) add 1, else return quotient (Q) + */ + R3 = R2.L (Z); /* Q = X' / Y' */ + R2 = R3; /* Preserve Q */ + R2 *= R1; /* M = Q * Y */ + R2 = R0 - R2; /* E = X - M */ + R0 = R3; /* Copy Q into result reg */ + +/* Correction: If result of the multiply is negative, we overflowed + and need to correct the result by subtracting 1 from the result.*/ + R3 = 0xFFFF (Z); + R2 = R2 >> 16; /* E >> 16 */ + CC = R2 == R3; + R3 = 1 ; + R1 = R0 - R3; + IF CC R0 = R1; + RTS; diff --git a/arch/blackfin/lib/umodsi3.S b/arch/blackfin/lib/umodsi3.S new file mode 100644 index 000000000000..b55ce96ab89f --- /dev/null +++ b/arch/blackfin/lib/umodsi3.S @@ -0,0 +1,66 @@ +/* + * File: arch/blackfin/lib/umodsi3.S + * Based on: + * Author: + * + * Created: + * Description: libgcc1 routines for Blackfin 5xx + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifdef CONFIG_ARITHMETIC_OPS_L1 +.section .l1.text +#else +.text +#endif + +.extern ___udivsi3; +.globl ___umodsi3 +___umodsi3: + + CC=R0==0; + IF CC JUMP .LRETURN_R0; /* Return 0, if NR == 0 */ + CC= R1==0; + IF CC JUMP .LRETURN_ZERO_VAL; /* Return 0, if DR == 0 */ + CC=R0==R1; + IF CC JUMP .LRETURN_ZERO_VAL; /* Return 0, if NR == DR */ + CC = R1 == 1; + IF CC JUMP .LRETURN_ZERO_VAL; /* Return 0, if DR == 1 */ + CC = R0>= 16; + /* Unsigned multiplication has the nice property that we can + ignore carry on this first addition. 
*/ + R0 = R0 + R3; + R0 = R0 + R1; + cc = ac0; + R1 = cc; + R1 = PACK(R1.l,R0.h); + R0 = R1 + R2; + RTS; diff --git a/arch/blackfin/mach-bf533/Kconfig b/arch/blackfin/mach-bf533/Kconfig new file mode 100644 index 000000000000..14297b3ed5c3 --- /dev/null +++ b/arch/blackfin/mach-bf533/Kconfig @@ -0,0 +1,92 @@ +if (BF533 || BF532 || BF531) + +menu "BF533/2/1 Specific Configuration" + +comment "Interrupt Priority Assignment" +menu "Priority" + +config UART_ERROR + int "UART ERROR" + default 7 +config SPORT0_ERROR + int "SPORT0 ERROR" + default 7 +config SPI_ERROR + int "SPI ERROR" + default 7 +config SPORT1_ERROR + int "SPORT1 ERROR" + default 7 +config PPI_ERROR + int "PPI ERROR" + default 7 +config DMA_ERROR + int "DMA ERROR" + default 7 +config PLLWAKE_ERROR + int "PLL WAKEUP ERROR" + default 7 + +config RTC_ERROR + int "RTC ERROR" + default 8 +config DMA0_PPI + int "DMA0 PPI" + default 8 + +config DMA1_SPORT0RX + int "DMA1 (SPORT0 RX)" + default 9 +config DMA2_SPORT0TX + int "DMA2 (SPORT0 TX)" + default 9 +config DMA3_SPORT1RX + int "DMA3 (SPORT1 RX)" + default 9 +config DMA4_SPORT1TX + int "DMA4 (SPORT1 TX)" + default 9 +config DMA5_SPI + int "DMA5 (SPI)" + default 10 +config DMA6_UARTRX + int "DMA6 (UART0 RX)" + default 10 +config DMA7_UARTTX + int "DMA7 (UART0 TX)" + default 10 +config TIMER0 + int "TIMER0" + default 11 +config TIMER1 + int "TIMER1" + default 11 +config TIMER2 + int "TIMER2" + default 11 +config PFA + int "PF Interrupt A" + default 12 +config PFB + int "PF Interrupt B" + default 12 +config MEMDMA0 + int "MEMORY DMA0" + default 13 +config MEMDMA1 + int "MEMORY DMA1" + default 13 +config WDTIMER + int "WATCH DOG TIMER" + default 13 + + help + Enter the priority numbers between 7-13 ONLY. Others are Reserved. + This applies to all the above. It is not recommended to assign the + highest priority number 7 to UART or any other device. + +endmenu + +endmenu + +endif diff --git a/arch/blackfin/mach-bf533/Makefile b/arch/blackfin/mach-bf533/Makefile new file mode 100644 index 000000000000..76d2c2b8579a --- /dev/null +++ b/arch/blackfin/mach-bf533/Makefile @@ -0,0 +1,9 @@ +# +# arch/blackfin/mach-bf533/Makefile +# + +extra-y := head.o + +obj-y := ints-priority.o + +obj-$(CONFIG_CPU_FREQ_BF533) += cpu.o diff --git a/arch/blackfin/mach-bf533/boards/Makefile b/arch/blackfin/mach-bf533/boards/Makefile new file mode 100644 index 000000000000..12a631ab389d --- /dev/null +++ b/arch/blackfin/mach-bf533/boards/Makefile @@ -0,0 +1,8 @@ +# +# arch/blackfin/mach-bf533/boards/Makefile +# + +obj-$(CONFIG_GENERIC_BOARD) += generic_board.o +obj-$(CONFIG_BFIN533_STAMP) += stamp.o +obj-$(CONFIG_BFIN533_EZKIT) += ezkit.o +obj-$(CONFIG_BFIN533_BLUETECHNIX_CM) += cm_bf533.o diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c new file mode 100644 index 000000000000..23a7f607df3f --- /dev/null +++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c @@ -0,0 +1,267 @@ +/* + * File: arch/blackfin/mach-bf533/boards/cm_bf533.c + * Based on: arch/blackfin/mach-bf533/boards/ezkit.c + * Author: Aidan Williams Copright 2005 + * + * Created: 2005 + * Description: Board description file + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +char *bfin_board_name = "Bluetechnix CM BF533"; + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) +/* all SPI peripherals info goes here */ + +static struct mtd_partition bfin_spi_flash_partitions[] = { + { + .name = "bootloader", + .size = 0x00020000, + .offset = 0, + .mask_flags = MTD_CAP_ROM + },{ + .name = "kernel", + .size = 0xe0000, + .offset = 0x20000 + },{ + .name = "file system", + .size = 0x700000, + .offset = 0x00100000, + } +}; + +static struct flash_platform_data bfin_spi_flash_data = { + .name = "m25p80", + .parts = bfin_spi_flash_partitions, + .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), + .type = "m25p64", +}; + +/* SPI flash chip (m25p64) */ +static struct bfin5xx_spi_chip spi_flash_chip_info = { + .enable_dma = 0, /* use dma transfer with this chip*/ + .bits_per_word = 8, +}; + +/* SPI ADC chip */ +static struct bfin5xx_spi_chip spi_adc_chip_info = { + .enable_dma = 1, /* use dma transfer with this chip*/ + .bits_per_word = 16, +}; + +#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +static struct bfin5xx_spi_chip ad1836_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +static struct spi_board_info bfin_spi_board_info[] __initdata = { + { + /* the modalias must be the same as spi device driver name */ + .modalias = "m25p80", /* Name of spi_driver for this device */ + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ + .platform_data = &bfin_spi_flash_data, + .controller_data = &spi_flash_chip_info, + .mode = SPI_MODE_3, + },{ + .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ + .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 2, /* Framework chip select. 
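+					   Selects which SPISSEL line the controller asserts for this slave.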
*/ + .platform_data = NULL, /* No spi_driver specific config */ + .controller_data = &spi_adc_chip_info, + }, +#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) + { + .modalias = "ad1836-spi", + .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT, + .controller_data = &ad1836_spi_chip_info, + }, +#endif +}; + +/* SPI controller data */ +static struct bfin5xx_spi_master spi_bfin_master_info = { + .num_chipselect = 8, + .enable_dma = 1, /* master has the ability to do dma transfer */ +}; + +static struct platform_device spi_bfin_master_device = { + .name = "bfin-spi-master", + .id = 1, /* Bus number */ + .dev = { + .platform_data = &spi_bfin_master_info, /* Passed to driver */ + }, +}; +#endif /* spi master and devices */ + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) +static struct platform_device rtc_device = { + .name = "rtc-bfin", + .id = -1, +}; +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +static struct resource smc91x_resources[] = { + { + .start = 0x20200300, + .end = 0x20200300 + 16, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF0, + .end = IRQ_PF0, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +static struct resource bfin_uart_resources[] = { + { + .start = 0xFFC00400, + .end = 0xFFC004FF, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device bfin_uart_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart_resources), + .resource = bfin_uart_resources, +}; +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +static struct platform_device bfin_sport0_uart_device = { + .name = "bfin-sport-uart", + .id = 0, +}; + +static struct platform_device bfin_sport1_uart_device = { + .name = "bfin-sport-uart", + .id = 1, +}; +#endif + +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) +static struct resource isp1362_hcd_resources[] = { + { + .start = 0x20308000, + .end = 0x20308000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x20308004, + .end = 0x20308004, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF4, + .end = IRQ_PF4, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct isp1362_platform_data isp1362_priv = { + .sel15Kres = 1, + .clknotstop = 0, + .oc_enable = 0, + .int_act_high = 0, + .int_edge_triggered = 0, + .remote_wakeup_connected = 0, + .no_power_switching = 1, + .power_switching_mode = 0, +}; + +static struct platform_device isp1362_hcd_device = { + .name = "isp1362-hcd", + .id = 0, + .dev = { + .platform_data = &isp1362_priv, + }, + .num_resources = ARRAY_SIZE(isp1362_hcd_resources), + .resource = isp1362_hcd_resources, +}; +#endif + +static struct platform_device *cm_bf533_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) + &bfin_uart_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) + &bfin_sport0_uart_device, + &bfin_sport1_uart_device, +#endif + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) + &rtc_device, +#endif + +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) + &isp1362_hcd_device, 
+#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + &smc91x_device, +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + &spi_bfin_master_device, +#endif +}; + +static int __init cm_bf533_init(void) +{ + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + platform_add_devices(cm_bf533_devices, ARRAY_SIZE(cm_bf533_devices)); +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); +#endif + return 0; +} + +arch_initcall(cm_bf533_init); diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c new file mode 100644 index 000000000000..747298ea907b --- /dev/null +++ b/arch/blackfin/mach-bf533/boards/ezkit.c @@ -0,0 +1,224 @@ +/* + * File: arch/blackfin/mach-bf533/ezkit.c + * Based on: Orginal Work + * Author: Aidan Williams + * + * Created: 2005 + * Description: + * + * Modified: Robin Getz - Named the boards + * Copyright 2005 National ICT Australia (NICTA) + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +char *bfin_board_name = "ADDS-BF533-EZKIT"; + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) +static struct platform_device rtc_device = { + .name = "rtc-bfin", + .id = -1, +}; +#endif + +/* + * USB-LAN EzExtender board + * Driver needs to know address, irq and flag pin. 
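+ * These are supplied below via smc91x_resources[]: an IORESOURCE_MEM entry
+ * for the register window at 0x20310300 and an IORESOURCE_IRQ entry for the
+ * PF9 flag pin interrupt.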
+ */ +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +static struct resource smc91x_resources[] = { + { + .name = "smc91x-regs", + .start = 0x20310300, + .end = 0x20310300 + 16, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF9, + .end = IRQ_PF9, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) +/* all SPI peripherals info goes here */ + +#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) +static struct mtd_partition bfin_spi_flash_partitions[] = { + { + .name = "bootloader", + .size = 0x00020000, + .offset = 0, + .mask_flags = MTD_CAP_ROM + },{ + .name = "kernel", + .size = 0xe0000, + .offset = 0x20000 + },{ + .name = "file system", + .size = 0x700000, + .offset = 0x00100000, + } +}; + +static struct flash_platform_data bfin_spi_flash_data = { + .name = "m25p80", + .parts = bfin_spi_flash_partitions, + .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), + .type = "m25p64", +}; + +/* SPI flash chip (m25p64) */ +static struct bfin5xx_spi_chip spi_flash_chip_info = { + .enable_dma = 0, /* use dma transfer with this chip*/ + .bits_per_word = 8, +}; +#endif + +#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) +/* SPI ADC chip */ +static struct bfin5xx_spi_chip spi_adc_chip_info = { + .enable_dma = 1, /* use dma transfer with this chip*/ + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +static struct bfin5xx_spi_chip ad1836_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +static struct spi_board_info bfin_spi_board_info[] __initdata = { +#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) + { + /* the modalias must be the same as spi device driver name */ + .modalias = "m25p80", /* Name of spi_driver for this device */ + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 2, /* Framework chip select. On STAMP537 it is SPISSEL2*/ + .platform_data = &bfin_spi_flash_data, + .controller_data = &spi_flash_chip_info, + .mode = SPI_MODE_3, + }, +#endif + +#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) + { + .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ + .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. 
*/ + .platform_data = NULL, /* No spi_driver specific config */ + .controller_data = &spi_adc_chip_info, + }, +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) + { + .modalias = "ad1836-spi", + .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT, + .controller_data = &ad1836_spi_chip_info, + }, +#endif +}; + +/* SPI controller data */ +static struct bfin5xx_spi_master spi_bfin_master_info = { + .num_chipselect = 8, + .enable_dma = 1, /* master has the ability to do dma transfer */ +}; + +static struct platform_device spi_bfin_master_device = { + .name = "bfin-spi-master", + .id = 1, /* Bus number */ + .dev = { + .platform_data = &spi_bfin_master_info, /* Passed to driver */ + }, +}; +#endif /* spi master and devices */ + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +static struct resource bfin_uart_resources[] = { + { + .start = 0xFFC00400, + .end = 0xFFC004FF, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device bfin_uart_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart_resources), + .resource = bfin_uart_resources, +}; +#endif + +static struct platform_device *ezkit_devices[] __initdata = { +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + &smc91x_device, +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + &spi_bfin_master_device, +#endif + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) + &rtc_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) + &bfin_uart_device, +#endif +}; + +static int __init ezkit_init(void) +{ + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + platform_add_devices(ezkit_devices, ARRAY_SIZE(ezkit_devices)); +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); +#endif + return 0; +} + +arch_initcall(ezkit_init); diff --git a/arch/blackfin/mach-bf533/boards/generic_board.c b/arch/blackfin/mach-bf533/boards/generic_board.c new file mode 100644 index 000000000000..c0f43ccfbfb5 --- /dev/null +++ b/arch/blackfin/mach-bf533/boards/generic_board.c @@ -0,0 +1,95 @@ +/* + * File: arch/blackfin/mach-bf533/generic_board.c + * Based on: arch/blackfin/mach-bf533/ezkit.c + * Author: Aidan Williams + * + * Created: 2005 + * Description: + * + * Modified: + * Copyright 2005 National ICT Australia (NICTA) + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +char *bfin_board_name = "UNKNOWN BOARD"; + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) +static struct platform_device rtc_device = { + .name = "rtc-bfin", + .id = -1, +}; +#endif + +/* + * Driver needs to know address, irq and flag pin. + */ +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +static struct resource smc91x_resources[] = { + { + .start = 0x20300300, + .end = 0x20300300 + 16, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PROG_INTB, + .end = IRQ_PROG_INTB, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + },{ + /* + * denotes the flag pin and is used directly if + * CONFIG_IRQCHIP_DEMUX_GPIO is defined. + */ + .start = IRQ_PF7, + .end = IRQ_PF7, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +static struct platform_device *generic_board_devices[] __initdata = { +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) + &rtc_device, +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + &smc91x_device, +#endif +}; + +static int __init generic_board_init(void) +{ + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + return platform_add_devices(generic_board_devices, ARRAY_SIZE(generic_board_devices)); +} + +arch_initcall(generic_board_init); diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c new file mode 100644 index 000000000000..d7b3a5d74e8c --- /dev/null +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -0,0 +1,321 @@ +/* + * File: arch/blackfin/mach-bf533/stamp.c + * Based on: arch/blackfin/mach-bf533/ezkit.c + * Author: Aidan Williams + * + * Created: 2005 + * Description: Board Info File for the BF533-STAMP + * + * Modified: + * Copyright 2005 National ICT Australia (NICTA) + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) +#include +#endif +#include +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +char *bfin_board_name = "ADDS-BF533-STAMP"; + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) +static struct platform_device rtc_device = { + .name = "rtc-bfin", + .id = -1, +}; +#endif + +/* + * Driver needs to know address, irq and flag pin. + */ +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +static struct resource smc91x_resources[] = { + { + .name = "smc91x-regs", + .start = 0x20300300, + .end = 0x20300300 + 16, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF7, + .end = IRQ_PF7, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) +static struct resource net2272_bfin_resources[] = { + { + .start = 0x20300000, + .end = 0x20300000 + 0x100, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF10, + .end = IRQ_PF10, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct platform_device net2272_bfin_device = { + .name = "net2272", + .id = -1, + .num_resources = ARRAY_SIZE(net2272_bfin_resources), + .resource = net2272_bfin_resources, +}; +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) +/* all SPI peripherals info goes here */ + +#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) +static struct mtd_partition bfin_spi_flash_partitions[] = { + { + .name = "bootloader", + .size = 0x00020000, + .offset = 0, + .mask_flags = MTD_CAP_ROM + },{ + .name = "kernel", + .size = 0xe0000, + .offset = 0x20000 + },{ + .name = "file system", + .size = 0x700000, + .offset = 0x00100000, + } +}; + +static struct flash_platform_data bfin_spi_flash_data = { + .name = "m25p80", + .parts = bfin_spi_flash_partitions, + .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), + .type = "m25p64", +}; + +/* SPI flash chip (m25p64) */ +static struct bfin5xx_spi_chip spi_flash_chip_info = { + .enable_dma = 0, /* use dma transfer with this chip*/ + .bits_per_word = 8, +}; +#endif + +#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) +/* SPI ADC chip */ +static struct bfin5xx_spi_chip spi_adc_chip_info = { + .enable_dma = 1, /* use dma transfer with this chip*/ + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +static struct bfin5xx_spi_chip ad1836_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_PBX) +static struct bfin5xx_spi_chip spi_si3xxx_chip_info = { + .ctl_reg = 0x4, /* send zero */ + .enable_dma = 0, + .bits_per_word = 8, + .cs_change_per_word = 1, +}; +#endif + +#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) +static struct bfin5xx_spi_chip ad5304_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +static struct spi_board_info bfin_spi_board_info[] __initdata = { +#if defined(CONFIG_MTD_M25P80) || 
defined(CONFIG_MTD_M25P80_MODULE) + { + /* the modalias must be the same as spi device driver name */ + .modalias = "m25p80", /* Name of spi_driver for this device */ + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 2, /* Framework chip select. On STAMP537 it is SPISSEL2*/ + .platform_data = &bfin_spi_flash_data, + .controller_data = &spi_flash_chip_info, + .mode = SPI_MODE_3, + }, +#endif + +#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) + { + .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ + .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. */ + .platform_data = NULL, /* No spi_driver specific config */ + .controller_data = &spi_adc_chip_info, + }, +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) + { + .modalias = "ad1836-spi", + .max_speed_hz = 31250000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT, + .controller_data = &ad1836_spi_chip_info, + }, +#endif + +#if defined(CONFIG_PBX) + { + .modalias = "fxs-spi", + .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 3, + .controller_data= &spi_si3xxx_chip_info, + .mode = SPI_MODE_3, + }, + { + .modalias = "fxo-spi", + .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 2, + .controller_data= &spi_si3xxx_chip_info, + .mode = SPI_MODE_3, + }, +#endif + +#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) + { + .modalias = "ad5304_spi", + .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 2, + .platform_data = NULL, + .controller_data = &ad5304_chip_info, + .mode = SPI_MODE_2, + }, +#endif +}; + +/* SPI controller data */ +static struct bfin5xx_spi_master spi_bfin_master_info = { + .num_chipselect = 8, + .enable_dma = 1, /* master has the ability to do dma transfer */ +}; + +static struct platform_device spi_bfin_master_device = { + .name = "bfin-spi-master", + .id = 1, /* Bus number */ + .dev = { + .platform_data = &spi_bfin_master_info, /* Passed to driver */ + }, +}; +#endif /* spi master and devices */ + +#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) +static struct platform_device bfin_fb_device = { + .name = "bf537-fb", +}; +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +static struct resource bfin_uart_resources[] = { + { + .start = 0xFFC00400, + .end = 0xFFC004FF, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device bfin_uart_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart_resources), + .resource = bfin_uart_resources, +}; +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +static struct platform_device bfin_sport0_uart_device = { + .name = "bfin-sport-uart", + .id = 0, +}; + +static struct platform_device bfin_sport1_uart_device = { + .name = "bfin-sport-uart", + .id = 1, +}; +#endif + +static struct platform_device *stamp_devices[] __initdata = { +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) + &rtc_device, +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + &smc91x_device, +#endif + +#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) + 
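+	/* NET2272 USB device controller, described by net2272_bfin_resources above */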
&net2272_bfin_device, +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + &spi_bfin_master_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) + &bfin_uart_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) + &bfin_sport0_uart_device, + &bfin_sport1_uart_device, +#endif +}; + +static int __init stamp_init(void) +{ + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); +#endif + return 0; +} + +arch_initcall(stamp_init); diff --git a/arch/blackfin/mach-bf533/cpu.c b/arch/blackfin/mach-bf533/cpu.c new file mode 100644 index 000000000000..99547c4c290e --- /dev/null +++ b/arch/blackfin/mach-bf533/cpu.c @@ -0,0 +1,161 @@ +/* + * File: arch/blackfin/mach-bf533/cpu.c + * Based on: + * Author: michael.kang@analog.com + * + * Created: + * Description: clock scaling for the bf533 + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include + +/* CONFIG_CLKIN_HZ=11059200 */ +#define VCO5 (CONFIG_CLKIN_HZ*45) /*497664000 */ +#define VCO4 (CONFIG_CLKIN_HZ*36) /*398131200 */ +#define VCO3 (CONFIG_CLKIN_HZ*27) /*298598400 */ +#define VCO2 (CONFIG_CLKIN_HZ*18) /*199065600 */ +#define VCO1 (CONFIG_CLKIN_HZ*9) /*99532800 */ +#define VCO(x) VCO##x + +#define FREQ(x) {VCO(x),VCO(x)/4},{VCO(x),VCO(x)/2},{VCO(x),VCO(x)} +/* frequency */ +static struct cpufreq_frequency_table bf533_freq_table[] = { + FREQ(1), + FREQ(3), + {VCO4, VCO4 / 2}, {VCO4, VCO4}, + FREQ(5), + {0, CPUFREQ_TABLE_END}, +}; + +/* + * dpmc_fops->ioctl() + * static int dpmc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) + */ +static int bf533_getfreq(unsigned int cpu) +{ + unsigned long cclk_mhz, vco_mhz; + + /* The driver only support single cpu */ + if (cpu == 0) + dpmc_fops.ioctl(NULL, NULL, IOCTL_GET_CORECLOCK, &cclk_mhz); + else + cclk_mhz = -1; + return cclk_mhz; +} + +static int bf533_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + unsigned long cclk_mhz; + unsigned long vco_mhz; + unsigned long flags; + unsigned int index, vco_index; + int i; + + struct cpufreq_freqs freqs; + if (cpufreq_frequency_table_target + (policy, bf533_freq_table, target_freq, relation, &index)) + return -EINVAL; + cclk_mhz = bf533_freq_table[index].frequency; + vco_mhz = bf533_freq_table[index].index; + + dpmc_fops.ioctl(NULL, NULL, IOCTL_CHANGE_FREQUENCY, &vco_mhz); + 
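+	/*
+	 * Standard cpufreq transition sequence: fill in a cpufreq_freqs
+	 * descriptor, announce CPUFREQ_PRECHANGE, change CCLK with
+	 * interrupts disabled, then announce CPUFREQ_POSTCHANGE.
+	 */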
freqs.old = bf533_getfreq(0); + freqs.new = cclk_mhz; + freqs.cpu = 0; + + pr_debug("cclk begin change to cclk %d,vco=%d,index=%d,target=%d,oldfreq=%d\n", + cclk_mhz, vco_mhz, index, target_freq, freqs.old); + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + local_irq_save(flags); + dpmc_fops.ioctl(NULL, NULL, IOCTL_SET_CCLK, &cclk_mhz); + local_irq_restore(flags); + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + vco_mhz = get_vco(); + cclk_mhz = get_cclk(); + return 0; +} + +/* make sure that only the "userspace" governor is run -- anything else wouldn't make sense on + * this platform, anyway. + */ +static int bf533_verify_speed(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &bf533_freq_table); +} + +static int __init __bf533_cpu_init(struct cpufreq_policy *policy) +{ + int result; + + if (policy->cpu != 0) + return -EINVAL; + + policy->governor = CPUFREQ_DEFAULT_GOVERNOR; + + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + /*Now ,only support one cpu */ + policy->cur = bf533_getfreq(0); + cpufreq_frequency_table_get_attr(bf533_freq_table, policy->cpu); + return cpufreq_frequency_table_cpuinfo(policy, bf533_freq_table); +} + +static struct freq_attr *bf533_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver bf533_driver = { + .verify = bf533_verify_speed, + .target = bf533_target, + .get = bf533_getfreq, + .init = __bf533_cpu_init, + .name = "bf533", + .owner = THIS_MODULE, + .attr = bf533_freq_attr, +}; + +static int __init bf533_cpu_init(void) +{ + return cpufreq_register_driver(&bf533_driver); +} + +static void __exit bf533_cpu_exit(void) +{ + cpufreq_unregister_driver(&bf533_driver); +} + +MODULE_AUTHOR("Mickael Kang"); +MODULE_DESCRIPTION("cpufreq driver for BF533 CPU"); +MODULE_LICENSE("GPL"); + +module_init(bf533_cpu_init); +module_exit(bf533_cpu_exit); diff --git a/arch/blackfin/mach-bf533/head.S b/arch/blackfin/mach-bf533/head.S new file mode 100644 index 000000000000..4808edb0680f --- /dev/null +++ b/arch/blackfin/mach-bf533/head.S @@ -0,0 +1,774 @@ +/* + * File: arch/blackfin/mach-bf533/head.S + * Based on: + * Author: Jeff Dionne COPYRIGHT 1998 D. Jeff Dionne + * + * Created: 1998 + * Description: bf533 startup file + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#if CONFIG_BFIN_KERNEL_CLOCK +#include +#endif +#if CONFIG_DEBUG_KERNEL_START +#include +#endif + +.global __rambase +.global __ramstart +.global __ramend +.extern ___bss_stop +.extern ___bss_start +.extern _bf53x_relocate_l1_mem + +#define INITIAL_STACK 0xFFB01000 + +.text + +ENTRY(__start) +ENTRY(__stext) + /* R0: argument of command line string, passed from uboot, save it */ + R7 = R0; + /* Set the SYSCFG register */ + R0 = 0x36; + /*Enable Cycle Counter and Nesting Of Interrupts(3rd Bit)*/ + SYSCFG = R0; + R0 = 0; + + /*Clear Out All the data and pointer Registers*/ + R1 = R0; + R2 = R0; + R3 = R0; + R4 = R0; + R5 = R0; + R6 = R0; + + P0 = R0; + P1 = R0; + P2 = R0; + P3 = R0; + P4 = R0; + P5 = R0; + + LC0 = r0; + LC1 = r0; + L0 = r0; + L1 = r0; + L2 = r0; + L3 = r0; + + /* Clear Out All the DAG Registers*/ + B0 = r0; + B1 = r0; + B2 = r0; + B3 = r0; + + I0 = r0; + I1 = r0; + I2 = r0; + I3 = r0; + + M0 = r0; + M1 = r0; + M2 = r0; + M3 = r0; + +#if CONFIG_DEBUG_KERNEL_START + +/* + * Set up a temporary Event Vector Table, so if something bad happens before + * the kernel is fully started, it doesn't vector off into the bootloaders + * table + */ + P0.l = lo(EVT2); + P0.h = hi(EVT2); + P1.l = lo(EVT15); + P1.h = hi(EVT15); + P2.l = debug_kernel_start_trap; + P2.h = debug_kernel_start_trap; + + RTS = P2; + RTI = P2; + RTX = P2; + RTN = P2; + RTE = P2; + +.Lfill_temp_vector_table: + [P0++] = P2; /* Core Event Vector Table */ + CC = P0 == P1; + if !CC JUMP .Lfill_temp_vector_table + P0 = r0; + P1 = r0; + P2 = r0; + +#endif + + p0.h = hi(FIO_MASKA_C); + p0.l = lo(FIO_MASKA_C); + r0 = 0xFFFF(Z); + w[p0] = r0.L; /* Disable all interrupts */ + ssync; + + p0.h = hi(FIO_MASKB_C); + p0.l = lo(FIO_MASKB_C); + r0 = 0xFFFF(Z); + w[p0] = r0.L; /* Disable all interrupts */ + ssync; + + /* Turn off the icache */ + p0.l = (IMEM_CONTROL & 0xFFFF); + p0.h = (IMEM_CONTROL >> 16); + R1 = [p0]; + R0 = ~ENICPLB; + R0 = R0 & R1; + + /* Anomaly 05000125 */ +#ifdef ANOMALY_05000125 + CLI R2; + SSYNC; +#endif + [p0] = R0; + SSYNC; +#ifdef ANOMALY_05000125 + STI R2; +#endif + + /* Turn off the dcache */ + p0.l = (DMEM_CONTROL & 0xFFFF); + p0.h = (DMEM_CONTROL >> 16); + R1 = [p0]; + R0 = ~ENDCPLB; + R0 = R0 & R1; + + /* Anomaly 05000125 */ +#ifdef ANOMALY_05000125 + CLI R2; + SSYNC; +#endif + [p0] = R0; + SSYNC; +#ifdef ANOMALY_05000125 + STI R2; +#endif + + /* Initialise UART */ + p0.h = hi(UART_LCR); + p0.l = lo(UART_LCR); + r0 = 0x0(Z); + w[p0] = r0.L; /* To enable DLL writes */ + ssync; + + p0.h = hi(UART_DLL); + p0.l = lo(UART_DLL); + r0 = 0x0(Z); + w[p0] = r0.L; + ssync; + + p0.h = hi(UART_DLH); + p0.l = lo(UART_DLH); + r0 = 0x00(Z); + w[p0] = r0.L; + ssync; + + p0.h = hi(UART_GCTL); + p0.l = lo(UART_GCTL); + r0 = 0x0(Z); + w[p0] = r0.L; /* To enable UART clock */ + ssync; + + /* Initialize stack pointer */ + sp.l = lo(INITIAL_STACK); + sp.h = hi(INITIAL_STACK); + fp = sp; + usp = sp; + + /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */ + call _bf53x_relocate_l1_mem; +#if CONFIG_BFIN_KERNEL_CLOCK + call _start_dma_code; +#endif + + /* Code for initializing Async memory banks */ + + p2.h = hi(EBIU_AMBCTL1); + p2.l = lo(EBIU_AMBCTL1); + r0.h = hi(AMBCTL1VAL); + r0.l = lo(AMBCTL1VAL); + [p2] = r0; + ssync; + + p2.h = 
hi(EBIU_AMBCTL0); + p2.l = lo(EBIU_AMBCTL0); + r0.h = hi(AMBCTL0VAL); + r0.l = lo(AMBCTL0VAL); + [p2] = r0; + ssync; + + p2.h = hi(EBIU_AMGCTL); + p2.l = lo(EBIU_AMGCTL); + r0 = AMGCTLVAL; + w[p2] = r0; + ssync; + + /* This section keeps the processor in supervisor mode + * during kernel boot. Switches to user mode at end of boot. + * See page 3-9 of Hardware Reference manual for documentation. + */ + + /* EVT15 = _real_start */ + + p0.l = lo(EVT15); + p0.h = hi(EVT15); + p1.l = _real_start; + p1.h = _real_start; + [p0] = p1; + csync; + + p0.l = lo(IMASK); + p0.h = hi(IMASK); + p1.l = IMASK_IVG15; + p1.h = 0x0; + [p0] = p1; + csync; + + raise 15; + p0.l = .LWAIT_HERE; + p0.h = .LWAIT_HERE; + reti = p0; +#if defined(ANOMALY_05000281) + nop; nop; nop; +#endif + rti; + +.LWAIT_HERE: + jump .LWAIT_HERE; + +ENTRY(_real_start) + [ -- sp ] = reti; + p0.l = lo(WDOG_CTL); + p0.h = hi(WDOG_CTL); + r0 = 0xAD6(z); + w[p0] = r0; /* watchdog off for now */ + ssync; + + /* Code update for BSS size == 0 + * Zero out the bss region. + */ + + p1.l = ___bss_start; + p1.h = ___bss_start; + p2.l = ___bss_stop; + p2.h = ___bss_stop; + r0 = 0; + p2 -= p1; + lsetup (.L_clear_bss, .L_clear_bss) lc0 = p2; +.L_clear_bss: + B[p1++] = r0; + + /* In case there is a NULL pointer reference + * Zero out region before stext + */ + + p1.l = 0x0; + p1.h = 0x0; + r0.l = __stext; + r0.h = __stext; + r0 = r0 >> 1; + p2 = r0; + r0 = 0; + lsetup (.L_clear_zero, .L_clear_zero) lc0 = p2; +.L_clear_zero: + W[p1++] = r0; + +/* pass the uboot arguments to the global value command line */ + R0 = R7; + call _cmdline_init; + + p1.l = __rambase; + p1.h = __rambase; + r0.l = __sdata; + r0.h = __sdata; + [p1] = r0; + + p1.l = __ramstart; + p1.h = __ramstart; + p3.l = ___bss_stop; + p3.h = ___bss_stop; + + r1 = p3; + [p1] = r1; + + /* + * load the current thread pointer and stack + */ + r1.l = _init_thread_union; + r1.h = _init_thread_union; + + r2.l = 0x2000; + r2.h = 0x0000; + r1 = r1 + r2; + sp = r1; + usp = sp; + fp = sp; + call _start_kernel; +.L_exit: + jump.s .L_exit; + +.section .l1.text +#if CONFIG_BFIN_KERNEL_CLOCK +ENTRY(_start_dma_code) + p0.h = hi(SIC_IWR); + p0.l = lo(SIC_IWR); + r0.l = 0x1; + r0.h = 0x0; + [p0] = r0; + SSYNC; + + /* + * Set PLL_CTL + * - [14:09] = MSEL[5:0] : CLKIN / VCO multiplication factors + * - [8] = BYPASS : BYPASS the PLL, run CLKIN into CCLK/SCLK + * - [7] = output delay (add 200ps of delay to mem signals) + * - [6] = input delay (add 200ps of input delay to mem signals) + * - [5] = PDWN : 1=All Clocks off + * - [3] = STOPCK : 1=Core Clock off + * - [1] = PLL_OFF : 1=Disable Power to PLL + * - [0] = DF : 1=Pass CLKIN/2 to PLL / 0=Pass CLKIN to PLL + * all other bits set to zero + */ + + p0.h = hi(PLL_LOCKCNT); + p0.l = lo(PLL_LOCKCNT); + r0 = 0x300(Z); + w[p0] = r0.l; + ssync; + + P2.H = hi(EBIU_SDGCTL); + P2.L = lo(EBIU_SDGCTL); + R0 = [P2]; + BITSET (R0, 24); + [P2] = R0; + SSYNC; + + r0 = CONFIG_VCO_MULT & 63; /* Load the VCO multiplier */ + r0 = r0 << 9; /* Shift it over, */ + r1 = CLKIN_HALF; /* Do we need to divide CLKIN by 2?*/ + r0 = r1 | r0; + r1 = PLL_BYPASS; /* Bypass the PLL? */ + r1 = r1 << 8; /* Shift it over */ + r0 = r1 | r0; /* add them all together */ + + p0.h = hi(PLL_CTL); + p0.l = lo(PLL_CTL); /* Load the address */ + cli r2; /* Disable interrupts */ + ssync; + w[p0] = r0.l; /* Set the value */ + idle; /* Wait for the PLL to stablize */ + sti r2; /* Enable interrupts */ + +.Lcheck_again: + p0.h = hi(PLL_STAT); + p0.l = lo(PLL_STAT); + R0 = W[P0](Z); + CC = BITTST(R0,5); + if ! 
CC jump .Lcheck_again; + + /* Configure SCLK & CCLK Dividers */ + r0 = (CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV); + p0.h = hi(PLL_DIV); + p0.l = lo(PLL_DIV); + w[p0] = r0.l; + ssync; + + p0.l = lo(EBIU_SDRRC); + p0.h = hi(EBIU_SDRRC); + r0 = mem_SDRRC; + w[p0] = r0.l; + ssync; + + p0.l = (EBIU_SDBCTL & 0xFFFF); + p0.h = (EBIU_SDBCTL >> 16); /* SDRAM Memory Bank Control Register */ + r0 = mem_SDBCTL; + w[p0] = r0.l; + ssync; + + P2.H = hi(EBIU_SDGCTL); + P2.L = lo(EBIU_SDGCTL); + R0 = [P2]; + BITCLR (R0, 24); + p0.h = hi(EBIU_SDSTAT); + p0.l = lo(EBIU_SDSTAT); + r2.l = w[p0]; + cc = bittst(r2,3); + if !cc jump .Lskip; + NOP; + BITSET (R0, 23); +.Lskip: + [P2] = R0; + SSYNC; + + R0.L = lo(mem_SDGCTL); + R0.H = hi(mem_SDGCTL); + R1 = [p2]; + R1 = R1 | R0; + [P2] = R1; + SSYNC; + + p0.h = hi(SIC_IWR); + p0.l = lo(SIC_IWR); + r0.l = lo(IWR_ENABLE_ALL) + r0.h = hi(IWR_ENABLE_ALL) + [p0] = r0; + SSYNC; + + RTS; +#endif /* CONFIG_BFIN_KERNEL_CLOCK */ + +ENTRY(_bfin_reset) + /* No more interrupts to be handled*/ + CLI R6; + SSYNC; + +#if defined(CONFIG_BFIN_SHARED_FLASH_ENET) + p0.h = hi(FIO_INEN); + p0.l = lo(FIO_INEN); + r0.l = ~(1 << CONFIG_ENET_FLASH_PIN); + w[p0] = r0.l; + + p0.h = hi(FIO_DIR); + p0.l = lo(FIO_DIR); + r0.l = (1 << CONFIG_ENET_FLASH_PIN); + w[p0] = r0.l; + + p0.h = hi(FIO_FLAG_C); + p0.l = lo(FIO_FLAG_C); + r0.l = (1 << CONFIG_ENET_FLASH_PIN); + w[p0] = r0.l; +#endif + + /* Clear the bits 13-15 in SWRST if they werent cleared */ + p0.h = hi(SWRST); + p0.l = lo(SWRST); + csync; + r0.l = w[p0]; + + /* Clear the IMASK register */ + p0.h = hi(IMASK); + p0.l = lo(IMASK); + r0 = 0x0; + [p0] = r0; + + /* Clear the ILAT register */ + p0.h = hi(ILAT); + p0.l = lo(ILAT); + r0 = [p0]; + [p0] = r0; + SSYNC; + + /* Disable the WDOG TIMER */ + p0.h = hi(WDOG_CTL); + p0.l = lo(WDOG_CTL); + r0.l = 0xAD6; + w[p0] = r0.l; + SSYNC; + + /* Clear the sticky bit incase it is already set */ + p0.h = hi(WDOG_CTL); + p0.l = lo(WDOG_CTL); + r0.l = 0x8AD6; + w[p0] = r0.l; + SSYNC; + + /* Program the count value */ + R0.l = 0x100; + R0.h = 0x0; + P0.h = hi(WDOG_CNT); + P0.l = lo(WDOG_CNT); + [P0] = R0; + SSYNC; + + /* Program WDOG_STAT if necessary */ + P0.h = hi(WDOG_CTL); + P0.l = lo(WDOG_CTL); + R0 = W[P0](Z); + CC = BITTST(R0,1); + if !CC JUMP .LWRITESTAT; + CC = BITTST(R0,2); + if !CC JUMP .LWRITESTAT; + JUMP .LSKIP_WRITE; + +.LWRITESTAT: + /* When watch dog timer is enabled, a write to STAT will load the contents of CNT to STAT */ + R0 = 0x0000(z); + P0.h = hi(WDOG_STAT); + P0.l = lo(WDOG_STAT) + [P0] = R0; + SSYNC; + +.LSKIP_WRITE: + /* Enable the reset event */ + P0.h = hi(WDOG_CTL); + P0.l = lo(WDOG_CTL); + R0 = W[P0](Z); + BITCLR(R0,1); + BITCLR(R0,2); + W[P0] = R0.L; + SSYNC; + NOP; + + /* Enable the wdog counter */ + R0 = W[P0](Z); + BITCLR(R0,4); + W[P0] = R0.L; + SSYNC; + + IDLE; + + RTS; + +#if CONFIG_DEBUG_KERNEL_START +debug_kernel_start_trap: + /* Set up a temp stack in L1 - SDRAM might not be working */ + P0.L = lo(L1_DATA_A_START + 0x100); + P0.H = hi(L1_DATA_A_START + 0x100); + SP = P0; + + /* Make sure the Clocks are the way I think they should be */ + r0 = CONFIG_VCO_MULT & 63; /* Load the VCO multiplier */ + r0 = r0 << 9; /* Shift it over, */ + r1 = CLKIN_HALF; /* Do we need to divide CLKIN by 2?*/ + r0 = r1 | r0; + r1 = PLL_BYPASS; /* Bypass the PLL? 
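+	   (PLL_CTL bit 8: when set, CLKIN is fed straight to CCLK/SCLK, per the bit-field description above)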
*/ + r1 = r1 << 8; /* Shift it over */ + r0 = r1 | r0; /* add them all together */ + + p0.h = hi(PLL_CTL); + p0.l = lo(PLL_CTL); /* Load the address */ + cli r2; /* Disable interrupts */ + ssync; + w[p0] = r0.l; /* Set the value */ + idle; /* Wait for the PLL to stablize */ + sti r2; /* Enable interrupts */ + +.Lcheck_again1: + p0.h = hi(PLL_STAT); + p0.l = lo(PLL_STAT); + R0 = W[P0](Z); + CC = BITTST(R0,5); + if ! CC jump .Lcheck_again1; + + /* Configure SCLK & CCLK Dividers */ + r0 = (CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV); + p0.h = hi(PLL_DIV); + p0.l = lo(PLL_DIV); + w[p0] = r0.l; + ssync; + + /* Make sure UART is enabled - you can never be sure */ + +/* + * Setup for console. Argument comes from the menuconfig + */ + +#ifdef CONFIG_BAUD_9600 +#define CONSOLE_BAUD_RATE 9600 +#elif CONFIG_BAUD_19200 +#define CONSOLE_BAUD_RATE 19200 +#elif CONFIG_BAUD_38400 +#define CONSOLE_BAUD_RATE 38400 +#elif CONFIG_BAUD_57600 +#define CONSOLE_BAUD_RATE 57600 +#elif CONFIG_BAUD_115200 +#define CONSOLE_BAUD_RATE 115200 +#endif + + p0.h = hi(UART_GCTL); + p0.l = lo(UART_GCTL); + r0 = 0x00(Z); + w[p0] = r0.L; /* To Turn off UART clocks */ + ssync; + + p0.h = hi(UART_LCR); + p0.l = lo(UART_LCR); + r0 = 0x83(Z); + w[p0] = r0.L; /* To enable DLL writes */ + ssync; + + R1 = (((CONFIG_CLKIN_HZ * CONFIG_VCO_MULT) / CONFIG_SCLK_DIV) / (CONSOLE_BAUD_RATE * 16)); + + p0.h = hi(UART_DLL); + p0.l = lo(UART_DLL); + r0 = 0xFF(Z); + r0 = R1 & R0; + w[p0] = r0.L; + ssync; + + p0.h = hi(UART_DLH); + p0.l = lo(UART_DLH); + r1 >>= 8 ; + w[p0] = r1.L; + ssync; + + p0.h = hi(UART_GCTL); + p0.l = lo(UART_GCTL); + r0 = 0x0(Z); + w[p0] = r0.L; /* To enable UART clock */ + ssync; + + p0.h = hi(UART_LCR); + p0.l = lo(UART_LCR); + r0 = 0x03(Z); + w[p0] = r0.L; /* To Turn on UART */ + ssync; + + p0.h = hi(UART_GCTL); + p0.l = lo(UART_GCTL); + r0 = 0x01(Z); + w[p0] = r0.L; /* To Turn on UART Clocks */ + ssync; + + P0.h = hi(UART_THR); + P0.l = lo(UART_THR); + P1.h = hi(UART_LSR); + P1.l = lo(UART_LSR); + + R0.L = 'K'; + call .Lwait_char; + R0.L='e'; + call .Lwait_char; + R0.L='r'; + call .Lwait_char; + R0.L='n' + call .Lwait_char; + R0.L='e' + call .Lwait_char; + R0.L='l'; + call .Lwait_char; + R0.L=' '; + call .Lwait_char; + R0.L='c'; + call .Lwait_char; + R0.L='r'; + call .Lwait_char; + R0.L='a'; + call .Lwait_char; + R0.L='s'; + call .Lwait_char; + R0.L='h'; + call .Lwait_char; + R0.L='\r'; + call .Lwait_char; + R0.L='\n'; + call .Lwait_char; + + R0.L='S'; + call .Lwait_char; + R0.L='E'; + call .Lwait_char; + R0.L='Q' + call .Lwait_char; + R0.L='S' + call .Lwait_char; + R0.L='T'; + call .Lwait_char; + R0.L='A'; + call .Lwait_char; + R0.L='T'; + call .Lwait_char; + R0.L='='; + call .Lwait_char; + R2 = SEQSTAT; + call .Ldump_reg; + + R0.L=' '; + call .Lwait_char; + R0.L='R'; + call .Lwait_char; + R0.L='E' + call .Lwait_char; + R0.L='T' + call .Lwait_char; + R0.L='X'; + call .Lwait_char; + R0.L='='; + call .Lwait_char; + R2 = RETX; + call .Ldump_reg; + + R0.L='\r'; + call .Lwait_char; + R0.L='\n'; + call .Lwait_char; + +.Ldebug_kernel_start_trap_done: + JUMP .Ldebug_kernel_start_trap_done; +.Ldump_reg: + R3 = 32; + R4 = 0x0F; + R5 = ':'; /* one past 9 */ + +.Ldump_reg2: + R0 = R2; + R3 += -4; + R0 >>>= R3; + R0 = R0 & R4; + R0 += 0x30; + CC = R0 <= R5; + if CC JUMP .Ldump_reg1; + R0 += 7; + +.Ldump_reg1: + R1.l = W[P1]; + CC = BITTST(R1, 5); + if !CC JUMP .Ldump_reg1; + W[P0] = r0; + + CC = R3 == 0; + if !CC JUMP .Ldump_reg2 + RTS; + +.Lwait_char: + R1.l = W[P1]; + CC = BITTST(R1, 5); + if !CC JUMP .Lwait_char; + W[P0] = r0; + 
RTS; + +#endif /* CONFIG_DEBUG_KERNEL_START */ + +.data + +/* + * Set up the usable of RAM stuff. Size of RAM is determined then + * an initial stack set up at the end. + */ + +.align 4 +__rambase: +.long 0 +__ramstart: +.long 0 +__ramend: +.long 0 diff --git a/arch/blackfin/mach-bf533/ints-priority.c b/arch/blackfin/mach-bf533/ints-priority.c new file mode 100644 index 000000000000..36a693345204 --- /dev/null +++ b/arch/blackfin/mach-bf533/ints-priority.c @@ -0,0 +1,65 @@ +/* + * File: arch/blackfin/mach-bf533/ints-priority.c + * Based on: + * Author: Michael Hennerich + * + * Created: ? + * Description: Set up the interupt priorities + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +void program_IAR(void) +{ + /* Program the IAR0 Register with the configured priority */ + bfin_write_SIC_IAR0(((CONFIG_PLLWAKE_ERROR - 7) << PLLWAKE_ERROR_POS) | + ((CONFIG_DMA_ERROR - 7) << DMA_ERROR_POS) | + ((CONFIG_PPI_ERROR - 7) << PPI_ERROR_POS) | + ((CONFIG_SPORT0_ERROR - 7) << SPORT0_ERROR_POS) | + ((CONFIG_SPI_ERROR - 7) << SPI_ERROR_POS) | + ((CONFIG_SPORT1_ERROR - 7) << SPORT1_ERROR_POS) | + ((CONFIG_UART_ERROR - 7) << UART_ERROR_POS) | + ((CONFIG_RTC_ERROR - 7) << RTC_ERROR_POS)); + + bfin_write_SIC_IAR1(((CONFIG_DMA0_PPI - 7) << DMA0_PPI_POS) | + ((CONFIG_DMA1_SPORT0RX - 7) << DMA1_SPORT0RX_POS) | + ((CONFIG_DMA2_SPORT0TX - 7) << DMA2_SPORT0TX_POS) | + ((CONFIG_DMA3_SPORT1RX - 7) << DMA3_SPORT1RX_POS) | + ((CONFIG_DMA4_SPORT1TX - 7) << DMA4_SPORT1TX_POS) | + ((CONFIG_DMA5_SPI - 7) << DMA5_SPI_POS) | + ((CONFIG_DMA6_UARTRX - 7) << DMA6_UARTRX_POS) | + ((CONFIG_DMA7_UARTTX - 7) << DMA7_UARTTX_POS)); + + bfin_write_SIC_IAR2(((CONFIG_TIMER0 - 7) << TIMER0_POS) | + ((CONFIG_TIMER1 - 7) << TIMER1_POS) | + ((CONFIG_TIMER2 - 7) << TIMER2_POS) | + ((CONFIG_PFA - 7) << PFA_POS) | + ((CONFIG_PFB - 7) << PFB_POS) | + ((CONFIG_MEMDMA0 - 7) << MEMDMA0_POS) | + ((CONFIG_MEMDMA1 - 7) << MEMDMA1_POS) | + ((CONFIG_WDTIMER - 7) << WDTIMER_POS)); + + SSYNC(); +} diff --git a/arch/blackfin/mach-bf537/Kconfig b/arch/blackfin/mach-bf537/Kconfig new file mode 100644 index 000000000000..cc9ae38a4dda --- /dev/null +++ b/arch/blackfin/mach-bf537/Kconfig @@ -0,0 +1,141 @@ +if (BF537 || BF534 || BF536) + +menu "BF537 Specific Configuration" + +comment "PORT F/G Selection" +choice + prompt "Select BF537/6/4 default GPIO PFx PORTx" + help + Quick Hack for BF537/6/4 default GPIO PFx PORTF. + +config BF537_PORT_F + bool "Select BF537/6/4 default GPIO PFx PORTF" + depends on (BF537 || BF536 || BF534) + help + Quick Hack for BF537/6/4 default GPIO PFx PORTF. 
+ +config BF537_PORT_G + bool "Select BF537/6/4 default GPIO PFx PORTG" + depends on (BF537 || BF536 || BF534) + help + Quick Hack for BF537/6/4 default GPIO PFx PORTG. + +config BF537_PORT_H + bool "Select BF537/6/4 default GPIO PFx PORTH" + depends on (BF537 || BF536 || BF534) + help + Quick Hack for BF537/6/4 default GPIO PFx PORTH + Use only when Blackfin EMAC support is not required. + +endchoice + +comment "Interrupt Priority Assignment" +menu "Priority" + +config IRQ_PLL_WAKEUP + int "IRQ_PLL_WAKEUP" + default 7 +config IRQ_DMA_ERROR + int "IRQ_DMA_ERROR Generic" + default 7 +config IRQ_ERROR + int "IRQ_ERROR: CAN MAC SPORT0 SPORT1 SPI UART0 UART1" + default 7 +config IRQ_RTC + int "IRQ_RTC" + default 8 +config IRQ_PPI + int "IRQ_PPI" + default 8 +config IRQ_SPORT0_RX + int "IRQ_SPORT0_RX" + default 9 +config IRQ_SPORT0_TX + int "IRQ_SPORT0_TX" + default 9 +config IRQ_SPORT1_RX + int "IRQ_SPORT1_RX" + default 9 +config IRQ_SPORT1_TX + int "IRQ_SPORT1_TX" + default 9 +config IRQ_TWI + int "IRQ_TWI" + default 10 +config IRQ_SPI + int "IRQ_SPI" + default 10 +config IRQ_UART0_RX + int "IRQ_UART0_RX" + default 10 +config IRQ_UART0_TX + int "IRQ_UART0_TX" + default 10 +config IRQ_UART1_RX + int "IRQ_UART1_RX" + default 10 +config IRQ_UART1_TX + int "IRQ_UART1_TX" + default 10 +config IRQ_CAN_RX + int "IRQ_CAN_RX" + default 11 +config IRQ_CAN_TX + int "IRQ_CAN_TX" + default 11 +config IRQ_MAC_RX + int "IRQ_MAC_RX" + default 11 +config IRQ_MAC_TX + int "IRQ_MAC_TX" + default 11 +config IRQ_TMR0 + int "IRQ_TMR0" + default 12 +config IRQ_TMR1 + int "IRQ_TMR1" + default 12 +config IRQ_TMR2 + int "IRQ_TMR2" + default 12 +config IRQ_TMR3 + int "IRQ_TMR3" + default 12 +config IRQ_TMR4 + int "IRQ_TMR4" + default 12 +config IRQ_TMR5 + int "IRQ_TMR5" + default 12 +config IRQ_TMR6 + int "IRQ_TMR6" + default 12 +config IRQ_TMR7 + int "IRQ_TMR7" + default 12 +config IRQ_PROG_INTA + int "IRQ_PROG_INTA" + default 12 +config IRQ_PORTG_INTB + int "IRQ_PORTG_INTB" + default 12 +config IRQ_MEM_DMA0 + int "IRQ_MEM_DMA0" + default 13 +config IRQ_MEM_DMA1 + int "IRQ_MEM_DMA1" + default 13 +config IRQ_WATCH + int "IRQ_WATCH" + default 13 + + help + Enter the priority numbers between 7-13 ONLY. Others are Reserved. + This applies to all the above. It is not recommended to assign the + highest priority number 7 to UART or any other device. 
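These priority symbols are consumed at boot when the interrupt assignment registers are programmed. The BF533 version of program_IAR() earlier in this series stores each value as (CONFIG_xxx - 7) at the peripheral's bit position in one of the SIC_IARx registers, so 7 selects IVG7 (the most urgent level the help text warns about) and 13 selects IVG13; the BF537 build adds its own ints-priority.o (see the Makefile below) and presumably follows the same scheme. A minimal, self-contained sketch of that packing, where EXAMPLE_RTC_POS is an assumed bit position used purely for illustration (the real *_POS constants come from the machine headers):

#include <stdio.h>
#include <stdint.h>

/*
 * Sketch only: how a Kconfig priority (7..13) contributes to a SIC_IARx
 * register, following the (CONFIG_xxx - 7) << xxx_POS pattern used by the
 * BF533 program_IAR() above.
 */
#define EXAMPLE_RTC_POS 28	/* assumed position, illustration only */

static uint32_t iar_bits(unsigned int priority, unsigned int pos)
{
	return (uint32_t)(priority - 7) << pos;	/* 7 (IVG7) encodes as 0 */
}

int main(void)
{
	/* Kconfig default of 8 for IRQ_RTC -> value 1 in its 4-bit field */
	printf("SIC_IAR contribution: 0x%08x\n",
	       (unsigned)iar_bits(8, EXAMPLE_RTC_POS));
	return 0;
}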
+ +endmenu + +endmenu + +endif diff --git a/arch/blackfin/mach-bf537/Makefile b/arch/blackfin/mach-bf537/Makefile new file mode 100644 index 000000000000..f32d44215bb7 --- /dev/null +++ b/arch/blackfin/mach-bf537/Makefile @@ -0,0 +1,9 @@ +# +# arch/blackfin/mach-bf537/Makefile +# + +extra-y := head.o + +obj-y := ints-priority.o + +obj-$(CONFIG_CPU_FREQ) += cpu.o diff --git a/arch/blackfin/mach-bf537/boards/Makefile b/arch/blackfin/mach-bf537/boards/Makefile new file mode 100644 index 000000000000..23323cacc3aa --- /dev/null +++ b/arch/blackfin/mach-bf537/boards/Makefile @@ -0,0 +1,9 @@ +# +# arch/blackfin/mach-bf537/boards/Makefile +# + +obj-y += eth_mac.o +obj-$(CONFIG_GENERIC_BOARD) += generic_board.o +obj-$(CONFIG_BFIN537_STAMP) += stamp.o led.o +obj-$(CONFIG_BFIN537_BLUETECHNIX_CM) += cm_bf537.o +obj-$(CONFIG_PNAV10) += pnav10.o diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c new file mode 100644 index 000000000000..6a60618a78ec --- /dev/null +++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c @@ -0,0 +1,364 @@ +/* + * File: arch/blackfin/mach-bf537/boards/cm_bf537.c + * Based on: arch/blackfin/mach-bf533/boards/ezkit.c + * Author: Aidan Williams + * + * Created: 2005 + * Description: Board description file + * + * Modified: + * Copyright 2005 National ICT Australia (NICTA) + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +char *bfin_board_name = "Bluetechnix CM BF537"; + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) +/* all SPI peripherals info goes here */ + +#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) +static struct mtd_partition bfin_spi_flash_partitions[] = { + { + .name = "bootloader", + .size = 0x00020000, + .offset = 0, + .mask_flags = MTD_CAP_ROM + },{ + .name = "kernel", + .size = 0xe0000, + .offset = 0x20000 + },{ + .name = "file system", + .size = 0x700000, + .offset = 0x00100000, + } +}; + +static struct flash_platform_data bfin_spi_flash_data = { + .name = "m25p80", + .parts = bfin_spi_flash_partitions, + .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), + .type = "m25p64", +}; + +/* SPI flash chip (m25p64) */ +static struct bfin5xx_spi_chip spi_flash_chip_info = { + .enable_dma = 0, /* use dma transfer with this chip*/ + .bits_per_word = 8, +}; +#endif + +#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) +/* SPI ADC chip */ +static struct bfin5xx_spi_chip spi_adc_chip_info = { + .enable_dma = 1, /* use dma transfer with this chip*/ + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +static struct bfin5xx_spi_chip ad1836_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE) +static struct bfin5xx_spi_chip ad9960_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) +static struct bfin5xx_spi_chip spi_mmc_chip_info = { + .enable_dma = 1, + .bits_per_word = 8, +}; +#endif + +static struct spi_board_info bfin_spi_board_info[] __initdata = { +#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) + { + /* the modalias must be the same as spi device driver name */ + .modalias = "m25p80", /* Name of spi_driver for this device */ + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ + .platform_data = &bfin_spi_flash_data, + .controller_data = &spi_flash_chip_info, + .mode = SPI_MODE_3, + }, +#endif + +#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) + { + .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ + .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. 
*/ + .platform_data = NULL, /* No spi_driver specific config */ + .controller_data = &spi_adc_chip_info, + }, +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) + { + .modalias = "ad1836-spi", + .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT, + .controller_data = &ad1836_spi_chip_info, + }, +#endif + +#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE) + { + .modalias = "ad9960-spi", + .max_speed_hz = 10000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 1, + .controller_data = &ad9960_spi_chip_info, + }, +#endif + +#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) + { + .modalias = "spi_mmc_dummy", + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 7, + .platform_data = NULL, + .controller_data = &spi_mmc_chip_info, + .mode = SPI_MODE_3, + }, + { + .modalias = "spi_mmc", + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SPI_MMC_CS_CHAN, + .platform_data = NULL, + .controller_data = &spi_mmc_chip_info, + .mode = SPI_MODE_3, + }, +#endif +}; + +/* SPI controller data */ +static struct bfin5xx_spi_master spi_bfin_master_info = { + .num_chipselect = 8, + .enable_dma = 1, /* master has the ability to do dma transfer */ +}; + +static struct platform_device spi_bfin_master_device = { + .name = "bfin-spi-master", + .id = 1, /* Bus number */ + .dev = { + .platform_data = &spi_bfin_master_info, /* Passed to driver */ + }, +}; +#endif /* spi master and devices */ + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) +static struct platform_device rtc_device = { + .name = "rtc-bfin", + .id = -1, +}; +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +static struct resource smc91x_resources[] = { + { + .start = 0x20200300, + .end = 0x20200300 + 16, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF14, + .end = IRQ_PF14, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) +static struct resource isp1362_hcd_resources[] = { + { + .start = 0x20308000, + .end = 0x20308000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x20308004, + .end = 0x20308004, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PG15, + .end = IRQ_PG15, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct isp1362_platform_data isp1362_priv = { + .sel15Kres = 1, + .clknotstop = 0, + .oc_enable = 0, + .int_act_high = 0, + .int_edge_triggered = 0, + .remote_wakeup_connected = 0, + .no_power_switching = 1, + .power_switching_mode = 0, +}; + +static struct platform_device isp1362_hcd_device = { + .name = "isp1362-hcd", + .id = 0, + .dev = { + .platform_data = &isp1362_priv, + }, + .num_resources = ARRAY_SIZE(isp1362_hcd_resources), + .resource = isp1362_hcd_resources, +}; +#endif + +#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) +static struct resource net2272_bfin_resources[] = { + { + .start = 0x20200000, + .end = 0x20200000 + 0x100, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF7, + .end = IRQ_PF7, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct platform_device net2272_bfin_device = 
{ + .name = "net2272", + .id = -1, + .num_resources = ARRAY_SIZE(net2272_bfin_resources), + .resource = net2272_bfin_resources, +}; +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +static struct resource bfin_uart_resources[] = { + { + .start = 0xFFC00400, + .end = 0xFFC004FF, + .flags = IORESOURCE_MEM, + },{ + .start = 0xFFC02000, + .end = 0xFFC020FF, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device bfin_uart_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart_resources), + .resource = bfin_uart_resources, +}; +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +static struct platform_device bfin_sport0_uart_device = { + .name = "bfin-sport-uart", + .id = 0, +}; + +static struct platform_device bfin_sport1_uart_device = { + .name = "bfin-sport-uart", + .id = 1, +}; +#endif + +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) +static struct platform_device bfin_mac_device = { + .name = "bfin_mac", +}; +#endif + +static struct platform_device *cm_bf537_devices[] __initdata = { +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) + &rtc_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) + &bfin_uart_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) + &bfin_sport0_uart_device, + &bfin_sport1_uart_device, +#endif + +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) + &isp1362_hcd_device, +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + &smc91x_device, +#endif + +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) + &bfin_mac_device, +#endif + +#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) + &net2272_bfin_device, +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + &spi_bfin_master_device, +#endif +}; + +static int __init cm_bf537_init(void) +{ + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + platform_add_devices(cm_bf537_devices, ARRAY_SIZE(cm_bf537_devices)); +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); +#endif + return 0; +} + +arch_initcall(cm_bf537_init); diff --git a/arch/blackfin/mach-bf537/boards/eth_mac.c b/arch/blackfin/mach-bf537/boards/eth_mac.c new file mode 100644 index 000000000000..e129a08d63de --- /dev/null +++ b/arch/blackfin/mach-bf537/boards/eth_mac.c @@ -0,0 +1,51 @@ +/* + * arch/blackfin/mach-bf537/board/eth_mac.c + * + * Copyright (C) 2007 Analog Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include + +#if defined(CONFIG_GENERIC_BOARD) \ + || defined(CONFIG_BFIN537_STAMP) + +/* + * Currently the MAC address is saved in Flash by U-Boot + */ +#define FLASH_MAC 0x203f0000 + +void get_bf537_ether_addr(char *addr) +{ + unsigned int flash_mac = (unsigned int) FLASH_MAC; + *(u32 *)(&(addr[0])) = bfin_read32(flash_mac); + flash_mac += 4; + *(u16 *)(&(addr[4])) = bfin_read16(flash_mac); +} + +#else + +/* + * Provide MAC address function for other specific board setting + */ +void get_bf537_ether_addr(char *addr) +{ + printk(KERN_WARNING "%s: No valid Ethernet MAC address found\n",__FILE__); +} + +#endif + +EXPORT_SYMBOL(get_bf537_ether_addr); diff --git a/arch/blackfin/mach-bf537/boards/generic_board.c b/arch/blackfin/mach-bf537/boards/generic_board.c new file mode 100644 index 000000000000..9019c0edbe7c --- /dev/null +++ b/arch/blackfin/mach-bf537/boards/generic_board.c @@ -0,0 +1,445 @@ +/* + * File: arch/blackfin/mach-bf537/boards/generic_board.c + * Based on: arch/blackfin/mach-bf533/boards/ezkit.c + * Author: Aidan Williams + * + * Created: + * Description: + * + * Modified: + * Copyright 2005 National ICT Australia (NICTA) + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +char *bfin_board_name = "UNKNOWN BOARD"; + +/* + * Driver needs to know address, irq and flag pin. 
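+ * The resource arrays below express exactly that: IORESOURCE_MEM entries
+ * for the device's register window(s) and IORESOURCE_IRQ entries for the
+ * interrupt and, where one is used, the GPIO flag pin.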
+ */ + +#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) +static struct resource bfin_pcmcia_cf_resources[] = { + { + .start = 0x20310000, /* IO PORT */ + .end = 0x20312000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x20311000, /* Attribute Memeory */ + .end = 0x20311FFF, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PROG_INTA, + .end = IRQ_PROG_INTA, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, + },{ + .start = IRQ_PF4, + .end = IRQ_PF4, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, + },{ + .start = 6, /* Card Detect PF6 */ + .end = 6, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pcmcia_cf_device = { + .name = "bfin_cf_pcmcia", + .id = -1, + .num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources), + .resource = bfin_pcmcia_cf_resources, +}; +#endif + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) +static struct platform_device rtc_device = { + .name = "rtc-bfin", + .id = -1, +}; +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +static struct resource smc91x_resources[] = { + { + .name = "smc91x-regs", + .start = 0x20300300, + .end = 0x20300300 + 16, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PROG_INTB, + .end = IRQ_PROG_INTB, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + },{ + /* + * denotes the flag pin and is used directly if + * CONFIG_IRQCHIP_DEMUX_GPIO is defined. + */ + .start = IRQ_PF7, + .end = IRQ_PF7, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) +static struct resource sl811_hcd_resources[] = { + { + .start = 0x20340000, + .end = 0x20340000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x20340004, + .end = 0x20340004, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PROG_INTA, + .end = IRQ_PROG_INTA, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + },{ + .start = IRQ_PF0 + CONFIG_USB_SL811_BFIN_GPIO, + .end = IRQ_PF0 + CONFIG_USB_SL811_BFIN_GPIO, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) +void sl811_port_power(struct device *dev, int is_on) +{ + unsigned short mask = (1< +#include + +/* All functions in this file save the registers they uses. + So there is no need to save any registers before calling them. */ + + .text; + +/* Initialize LEDs. */ + +ENTRY(_led_init) + LINK 12; + [--SP] = P0; + [--SP] = R0; + [--SP] = R1; + [--SP] = R2; + R1 = PF6|PF7|PF8|PF9|PF10|PF11 (Z); + R2 = ~R1; + + P0.H = hi(PORTF_FER); + P0.L = lo(PORTF_FER); + R0 = W[P0](Z); + SSYNC; + R0 = R0 & R2; + W[P0] = R0.L; + SSYNC; + + P0.H = hi(PORTFIO_DIR); + P0.L = lo(PORTFIO_DIR); + R0 = W[P0](Z); + SSYNC; + R0 = R0 | R1; + W[P0] = R0.L; + SSYNC; + + P0.H = hi(PORTFIO_INEN); + P0.L = lo(PORTFIO_INEN); + R0 = W[P0](Z); + SSYNC; + R0 = R0 & R2; + W[P0] = R0.L; + SSYNC; + + R2 = [SP++]; + R1 = [SP++]; + R0 = [SP++]; + P0 = [SP++]; + UNLINK; + RTS; + .size _led_init, .-_led_init + +/* Set one LED on. Leave other LEDs unchanged. + It expects the LED number passed through R0. 
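+   (LED numbers 1..6 correspond to PF6..PF11: the code adds 5 to the
+   number and uses the result as the bit position in PORTFIO.)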
*/ + +ENTRY(_led_on) + LINK 12; + [--SP] = P0; + [--SP] = R1; + CALL _led_init; + R1 = 1; + R0 += 5; + R1 <<= R0; + P0.H = hi(PORTFIO); + P0.L = lo(PORTFIO); + R0 = W[P0](Z); + SSYNC; + R0 = R0 | R1; + W[P0] = R0.L; + SSYNC; + R1 = [SP++]; + P0 = [SP++]; + UNLINK; + RTS; + .size _led_on, .-_led_on + +/* Set one LED off. Leave other LEDs unchanged. */ + +ENTRY(_led_off) + LINK 12; + [--SP] = P0; + [--SP] = R1; + CALL _led_init; + R1 = 1; + R0 += 5; + R1 <<= R0; + R1 = ~R1; + P0.H = hi(PORTFIO); + P0.L = lo(PORTFIO); + R0 = W[P0](Z); + SSYNC; + R0 = R0 & R1; + W[P0] = R0.L; + SSYNC; + R1 = [SP++]; + P0 = [SP++]; + UNLINK; + RTS; + .size _led_off, .-_led_off + +/* Toggle one LED. Leave other LEDs unchanged. */ + +ENTRY(_led_toggle) + LINK 12; + [--SP] = P0; + [--SP] = R1; + CALL _led_init; + R1 = 1; + R0 += 5; + R1 <<= R0; + P0.H = hi(PORTFIO); + P0.L = lo(PORTFIO); + R0 = W[P0](Z); + SSYNC; + R0 = R0 ^ R1; + W[P0] = R0.L; + SSYNC; + R1 = [SP++]; + P0 = [SP++]; + UNLINK; + RTS; + .size _led_toggle, .-_led_toggle + +/* Display the number using LEDs in binary format. */ + +ENTRY(_led_disp_num) + LINK 12; + [--SP] = P0; + [--SP] = R1; + [--SP] = R2; + CALL _led_init; + R1 = 0x3f(X); + R0 = R0 & R1; + R2 = 6(X); + R0 <<= R2; + R1 <<= R2; + P0.H = hi(PORTFIO); + P0.L = lo(PORTFIO); + R2 = W[P0](Z); + SSYNC; + R1 = ~R1; + R2 = R2 & R1; + R2 = R2 | R0; + W[P0] = R2.L; + SSYNC; + R2 = [SP++]; + R1 = [SP++]; + P0 = [SP++]; + UNLINK; + RTS; + .size _led_disp_num, .-_led_disp_num + +/* Toggle the number using LEDs in binary format. */ + +ENTRY(_led_toggle_num) + LINK 12; + [--SP] = P0; + [--SP] = R1; + [--SP] = R2; + CALL _led_init; + R1 = 0x3f(X); + R0 = R0 & R1; + R1 = 6(X); + R0 <<= R1; + P0.H = hi(PORTFIO); + P0.L = lo(PORTFIO); + R1 = W[P0](Z); + SSYNC; + R1 = R1 ^ R0; + W[P0] = R1.L; + SSYNC; + R2 = [SP++]; + R1 = [SP++]; + P0 = [SP++]; + UNLINK; + RTS; + .size _led_toggle_num, .-_led_toggle_num + diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c new file mode 100644 index 000000000000..40d3a1b70ee7 --- /dev/null +++ b/arch/blackfin/mach-bf537/boards/pnav10.c @@ -0,0 +1,523 @@ +/* + * File: arch/blackfin/mach-bf537/boards/stamp.c + * Based on: arch/blackfin/mach-bf533/boards/ezkit.c + * Author: Aidan Williams + * + * Created: + * Description: + * + * Modified: + * Copyright 2005 National ICT Australia (NICTA) + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) +#include +#endif +#include +#include +#include + +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +char *bfin_board_name = "PNAV-1.0"; + +/* + * Driver needs to know address, irq and flag pin. + */ + +#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) +static struct resource bfin_pcmcia_cf_resources[] = { + { + .start = 0x20310000, /* IO PORT */ + .end = 0x20312000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x20311000, /* Attribute Memeory */ + .end = 0x20311FFF, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF4, + .end = IRQ_PF4, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, + },{ + .start = 6, /* Card Detect PF6 */ + .end = 6, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pcmcia_cf_device = { + .name = "bfin_cf_pcmcia", + .id = -1, + .num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources), + .resource = bfin_pcmcia_cf_resources, +}; +#endif + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) +static struct platform_device rtc_device = { + .name = "rtc-bfin", + .id = -1, +}; +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +static struct resource smc91x_resources[] = { + { + .name = "smc91x-regs", + .start = 0x20300300, + .end = 0x20300300 + 16, + .flags = IORESOURCE_MEM, + },{ + + .start = IRQ_PF7, + .end = IRQ_PF7, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) +static struct resource sl811_hcd_resources[] = { + { + .start = 0x20340000, + .end = 0x20340000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x20340004, + .end = 0x20340004, + .flags = IORESOURCE_MEM, + },{ + .start = CONFIG_USB_SL811_BFIN_IRQ, + .end = CONFIG_USB_SL811_BFIN_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) +void sl811_port_power(struct device *dev, int is_on) +{ + unsigned short mask = (1 << CONFIG_USB_SL811_BFIN_GPIO_VBUS); + + bfin_write_PORT_FER(bfin_read_PORT_FER() & ~mask); + bfin_write_FIO_DIR(bfin_read_FIO_DIR() | mask); + + if (is_on) + bfin_write_FIO_FLAG_S(mask); + else + bfin_write_FIO_FLAG_C(mask); +} +#endif + +static struct sl811_platform_data sl811_priv = { + .potpg = 10, + .power = 250, /* == 500mA */ +#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) + .port_power = &sl811_port_power, +#endif +}; + +static struct platform_device sl811_hcd_device = { + .name = "sl811-hcd", + .id = 0, + .dev = { + .platform_data = &sl811_priv, + }, + .num_resources = ARRAY_SIZE(sl811_hcd_resources), + .resource = sl811_hcd_resources, +}; +#endif + +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) +static struct resource isp1362_hcd_resources[] = { + { + .start = 0x20360000, + .end = 0x20360000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x20360004, + .end = 0x20360004, + .flags = IORESOURCE_MEM, + },{ + .start = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ, + .end = 
CONFIG_USB_ISP1362_BFIN_GPIO_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct isp1362_platform_data isp1362_priv = { + .sel15Kres = 1, + .clknotstop = 0, + .oc_enable = 0, + .int_act_high = 0, + .int_edge_triggered = 0, + .remote_wakeup_connected = 0, + .no_power_switching = 1, + .power_switching_mode = 0, +}; + +static struct platform_device isp1362_hcd_device = { + .name = "isp1362-hcd", + .id = 0, + .dev = { + .platform_data = &isp1362_priv, + }, + .num_resources = ARRAY_SIZE(isp1362_hcd_resources), + .resource = isp1362_hcd_resources, +}; +#endif + +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) +static struct platform_device bfin_mac_device = { + .name = "bfin_mac", +}; +#endif + +#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) +static struct resource net2272_bfin_resources[] = { + { + .start = 0x20300000, + .end = 0x20300000 + 0x100, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF7, + .end = IRQ_PF7, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct platform_device net2272_bfin_device = { + .name = "net2272", + .id = -1, + .num_resources = ARRAY_SIZE(net2272_bfin_resources), + .resource = net2272_bfin_resources, +}; +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) +/* all SPI peripherals info goes here */ + +#if defined(CONFIG_MTD_M25P80) \ + || defined(CONFIG_MTD_M25P80_MODULE) +static struct mtd_partition bfin_spi_flash_partitions[] = { + { + .name = "bootloader", + .size = 0x00020000, + .offset = 0, + .mask_flags = MTD_CAP_ROM + },{ + .name = "kernel", + .size = 0xe0000, + .offset = 0x20000 + },{ + .name = "file system", + .size = 0x700000, + .offset = 0x00100000, + } +}; + +static struct flash_platform_data bfin_spi_flash_data = { + .name = "m25p80", + .parts = bfin_spi_flash_partitions, + .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), + .type = "m25p64", +}; + +/* SPI flash chip (m25p64) */ +static struct bfin5xx_spi_chip spi_flash_chip_info = { + .enable_dma = 0, /* use dma transfer with this chip*/ + .bits_per_word = 8, +}; +#endif + +#if defined(CONFIG_SPI_ADC_BF533) \ + || defined(CONFIG_SPI_ADC_BF533_MODULE) +/* SPI ADC chip */ +static struct bfin5xx_spi_chip spi_adc_chip_info = { + .enable_dma = 1, /* use dma transfer with this chip*/ + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) \ + || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +static struct bfin5xx_spi_chip ad1836_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE) +static struct bfin5xx_spi_chip ad9960_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) +static struct bfin5xx_spi_chip spi_mmc_chip_info = { + .enable_dma = 1, + .bits_per_word = 8, +}; +#endif + +#if defined(CONFIG_PBX) +static struct bfin5xx_spi_chip spi_si3xxx_chip_info = { + .ctl_reg = 0x4, /* send zero */ + .enable_dma = 0, + .bits_per_word = 8, + .cs_change_per_word = 1, +}; +#endif + + +#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) +static struct bfin5xx_spi_chip spi_ad7877_chip_info = { + .cs_change_per_word = 1, + .enable_dma = 0, + .bits_per_word = 16, +}; + +static const struct ad7877_platform_data bfin_ad7877_ts_info = { + .model = 7877, + .vref_delay_usecs = 50, /* internal, no capacitor */ + .x_plate_ohms = 419, + .y_plate_ohms = 486, + .pressure_max = 1000, + 
.pressure_min = 0, + .stopacq_polarity = 1, + .first_conversion_delay = 3, + .acquisition_time = 1, + .averaging = 1, + .pen_down_acc_interval = 1, +}; +#endif + +static struct spi_board_info bfin_spi_board_info[] __initdata = { +#if defined(CONFIG_MTD_M25P80) \ + || defined(CONFIG_MTD_M25P80_MODULE) + { + /* the modalias must be the same as spi device driver name */ + .modalias = "m25p80", /* Name of spi_driver for this device */ + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ + .platform_data = &bfin_spi_flash_data, + .controller_data = &spi_flash_chip_info, + .mode = SPI_MODE_3, + }, +#endif + +#if defined(CONFIG_SPI_ADC_BF533) \ + || defined(CONFIG_SPI_ADC_BF533_MODULE) + { + .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ + .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. */ + .platform_data = NULL, /* No spi_driver specific config */ + .controller_data = &spi_adc_chip_info, + }, +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) \ + || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) + { + .modalias = "ad1836-spi", + .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT, + .controller_data = &ad1836_spi_chip_info, + }, +#endif +#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE) + { + .modalias = "ad9960-spi", + .max_speed_hz = 10000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 1, + .controller_data = &ad9960_spi_chip_info, + }, +#endif +#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) + { + .modalias = "spi_mmc_dummy", + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 7, + .platform_data = NULL, + .controller_data = &spi_mmc_chip_info, + .mode = SPI_MODE_3, + }, + { + .modalias = "spi_mmc", + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SPI_MMC_CS_CHAN, + .platform_data = NULL, + .controller_data = &spi_mmc_chip_info, + .mode = SPI_MODE_3, + }, +#endif +#if defined(CONFIG_PBX) + { + .modalias = "fxs-spi", + .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 3, + .controller_data= &spi_si3xxx_chip_info, + .mode = SPI_MODE_3, + }, + { + .modalias = "fxo-spi", + .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 2, + .controller_data= &spi_si3xxx_chip_info, + .mode = SPI_MODE_3, + }, +#endif +#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) +{ + .modalias = "ad7877", + .platform_data = &bfin_ad7877_ts_info, + .irq = IRQ_PF2, + .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 5, + .controller_data = &spi_ad7877_chip_info, +}, +#endif + +}; + +/* SPI controller data */ +static struct bfin5xx_spi_master spi_bfin_master_info = { + .num_chipselect = 8, + .enable_dma = 1, /* master has the ability to do dma transfer */ +}; + +static struct platform_device spi_bfin_master_device = { + .name = "bfin-spi-master", + .id = 1, /* Bus number */ + .dev = { + .platform_data = &spi_bfin_master_info, /* Passed to driver */ + }, +}; +#endif /* spi master and devices */ + +#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) +static 
struct platform_device bfin_fb_device = { + .name = "bf537-fb", +}; +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +static struct resource bfin_uart_resources[] = { + { + .start = 0xFFC00400, + .end = 0xFFC004FF, + .flags = IORESOURCE_MEM, + },{ + .start = 0xFFC02000, + .end = 0xFFC020FF, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device bfin_uart_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart_resources), + .resource = bfin_uart_resources, +}; +#endif + + +static struct platform_device *stamp_devices[] __initdata = { +#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) + &bfin_pcmcia_cf_device, +#endif + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) + &rtc_device, +#endif + +#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) + &sl811_hcd_device, +#endif + +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) + &isp1362_hcd_device, +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + &smc91x_device, +#endif + +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) + &bfin_mac_device, +#endif + +#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) + &net2272_bfin_device, +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + &spi_bfin_master_device, +#endif + +#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) + &bfin_fb_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) + &bfin_uart_device, +#endif +}; + +static int __init stamp_init(void) +{ + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + spi_register_board_info(bfin_spi_board_info, + ARRAY_SIZE(bfin_spi_board_info)); +#endif + return 0; +} + +arch_initcall(stamp_init); diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c new file mode 100644 index 000000000000..ba2f875a7f7d --- /dev/null +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -0,0 +1,615 @@ +/* + * File: arch/blackfin/mach-bf537/boards/stamp.c + * Based on: arch/blackfin/mach-bf533/boards/ezkit.c + * Author: Aidan Williams + * + * Created: + * Description: + * + * Modified: + * Copyright 2005 National ICT Australia (NICTA) + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) +#include +#endif +#include +#include +#include +#include +#include + +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +char *bfin_board_name = "ADDS-BF537-STAMP"; + +/* + * Driver needs to know address, irq and flag pin. + */ + +#define ISP1761_BASE 0x203C0000 +#define ISP1761_IRQ IRQ_PF7 + +#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) +static struct resource bfin_isp1761_resources[] = { + [0] = { + .name = "isp1761-regs", + .start = ISP1761_BASE + 0x00000000, + .end = ISP1761_BASE + 0x000fffff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = ISP1761_IRQ, + .end = ISP1761_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_isp1761_device = { + .name = "isp1761", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_isp1761_resources), + .resource = bfin_isp1761_resources, +}; + +static struct platform_device *bfin_isp1761_devices[] = { + &bfin_isp1761_device, +}; + +int __init bfin_isp1761_init(void) +{ + unsigned int num_devices=ARRAY_SIZE(bfin_isp1761_devices); + + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + set_irq_type(ISP1761_IRQ, IRQF_TRIGGER_FALLING); + + return platform_add_devices(bfin_isp1761_devices, num_devices); +} + +void __exit bfin_isp1761_exit(void) +{ + platform_device_unregister(&bfin_isp1761_device); +} + +arch_initcall(bfin_isp1761_init); +#endif + +#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) +static struct resource bfin_pcmcia_cf_resources[] = { + { + .start = 0x20310000, /* IO PORT */ + .end = 0x20312000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x20311000, /* Attribute Memeory */ + .end = 0x20311FFF, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF4, + .end = IRQ_PF4, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, + },{ + .start = 6, /* Card Detect PF6 */ + .end = 6, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pcmcia_cf_device = { + .name = "bfin_cf_pcmcia", + .id = -1, + .num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources), + .resource = bfin_pcmcia_cf_resources, +}; +#endif + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) +static struct platform_device rtc_device = { + .name = "rtc-bfin", + .id = -1, +}; +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +static struct resource smc91x_resources[] = { + { + .name = "smc91x-regs", + .start = 0x20300300, + .end = 0x20300300 + 16, + .flags = IORESOURCE_MEM, + },{ + + .start = IRQ_PF7, + .end = IRQ_PF7, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) +static struct resource sl811_hcd_resources[] = { + { + .start = 0x20340000, + .end = 0x20340000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x20340004, + .end = 0x20340004, + .flags = IORESOURCE_MEM, + },{ + .start = CONFIG_USB_SL811_BFIN_IRQ, + .end = CONFIG_USB_SL811_BFIN_IRQ, + .flags = IORESOURCE_IRQ | 
IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) +void sl811_port_power(struct device *dev, int is_on) +{ + unsigned short mask = (1 << CONFIG_USB_SL811_BFIN_GPIO_VBUS); + + bfin_write_PORT_FER(bfin_read_PORT_FER() & ~mask); + bfin_write_FIO_DIR(bfin_read_FIO_DIR() | mask); + + if (is_on) + bfin_write_FIO_FLAG_S(mask); + else + bfin_write_FIO_FLAG_C(mask); +} +#endif + +static struct sl811_platform_data sl811_priv = { + .potpg = 10, + .power = 250, /* == 500mA */ +#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) + .port_power = &sl811_port_power, +#endif +}; + +static struct platform_device sl811_hcd_device = { + .name = "sl811-hcd", + .id = 0, + .dev = { + .platform_data = &sl811_priv, + }, + .num_resources = ARRAY_SIZE(sl811_hcd_resources), + .resource = sl811_hcd_resources, +}; +#endif + +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) +static struct resource isp1362_hcd_resources[] = { + { + .start = 0x20360000, + .end = 0x20360000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x20360004, + .end = 0x20360004, + .flags = IORESOURCE_MEM, + },{ + .start = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ, + .end = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct isp1362_platform_data isp1362_priv = { + .sel15Kres = 1, + .clknotstop = 0, + .oc_enable = 0, + .int_act_high = 0, + .int_edge_triggered = 0, + .remote_wakeup_connected = 0, + .no_power_switching = 1, + .power_switching_mode = 0, +}; + +static struct platform_device isp1362_hcd_device = { + .name = "isp1362-hcd", + .id = 0, + .dev = { + .platform_data = &isp1362_priv, + }, + .num_resources = ARRAY_SIZE(isp1362_hcd_resources), + .resource = isp1362_hcd_resources, +}; +#endif + +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) +static struct platform_device bfin_mac_device = { + .name = "bfin_mac", +}; +#endif + +#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) +static struct resource net2272_bfin_resources[] = { + { + .start = 0x20300000, + .end = 0x20300000 + 0x100, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF7, + .end = IRQ_PF7, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct platform_device net2272_bfin_device = { + .name = "net2272", + .id = -1, + .num_resources = ARRAY_SIZE(net2272_bfin_resources), + .resource = net2272_bfin_resources, +}; +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) +/* all SPI peripherals info goes here */ + +#if defined(CONFIG_MTD_M25P80) \ + || defined(CONFIG_MTD_M25P80_MODULE) +static struct mtd_partition bfin_spi_flash_partitions[] = { + { + .name = "bootloader", + .size = 0x00020000, + .offset = 0, + .mask_flags = MTD_CAP_ROM + },{ + .name = "kernel", + .size = 0xe0000, + .offset = 0x20000 + },{ + .name = "file system", + .size = 0x700000, + .offset = 0x00100000, + } +}; + +static struct flash_platform_data bfin_spi_flash_data = { + .name = "m25p80", + .parts = bfin_spi_flash_partitions, + .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), + .type = "m25p64", +}; + +/* SPI flash chip (m25p64) */ +static struct bfin5xx_spi_chip spi_flash_chip_info = { + .enable_dma = 0, /* use dma transfer with this chip*/ + .bits_per_word = 8, +}; +#endif + +#if defined(CONFIG_SPI_ADC_BF533) \ + || defined(CONFIG_SPI_ADC_BF533_MODULE) +/* SPI ADC chip */ +static struct bfin5xx_spi_chip spi_adc_chip_info = { + .enable_dma = 1, /* use dma transfer with this chip*/ + .bits_per_word = 16, +}; +#endif + +#if 
defined(CONFIG_SND_BLACKFIN_AD1836) \ + || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +static struct bfin5xx_spi_chip ad1836_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE) +static struct bfin5xx_spi_chip ad9960_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) +static struct bfin5xx_spi_chip spi_mmc_chip_info = { + .enable_dma = 1, + .bits_per_word = 8, +}; +#endif + +#if defined(CONFIG_PBX) +static struct bfin5xx_spi_chip spi_si3xxx_chip_info = { + .ctl_reg = 0x4, /* send zero */ + .enable_dma = 0, + .bits_per_word = 8, + .cs_change_per_word = 1, +}; +#endif + +#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) +static struct bfin5xx_spi_chip ad5304_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) +static struct bfin5xx_spi_chip spi_ad7877_chip_info = { +// .cs_change_per_word = 1, + .enable_dma = 0, + .bits_per_word = 16, +}; + +static const struct ad7877_platform_data bfin_ad7877_ts_info = { + .model = 7877, + .vref_delay_usecs = 50, /* internal, no capacitor */ + .x_plate_ohms = 419, + .y_plate_ohms = 486, + .pressure_max = 1000, + .pressure_min = 0, + .stopacq_polarity = 1, + .first_conversion_delay = 3, + .acquisition_time = 1, + .averaging = 1, + .pen_down_acc_interval = 1, +}; +#endif + +static struct spi_board_info bfin_spi_board_info[] __initdata = { +#if defined(CONFIG_MTD_M25P80) \ + || defined(CONFIG_MTD_M25P80_MODULE) + { + /* the modalias must be the same as spi device driver name */ + .modalias = "m25p80", /* Name of spi_driver for this device */ + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ + .platform_data = &bfin_spi_flash_data, + .controller_data = &spi_flash_chip_info, + .mode = SPI_MODE_3, + }, +#endif + +#if defined(CONFIG_SPI_ADC_BF533) \ + || defined(CONFIG_SPI_ADC_BF533_MODULE) + { + .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ + .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. 
*/ + .platform_data = NULL, /* No spi_driver specific config */ + .controller_data = &spi_adc_chip_info, + }, +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) \ + || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) + { + .modalias = "ad1836-spi", + .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT, + .controller_data = &ad1836_spi_chip_info, + }, +#endif +#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE) + { + .modalias = "ad9960-spi", + .max_speed_hz = 10000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 1, + .controller_data = &ad9960_spi_chip_info, + }, +#endif +#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) + { + .modalias = "spi_mmc_dummy", + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 0, + .platform_data = NULL, + .controller_data = &spi_mmc_chip_info, + .mode = SPI_MODE_3, + }, + { + .modalias = "spi_mmc", + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SPI_MMC_CS_CHAN, + .platform_data = NULL, + .controller_data = &spi_mmc_chip_info, + .mode = SPI_MODE_3, + }, +#endif +#if defined(CONFIG_PBX) + { + .modalias = "fxs-spi", + .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 3, + .controller_data= &spi_si3xxx_chip_info, + .mode = SPI_MODE_3, + }, + { + .modalias = "fxo-spi", + .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 2, + .controller_data= &spi_si3xxx_chip_info, + .mode = SPI_MODE_3, + }, +#endif +#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) + { + .modalias = "ad5304_spi", + .max_speed_hz = 1250000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 2, + .platform_data = NULL, + .controller_data = &ad5304_chip_info, + .mode = SPI_MODE_2, + }, +#endif +#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) + { + .modalias = "ad7877", + .platform_data = &bfin_ad7877_ts_info, + .irq = IRQ_PF6, + .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 1, + .controller_data = &spi_ad7877_chip_info, + }, +#endif +}; + +/* SPI controller data */ +static struct bfin5xx_spi_master spi_bfin_master_info = { + .num_chipselect = 8, + .enable_dma = 1, /* master has the ability to do dma transfer */ +}; + +static struct platform_device spi_bfin_master_device = { + .name = "bfin-spi-master", + .id = 1, /* Bus number */ + .dev = { + .platform_data = &spi_bfin_master_info, /* Passed to driver */ + }, +}; +#endif /* spi master and devices */ + +#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) +static struct platform_device bfin_fb_device = { + .name = "bf537-fb", +}; +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +static struct resource bfin_uart_resources[] = { + { + .start = 0xFFC00400, + .end = 0xFFC004FF, + .flags = IORESOURCE_MEM, + },{ + .start = 0xFFC02000, + .end = 0xFFC020FF, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device bfin_uart_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart_resources), + .resource = bfin_uart_resources, +}; +#endif + +#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) +static struct platform_device i2c_bfin_twi_device = { + .name = "i2c-bfin-twi", + .id = 0, +}; +#endif + +#if 
defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +static struct platform_device bfin_sport0_uart_device = { + .name = "bfin-sport-uart", + .id = 0, +}; + +static struct platform_device bfin_sport1_uart_device = { + .name = "bfin-sport-uart", + .id = 1, +}; +#endif + +static struct platform_device *stamp_devices[] __initdata = { +#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) + &bfin_pcmcia_cf_device, +#endif + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) + &rtc_device, +#endif + +#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) + &sl811_hcd_device, +#endif + +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) + &isp1362_hcd_device, +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + &smc91x_device, +#endif + +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) + &bfin_mac_device, +#endif + +#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) + &net2272_bfin_device, +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + &spi_bfin_master_device, +#endif + +#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) + &bfin_fb_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) + &bfin_uart_device, +#endif + +#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) + &i2c_bfin_twi_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) + &bfin_sport0_uart_device, + &bfin_sport1_uart_device, +#endif +}; + +static int __init stamp_init(void) +{ + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + spi_register_board_info(bfin_spi_board_info, + ARRAY_SIZE(bfin_spi_board_info)); +#endif + return 0; +} + +arch_initcall(stamp_init); diff --git a/arch/blackfin/mach-bf537/cpu.c b/arch/blackfin/mach-bf537/cpu.c new file mode 100644 index 000000000000..2d83b7e35469 --- /dev/null +++ b/arch/blackfin/mach-bf537/cpu.c @@ -0,0 +1,161 @@ +/* + * File: arch/blackfin/mach-bf537/cpu.c + * Based on: + * Author: michael.kang@analog.com + * + * Created: + * Description: clock scaling for the bf537 + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include + +/* CONFIG_CLKIN_HZ=11059200 */ +#define VCO5 (CONFIG_CLKIN_HZ*45) /*497664000 */ +#define VCO4 (CONFIG_CLKIN_HZ*36) /*398131200 */ +#define VCO3 (CONFIG_CLKIN_HZ*27) /*298598400 */ +#define VCO2 (CONFIG_CLKIN_HZ*18) /*199065600 */ +#define VCO1 (CONFIG_CLKIN_HZ*9) /*99532800 */ +#define VCO(x) VCO##x + +#define FREQ(x) {VCO(x),VCO(x)/4},{VCO(x),VCO(x)/2},{VCO(x),VCO(x)} +/* frequency */ +static struct cpufreq_frequency_table bf537_freq_table[] = { + FREQ(1), + FREQ(3), + {VCO4, VCO4 / 2}, {VCO4, VCO4}, + FREQ(5), + {0, CPUFREQ_TABLE_END}, +}; + +/* + * dpmc_fops->ioctl() + * static int dpmc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) + */ +static int bf537_getfreq(unsigned int cpu) +{ + unsigned long cclk_mhz, vco_mhz; + + /* The driver only support single cpu */ + if (cpu == 0) + dpmc_fops.ioctl(NULL, NULL, IOCTL_GET_CORECLOCK, &cclk_mhz); + else + cclk_mhz = -1; + return cclk_mhz; +} + +static int bf537_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + unsigned long cclk_mhz; + unsigned long vco_mhz; + unsigned long flags; + unsigned int index, vco_index; + int i; + + struct cpufreq_freqs freqs; + if (cpufreq_frequency_table_target + (policy, bf537_freq_table, target_freq, relation, &index)) + return -EINVAL; + cclk_mhz = bf537_freq_table[index].frequency; + vco_mhz = bf537_freq_table[index].index; + + dpmc_fops.ioctl(NULL, NULL, IOCTL_CHANGE_FREQUENCY, &vco_mhz); + freqs.old = bf537_getfreq(0); + freqs.new = cclk_mhz; + freqs.cpu = 0; + + pr_debug("cclk begin change to cclk %d,vco=%d,index=%d,target=%d,oldfreq=%d\n", + cclk_mhz, vco_mhz, index, target_freq, freqs.old); + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + local_irq_save(flags); + dpmc_fops.ioctl(NULL, NULL, IOCTL_SET_CCLK, &cclk_mhz); + local_irq_restore(flags); + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + vco_mhz = get_vco(); + cclk_mhz = get_cclk(); + return 0; +} + +/* make sure that only the "userspace" governor is run -- anything else wouldn't make sense on + * this platform, anyway. 
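+ * bf537_verify_speed() below simply clamps the requested range against
+ * bf537_freq_table, so the governor can only select entries from that table.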
+ */ +static int bf537_verify_speed(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &bf537_freq_table); +} + +static int __init __bf537_cpu_init(struct cpufreq_policy *policy) +{ + int result; + + if (policy->cpu != 0) + return -EINVAL; + + policy->governor = CPUFREQ_DEFAULT_GOVERNOR; + + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + /*Now ,only support one cpu */ + policy->cur = bf537_getfreq(0); + cpufreq_frequency_table_get_attr(bf537_freq_table, policy->cpu); + return cpufreq_frequency_table_cpuinfo(policy, bf537_freq_table); +} + +static struct freq_attr *bf537_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver bf537_driver = { + .verify = bf537_verify_speed, + .target = bf537_target, + .get = bf537_getfreq, + .init = __bf537_cpu_init, + .name = "bf537", + .owner = THIS_MODULE, + .attr = bf537_freq_attr, +}; + +static int __init bf537_cpu_init(void) +{ + return cpufreq_register_driver(&bf537_driver); +} + +static void __exit bf537_cpu_exit(void) +{ + cpufreq_unregister_driver(&bf537_driver); +} + +MODULE_AUTHOR("Mickael Kang"); +MODULE_DESCRIPTION("cpufreq driver for BF537 CPU"); +MODULE_LICENSE("GPL"); + +module_init(bf537_cpu_init); +module_exit(bf537_cpu_exit); diff --git a/arch/blackfin/mach-bf537/head.S b/arch/blackfin/mach-bf537/head.S new file mode 100644 index 000000000000..d104e1d8e07a --- /dev/null +++ b/arch/blackfin/mach-bf537/head.S @@ -0,0 +1,602 @@ +/* + * File: arch/blackfin/mach-bf537/head.S + * Based on: arch/blackfin/mach-bf533/head.S + * Author: Jeff Dionne COPYRIGHT 1998 D. Jeff Dionne + * + * Created: 1998 + * Description: Startup code for Blackfin BF537 + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#if CONFIG_BFIN_KERNEL_CLOCK +#include +#endif + +.global __rambase +.global __ramstart +.global __ramend +.extern ___bss_stop +.extern ___bss_start +.extern _bf53x_relocate_l1_mem + +#define INITIAL_STACK 0xFFB01000 + +.text + +ENTRY(__start) +ENTRY(__stext) + /* R0: argument of command line string, passed from uboot, save it */ + R7 = R0; + /* Set the SYSCFG register */ + R0 = 0x36; + SYSCFG = R0; /*Enable Cycle Counter and Nesting Of Interrupts(3rd Bit)*/ + R0 = 0; + + /* Clear Out All the data and pointer Registers*/ + R1 = R0; + R2 = R0; + R3 = R0; + R4 = R0; + R5 = R0; + R6 = R0; + + P0 = R0; + P1 = R0; + P2 = R0; + P3 = R0; + P4 = R0; + P5 = R0; + + LC0 = r0; + LC1 = r0; + L0 = r0; + L1 = r0; + L2 = r0; + L3 = r0; + + /* Clear Out All the DAG Registers*/ + B0 = r0; + B1 = r0; + B2 = r0; + B3 = r0; + + I0 = r0; + I1 = r0; + I2 = r0; + I3 = r0; + + M0 = r0; + M1 = r0; + M2 = r0; + M3 = r0; + + /* Turn off the icache */ + p0.l = (IMEM_CONTROL & 0xFFFF); + p0.h = (IMEM_CONTROL >> 16); + R1 = [p0]; + R0 = ~ENICPLB; + R0 = R0 & R1; + + /* Anomaly 05000125 */ +#ifdef ANOMALY_05000125 + CLI R2; + SSYNC; +#endif + [p0] = R0; + SSYNC; +#ifdef ANOMALY_05000125 + STI R2; +#endif + + /* Turn off the dcache */ + p0.l = (DMEM_CONTROL & 0xFFFF); + p0.h = (DMEM_CONTROL >> 16); + R1 = [p0]; + R0 = ~ENDCPLB; + R0 = R0 & R1; + + /* Anomaly 05000125 */ +#ifdef ANOMALY_05000125 + CLI R2; + SSYNC; +#endif + [p0] = R0; + SSYNC; +#ifdef ANOMALY_05000125 + STI R2; +#endif + + /* Initialise General-Purpose I/O Modules on BF537 */ + /* Rev 0.0 Anomaly 05000212 - PORTx_FER, + * PORT_MUX Registers Do Not accept "writes" correctly: + */ + p0.h = hi(BFIN_PORT_MUX); + p0.l = lo(BFIN_PORT_MUX); +#ifdef ANOMALY_05000212 + R0.L = W[P0]; /* Read */ + SSYNC; +#endif + R0 = (PGDE_UART | PFTE_UART)(Z); +#ifdef ANOMALY_05000212 + W[P0] = R0.L; /* Write */ + SSYNC; +#endif + W[P0] = R0.L; /* Enable both UARTS */ + SSYNC; + + p0.h = hi(PORTF_FER); + p0.l = lo(PORTF_FER); +#ifdef ANOMALY_05000212 + R0.L = W[P0]; /* Read */ + SSYNC; +#endif + R0 = 0x000F(Z); +#ifdef ANOMALY_05000212 + W[P0] = R0.L; /* Write */ + SSYNC; +#endif + /* Enable peripheral function of PORTF for UART0 and UART1 */ + W[P0] = R0.L; + SSYNC; + +#if !defined(CONFIG_BF534) + p0.h = hi(EMAC_SYSTAT); + p0.l = lo(EMAC_SYSTAT); + R0.h = 0xFFFF; /* Clear EMAC Interrupt Status bits */ + R0.l = 0xFFFF; + [P0] = R0; + SSYNC; +#endif + +#ifdef CONFIG_BF537_PORT_H + p0.h = hi(PORTH_FER); + p0.l = lo(PORTH_FER); + R0.L = W[P0]; /* Read */ + SSYNC; + R0 = 0x0000; + W[P0] = R0.L; /* Write */ + SSYNC; + W[P0] = R0.L; /* Disable peripheral function of PORTH */ + SSYNC; +#endif + + /*Initialise UART*/ + p0.h = hi(UART_LCR); + p0.l = lo(UART_LCR); + r0 = 0x0(Z); + w[p0] = r0.L; /* To enable DLL writes */ + ssync; + + p0.h = hi(UART_DLL); + p0.l = lo(UART_DLL); + r0 = 0x00(Z); + w[p0] = r0.L; + ssync; + + p0.h = hi(UART_DLH); + p0.l = lo(UART_DLH); + r0 = 0x00(Z); + w[p0] = r0.L; + ssync; + + p0.h = hi(UART_GCTL); + p0.l = lo(UART_GCTL); + r0 = 0x0(Z); + w[p0] = r0.L; /* To enable UART clock */ + ssync; + + /* Initialize stack pointer */ + sp.l = lo(INITIAL_STACK); + sp.h = hi(INITIAL_STACK); + fp = sp; + usp = sp; + + /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */ + call 
_bf53x_relocate_l1_mem; +#if CONFIG_BFIN_KERNEL_CLOCK + call _start_dma_code; +#endif + /* Code for initializing Async memory banks */ + + p2.h = hi(EBIU_AMBCTL1); + p2.l = lo(EBIU_AMBCTL1); + r0.h = hi(AMBCTL1VAL); + r0.l = lo(AMBCTL1VAL); + [p2] = r0; + ssync; + + p2.h = hi(EBIU_AMBCTL0); + p2.l = lo(EBIU_AMBCTL0); + r0.h = hi(AMBCTL0VAL); + r0.l = lo(AMBCTL0VAL); + [p2] = r0; + ssync; + + p2.h = hi(EBIU_AMGCTL); + p2.l = lo(EBIU_AMGCTL); + r0 = AMGCTLVAL; + w[p2] = r0; + ssync; + + /* This section keeps the processor in supervisor mode + * during kernel boot. Switches to user mode at end of boot. + * See page 3-9 of Hardware Reference manual for documentation. + */ + + /* EVT15 = _real_start */ + + p0.l = lo(EVT15); + p0.h = hi(EVT15); + p1.l = _real_start; + p1.h = _real_start; + [p0] = p1; + csync; + + p0.l = lo(IMASK); + p0.h = hi(IMASK); + p1.l = IMASK_IVG15; + p1.h = 0x0; + [p0] = p1; + csync; + + raise 15; + p0.l = .LWAIT_HERE; + p0.h = .LWAIT_HERE; + reti = p0; +#if defined(ANOMALY_05000281) + nop; nop; nop; +#endif + rti; + +.LWAIT_HERE: + jump .LWAIT_HERE; + +ENTRY(_real_start) + [ -- sp ] = reti; + p0.l = lo(WDOG_CTL); + p0.h = hi(WDOG_CTL); + r0 = 0xAD6(z); + w[p0] = r0; /* watchdog off for now */ + ssync; + + /* Code update for BSS size == 0 + * Zero out the bss region. + */ + + p1.l = ___bss_start; + p1.h = ___bss_start; + p2.l = ___bss_stop; + p2.h = ___bss_stop; + r0 = 0; + p2 -= p1; + lsetup (.L_clear_bss, .L_clear_bss ) lc0 = p2; +.L_clear_bss: + B[p1++] = r0; + + /* In case there is a NULL pointer reference + * Zero out region before stext + */ + + p1.l = 0x0; + p1.h = 0x0; + r0.l = __stext; + r0.h = __stext; + r0 = r0 >> 1; + p2 = r0; + r0 = 0; + lsetup (.L_clear_zero, .L_clear_zero ) lc0 = p2; +.L_clear_zero: + W[p1++] = r0; + + /* pass the uboot arguments to the global value command line */ + R0 = R7; + call _cmdline_init; + + p1.l = __rambase; + p1.h = __rambase; + r0.l = __sdata; + r0.h = __sdata; + [p1] = r0; + + p1.l = __ramstart; + p1.h = __ramstart; + p3.l = ___bss_stop; + p3.h = ___bss_stop; + + r1 = p3; + [p1] = r1; + + + /* + * load the current thread pointer and stack + */ + r1.l = _init_thread_union; + r1.h = _init_thread_union; + + r2.l = 0x2000; + r2.h = 0x0000; + r1 = r1 + r2; + sp = r1; + usp = sp; + fp = sp; + call _start_kernel; +.L_exit: + jump.s .L_exit; + +.section .l1.text +#if CONFIG_BFIN_KERNEL_CLOCK +ENTRY(_start_dma_code) + + /* Enable PHY CLK buffer output */ + p0.h = hi(VR_CTL); + p0.l = lo(VR_CTL); + r0.l = w[p0]; + bitset(r0, 14); + w[p0] = r0.l; + ssync; + + p0.h = hi(SIC_IWR); + p0.l = lo(SIC_IWR); + r0.l = 0x1; + r0.h = 0x0; + [p0] = r0; + SSYNC; + + /* + * Set PLL_CTL + * - [14:09] = MSEL[5:0] : CLKIN / VCO multiplication factors + * - [8] = BYPASS : BYPASS the PLL, run CLKIN into CCLK/SCLK + * - [7] = output delay (add 200ps of delay to mem signals) + * - [6] = input delay (add 200ps of input delay to mem signals) + * - [5] = PDWN : 1=All Clocks off + * - [3] = STOPCK : 1=Core Clock off + * - [1] = PLL_OFF : 1=Disable Power to PLL + * - [0] = DF : 1=Pass CLKIN/2 to PLL / 0=Pass CLKIN to PLL + * all other bits set to zero + */ + + p0.h = hi(PLL_LOCKCNT); + p0.l = lo(PLL_LOCKCNT); + r0 = 0x300(Z); + w[p0] = r0.l; + ssync; + + P2.H = hi(EBIU_SDGCTL); + P2.L = lo(EBIU_SDGCTL); + R0 = [P2]; + BITSET (R0, 24); + [P2] = R0; + SSYNC; + + r0 = CONFIG_VCO_MULT & 63; /* Load the VCO multiplier */ + r0 = r0 << 9; /* Shift it over, */ + r1 = CLKIN_HALF; /* Do we need to divide CLKIN by 2?*/ + r0 = r1 | r0; + r1 = PLL_BYPASS; /* Bypass the PLL? 
*/
+	r1 = r1 << 8;	/* Shift it over */
+	r0 = r1 | r0;	/* add them all together */
+
+	p0.h = hi(PLL_CTL);
+	p0.l = lo(PLL_CTL);	/* Load the address */
+	cli r2;	/* Disable interrupts */
+	ssync;
+	w[p0] = r0.l;	/* Set the value */
+	idle;	/* Wait for the PLL to stabilize */
+	sti r2;	/* Enable interrupts */
+
+.Lcheck_again:
+	p0.h = hi(PLL_STAT);
+	p0.l = lo(PLL_STAT);
+	R0 = W[P0](Z);
+	CC = BITTST(R0,5);
+	if ! CC jump .Lcheck_again;
+
+	/* Configure SCLK & CCLK Dividers */
+	r0 = (CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
+	p0.h = hi(PLL_DIV);
+	p0.l = lo(PLL_DIV);
+	w[p0] = r0.l;
+	ssync;
+
+	p0.l = lo(EBIU_SDRRC);
+	p0.h = hi(EBIU_SDRRC);
+	r0 = mem_SDRRC;
+	w[p0] = r0.l;
+	ssync;
+
+	p0.l = (EBIU_SDBCTL & 0xFFFF);
+	p0.h = (EBIU_SDBCTL >> 16);	/* SDRAM Memory Bank Control Register */
+	r0 = mem_SDBCTL;
+	w[p0] = r0.l;
+	ssync;
+
+	P2.H = hi(EBIU_SDGCTL);
+	P2.L = lo(EBIU_SDGCTL);
+	R0 = [P2];
+	BITCLR (R0, 24);
+	p0.h = hi(EBIU_SDSTAT);
+	p0.l = lo(EBIU_SDSTAT);
+	r2.l = w[p0];
+	cc = bittst(r2,3);
+	if !cc jump .Lskip;
+	NOP;
+	BITSET (R0, 23);
+.Lskip:
+	[P2] = R0;
+	SSYNC;
+
+	R0.L = lo(mem_SDGCTL);
+	R0.H = hi(mem_SDGCTL);
+	R1 = [p2];
+	R1 = R1 | R0;
+	[P2] = R1;
+	SSYNC;
+
+	p0.h = hi(SIC_IWR);
+	p0.l = lo(SIC_IWR);
+	r0.l = lo(IWR_ENABLE_ALL);
+	r0.h = hi(IWR_ENABLE_ALL);
+	[p0] = r0;
+	SSYNC;
+
+	RTS;
+#endif /* CONFIG_BFIN_KERNEL_CLOCK */
+
+ENTRY(_bfin_reset)
+	/* No more interrupts to be handled */
+	CLI R6;
+	SSYNC;
+
+#if defined(CONFIG_MTD_M25P80)
+/*
+ * The following code fixes the SPI flash reboot issue:
+ * the /CS signal of the chip, which uses PF10, is returned to GPIO mode
+ */
+	p0.h = hi(PORTF_FER);
+	p0.l = lo(PORTF_FER);
+	r0.l = 0x0000;
+	w[p0] = r0.l;
+	SSYNC;
+
+/* /CS return to high */
+	p0.h = hi(PORTFIO);
+	p0.l = lo(PORTFIO);
+	r0.l = 0xFFFF;
+	w[p0] = r0.l;
+	SSYNC;
+
+/* Delay some time; this is necessary */
+	r1.h = 0;
+	r1.l = 0x400;
+	p1 = r1;
+	lsetup (_delay_lab1,_delay_lab1_end ) lc1 = p1;
+_delay_lab1:
+	r0.h = 0;
+	r0.l = 0x8000;
+	p0 = r0;
+	lsetup (_delay_lab0,_delay_lab0_end ) lc0 = p0;
+_delay_lab0:
+	nop;
+_delay_lab0_end:
+	nop;
+_delay_lab1_end:
+	nop;
+#endif
+
+	/* Clear the bits 13-15 in SWRST if they weren't cleared */
+	p0.h = hi(SWRST);
+	p0.l = lo(SWRST);
+	csync;
+	r0.l = w[p0];
+
+	/* Clear the IMASK register */
+	p0.h = hi(IMASK);
+	p0.l = lo(IMASK);
+	r0 = 0x0;
+	[p0] = r0;
+
+	/* Clear the ILAT register */
+	p0.h = hi(ILAT);
+	p0.l = lo(ILAT);
+	r0 = [p0];
+	[p0] = r0;
+	SSYNC;
+
+	/* Disable the WDOG TIMER */
+	p0.h = hi(WDOG_CTL);
+	p0.l = lo(WDOG_CTL);
+	r0.l = 0xAD6;
+	w[p0] = r0.l;
+	SSYNC;
+
+	/* Clear the sticky bit in case it is already set */
+	p0.h = hi(WDOG_CTL);
+	p0.l = lo(WDOG_CTL);
+	r0.l = 0x8AD6;
+	w[p0] = r0.l;
+	SSYNC;
+
+	/* Program the count value */
+	R0.l = 0x100;
+	R0.h = 0x0;
+	P0.h = hi(WDOG_CNT);
+	P0.l = lo(WDOG_CNT);
+	[P0] = R0;
+	SSYNC;
+
+	/* Program WDOG_STAT if necessary */
+	P0.h = hi(WDOG_CTL);
+	P0.l = lo(WDOG_CTL);
+	R0 = W[P0](Z);
+	CC = BITTST(R0,1);
+	if !CC JUMP .LWRITESTAT;
+	CC = BITTST(R0,2);
+	if !CC JUMP .LWRITESTAT;
+	JUMP .LSKIP_WRITE;
+
+.LWRITESTAT:
+	/* When the watchdog timer is enabled,
+	 * a write to STAT will load the contents of CNT to STAT
+	 */
+	R0 = 0x0000(z);
+	P0.h = hi(WDOG_STAT);
+	P0.l = lo(WDOG_STAT);
+	[P0] = R0;
+	SSYNC;
+
+.LSKIP_WRITE:
+	/* Enable the reset event */
+	P0.h = hi(WDOG_CTL);
+	P0.l = lo(WDOG_CTL);
+	R0 = W[P0](Z);
+	BITCLR(R0,1);
+	BITCLR(R0,2);
+	W[P0] = R0.L;
+	SSYNC;
+	NOP;
+
+	/* Enable the wdog counter */
+	R0 = W[P0](Z);
+	BITCLR(R0,4);
+	W[P0] = R0.L;
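+	/* With the reset event selected and WDOG_CNT programmed to 0x100 above,
+	 * the counter expires almost as soon as it is enabled; the IDLE that
+	 * follows simply waits for the watchdog to reset the part.
+	 */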
+	SSYNC;
+
+	IDLE;
+
+	RTS;
+
+.data
+
+/*
+ * Set up the usable RAM. The size of RAM is determined, then
+ * an initial stack is set up at the end.
+ */
+
+.align 4
+__rambase:
+.long 0
+__ramstart:
+.long 0
+__ramend:
+.long 0
diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c
new file mode 100644
index 000000000000..fd6308eccbe6
--- /dev/null
+++ b/arch/blackfin/mach-bf537/ints-priority.c
@@ -0,0 +1,74 @@
+/*
+ * File: arch/blackfin/mach-bf537/ints-priority.c
+ * Based on: arch/blackfin/mach-bf533/ints-priority.c
+ * Author: Michael Hennerich
+ *
+ * Created:
+ * Description: Set up the interrupt priorities
+ *
+ * Modified:
+ * Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include
+#include
+#include
+
+void program_IAR(void)
+{
+	/* Program the IAR0 Register with the configured priority */
+	bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) |
+			((CONFIG_IRQ_DMA_ERROR - 7) << IRQ_DMA_ERROR_POS) |
+			((CONFIG_IRQ_ERROR - 7) << IRQ_ERROR_POS) |
+			((CONFIG_IRQ_RTC - 7) << IRQ_RTC_POS) |
+			((CONFIG_IRQ_PPI - 7) << IRQ_PPI_POS) |
+			((CONFIG_IRQ_SPORT0_RX - 7) << IRQ_SPORT0_RX_POS) |
+			((CONFIG_IRQ_SPORT0_TX - 7) << IRQ_SPORT0_TX_POS) |
+			((CONFIG_IRQ_SPORT1_RX - 7) << IRQ_SPORT1_RX_POS));
+
+	bfin_write_SIC_IAR1(((CONFIG_IRQ_SPORT1_TX - 7) << IRQ_SPORT1_TX_POS) |
+			((CONFIG_IRQ_TWI - 7) << IRQ_TWI_POS) |
+			((CONFIG_IRQ_SPI - 7) << IRQ_SPI_POS) |
+			((CONFIG_IRQ_UART0_RX - 7) << IRQ_UART0_RX_POS) |
+			((CONFIG_IRQ_UART0_TX - 7) << IRQ_UART0_TX_POS) |
+			((CONFIG_IRQ_UART1_RX - 7) << IRQ_UART1_RX_POS) |
+			((CONFIG_IRQ_UART1_TX - 7) << IRQ_UART1_TX_POS) |
+			((CONFIG_IRQ_CAN_RX - 7) << IRQ_CAN_RX_POS));
+
+	bfin_write_SIC_IAR2(((CONFIG_IRQ_CAN_TX - 7) << IRQ_CAN_TX_POS) |
+			((CONFIG_IRQ_MAC_RX - 7) << IRQ_MAC_RX_POS) |
+			((CONFIG_IRQ_MAC_TX - 7) << IRQ_MAC_TX_POS) |
+			((CONFIG_IRQ_TMR0 - 7) << IRQ_TMR0_POS) |
+			((CONFIG_IRQ_TMR1 - 7) << IRQ_TMR1_POS) |
+			((CONFIG_IRQ_TMR2 - 7) << IRQ_TMR2_POS) |
+			((CONFIG_IRQ_TMR3 - 7) << IRQ_TMR3_POS) |
+			((CONFIG_IRQ_TMR4 - 7) << IRQ_TMR4_POS));
+
+	bfin_write_SIC_IAR3(((CONFIG_IRQ_TMR5 - 7) << IRQ_TMR5_POS) |
+			((CONFIG_IRQ_TMR6 - 7) << IRQ_TMR6_POS) |
+			((CONFIG_IRQ_TMR7 - 7) << IRQ_TMR7_POS) |
+			((CONFIG_IRQ_PROG_INTA - 7) << IRQ_PROG_INTA_POS) |
+			((CONFIG_IRQ_PORTG_INTB - 7) << IRQ_PORTG_INTB_POS) |
+			((CONFIG_IRQ_MEM_DMA0 - 7) << IRQ_MEM_DMA0_POS) |
+			((CONFIG_IRQ_MEM_DMA1 - 7) << IRQ_MEM_DMA1_POS) |
+			((CONFIG_IRQ_WATCH - 7) << IRQ_WATCH_POS));
+
+	SSYNC();
+}
diff --git a/arch/blackfin/mach-bf561/Kconfig b/arch/blackfin/mach-bf561/Kconfig
new file mode 100644
index 000000000000..0a17c4cf0059
--- /dev/null
+++ b/arch/blackfin/mach-bf561/Kconfig
@@ -0,0 +1,222 @@
+if BF561
+
+menu "BF561 
Specific Configuration" + +comment "Core B Support" + +menu "Core B Support" + +config BF561_COREB + bool "Enable Core B support" + default y + +config BF561_COREB_RESET + bool "Enable Core B reset support" + default n + help + This requires code in the application that is loaded + into Core B. In order to reset, the application needs + to install an interrupt handler for Supplemental + Interrupt 0, that sets RETI to 0xff600000 and writes + bit 11 of SICB_SYSCR when bit 5 of SICA_SYSCR is 0. + This causes Core B to stall when Supplemental Interrupt + 0 is set, and will reset PC to 0xff600000 when + COREB_SRAM_INIT is cleared. + +endmenu + +comment "Interrupt Priority Assignment" + +menu "Priority" + +config IRQ_PLL_WAKEUP + int "PLL Wakeup Interrupt" + default 7 +config IRQ_DMA1_ERROR + int "DMA1 Error (generic)" + default 7 +config IRQ_DMA2_ERROR + int "DMA2 Error (generic)" + default 7 +config IRQ_IMDMA_ERROR + int "IMDMA Error (generic)" + default 7 +config IRQ_PPI0_ERROR + int "PPI0 Error Interrupt" + default 7 +config IRQ_PPI1_ERROR + int "PPI1 Error Interrupt" + default 7 +config IRQ_SPORT0_ERROR + int "SPORT0 Error Interrupt" + default 7 +config IRQ_SPORT1_ERROR + int "SPORT1 Error Interrupt" + default 7 +config IRQ_SPI_ERROR + int "SPI Error Interrupt" + default 7 +config IRQ_UART_ERROR + int "UART Error Interrupt" + default 7 +config IRQ_RESERVED_ERROR + int "Reserved Interrupt" + default 7 +config IRQ_DMA1_0 + int "DMA1 0 Interrupt(PPI1)" + default 8 +config IRQ_DMA1_1 + int "DMA1 1 Interrupt(PPI2)" + default 8 +config IRQ_DMA1_2 + int "DMA1 2 Interrupt" + default 8 +config IRQ_DMA1_3 + int "DMA1 3 Interrupt" + default 8 +config IRQ_DMA1_4 + int "DMA1 4 Interrupt" + default 8 +config IRQ_DMA1_5 + int "DMA1 5 Interrupt" + default 8 +config IRQ_DMA1_6 + int "DMA1 6 Interrupt" + default 8 +config IRQ_DMA1_7 + int "DMA1 7 Interrupt" + default 8 +config IRQ_DMA1_8 + int "DMA1 8 Interrupt" + default 8 +config IRQ_DMA1_9 + int "DMA1 9 Interrupt" + default 8 +config IRQ_DMA1_10 + int "DMA1 10 Interrupt" + default 8 +config IRQ_DMA1_11 + int "DMA1 11 Interrupt" + default 8 +config IRQ_DMA2_0 + int "DMA2 0 (SPORT0 RX)" + default 9 +config IRQ_DMA2_1 + int "DMA2 1 (SPORT0 TX)" + default 9 +config IRQ_DMA2_2 + int "DMA2 2 (SPORT1 RX)" + default 9 +config IRQ_DMA2_3 + int "DMA2 3 (SPORT2 TX)" + default 9 +config IRQ_DMA2_4 + int "DMA2 4 (SPI)" + default 9 +config IRQ_DMA2_5 + int "DMA2 5 (UART RX)" + default 9 +config IRQ_DMA2_6 + int "DMA2 6 (UART TX)" + default 9 +config IRQ_DMA2_7 + int "DMA2 7 Interrupt" + default 9 +config IRQ_DMA2_8 + int "DMA2 8 Interrupt" + default 9 +config IRQ_DMA2_9 + int "DMA2 9 Interrupt" + default 9 +config IRQ_DMA2_10 + int "DMA2 10 Interrupt" + default 9 +config IRQ_DMA2_11 + int "DMA2 11 Interrupt" + default 9 +config IRQ_TIMER0 + int "TIMER 0 Interrupt" + default 10 +config IRQ_TIMER1 + int "TIMER 1 Interrupt" + default 10 +config IRQ_TIMER2 + int "TIMER 2 Interrupt" + default 10 +config IRQ_TIMER3 + int "TIMER 3 Interrupt" + default 10 +config IRQ_TIMER4 + int "TIMER 4 Interrupt" + default 10 +config IRQ_TIMER5 + int "TIMER 5 Interrupt" + default 10 +config IRQ_TIMER6 + int "TIMER 6 Interrupt" + default 10 +config IRQ_TIMER7 + int "TIMER 7 Interrupt" + default 10 +config IRQ_TIMER8 + int "TIMER 8 Interrupt" + default 10 +config IRQ_TIMER9 + int "TIMER 9 Interrupt" + default 10 +config IRQ_TIMER10 + int "TIMER 10 Interrupt" + default 10 +config IRQ_TIMER11 + int "TIMER 11 Interrupt" + default 10 +config IRQ_PROG0_INTA + int "Programmable Flags0 A (8)" + 
default 11 +config IRQ_PROG0_INTB + int "Programmable Flags0 B (8)" + default 11 +config IRQ_PROG1_INTA + int "Programmable Flags1 A (8)" + default 11 +config IRQ_PROG1_INTB + int "Programmable Flags1 B (8)" + default 11 +config IRQ_PROG2_INTA + int "Programmable Flags2 A (8)" + default 11 +config IRQ_PROG2_INTB + int "Programmable Flags2 B (8)" + default 11 +config IRQ_DMA1_WRRD0 + int "MDMA1 0 write/read INT" + default 8 +config IRQ_DMA1_WRRD1 + int "MDMA1 1 write/read INT" + default 8 +config IRQ_DMA2_WRRD0 + int "MDMA2 0 write/read INT" + default 9 +config IRQ_DMA2_WRRD1 + int "MDMA2 1 write/read INT" + default 9 +config IRQ_IMDMA_WRRD0 + int "IMDMA 0 write/read INT" + default 12 +config IRQ_IMDMA_WRRD1 + int "IMDMA 1 write/read INT" + default 12 +config IRQ_WDTIMER + int "Watch Dog Timer" + default 13 + + help + Enter the priority numbers between 7-13 ONLY. Others are Reserved. + This applies to all the above. It is not recommended to assign the + highest priority number 7 to UART or any other device. + +endmenu + +endmenu + +endif diff --git a/arch/blackfin/mach-bf561/Makefile b/arch/blackfin/mach-bf561/Makefile new file mode 100644 index 000000000000..57f475a55161 --- /dev/null +++ b/arch/blackfin/mach-bf561/Makefile @@ -0,0 +1,9 @@ +# +# arch/blackfin/mach-bf561/Makefile +# + +extra-y := head.o + +obj-y := ints-priority.o + +obj-$(CONFIG_BF561_COREB) += coreb.o diff --git a/arch/blackfin/mach-bf561/boards/Makefile b/arch/blackfin/mach-bf561/boards/Makefile new file mode 100644 index 000000000000..886edc739ab4 --- /dev/null +++ b/arch/blackfin/mach-bf561/boards/Makefile @@ -0,0 +1,7 @@ +# +# arch/blackfin/mach-bf561/boards/Makefile +# + +obj-$(CONFIG_GENERIC_BOARD) += generic_board.o +obj-$(CONFIG_BFIN561_EZKIT) += ezkit.o +obj-$(CONFIG_BFIN561_BLUETECHNIX_CM) += cm_bf561.o diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c new file mode 100644 index 000000000000..6824e956d153 --- /dev/null +++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c @@ -0,0 +1,289 @@ +/* + * File: arch/blackfin/mach-bf533/boards/cm_bf561.c + * Based on: arch/blackfin/mach-bf533/boards/ezkit.c + * Author: Aidan Williams Copright 2005 + * + * Created: 2006 + * Description: Board description file + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +char *bfin_board_name = "Bluetechnix CM BF561"; + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) +/* all SPI perpherals info goes here */ + +#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) +static struct mtd_partition bfin_spi_flash_partitions[] = { + { + .name = "bootloader", + .size = 0x00020000, + .offset = 0, + .mask_flags = MTD_CAP_ROM + },{ + .name = "kernel", + .size = 0xe0000, + .offset = 0x20000 + },{ + .name = "file system", + .size = 0x700000, + .offset = 0x00100000, + } +}; + +static struct flash_platform_data bfin_spi_flash_data = { + .name = "m25p80", + .parts = bfin_spi_flash_partitions, + .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), + .type = "m25p64", +}; + +/* SPI flash chip (m25p64) */ +static struct bfin5xx_spi_chip spi_flash_chip_info = { + .enable_dma = 0, /* use dma transfer with this chip*/ + .bits_per_word = 8, +}; +#endif + +#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) +/* SPI ADC chip */ +static struct bfin5xx_spi_chip spi_adc_chip_info = { + .enable_dma = 1, /* use dma transfer with this chip*/ + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +static struct bfin5xx_spi_chip ad1836_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE) +static struct bfin5xx_spi_chip ad9960_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) +static struct bfin5xx_spi_chip spi_mmc_chip_info = { + .enable_dma = 1, + .bits_per_word = 8, +}; +#endif + +static struct spi_board_info bfin_spi_board_info[] __initdata = { +#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) + { + /* the modalias must be the same as spi device driver name */ + .modalias = "m25p80", /* Name of spi_driver for this device */ + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ + .platform_data = &bfin_spi_flash_data, + .controller_data = &spi_flash_chip_info, + .mode = SPI_MODE_3, + }, +#endif + +#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) + { + .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ + .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. 
*/ + .platform_data = NULL, /* No spi_driver specific config */ + .controller_data = &spi_adc_chip_info, + }, +#endif + +#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) + { + .modalias = "ad1836-spi", + .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT, + .controller_data = &ad1836_spi_chip_info, + }, +#endif +#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE) + { + .modalias = "ad9960-spi", + .max_speed_hz = 10000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = 1, + .controller_data = &ad9960_spi_chip_info, + }, +#endif +#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) + { + .modalias = "spi_mmc", + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SPI_MMC_CS_CHAN, + .platform_data = NULL, + .controller_data = &spi_mmc_chip_info, + .mode = SPI_MODE_3, + }, +#endif +}; + +/* SPI controller data */ +static struct bfin5xx_spi_master spi_bfin_master_info = { + .num_chipselect = 8, + .enable_dma = 1, /* master has the ability to do dma transfer */ +}; + +static struct platform_device spi_bfin_master_device = { + .name = "bfin-spi-master", + .id = 1, /* Bus number */ + .dev = { + .platform_data = &spi_bfin_master_info, /* Passed to driver */ + }, +}; +#endif /* spi master and devices */ + + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + +static struct resource smc91x_resources[] = { + { + .name = "smc91x-regs", + .start = 0x28000300, + .end = 0x28000300 + 16, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF0, + .end = IRQ_PF0, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) +static struct resource isp1362_hcd_resources[] = { + { + .start = 0x24008000, + .end = 0x24008000, + .flags = IORESOURCE_MEM, + },{ + .start = 0x24008004, + .end = 0x24008004, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PF47, + .end = IRQ_PF47, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct isp1362_platform_data isp1362_priv = { + .sel15Kres = 1, + .clknotstop = 0, + .oc_enable = 0, + .int_act_high = 0, + .int_edge_triggered = 0, + .remote_wakeup_connected = 0, + .no_power_switching = 1, + .power_switching_mode = 0, +}; + +static struct platform_device isp1362_hcd_device = { + .name = "isp1362-hcd", + .id = 0, + .dev = { + .platform_data = &isp1362_priv, + }, + .num_resources = ARRAY_SIZE(isp1362_hcd_resources), + .resource = isp1362_hcd_resources, +}; +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +static struct resource bfin_uart_resources[] = { + { + .start = 0xFFC00400, + .end = 0xFFC004FF, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device bfin_uart_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart_resources), + .resource = bfin_uart_resources, +}; +#endif + +static struct platform_device *cm_bf561_devices[] __initdata = { + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) + &bfin_uart_device, +#endif + +#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) + &isp1362_hcd_device, +#endif + +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + 
&smc91x_device, +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + &spi_bfin_master_device, +#endif + +}; + +static int __init cm_bf561_init(void) +{ + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + platform_add_devices(cm_bf561_devices, ARRAY_SIZE(cm_bf561_devices)); +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); +#endif + return 0; +} + +arch_initcall(cm_bf561_init); diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c new file mode 100644 index 000000000000..14eb4f9a68ea --- /dev/null +++ b/arch/blackfin/mach-bf561/boards/ezkit.c @@ -0,0 +1,147 @@ +/* + * File: arch/blackfin/mach-bf561/ezkit.c + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +char *bfin_board_name = "ADDS-BF561-EZKIT"; + +/* + * USB-LAN EzExtender board + * Driver needs to know address, irq and flag pin. 
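+ * On the BF561 EZ-KIT the SMC91x chip is mapped at 0x2C010300 in async
+ * memory and interrupts on PF9, which is what the resources below encode.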
+ */ +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +static struct resource smc91x_resources[] = { + { + .name = "smc91x-regs", + .start = 0x2C010300, + .end = 0x2C010300 + 16, + .flags = IORESOURCE_MEM, + },{ + + .start = IRQ_PF9, + .end = IRQ_PF9, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +static struct resource bfin_uart_resources[] = { + { + .start = 0xFFC00400, + .end = 0xFFC004FF, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device bfin_uart_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart_resources), + .resource = bfin_uart_resources, +}; +#endif + +#ifdef CONFIG_SPI_BFIN +#if defined(CONFIG_SND_BLACKFIN_AD1836) \ + || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +static struct bfin5xx_spi_chip ad1836_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif +#endif + +/* SPI controller data */ +static struct bfin5xx_spi_master spi_bfin_master_info = { + .num_chipselect = 8, + .enable_dma = 1, /* master has the ability to do dma transfer */ +}; + +static struct platform_device spi_bfin_master_device = { + .name = "bfin-spi-master", + .id = 1, /* Bus number */ + .dev = { + .platform_data = &spi_bfin_master_info, /* Passed to driver */ + }, +}; + +static struct spi_board_info bfin_spi_board_info[] __initdata = { +#if defined(CONFIG_SND_BLACKFIN_AD1836) \ + || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) + { + .modalias = "ad1836-spi", + .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 1, + .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT, + .controller_data = &ad1836_spi_chip_info, + }, +#endif +}; + +static struct platform_device *ezkit_devices[] __initdata = { +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + &smc91x_device, +#endif +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + &spi_bfin_master_device, +#endif +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) + &bfin_uart_device, +#endif +}; + +static int __init ezkit_init(void) +{ + int ret; + + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + ret = platform_add_devices(ezkit_devices, + ARRAY_SIZE(ezkit_devices)); + if (ret < 0) + return ret; + return spi_register_board_info(bfin_spi_board_info, + ARRAY_SIZE(bfin_spi_board_info)); +} + +arch_initcall(ezkit_init); diff --git a/arch/blackfin/mach-bf561/boards/generic_board.c b/arch/blackfin/mach-bf561/boards/generic_board.c new file mode 100644 index 000000000000..585ecdd2f6a5 --- /dev/null +++ b/arch/blackfin/mach-bf561/boards/generic_board.c @@ -0,0 +1,82 @@ +/* + * File: arch/blackfin/mach-bf561/generic_board.c + * Based on: arch/blackfin/mach-bf533/ezkit.c + * Author: Aidan Williams + * + * Created: + * Description: + * + * Modified: + * Copyright 2005 National ICT Australia (NICTA) + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +char *bfin_board_name = "UNKNOWN BOARD"; + +/* + * Driver needs to know address, irq and flag pin. + */ +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) +static struct resource smc91x_resources[] = { + { + .start = 0x2C010300, + .end = 0x2C010300 + 16, + .flags = IORESOURCE_MEM, + },{ + .start = IRQ_PROG_INTB, + .end = IRQ_PROG_INTB, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + },{ + /* + * denotes the flag pin and is used directly if + * CONFIG_IRQCHIP_DEMUX_GPIO is defined. + */ + .start = IRQ_PF9, + .end = IRQ_PF9, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, + }, +}; + +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +static struct platform_device *generic_board_devices[] __initdata = { +#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) + &smc91x_device, +#endif +}; + +static int __init generic_board_init(void) +{ + printk(KERN_INFO "%s(): registering device resources\n", __FUNCTION__); + return platform_add_devices(generic_board_devices, + ARRAY_SIZE(generic_board_devices)); +} + +arch_initcall(generic_board_init); diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c new file mode 100644 index 000000000000..b28582fe083c --- /dev/null +++ b/arch/blackfin/mach-bf561/coreb.c @@ -0,0 +1,402 @@ +/* + * File: arch/blackfin/mach-bf561/coreb.c + * Based on: + * Author: + * + * Created: + * Description: Handle CoreB on a BF561 + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include + +#define MODULE_VER "v0.1" + +static spinlock_t coreb_lock; +static wait_queue_head_t coreb_dma_wait; + +#define COREB_IS_OPEN 0x00000001 +#define COREB_IS_RUNNING 0x00000010 + +#define CMD_COREB_INDEX 1 +#define CMD_COREB_START 2 +#define CMD_COREB_STOP 3 +#define CMD_COREB_RESET 4 + +#define COREB_MINOR 229 + +static unsigned long coreb_status = 0; +static unsigned long coreb_base = 0xff600000; +static unsigned long coreb_size = 0x4000; +int coreb_dma_done; + +static loff_t coreb_lseek(struct file *file, loff_t offset, int origin); +static ssize_t coreb_read(struct file *file, char *buf, size_t count, + loff_t * ppos); +static ssize_t coreb_write(struct file *file, const char *buf, size_t count, + loff_t * ppos); +static int coreb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg); +static int coreb_open(struct inode *inode, struct file *file); +static int coreb_release(struct inode *inode, struct file *file); + +static irqreturn_t coreb_dma_interrupt(int irq, void *dev_id) +{ + clear_dma_irqstat(CH_MEM_STREAM2_DEST); + coreb_dma_done = 1; + wake_up_interruptible(&coreb_dma_wait); + return IRQ_HANDLED; +} + +static ssize_t coreb_write(struct file *file, const char *buf, size_t count, + loff_t * ppos) +{ + unsigned long p = *ppos; + ssize_t wrote = 0; + + if (p + count > coreb_size) + return -EFAULT; + + while (count > 0) { + int len = count; + + if (len > PAGE_SIZE) + len = PAGE_SIZE; + + coreb_dma_done = 0; + + /* Source Channel */ + set_dma_start_addr(CH_MEM_STREAM2_SRC, (unsigned long)buf); + set_dma_x_count(CH_MEM_STREAM2_SRC, len); + set_dma_x_modify(CH_MEM_STREAM2_SRC, sizeof(char)); + set_dma_config(CH_MEM_STREAM2_SRC, RESTART); + /* Destination Channel */ + set_dma_start_addr(CH_MEM_STREAM2_DEST, coreb_base + p); + set_dma_x_count(CH_MEM_STREAM2_DEST, len); + set_dma_x_modify(CH_MEM_STREAM2_DEST, sizeof(char)); + set_dma_config(CH_MEM_STREAM2_DEST, WNR | RESTART | DI_EN); + + enable_dma(CH_MEM_STREAM2_SRC); + enable_dma(CH_MEM_STREAM2_DEST); + + wait_event_interruptible(coreb_dma_wait, coreb_dma_done); + + disable_dma(CH_MEM_STREAM2_SRC); + disable_dma(CH_MEM_STREAM2_DEST); + + count -= len; + wrote += len; + buf += len; + p += len; + } + *ppos = p; + return wrote; +} + +static ssize_t coreb_read(struct file *file, char *buf, size_t count, + loff_t * ppos) +{ + unsigned long p = *ppos; + ssize_t read = 0; + + if ((p + count) > coreb_size) + return -EFAULT; + + while (count > 0) { + int len = count; + + if (len > PAGE_SIZE) + len = PAGE_SIZE; + + coreb_dma_done = 0; + + /* Source Channel */ + set_dma_start_addr(CH_MEM_STREAM2_SRC, coreb_base + p); + set_dma_x_count(CH_MEM_STREAM2_SRC, len); + set_dma_x_modify(CH_MEM_STREAM2_SRC, sizeof(char)); + set_dma_config(CH_MEM_STREAM2_SRC, RESTART); + /* Destination Channel */ + set_dma_start_addr(CH_MEM_STREAM2_DEST, (unsigned long)buf); + set_dma_x_count(CH_MEM_STREAM2_DEST, len); + set_dma_x_modify(CH_MEM_STREAM2_DEST, sizeof(char)); + set_dma_config(CH_MEM_STREAM2_DEST, WNR | RESTART | DI_EN); + + enable_dma(CH_MEM_STREAM2_SRC); + enable_dma(CH_MEM_STREAM2_DEST); + + wait_event_interruptible(coreb_dma_wait, coreb_dma_done); + + disable_dma(CH_MEM_STREAM2_SRC); + 
disable_dma(CH_MEM_STREAM2_DEST); + + count -= len; + read += len; + buf += len; + p += len; + } + + return read; +} + +static loff_t coreb_lseek(struct file *file, loff_t offset, int origin) +{ + loff_t ret; + + mutex_lock(&file->f_dentry->d_inode->i_mutex); + + switch (origin) { + case 0 /* SEEK_SET */ : + if (offset < coreb_size) { + file->f_pos = offset; + ret = file->f_pos; + } else + ret = -EINVAL; + break; + case 1 /* SEEK_CUR */ : + if ((offset + file->f_pos) < coreb_size) { + file->f_pos += offset; + ret = file->f_pos; + } else + ret = -EINVAL; + default: + ret = -EINVAL; + } + mutex_unlock(&file->f_dentry->d_inode->i_mutex); + return ret; +} + +static int coreb_open(struct inode *inode, struct file *file) +{ + spin_lock_irq(&coreb_lock); + + if (coreb_status & COREB_IS_OPEN) + goto out_busy; + + coreb_status |= COREB_IS_OPEN; + + spin_unlock_irq(&coreb_lock); + return 0; + + out_busy: + spin_unlock_irq(&coreb_lock); + return -EBUSY; +} + +static int coreb_release(struct inode *inode, struct file *file) +{ + spin_lock_irq(&coreb_lock); + coreb_status &= ~COREB_IS_OPEN; + spin_unlock_irq(&coreb_lock); + return 0; +} + +static int coreb_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + int retval = 0; + int coreb_index = 0; + + switch (cmd) { + case CMD_COREB_INDEX: + if (copy_from_user(&coreb_index, (int *)arg, sizeof(int))) { + retval = -EFAULT; + break; + } + + spin_lock_irq(&coreb_lock); + switch (coreb_index) { + case 0: + coreb_base = 0xff600000; + coreb_size = 0x4000; + break; + case 1: + coreb_base = 0xff610000; + coreb_size = 0x4000; + break; + case 2: + coreb_base = 0xff500000; + coreb_size = 0x8000; + break; + case 3: + coreb_base = 0xff400000; + coreb_size = 0x8000; + break; + default: + retval = -EINVAL; + break; + } + spin_unlock_irq(&coreb_lock); + + mutex_lock(&file->f_dentry->d_inode->i_mutex); + file->f_pos = 0; + mutex_unlock(&file->f_dentry->d_inode->i_mutex); + break; + case CMD_COREB_START: + spin_lock_irq(&coreb_lock); + if (coreb_status & COREB_IS_RUNNING) { + retval = -EBUSY; + break; + } + printk(KERN_INFO "Starting Core B\n"); + coreb_status |= COREB_IS_RUNNING; + bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~0x0020); + SSYNC(); + spin_lock_irq(&coreb_lock); + break; +#if defined(CONFIG_BF561_COREB_RESET) + case CMD_COREB_STOP: + spin_lock_irq(&coreb_lock); + printk(KERN_INFO "Stopping Core B\n"); + bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() | 0x0020); + bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | 0x0080); + coreb_status &= ~COREB_IS_RUNNING; + spin_lock_irq(&coreb_lock); + break; + case CMD_COREB_RESET: + printk(KERN_INFO "Resetting Core B\n"); + bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | 0x0080); + break; +#endif + } + + return retval; +} + +static struct file_operations coreb_fops = { + .owner = THIS_MODULE, + .llseek = coreb_lseek, + .read = coreb_read, + .write = coreb_write, + .ioctl = coreb_ioctl, + .open = coreb_open, + .release = coreb_release +}; + +static struct miscdevice coreb_dev = { + COREB_MINOR, + "coreb", + &coreb_fops +}; + +static ssize_t coreb_show_status(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, + "Base Address:\t0x%08lx\n" + "Core B is %s\n" + "SICA_SYSCR:\t%04x\n" + "SICB_SYSCR:\t%04x\n" + "\n" + "IRQ Status:\tCore A\t\tCore B\n" + "ISR0:\t\t%08x\t\t%08x\n" + "ISR1:\t\t%08x\t\t%08x\n" + "IMASK0:\t\t%08x\t\t%08x\n" + "IMASK1:\t\t%08x\t\t%08x\n", + coreb_base, + coreb_status & COREB_IS_RUNNING ? 
"running" : "stalled", + bfin_read_SICA_SYSCR(), bfin_read_SICB_SYSCR(), + bfin_read_SICA_ISR0(), bfin_read_SICB_ISR0(), + bfin_read_SICA_ISR1(), bfin_read_SICB_ISR0(), + bfin_read_SICA_IMASK0(), bfin_read_SICB_IMASK0(), + bfin_read_SICA_IMASK1(), bfin_read_SICB_IMASK1()); +} + +static DEVICE_ATTR(coreb_status, S_IRUGO, coreb_show_status, NULL); + +int __init bf561_coreb_init(void) +{ + init_waitqueue_head(&coreb_dma_wait); + + spin_lock_init(&coreb_lock); + /* Request the core memory regions for Core B */ + if (request_mem_region(0xff600000, 0x4000, + "Core B - Instruction SRAM") == NULL) + goto exit; + + if (request_mem_region(0xFF610000, 0x4000, + "Core B - Instruction SRAM") == NULL) + goto release_instruction_a_sram; + + if (request_mem_region(0xFF500000, 0x8000, + "Core B - Data Bank B SRAM") == NULL) + goto release_instruction_b_sram; + + if (request_mem_region(0xff400000, 0x8000, + "Core B - Data Bank A SRAM") == NULL) + goto release_data_b_sram; + + if (request_dma(CH_MEM_STREAM2_DEST, "Core B - DMA Destination") < 0) + goto release_data_a_sram; + + if (request_dma(CH_MEM_STREAM2_SRC, "Core B - DMA Source") < 0) + goto release_dma_dest; + + set_dma_callback(CH_MEM_STREAM2_DEST, coreb_dma_interrupt, NULL); + + misc_register(&coreb_dev); + + if (device_create_file(coreb_dev.this_device, &dev_attr_coreb_status)) + goto release_dma_src; + + printk(KERN_INFO "BF561 Core B driver %s initialized.\n", MODULE_VER); + return 0; + + release_dma_src: + free_dma(CH_MEM_STREAM2_SRC); + release_dma_dest: + free_dma(CH_MEM_STREAM2_DEST); + release_data_a_sram: + release_mem_region(0xff400000, 0x8000); + release_data_b_sram: + release_mem_region(0xff500000, 0x8000); + release_instruction_b_sram: + release_mem_region(0xff610000, 0x4000); + release_instruction_a_sram: + release_mem_region(0xff600000, 0x4000); + exit: + return -ENOMEM; +} + +void __exit bf561_coreb_exit(void) +{ + device_remove_file(coreb_dev.this_device, &dev_attr_coreb_status); + misc_deregister(&coreb_dev); + + release_mem_region(0xff610000, 0x4000); + release_mem_region(0xff600000, 0x4000); + release_mem_region(0xff500000, 0x8000); + release_mem_region(0xff400000, 0x8000); + + free_dma(CH_MEM_STREAM2_DEST); + free_dma(CH_MEM_STREAM2_SRC); +} + +module_init(bf561_coreb_init); +module_exit(bf561_coreb_exit); + +MODULE_AUTHOR("Bas Vermeulen "); +MODULE_DESCRIPTION("BF561 Core B Support"); diff --git a/arch/blackfin/mach-bf561/head.S b/arch/blackfin/mach-bf561/head.S new file mode 100644 index 000000000000..7bca478526b9 --- /dev/null +++ b/arch/blackfin/mach-bf561/head.S @@ -0,0 +1,512 @@ +/* + * File: arch/blackfin/mach-bf561/head.S + * Based on: arch/blackfin/mach-bf533/head.S + * Author: + * + * Created: + * Description: BF561 startup file + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#if CONFIG_BFIN_KERNEL_CLOCK +#include +#endif + +.global __rambase +.global __ramstart +.global __ramend +.extern ___bss_stop +.extern ___bss_start +.extern _bf53x_relocate_l1_mem + +#define INITIAL_STACK 0xFFB01000 + +.text + +ENTRY(__start) +ENTRY(__stext) + /* R0: argument of command line string, passed from uboot, save it */ + R7 = R0; + /* Set the SYSCFG register */ + R0 = 0x36; + SYSCFG = R0; /*Enable Cycle Counter and Nesting Of Interrupts(3rd Bit)*/ + R0 = 0; + + /*Clear Out All the data and pointer Registers*/ + R1 = R0; + R2 = R0; + R3 = R0; + R4 = R0; + R5 = R0; + R6 = R0; + + P0 = R0; + P1 = R0; + P2 = R0; + P3 = R0; + P4 = R0; + P5 = R0; + + LC0 = r0; + LC1 = r0; + L0 = r0; + L1 = r0; + L2 = r0; + L3 = r0; + + /* Clear Out All the DAG Registers*/ + B0 = r0; + B1 = r0; + B2 = r0; + B3 = r0; + + I0 = r0; + I1 = r0; + I2 = r0; + I3 = r0; + + M0 = r0; + M1 = r0; + M2 = r0; + M3 = r0; + + /* Turn off the icache */ + p0.l = (IMEM_CONTROL & 0xFFFF); + p0.h = (IMEM_CONTROL >> 16); + R1 = [p0]; + R0 = ~ENICPLB; + R0 = R0 & R1; + + /* Anomaly 05000125 */ +#ifdef ANOMALY_05000125 + CLI R2; + SSYNC; +#endif + [p0] = R0; + SSYNC; +#ifdef ANOMALY_05000125 + STI R2; +#endif + + /* Turn off the dcache */ + p0.l = (DMEM_CONTROL & 0xFFFF); + p0.h = (DMEM_CONTROL >> 16); + R1 = [p0]; + R0 = ~ENDCPLB; + R0 = R0 & R1; + + /* Anomaly 05000125 */ +#ifdef ANOMALY_05000125 + CLI R2; + SSYNC; +#endif + [p0] = R0; + SSYNC; +#ifdef ANOMALY_05000125 + STI R2; +#endif + + /* Initialise UART*/ + p0.h = hi(UART_LCR); + p0.l = lo(UART_LCR); + r0 = 0x0(Z); + w[p0] = r0.L; /* To enable DLL writes */ + ssync; + + p0.h = hi(UART_DLL); + p0.l = lo(UART_DLL); + r0 = 0x0(Z); + w[p0] = r0.L; + ssync; + + p0.h = hi(UART_DLH); + p0.l = lo(UART_DLH); + r0 = 0x00(Z); + w[p0] = r0.L; + ssync; + + p0.h = hi(UART_GCTL); + p0.l = lo(UART_GCTL); + r0 = 0x0(Z); + w[p0] = r0.L; /* To enable UART clock */ + ssync; + + /* Initialize stack pointer */ + sp.l = lo(INITIAL_STACK); + sp.h = hi(INITIAL_STACK); + fp = sp; + usp = sp; + + /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */ + call _bf53x_relocate_l1_mem; +#if CONFIG_BFIN_KERNEL_CLOCK + call _start_dma_code; +#endif + + /* Code for initializing Async memory banks */ + + p2.h = hi(EBIU_AMBCTL1); + p2.l = lo(EBIU_AMBCTL1); + r0.h = hi(AMBCTL1VAL); + r0.l = lo(AMBCTL1VAL); + [p2] = r0; + ssync; + + p2.h = hi(EBIU_AMBCTL0); + p2.l = lo(EBIU_AMBCTL0); + r0.h = hi(AMBCTL0VAL); + r0.l = lo(AMBCTL0VAL); + [p2] = r0; + ssync; + + p2.h = hi(EBIU_AMGCTL); + p2.l = lo(EBIU_AMGCTL); + r0 = AMGCTLVAL; + w[p2] = r0; + ssync; + + /* This section keeps the processor in supervisor mode + * during kernel boot. Switches to user mode at end of boot. + * See page 3-9 of Hardware Reference manual for documentation. 
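+ *
+ * In outline: _real_start is installed as the EVT15 (IVG15) handler,
+ * IVG15 is unmasked in IMASK, and "raise 15" followed by RTI takes the
+ * core into that lowest-priority interrupt, so the rest of the boot
+ * continues from _real_start in supervisor mode.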
+ */ + + /* EVT15 = _real_start */ + + p0.l = lo(EVT15); + p0.h = hi(EVT15); + p1.l = _real_start; + p1.h = _real_start; + [p0] = p1; + csync; + + p0.l = lo(IMASK); + p0.h = hi(IMASK); + p1.l = IMASK_IVG15; + p1.h = 0x0; + [p0] = p1; + csync; + + raise 15; + p0.l = .LWAIT_HERE; + p0.h = .LWAIT_HERE; + reti = p0; +#if defined(ANOMALY_05000281) + nop; nop; nop; +#endif + rti; + +.LWAIT_HERE: + jump .LWAIT_HERE; + +ENTRY(_real_start) + [ -- sp ] = reti; + p0.l = lo(WDOGA_CTL); + p0.h = hi(WDOGA_CTL); + r0 = 0xAD6(z); + w[p0] = r0; /* watchdog off for now */ + ssync; + + /* Code update for BSS size == 0 + * Zero out the bss region. + */ + + p1.l = ___bss_start; + p1.h = ___bss_start; + p2.l = ___bss_stop; + p2.h = ___bss_stop; + r0 = 0; + p2 -= p1; + lsetup (.L_clear_bss, .L_clear_bss ) lc0 = p2; +.L_clear_bss: + B[p1++] = r0; + + /* In case there is a NULL pointer reference + * Zero out region before stext + */ + + p1.l = 0x0; + p1.h = 0x0; + r0.l = __stext; + r0.h = __stext; + r0 = r0 >> 1; + p2 = r0; + r0 = 0; + lsetup (.L_clear_zero, .L_clear_zero ) lc0 = p2; +.L_clear_zero: + W[p1++] = r0; + +/* pass the uboot arguments to the global value command line */ + R0 = R7; + call _cmdline_init; + + p1.l = __rambase; + p1.h = __rambase; + r0.l = __sdata; + r0.h = __sdata; + [p1] = r0; + + p1.l = __ramstart; + p1.h = __ramstart; + p3.l = ___bss_stop; + p3.h = ___bss_stop; + + r1 = p3; + [p1] = r1; + + /* + * load the current thread pointer and stack + */ + r1.l = _init_thread_union; + r1.h = _init_thread_union; + + r2.l = 0x2000; + r2.h = 0x0000; + r1 = r1 + r2; + sp = r1; + usp = sp; + fp = sp; + call _start_kernel; +.L_exit: + jump.s .L_exit; + +.section .l1.text +#if CONFIG_BFIN_KERNEL_CLOCK +ENTRY(_start_dma_code) + p0.h = hi(SICA_IWR0); + p0.l = lo(SICA_IWR0); + r0.l = 0x1; + [p0] = r0; + SSYNC; + + /* + * Set PLL_CTL + * - [14:09] = MSEL[5:0] : CLKIN / VCO multiplication factors + * - [8] = BYPASS : BYPASS the PLL, run CLKIN into CCLK/SCLK + * - [7] = output delay (add 200ps of delay to mem signals) + * - [6] = input delay (add 200ps of input delay to mem signals) + * - [5] = PDWN : 1=All Clocks off + * - [3] = STOPCK : 1=Core Clock off + * - [1] = PLL_OFF : 1=Disable Power to PLL + * - [0] = DF : 1=Pass CLKIN/2 to PLL / 0=Pass CLKIN to PLL + * all other bits set to zero + */ + + p0.h = hi(PLL_LOCKCNT); + p0.l = lo(PLL_LOCKCNT); + r0 = 0x300(Z); + w[p0] = r0.l; + ssync; + + P2.H = hi(EBIU_SDGCTL); + P2.L = lo(EBIU_SDGCTL); + R0 = [P2]; + BITSET (R0, 24); + [P2] = R0; + SSYNC; + + r0 = CONFIG_VCO_MULT & 63; /* Load the VCO multiplier */ + r0 = r0 << 9; /* Shift it over, */ + r1 = CLKIN_HALF; /* Do we need to divide CLKIN by 2?*/ + r0 = r1 | r0; + r1 = PLL_BYPASS; /* Bypass the PLL? */ + r1 = r1 << 8; /* Shift it over */ + r0 = r1 | r0; /* add them all together */ + + p0.h = hi(PLL_CTL); + p0.l = lo(PLL_CTL); /* Load the address */ + cli r2; /* Disable interrupts */ + ssync; + w[p0] = r0.l; /* Set the value */ + idle; /* Wait for the PLL to stablize */ + sti r2; /* Enable interrupts */ + +.Lcheck_again: + p0.h = hi(PLL_STAT); + p0.l = lo(PLL_STAT); + R0 = W[P0](Z); + CC = BITTST(R0,5); + if ! 
CC jump .Lcheck_again; + + /* Configure SCLK & CCLK Dividers */ + r0 = (CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV); + p0.h = hi(PLL_DIV); + p0.l = lo(PLL_DIV); + w[p0] = r0.l; + ssync; + + p0.l = lo(EBIU_SDRRC); + p0.h = hi(EBIU_SDRRC); + r0 = mem_SDRRC; + w[p0] = r0.l; + ssync; + + p0.l = (EBIU_SDBCTL & 0xFFFF); + p0.h = (EBIU_SDBCTL >> 16); /* SDRAM Memory Bank Control Register */ + r0 = mem_SDBCTL; + w[p0] = r0.l; + ssync; + + P2.H = hi(EBIU_SDGCTL); + P2.L = lo(EBIU_SDGCTL); + R0 = [P2]; + BITCLR (R0, 24); + p0.h = hi(EBIU_SDSTAT); + p0.l = lo(EBIU_SDSTAT); + r2.l = w[p0]; + cc = bittst(r2,3); + if !cc jump .Lskip; + NOP; + BITSET (R0, 23); +.Lskip: + [P2] = R0; + SSYNC; + + R0.L = lo(mem_SDGCTL); + R0.H = hi(mem_SDGCTL); + R1 = [p2]; + R1 = R1 | R0; + [P2] = R1; + SSYNC; + + RTS; +#endif /* CONFIG_BFIN_KERNEL_CLOCK */ + +ENTRY(_bfin_reset) + /* No more interrupts to be handled*/ + CLI R6; + SSYNC; + +#if defined(CONFIG_BFIN_SHARED_FLASH_ENET) + p0.h = hi(FIO_INEN); + p0.l = lo(FIO_INEN); + r0.l = ~(PF1 | PF0); + w[p0] = r0.l; + + p0.h = hi(FIO_DIR); + p0.l = lo(FIO_DIR); + r0.l = (PF1 | PF0); + w[p0] = r0.l; + + p0.h = hi(FIO_FLAG_C); + p0.l = lo(FIO_FLAG_C); + r0.l = (PF1 | PF0); + w[p0] = r0.l; +#endif + + /* Clear the bits 13-15 in SWRST if they werent cleared */ + p0.h = hi(SICA_SWRST); + p0.l = lo(SICA_SWRST); + csync; + r0.l = w[p0]; + + /* Clear the IMASK register */ + p0.h = hi(IMASK); + p0.l = lo(IMASK); + r0 = 0x0; + [p0] = r0; + + /* Clear the ILAT register */ + p0.h = hi(ILAT); + p0.l = lo(ILAT); + r0 = [p0]; + [p0] = r0; + SSYNC; + + /* Disable the WDOG TIMER */ + p0.h = hi(WDOGA_CTL); + p0.l = lo(WDOGA_CTL); + r0.l = 0xAD6; + w[p0] = r0.l; + SSYNC; + + /* Clear the sticky bit incase it is already set */ + p0.h = hi(WDOGA_CTL); + p0.l = lo(WDOGA_CTL); + r0.l = 0x8AD6; + w[p0] = r0.l; + SSYNC; + + /* Program the count value */ + R0.l = 0x100; + R0.h = 0x0; + P0.h = hi(WDOGA_CNT); + P0.l = lo(WDOGA_CNT); + [P0] = R0; + SSYNC; + + /* Program WDOG_STAT if necessary */ + P0.h = hi(WDOGA_CTL); + P0.l = lo(WDOGA_CTL); + R0 = W[P0](Z); + CC = BITTST(R0,1); + if !CC JUMP .LWRITESTAT; + CC = BITTST(R0,2); + if !CC JUMP .LWRITESTAT; + JUMP .LSKIP_WRITE; + +.LWRITESTAT: + /* When watch dog timer is enabled, + * a write to STAT will load the contents of CNT to STAT + */ + R0 = 0x0000(z); + P0.h = hi(WDOGA_STAT); + P0.l = lo(WDOGA_STAT) + [P0] = R0; + SSYNC; + +.LSKIP_WRITE: + /* Enable the reset event */ + P0.h = hi(WDOGA_CTL); + P0.l = lo(WDOGA_CTL); + R0 = W[P0](Z); + BITCLR(R0,1); + BITCLR(R0,2); + W[P0] = R0.L; + SSYNC; + NOP; + + /* Enable the wdog counter */ + R0 = W[P0](Z); + BITCLR(R0,4); + W[P0] = R0.L; + SSYNC; + + IDLE; + + RTS; + +.data + +/* + * Set up the usable of RAM stuff. Size of RAM is determined then + * an initial stack set up at the end. + */ + +.align 4 +__rambase: +.long 0 +__ramstart: +.long 0 +__ramend: +.long 0 diff --git a/arch/blackfin/mach-bf561/ints-priority.c b/arch/blackfin/mach-bf561/ints-priority.c new file mode 100644 index 000000000000..89c52ff95b27 --- /dev/null +++ b/arch/blackfin/mach-bf561/ints-priority.c @@ -0,0 +1,108 @@ +/* + * File: arch/blackfin/mach-bf561/ints-priority.c + * Based on: arch/blackfin/mach-bf537/ints-priority.c + * Author: Michael Hennerich + * + * Created: + * Description: Set up the interupt priorities + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +void program_IAR(void) +{ + /* Program the IAR0 Register with the configured priority */ + bfin_write_SICA_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) | + ((CONFIG_IRQ_DMA1_ERROR - 7) << IRQ_DMA1_ERROR_POS) | + ((CONFIG_IRQ_DMA2_ERROR - 7) << IRQ_DMA2_ERROR_POS) | + ((CONFIG_IRQ_IMDMA_ERROR - 7) << IRQ_IMDMA_ERROR_POS) | + ((CONFIG_IRQ_PPI0_ERROR - 7) << IRQ_PPI0_ERROR_POS) | + ((CONFIG_IRQ_PPI1_ERROR - 7) << IRQ_PPI1_ERROR_POS) | + ((CONFIG_IRQ_SPORT0_ERROR - 7) << IRQ_SPORT0_ERROR_POS) | + ((CONFIG_IRQ_SPORT1_ERROR - 7) << IRQ_SPORT1_ERROR_POS)); + + bfin_write_SICA_IAR1(((CONFIG_IRQ_SPI_ERROR - 7) << IRQ_SPI_ERROR_POS) | + ((CONFIG_IRQ_UART_ERROR - 7) << IRQ_UART_ERROR_POS) | + ((CONFIG_IRQ_RESERVED_ERROR - 7) << IRQ_RESERVED_ERROR_POS) | + ((CONFIG_IRQ_DMA1_0 - 7) << IRQ_DMA1_0_POS) | + ((CONFIG_IRQ_DMA1_1 - 7) << IRQ_DMA1_1_POS) | + ((CONFIG_IRQ_DMA1_2 - 7) << IRQ_DMA1_2_POS) | + ((CONFIG_IRQ_DMA1_3 - 7) << IRQ_DMA1_3_POS) | + ((CONFIG_IRQ_DMA1_4 - 7) << IRQ_DMA1_4_POS)); + + bfin_write_SICA_IAR2(((CONFIG_IRQ_DMA1_5 - 7) << IRQ_DMA1_5_POS) | + ((CONFIG_IRQ_DMA1_6 - 7) << IRQ_DMA1_6_POS) | + ((CONFIG_IRQ_DMA1_7 - 7) << IRQ_DMA1_7_POS) | + ((CONFIG_IRQ_DMA1_8 - 7) << IRQ_DMA1_8_POS) | + ((CONFIG_IRQ_DMA1_9 - 7) << IRQ_DMA1_9_POS) | + ((CONFIG_IRQ_DMA1_10 - 7) << IRQ_DMA1_10_POS) | + ((CONFIG_IRQ_DMA1_11 - 7) << IRQ_DMA1_11_POS) | + ((CONFIG_IRQ_DMA2_0 - 7) << IRQ_DMA2_0_POS)); + + bfin_write_SICA_IAR3(((CONFIG_IRQ_DMA2_1 - 7) << IRQ_DMA2_1_POS) | + ((CONFIG_IRQ_DMA2_2 - 7) << IRQ_DMA2_2_POS) | + ((CONFIG_IRQ_DMA2_3 - 7) << IRQ_DMA2_3_POS) | + ((CONFIG_IRQ_DMA2_4 - 7) << IRQ_DMA2_4_POS) | + ((CONFIG_IRQ_DMA2_5 - 7) << IRQ_DMA2_5_POS) | + ((CONFIG_IRQ_DMA2_6 - 7) << IRQ_DMA2_6_POS) | + ((CONFIG_IRQ_DMA2_7 - 7) << IRQ_DMA2_7_POS) | + ((CONFIG_IRQ_DMA2_8 - 7) << IRQ_DMA2_8_POS)); + + bfin_write_SICA_IAR4(((CONFIG_IRQ_DMA2_9 - 7) << IRQ_DMA2_9_POS) | + ((CONFIG_IRQ_DMA2_10 - 7) << IRQ_DMA2_10_POS) | + ((CONFIG_IRQ_DMA2_11 - 7) << IRQ_DMA2_11_POS) | + ((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) | + ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) | + ((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) | + ((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) | + ((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS)); + + bfin_write_SICA_IAR5(((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) | + ((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) | + ((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS) | + ((CONFIG_IRQ_TIMER8 - 7) << IRQ_TIMER8_POS) | + ((CONFIG_IRQ_TIMER9 - 7) << IRQ_TIMER9_POS) | + ((CONFIG_IRQ_TIMER10 - 7) << IRQ_TIMER10_POS) | + ((CONFIG_IRQ_TIMER11 - 7) << IRQ_TIMER11_POS) | + ((CONFIG_IRQ_PROG0_INTA - 7) << IRQ_PROG0_INTA_POS)); + + 
bfin_write_SICA_IAR6(((CONFIG_IRQ_PROG0_INTB - 7) << IRQ_PROG0_INTB_POS) | + ((CONFIG_IRQ_PROG1_INTA - 7) << IRQ_PROG1_INTA_POS) | + ((CONFIG_IRQ_PROG1_INTB - 7) << IRQ_PROG1_INTB_POS) | + ((CONFIG_IRQ_PROG2_INTA - 7) << IRQ_PROG2_INTA_POS) | + ((CONFIG_IRQ_PROG2_INTB - 7) << IRQ_PROG2_INTB_POS) | + ((CONFIG_IRQ_DMA1_WRRD0 - 7) << IRQ_DMA1_WRRD0_POS) | + ((CONFIG_IRQ_DMA1_WRRD1 - 7) << IRQ_DMA1_WRRD1_POS) | + ((CONFIG_IRQ_DMA2_WRRD0 - 7) << IRQ_DMA2_WRRD0_POS)); + + bfin_write_SICA_IAR7(((CONFIG_IRQ_DMA2_WRRD1 - 7) << IRQ_DMA2_WRRD1_POS) | + ((CONFIG_IRQ_IMDMA_WRRD0 - 7) << IRQ_IMDMA_WRRD0_POS) | + ((CONFIG_IRQ_IMDMA_WRRD1 - 7) << IRQ_IMDMA_WRRD1_POS) | + ((CONFIG_IRQ_WDTIMER - 7) << IRQ_WDTIMER_POS) | + (0 << IRQ_RESERVED_1_POS) | (0 << IRQ_RESERVED_2_POS) | + (0 << IRQ_SUPPLE_0_POS) | (0 << IRQ_SUPPLE_1_POS)); + + SSYNC(); +} diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile new file mode 100644 index 000000000000..d3a49073d196 --- /dev/null +++ b/arch/blackfin/mach-common/Makefile @@ -0,0 +1,12 @@ +# +# arch/blackfin/mach-common/Makefile +# + +obj-y := \ + cache.o cacheinit.o cplbhdlr.o cplbmgr.o entry.o \ + interrupt.o lock.o dpmc.o irqpanic.o + +obj-$(CONFIG_CPLB_INFO) += cplbinfo.o +obj-$(CONFIG_BFIN_SINGLE_CORE) += ints-priority-sc.o +obj-$(CONFIG_BFIN_DUAL_CORE) += ints-priority-dc.o +obj-$(CONFIG_PM) += pm.o diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S new file mode 100644 index 000000000000..bb9446ef66ef --- /dev/null +++ b/arch/blackfin/mach-common/cache.S @@ -0,0 +1,253 @@ +/* + * File: arch/blackfin/mach-common/cache.S + * Based on: + * Author: LG Soft India + * + * Created: + * Description: cache control support + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +.text +.align 2 +ENTRY(_cache_invalidate) + + /* + * Icache or DcacheA or DcacheB Invalidation + * or any combination thereof + * R0 has bits + * CPLB_ENABLE_ICACHE_P,CPLB_ENABLE_DCACHE_P,CPLB_ENABLE_DCACHE2_P + * set as required + */ + [--SP] = R7; + + R7 = R0; + CC = BITTST(R7,CPLB_ENABLE_ICACHE_P); + IF !CC JUMP .Lno_icache; + [--SP] = RETS; + CALL _icache_invalidate; + RETS = [SP++]; +.Lno_icache: + CC = BITTST(R7,CPLB_ENABLE_DCACHE_P); + IF !CC JUMP .Lno_dcache_a; + R0 = 0; /* specifies bank A */ + [--SP] = RETS; + CALL _dcache_invalidate; + RETS = [SP++]; +.Lno_dcache_a: + CC = BITTST(R7,CPLB_ENABLE_DCACHE2_P); + IF !CC JUMP .Lno_dcache_b; + R0 = 0; + BITSET(R0, 23); /* specifies bank B */ + [--SP] = RETS; + CALL _dcache_invalidate; + RETS = [SP++]; +.Lno_dcache_b: + R7 = [SP++]; + RTS; + +/* Invalidate the Entire Instruction cache by + * disabling IMC bit + */ +ENTRY(_icache_invalidate) +ENTRY(_invalidate_entire_icache) + [--SP] = ( R7:5); + + P0.L = (IMEM_CONTROL & 0xFFFF); + P0.H = (IMEM_CONTROL >> 16); + R7 = [P0]; + + /* Clear the IMC bit , All valid bits in the instruction + * cache are set to the invalid state + */ + BITCLR(R7,IMC_P); + CLI R6; + SSYNC; /* SSYNC required before invalidating cache. */ + .align 8; + [P0] = R7; + SSYNC; + STI R6; + + /* Configures the instruction cache agian */ + R6 = (IMC | ENICPLB); + R7 = R7 | R6; + + CLI R6; + SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */ + .align 8; + [P0] = R7; + SSYNC; + STI R6; + + ( R7:5) = [SP++]; + RTS; + +/* + * blackfin_cache_flush_range(start, end) + * Invalidate all cache lines assocoiated with this + * area of memory. + * + * start: Start address + * end: End address + */ +ENTRY(_blackfin_icache_flush_range) + R2 = -L1_CACHE_BYTES; + R2 = R0 & R2; + P0 = R2; + P1 = R1; + CSYNC; + IFLUSH [P0]; +1: + IFLUSH [P0++]; + CC = P0 < P1 (iu); + IF CC JUMP 1b (bp); + IFLUSH [P0]; + SSYNC; + RTS; + +/* + * blackfin_icache_dcache_flush_range(start, end) + * FLUSH all cache lines assocoiated with this + * area of memory. + * + * start: Start address + * end: End address + */ + +ENTRY(_blackfin_icache_dcache_flush_range) + R2 = -L1_CACHE_BYTES; + R2 = R0 & R2; + P0 = R2; + P1 = R1; + CSYNC; + IFLUSH [P0]; +1: + FLUSH [P0]; + IFLUSH [P0++]; + CC = P0 < P1 (iu); + IF CC JUMP 1b (bp); + IFLUSH [P0]; + FLUSH [P0]; + SSYNC; + RTS; + +/* Throw away all D-cached data in specified region without any obligation to + * write them back. However, we must clean the D-cached entries around the + * boundaries of the start and/or end address is not cache aligned. + * + * Start: start address, + * end : end address. + */ + +ENTRY(_blackfin_dcache_invalidate_range) + R2 = -L1_CACHE_BYTES; + R2 = R0 & R2; + P0 = R2; + P1 = R1; + CSYNC; + FLUSHINV[P0]; +1: + FLUSHINV[P0++]; + CC = P0 < P1 (iu); + IF CC JUMP 1b (bp); + + /* If the data crosses a cache line, then we'll be pointing to + * the last cache line, but won't have flushed/invalidated it yet, + * so do one more. 
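+ *
+ * (The start address was already rounded down to an L1_CACHE_BYTES
+ * boundary above, so this trailing FLUSHINV only has to cover an
+ * unaligned tail of the region.)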
+ */ + FLUSHINV[P0]; + SSYNC; + RTS; + +/* Invalidate the Entire Data cache by + * clearing DMC[1:0] bits + */ +ENTRY(_invalidate_entire_dcache) +ENTRY(_dcache_invalidate) + [--SP] = ( R7:6); + + P0.L = (DMEM_CONTROL & 0xFFFF); + P0.H = (DMEM_CONTROL >> 16); + R7 = [P0]; + + /* Clear the DMC[1:0] bits, All valid bits in the data + * cache are set to the invalid state + */ + BITCLR(R7,DMC0_P); + BITCLR(R7,DMC1_P); + CLI R6; + SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */ + .align 8; + [P0] = R7; + SSYNC; + STI R6; + + /* Configures the data cache again */ + + R6 = DMEM_CNTR; + R7 = R7 | R6; + + CLI R6; + SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */ + .align 8; + [P0] = R7; + SSYNC; + STI R6; + + ( R7:6) = [SP++]; + RTS; + +ENTRY(_blackfin_dcache_flush_range) + R2 = -L1_CACHE_BYTES; + R2 = R0 & R2; + P0 = R2; + P1 = R1; + CSYNC; + FLUSH[P0]; +1: + FLUSH[P0++]; + CC = P0 < P1 (iu); + IF CC JUMP 1b (bp); + + /* If the data crosses a cache line, then we'll be pointing to + * the last cache line, but won't have flushed it yet, so do + * one more. + */ + FLUSH[P0]; + SSYNC; + RTS; + +ENTRY(_blackfin_dflush_page) + P1 = 1 << (PAGE_SHIFT - L1_CACHE_SHIFT); + P0 = R0; + CSYNC; + FLUSH[P0]; + LSETUP (.Lfl1, .Lfl1) LC0 = P1; +.Lfl1: FLUSH [P0++]; + SSYNC; + RTS; diff --git a/arch/blackfin/mach-common/cacheinit.S b/arch/blackfin/mach-common/cacheinit.S new file mode 100644 index 000000000000..8c17f099e5eb --- /dev/null +++ b/arch/blackfin/mach-common/cacheinit.S @@ -0,0 +1,137 @@ +/* + * File: arch/blackfin/mach-common/cacheinit.S + * Based on: + * Author: LG Soft India + * + * Created: ? + * Description: cache initialization + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* This function sets up the data and instruction cache. The + * tables like icplb table, dcplb table and Page Descriptor table + * are defined in cplbtab.h. 
You can configure those tables for + * your suitable requirements + */ + +#include +#include + +.text + +#if defined(CONFIG_BLKFIN_CACHE) +ENTRY(_bfin_icache_init) + + /* Initialize Instruction CPLBS */ + + I0.L = (ICPLB_ADDR0 & 0xFFFF); + I0.H = (ICPLB_ADDR0 >> 16); + + I1.L = (ICPLB_DATA0 & 0xFFFF); + I1.H = (ICPLB_DATA0 >> 16); + + I2.L = _icplb_table; + I2.H = _icplb_table; + + r1 = -1; /* end point comparison */ + r3 = 15; /* max counter */ + +/* read entries from table */ + +.Lread_iaddr: + R0 = [I2++]; + CC = R0 == R1; + IF CC JUMP .Lidone; + [I0++] = R0; + +.Lread_idata: + R2 = [I2++]; + [I1++] = R2; + R3 = R3 + R1; + CC = R3 == R1; + IF !CC JUMP .Lread_iaddr; + +.Lidone: + /* Enable Instruction Cache */ + P0.l = (IMEM_CONTROL & 0xFFFF); + P0.h = (IMEM_CONTROL >> 16); + R1 = [P0]; + R0 = (IMC | ENICPLB); + R0 = R0 | R1; + + /* Anomaly 05000125 */ + CLI R2; + SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */ + .align 8; + [P0] = R0; + SSYNC; + STI R2; + RTS; +#endif + +#if defined(CONFIG_BLKFIN_DCACHE) +ENTRY(_bfin_dcache_init) + + /* Initialize Data CPLBS */ + + I0.L = (DCPLB_ADDR0 & 0xFFFF); + I0.H = (DCPLB_ADDR0 >> 16); + + I1.L = (DCPLB_DATA0 & 0xFFFF); + I1.H = (DCPLB_DATA0 >> 16); + + I2.L = _dcplb_table; + I2.H = _dcplb_table; + + R1 = -1; /* end point comparison */ + R3 = 15; /* max counter */ + + /* read entries from table */ +.Lread_daddr: + R0 = [I2++]; + cc = R0 == R1; + IF CC JUMP .Lddone; + [I0++] = R0; + +.Lread_ddata: + R2 = [I2++]; + [I1++] = R2; + R3 = R3 + R1; + CC = R3 == R1; + IF !CC JUMP .Lread_daddr; +.Lddone: + P0.L = (DMEM_CONTROL & 0xFFFF); + P0.H = (DMEM_CONTROL >> 16); + R1 = [P0]; + + R0 = DMEM_CNTR; + + R0 = R0 | R1; + /* Anomaly 05000125 */ + CLI R2; + SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */ + .align 8; + [P0] = R0; + SSYNC; + STI R2; + RTS; +#endif diff --git a/arch/blackfin/mach-common/cplbhdlr.S b/arch/blackfin/mach-common/cplbhdlr.S new file mode 100644 index 000000000000..b979067c49ef --- /dev/null +++ b/arch/blackfin/mach-common/cplbhdlr.S @@ -0,0 +1,130 @@ +/* + * File: arch/blackfin/mach-common/cplbhdlr.S + * Based on: + * Author: LG Soft India + * + * Created: ? + * Description: CPLB exception handler + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +#ifdef CONFIG_EXCPT_IRQ_SYSC_L1 +.section .l1.text +#else +.text +#endif + +.type _cplb_mgr, STT_FUNC; +.type _panic_cplb_error, STT_FUNC; + +.align 2 + +.global __cplb_hdr; +.type __cplb_hdr, STT_FUNC; +ENTRY(__cplb_hdr) + R2 = SEQSTAT; + + /* Mask the contents of SEQSTAT and leave only EXCAUSE in R2 */ + R2 <<= 26; + R2 >>= 26; + + R1 = 0x23; /* Data access CPLB protection violation */ + CC = R2 == R1; + IF !CC JUMP .Lnot_data_write; + R0 = 2; /* is a write to data space*/ + JUMP .Lis_icplb_miss; + +.Lnot_data_write: + R1 = 0x2C; /* CPLB miss on an instruction fetch */ + CC = R2 == R1; + R0 = 0; /* is_data_miss == False*/ + IF CC JUMP .Lis_icplb_miss; + + R1 = 0x26; + CC = R2 == R1; + IF !CC JUMP .Lunknown; + + R0 = 1; /* is_data_miss == True*/ + +.Lis_icplb_miss: + +#if defined(CONFIG_BLKFIN_CACHE) || defined(CONFIG_BLKFIN_DCACHE) +# if defined(CONFIG_BLKFIN_CACHE) && !defined(CONFIG_BLKFIN_DCACHE) + R1 = CPLB_ENABLE_ICACHE; +# endif +# if !defined(CONFIG_BLKFIN_CACHE) && defined(CONFIG_BLKFIN_DCACHE) + R1 = CPLB_ENABLE_DCACHE; +# endif +# if defined(CONFIG_BLKFIN_CACHE) && defined(CONFIG_BLKFIN_DCACHE) + R1 = CPLB_ENABLE_DCACHE | CPLB_ENABLE_ICACHE; +# endif +#else + R1 = 0; +#endif + + [--SP] = RETS; + CALL _cplb_mgr; + RETS = [SP++]; + CC = R0 == 0; + IF !CC JUMP .Lnot_replaced; + RTS; + +/* + * Diagnostic exception handlers + */ +.Lunknown: + R0 = CPLB_UNKNOWN_ERR; + JUMP .Lcplb_error; + +.Lnot_replaced: + CC = R0 == CPLB_NO_UNLOCKED; + IF !CC JUMP .Lnext_check; + R0 = CPLB_NO_UNLOCKED; + JUMP .Lcplb_error; + +.Lnext_check: + CC = R0 == CPLB_NO_ADDR_MATCH; + IF !CC JUMP .Lnext_check2; + R0 = CPLB_NO_ADDR_MATCH; + JUMP .Lcplb_error; + +.Lnext_check2: + CC = R0 == CPLB_PROT_VIOL; + IF !CC JUMP .Lstrange_return_from_cplb_mgr; + R0 = CPLB_PROT_VIOL; + JUMP .Lcplb_error; + +.Lstrange_return_from_cplb_mgr: + IDLE; + CSYNC; + JUMP .Lstrange_return_from_cplb_mgr; + +.Lcplb_error: + R1 = sp; + SP += -12; + call _panic_cplb_error; + SP += 12; + JUMP _handle_bad_cplb; diff --git a/arch/blackfin/mach-common/cplbinfo.c b/arch/blackfin/mach-common/cplbinfo.c new file mode 100644 index 000000000000..d65fac39d1bf --- /dev/null +++ b/arch/blackfin/mach-common/cplbinfo.c @@ -0,0 +1,211 @@ +/* + * File: arch/blackfin/mach-common/cplbinfo.c + * Based on: + * Author: Sonic Zhang + * + * Created: Jan. 2005 + * Description: Display CPLB status + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#define CPLB_I 1 +#define CPLB_D 2 + +#define SYNC_SYS SSYNC() +#define SYNC_CORE CSYNC() + +#define CPLB_BIT_PAGESIZE 0x30000 + +static int page_size_table[4] = { + 0x00000400, /* 1K */ + 0x00001000, /* 4K */ + 0x00100000, /* 1M */ + 0x00400000 /* 4M */ +}; + +static char page_size_string_table[][4] = { "1K", "4K", "1M", "4M" }; + +static int cplb_find_entry(unsigned long *cplb_addr, + unsigned long *cplb_data, unsigned long addr, + unsigned long data) +{ + int ii; + + for (ii = 0; ii < 16; ii++) + if (addr >= cplb_addr[ii] && addr < cplb_addr[ii] + + page_size_table[(cplb_data[ii] & CPLB_BIT_PAGESIZE) >> 16] + && (cplb_data[ii] == data)) + return ii; + + return -1; +} + +static char *cplb_print_entry(char *buf, int type) +{ + unsigned long *p_addr = dpdt_table; + unsigned long *p_data = dpdt_table + 1; + unsigned long *p_icount = dpdt_swapcount_table; + unsigned long *p_ocount = dpdt_swapcount_table + 1; + unsigned long *cplb_addr = (unsigned long *)DCPLB_ADDR0; + unsigned long *cplb_data = (unsigned long *)DCPLB_DATA0; + int entry = 0, used_cplb = 0; + + if (type == CPLB_I) { + buf += sprintf(buf, "Instrction CPLB entry:\n"); + p_addr = ipdt_table; + p_data = ipdt_table + 1; + p_icount = ipdt_swapcount_table; + p_ocount = ipdt_swapcount_table + 1; + cplb_addr = (unsigned long *)ICPLB_ADDR0; + cplb_data = (unsigned long *)ICPLB_DATA0; + } else + buf += sprintf(buf, "Data CPLB entry:\n"); + + buf += sprintf(buf, "Address\t\tData\tSize\tValid\tLocked\tSwapin\ +\tiCount\toCount\n"); + + while (*p_addr != 0xffffffff) { + entry = cplb_find_entry(cplb_addr, cplb_data, *p_addr, *p_data); + if (entry >= 0) + used_cplb |= 1 << entry; + + buf += + sprintf(buf, + "0x%08lx\t0x%05lx\t%s\t%c\t%c\t%2d\t%ld\t%ld\n", + *p_addr, *p_data, + page_size_string_table[(*p_data & 0x30000) >> 16], + (*p_data & CPLB_VALID) ? 'Y' : 'N', + (*p_data & CPLB_LOCK) ? 'Y' : 'N', entry, *p_icount, + *p_ocount); + + p_addr += 2; + p_data += 2; + p_icount += 2; + p_ocount += 2; + } + + if (used_cplb != 0xffff) { + buf += sprintf(buf, "Unused/mismatched CPLBs:\n"); + + for (entry = 0; entry < 16; entry++) + if (0 == ((1 << entry) & used_cplb)) { + int flags = cplb_data[entry]; + buf += + sprintf(buf, + "%2d: 0x%08lx\t0x%05x\t%s\t%c\t%c\n", + entry, cplb_addr[entry], flags, + page_size_string_table[(flags & + 0x30000) >> + 16], + (flags & CPLB_VALID) ? 'Y' : 'N', + (flags & CPLB_LOCK) ? 
'Y' : 'N'); + } + } + + buf += sprintf(buf, "\n"); + + return buf; +} + +static int cplbinfo_proc_output(char *buf) +{ + char *p; + + p = buf; + + p += sprintf(p, + "------------------ CPLB Information ------------------\n\n"); + + if (bfin_read_IMEM_CONTROL() & ENICPLB) + p = cplb_print_entry(p, CPLB_I); + else + p += sprintf(p, "Instruction CPLB is disabled.\n\n"); + + if (bfin_read_DMEM_CONTROL() & ENDCPLB) + p = cplb_print_entry(p, CPLB_D); + else + p += sprintf(p, "Data CPLB is disabled.\n"); + + return p - buf; +} + +static int cplbinfo_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + + len = cplbinfo_proc_output(page); + if (len <= off + count) + *eof = 1; + *start = page + off; + len -= off; + if (len > count) + len = count; + if (len < 0) + len = 0; + return len; +} + +static int cplbinfo_write_proc(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + printk(KERN_INFO "Reset the CPLB swap in/out counts.\n"); + memset(ipdt_swapcount_table, 0, MAX_SWITCH_I_CPLBS * sizeof(unsigned long)); + memset(dpdt_swapcount_table, 0, MAX_SWITCH_D_CPLBS * sizeof(unsigned long)); + + return count; +} + +static int __init cplbinfo_init(void) +{ + struct proc_dir_entry *entry; + + if ((entry = create_proc_entry("cplbinfo", 0, NULL)) == NULL) { + return -ENOMEM; + } + + entry->read_proc = cplbinfo_read_proc; + entry->write_proc = cplbinfo_write_proc; + entry->data = NULL; + + return 0; +} + +static void __exit cplbinfo_exit(void) +{ + remove_proc_entry("cplbinfo", NULL); +} + +module_init(cplbinfo_init); +module_exit(cplbinfo_exit); diff --git a/arch/blackfin/mach-common/cplbmgr.S b/arch/blackfin/mach-common/cplbmgr.S new file mode 100644 index 000000000000..f5efc4bc65e6 --- /dev/null +++ b/arch/blackfin/mach-common/cplbmgr.S @@ -0,0 +1,607 @@ +/* + * File: arch/blackfin/mach-common/cplbmgtr.S + * Based on: + * Author: LG Soft India + * + * Created: ? + * Description: CPLB replacement routine for CPLB mismatch + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* Usage: int _cplb_mgr(is_data_miss,int enable_cache) + * is_data_miss==2 => Mark as Dirty, write to the clean data page + * is_data_miss==1 => Replace a data CPLB. + * is_data_miss==0 => Replace an instruction CPLB. + * + * Returns: + * CPLB_RELOADED => Successfully updated CPLB table. + * CPLB_NO_UNLOCKED => All CPLBs are locked, so cannot be evicted. + * This indicates that the CPLBs in the configuration + * tablei are badly configured, as this should never + * occur. + * CPLB_NO_ADDR_MATCH => The address being accessed, that triggered the + * exception, is not covered by any of the CPLBs in + * the configuration table. 
The application is + * presumably misbehaving. + * CPLB_PROT_VIOL => The address being accessed, that triggered the + * exception, was not a first-write to a clean Write + * Back Data page, and so presumably is a genuine + * violation of the page's protection attributes. + * The application is misbehaving. + */ + +#include +#include +#include + +#ifdef CONFIG_EXCPT_IRQ_SYSC_L1 +.section .l1.text +#else +.text +#endif + +.align 2; +ENTRY(_cplb_mgr) + + [--SP]=( R7:4,P5:3 ); + + CC = R0 == 2; + IF CC JUMP .Ldcplb_write; + + CC = R0 == 0; + IF !CC JUMP .Ldcplb_miss_compare; + + /* ICPLB Miss Exception. We need to choose one of the + * currently-installed CPLBs, and replace it with one + * from the configuration table. + */ + + P4.L = (ICPLB_FAULT_ADDR & 0xFFFF); + P4.H = (ICPLB_FAULT_ADDR >> 16); + + P1 = 16; + P5.L = _page_size_table; + P5.H = _page_size_table; + + P0.L = (ICPLB_DATA0 & 0xFFFF); + P0.H = (ICPLB_DATA0 >> 16); + R4 = [P4]; /* Get faulting address*/ + R6 = 64; /* Advance past the fault address, which*/ + R6 = R6 + R4; /* we'll use if we find a match*/ + R3 = ((16 << 8) | 2); /* Extract mask, bits 16 and 17.*/ + + R5 = 0; +.Lisearch: + + R1 = [P0-0x100]; /* Address for this CPLB */ + + R0 = [P0++]; /* Info for this CPLB*/ + CC = BITTST(R0,0); /* Is the CPLB valid?*/ + IF !CC JUMP .Lnomatch; /* Skip it, if not.*/ + CC = R4 < R1(IU); /* If fault address less than page start*/ + IF CC JUMP .Lnomatch; /* then skip this one.*/ + R2 = EXTRACT(R0,R3.L) (Z); /* Get page size*/ + P1 = R2; + P1 = P5 + (P1<<2); /* index into page-size table*/ + R2 = [P1]; /* Get the page size*/ + R1 = R1 + R2; /* and add to page start, to get page end*/ + CC = R4 < R1(IU); /* and see whether fault addr is in page.*/ + IF !CC R4 = R6; /* If so, advance the address and finish loop.*/ + IF !CC JUMP .Lisearch_done; +.Lnomatch: + /* Go around again*/ + R5 += 1; + CC = BITTST(R5, 4); /* i.e CC = R5 >= 16*/ + IF !CC JUMP .Lisearch; + +.Lisearch_done: + I0 = R4; /* Fault address we'll search for*/ + + /* set up pointers */ + P0.L = (ICPLB_DATA0 & 0xFFFF); + P0.H = (ICPLB_DATA0 >> 16); + + /* The replacement procedure for ICPLBs */ + + P4.L = (IMEM_CONTROL & 0xFFFF); + P4.H = (IMEM_CONTROL >> 16); + + /* disable cplbs */ + R5 = [P4]; /* Control Register*/ + BITCLR(R5,ENICPLB_P); + CLI R1; + SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */ + .align 8; + [P4] = R5; + SSYNC; + STI R1; + + R1 = -1; /* end point comparison */ + R3 = 16; /* counter */ + + /* Search through CPLBs for first non-locked entry */ + /* Overwrite it by moving everyone else up by 1 */ +.Licheck_lock: + R0 = [P0++]; + R3 = R3 + R1; + CC = R3 == R1; + IF CC JUMP .Lall_locked; + CC = BITTST(R0, 0); /* an invalid entry is good */ + IF !CC JUMP .Lifound_victim; + CC = BITTST(R0,1); /* but a locked entry isn't */ + IF CC JUMP .Licheck_lock; + +.Lifound_victim: +#ifdef CONFIG_CPLB_INFO + R7 = [P0 - 0x104]; + P2.L = _ipdt_table; + P2.H = _ipdt_table; + P3.L = _ipdt_swapcount_table; + P3.H = _ipdt_swapcount_table; + P3 += -4; +.Licount: + R2 = [P2]; /* address from config table */ + P2 += 8; + P3 += 8; + CC = R2==-1; + IF CC JUMP .Licount_done; + CC = R7==R2; + IF !CC JUMP .Licount; + R7 = [P3]; + R7 += 1; + [P3] = R7; + CSYNC; +.Licount_done: +#endif + LC0=R3; + LSETUP(.Lis_move,.Lie_move) LC0; +.Lis_move: + R0 = [P0]; + [P0 - 4] = R0; + R0 = [P0 - 0x100]; + [P0-0x104] = R0; +.Lie_move:P0+=4; + + /* We've made space in the ICPLB table, so that ICPLB15 + * is now free to be overwritten. 
Next, we have to determine + * which CPLB we need to install, from the configuration + * table. This is a matter of getting the start-of-page + * addresses and page-lengths from the config table, and + * determining whether the fault address falls within that + * range. + */ + + P2.L = _ipdt_table; + P2.H = _ipdt_table; +#ifdef CONFIG_CPLB_INFO + P3.L = _ipdt_swapcount_table; + P3.H = _ipdt_swapcount_table; + P3 += -8; +#endif + P0.L = _page_size_table; + P0.H = _page_size_table; + + /* Retrieve our fault address (which may have been advanced + * because the faulting instruction crossed a page boundary). + */ + + R0 = I0; + + /* An extraction pattern, to get the page-size bits from + * the CPLB data entry. Bits 16-17, so two bits at posn 16. + */ + + R1 = ((16<<8)|2); +.Linext: R4 = [P2++]; /* address from config table */ + R2 = [P2++]; /* data from config table */ +#ifdef CONFIG_CPLB_INFO + P3 += 8; +#endif + + CC = R4 == -1; /* End of config table*/ + IF CC JUMP .Lno_page_in_table; + + /* See if failed address > start address */ + CC = R4 <= R0(IU); + IF !CC JUMP .Linext; + + /* extract page size (17:16)*/ + R3 = EXTRACT(R2, R1.L) (Z); + + /* add page size to addr to get range */ + + P5 = R3; + P5 = P0 + (P5 << 2); /* scaled, for int access*/ + R3 = [P5]; + R3 = R3 + R4; + + /* See if failed address < (start address + page size) */ + CC = R0 < R3(IU); + IF !CC JUMP .Linext; + + /* We've found a CPLB in the config table that covers + * the faulting address, so install this CPLB into the + * last entry of the table. + */ + + P1.L = (ICPLB_DATA15 & 0xFFFF); /* ICPLB_DATA15 */ + P1.H = (ICPLB_DATA15 >> 16); + [P1] = R2; + [P1-0x100] = R4; +#ifdef CONFIG_CPLB_INFO + R3 = [P3]; + R3 += 1; + [P3] = R3; +#endif + + /* P4 points to IMEM_CONTROL, and R5 contains its old + * value, after we disabled ICPLBS. Re-enable them. + */ + + BITSET(R5,ENICPLB_P); + CLI R2; + SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */ + .align 8; + [P4] = R5; + SSYNC; + STI R2; + + ( R7:4,P5:3 ) = [SP++]; + R0 = CPLB_RELOADED; + RTS; + +/* FAILED CASES*/ +.Lno_page_in_table: + ( R7:4,P5:3 ) = [SP++]; + R0 = CPLB_NO_ADDR_MATCH; + RTS; +.Lall_locked: + ( R7:4,P5:3 ) = [SP++]; + R0 = CPLB_NO_UNLOCKED; + RTS; +.Lprot_violation: + ( R7:4,P5:3 ) = [SP++]; + R0 = CPLB_PROT_VIOL; + RTS; + +.Ldcplb_write: + + /* if a DCPLB is marked as write-back (CPLB_WT==0), and + * it is clean (CPLB_DIRTY==0), then a write to the + * CPLB's page triggers a protection violation. We have to + * mark the CPLB as dirty, to indicate that there are + * pending writes associated with the CPLB. + */ + + P4.L = (DCPLB_STATUS & 0xFFFF); + P4.H = (DCPLB_STATUS >> 16); + P3.L = (DCPLB_DATA0 & 0xFFFF); + P3.H = (DCPLB_DATA0 >> 16); + R5 = [P4]; + + /* A protection violation can be caused by more than just writes + * to a clean WB page, so we have to ensure that: + * - It's a write + * - to a clean WB page + * - and is allowed in the mode the access occurred. 
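+ *
+ * Concretely, the code below checks DCPLB_STATUS bit 16 (the access
+ * was a write), then bits 14 (WT) and 7 (dirty) of the faulting
+ * CPLB's data word to confirm a clean write-back page, and finally,
+ * using DCPLB_STATUS bit 17 to pick user vs. supervisor, the page's
+ * write-enable bit for the mode in which the access happened.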
+ */ + + CC = BITTST(R5, 16); /* ensure it was a write*/ + IF !CC JUMP .Lprot_violation; + + /* to check the rest, we have to retrieve the DCPLB.*/ + + /* The low half of DCPLB_STATUS is a bit mask*/ + + R2 = R5.L (Z); /* indicating which CPLB triggered the event.*/ + R3 = 30; /* so we can use this to determine the offset*/ + R2.L = SIGNBITS R2; + R2 = R2.L (Z); /* into the DCPLB table.*/ + R3 = R3 - R2; + P4 = R3; + P3 = P3 + (P4<<2); + R3 = [P3]; /* Retrieve the CPLB*/ + + /* Now we can check whether it's a clean WB page*/ + + CC = BITTST(R3, 14); /* 0==WB, 1==WT*/ + IF CC JUMP .Lprot_violation; + CC = BITTST(R3, 7); /* 0 == clean, 1 == dirty*/ + IF CC JUMP .Lprot_violation; + + /* Check whether the write is allowed in the mode that was active.*/ + + R2 = 1<<3; /* checking write in user mode*/ + CC = BITTST(R5, 17); /* 0==was user, 1==was super*/ + R5 = CC; + R2 <<= R5; /* if was super, check write in super mode*/ + R2 = R3 & R2; + CC = R2 == 0; + IF CC JUMP .Lprot_violation; + + /* It's a genuine write-to-clean-page.*/ + + BITSET(R3, 7); /* mark as dirty*/ + [P3] = R3; /* and write back.*/ + NOP; + CSYNC; + ( R7:4,P5:3 ) = [SP++]; + R0 = CPLB_RELOADED; + RTS; + +.Ldcplb_miss_compare: + + /* Data CPLB Miss event. We need to choose a CPLB to + * evict, and then locate a new CPLB to install from the + * config table, that covers the faulting address. + */ + + P1.L = (DCPLB_DATA15 & 0xFFFF); + P1.H = (DCPLB_DATA15 >> 16); + + P4.L = (DCPLB_FAULT_ADDR & 0xFFFF); + P4.H = (DCPLB_FAULT_ADDR >> 16); + R4 = [P4]; + I0 = R4; + + /* The replacement procedure for DCPLBs*/ + + R6 = R1; /* Save for later*/ + + /* Turn off CPLBs while we work.*/ + P4.L = (DMEM_CONTROL & 0xFFFF); + P4.H = (DMEM_CONTROL >> 16); + R5 = [P4]; + BITCLR(R5,ENDCPLB_P); + CLI R0; + SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */ + .align 8; + [P4] = R5; + SSYNC; + STI R0; + + /* Start looking for a CPLB to evict. Our order of preference + * is: invalid CPLBs, clean CPLBs, dirty CPLBs. Locked CPLBs + * are no good. + */ + + I1.L = (DCPLB_DATA0 & 0xFFFF); + I1.H = (DCPLB_DATA0 >> 16); + P1 = 2; + P2 = 16; + I2.L = _dcplb_preference; + I2.H = _dcplb_preference; + LSETUP(.Lsdsearch1, .Ledsearch1) LC0 = P1; +.Lsdsearch1: + R0 = [I2++]; /* Get the bits we're interested in*/ + P0 = I1; /* Go back to start of table*/ + LSETUP (.Lsdsearch2, .Ledsearch2) LC1 = P2; +.Lsdsearch2: + R1 = [P0++]; /* Fetch each installed CPLB in turn*/ + R2 = R1 & R0; /* and test for interesting bits.*/ + CC = R2 == 0; /* If none are set, it'll do.*/ + IF !CC JUMP .Lskip_stack_check; + + R2 = [P0 - 0x104]; /* R2 - PageStart */ + P3.L = _page_size_table; /* retrieve end address */ + P3.H = _page_size_table; /* retrieve end address */ + R3 = 0x1002; /* 16th - position, 2 bits -length */ +#ifdef ANOMALY_05000209 + nop; /* Anomaly 05000209 */ +#endif + R7 = EXTRACT(R1,R3.l); + R7 = R7 << 2; /* Page size index offset */ + P5 = R7; + P3 = P3 + P5; + R7 = [P3]; /* page size in bytes */ + + R7 = R2 + R7; /* R7 - PageEnd */ + R4 = SP; /* Test SP is in range */ + + CC = R7 < R4; /* if PageEnd < SP */ + IF CC JUMP .Ldfound_victim; + R3 = 0x284; /* stack length from start of trap till + * the point. + * 20 stack locations for future modifications + */ + R4 = R4 + R3; + CC = R4 < R2; /* if SP + stacklen < PageStart */ + IF CC JUMP .Ldfound_victim; +.Lskip_stack_check: + +.Ledsearch2: NOP; +.Ledsearch1: NOP; + + /* If we got here, we didn't find a DCPLB we considered + * replacable, which means all of them were locked. 
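+ *
+ * In that case _cplb_mgr returns CPLB_NO_UNLOCKED to the caller via
+ * .Lall_locked.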
+ */ + + JUMP .Lall_locked; +.Ldfound_victim: + +#ifdef CONFIG_CPLB_INFO + R7 = [P0 - 0x104]; + P2.L = _dpdt_table; + P2.H = _dpdt_table; + P3.L = _dpdt_swapcount_table; + P3.H = _dpdt_swapcount_table; + P3 += -4; +.Ldicount: + R2 = [P2]; + P2 += 8; + P3 += 8; + CC = R2==-1; + IF CC JUMP .Ldicount_done; + CC = R7==R2; + IF !CC JUMP .Ldicount; + R7 = [P3]; + R7 += 1; + [P3] = R7; +.Ldicount_done: +#endif + + /* Clean down the hardware loops*/ + R2 = 0; + LC1 = R2; + LC0 = R2; + + /* There's a suitable victim in [P0-4] (because we've + * advanced already). + */ + +.LDdoverwrite: + + /* [P0-4] is a suitable victim CPLB, so we want to + * overwrite it by moving all the following CPLBs + * one space closer to the start. + */ + + R1.L = (DCPLB_DATA16 & 0xFFFF); /* DCPLB_DATA15 + 4 */ + R1.H = (DCPLB_DATA16 >> 16); + R0 = P0; + + /* If the victim happens to be in DCPLB15, + * we don't need to move anything. + */ + + CC = R1 == R0; + IF CC JUMP .Lde_moved; + R1 = R1 - R0; + R1 >>= 2; + P1 = R1; + LSETUP(.Lds_move, .Lde_move) LC0=P1; +.Lds_move: + R0 = [P0++]; /* move data */ + [P0 - 8] = R0; + R0 = [P0-0x104] /* move address */ +.Lde_move: [P0-0x108] = R0; + + /* We've now made space in DCPLB15 for the new CPLB to be + * installed. The next stage is to locate a CPLB in the + * config table that covers the faulting address. + */ + +.Lde_moved:NOP; + R0 = I0; /* Our faulting address */ + + P2.L = _dpdt_table; + P2.H = _dpdt_table; +#ifdef CONFIG_CPLB_INFO + P3.L = _dpdt_swapcount_table; + P3.H = _dpdt_swapcount_table; + P3 += -8; +#endif + + P1.L = _page_size_table; + P1.H = _page_size_table; + + /* An extraction pattern, to retrieve bits 17:16.*/ + + R1 = (16<<8)|2; +.Ldnext: R4 = [P2++]; /* address */ + R2 = [P2++]; /* data */ +#ifdef CONFIG_CPLB_INFO + P3 += 8; +#endif + + CC = R4 == -1; + IF CC JUMP .Lno_page_in_table; + + /* See if failed address > start address */ + CC = R4 <= R0(IU); + IF !CC JUMP .Ldnext; + + /* extract page size (17:16)*/ + R3 = EXTRACT(R2, R1.L) (Z); + + /* add page size to addr to get range */ + + P5 = R3; + P5 = P1 + (P5 << 2); + R3 = [P5]; + R3 = R3 + R4; + + /* See if failed address < (start address + page size) */ + CC = R0 < R3(IU); + IF !CC JUMP .Ldnext; + + /* We've found the CPLB that should be installed, so + * write it into CPLB15, masking off any caching bits + * if necessary. + */ + + P1.L = (DCPLB_DATA15 & 0xFFFF); + P1.H = (DCPLB_DATA15 >> 16); + + /* If the DCPLB has cache bits set, but caching hasn't + * been enabled, then we want to mask off the cache-in-L1 + * bit before installing. Moreover, if caching is off, we + * also want to ensure that the DCPLB has WT mode set, rather + * than WB, since WB pages still trigger first-write exceptions + * even when not caching is off, and the page isn't marked as + * cachable. Finally, we could mark the page as clean, not dirty, + * but we choose to leave that decision to the user; if the user + * chooses to have a CPLB pre-defined as dirty, then they always + * pay the cost of flushing during eviction, but don't pay the + * cost of first-write exceptions to mark the page as dirty. + */ + +#ifdef CONFIG_BLKFIN_WT + BITSET(R6, 14); /* Set WT*/ +#endif + + [P1] = R2; + [P1-0x100] = R4; +#ifdef CONFIG_CPLB_INFO + R3 = [P3]; + R3 += 1; + [P3] = R3; +#endif + + /* We've installed the CPLB, so re-enable CPLBs. P4 + * points to DMEM_CONTROL, and R5 is the value we + * last wrote to it, when we were disabling CPLBs. 
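+ *
+ * As on the disable path, the MMR write below is done with interrupts
+ * off (CLI/STI) and is followed by an SSYNC.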
+ */ + + BITSET(R5,ENDCPLB_P); + CLI R2; + .align 8; + [P4] = R5; + SSYNC; + STI R2; + + ( R7:4,P5:3 ) = [SP++]; + R0 = CPLB_RELOADED; + RTS; + +.data +.align 4; +_page_size_table: +.byte4 0x00000400; /* 1K */ +.byte4 0x00001000; /* 4K */ +.byte4 0x00100000; /* 1M */ +.byte4 0x00400000; /* 4M */ + +.align 4; +_dcplb_preference: +.byte4 0x00000001; /* valid bit */ +.byte4 0x00000002; /* lock bit */ diff --git a/arch/blackfin/mach-common/dpmc.S b/arch/blackfin/mach-common/dpmc.S new file mode 100644 index 000000000000..97cdcd6a00d4 --- /dev/null +++ b/arch/blackfin/mach-common/dpmc.S @@ -0,0 +1,418 @@ +/* + * File: arch/blackfin/mach-common/dpmc.S + * Based on: + * Author: LG Soft India + * + * Created: ? + * Description: Watchdog Timer APIs + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +.text + +ENTRY(_unmask_wdog_wakeup_evt) + [--SP] = ( R7:0, P5:0 ); +#if defined(CONFIG_BF561) + P0.H = hi(SICA_IWR1); + P0.L = lo(SICA_IWR1); +#else + P0.h = (SIC_IWR >> 16); + P0.l = (SIC_IWR & 0xFFFF); +#endif + R7 = [P0]; +#if defined(CONFIG_BF561) + BITSET(R7, 27); +#else + BITSET(R7,(IRQ_WATCH - IVG7)); +#endif + [P0] = R7; + SSYNC; + + ( R7:0, P5:0 ) = [SP++]; + RTS; + +.LWRITE_TO_STAT: + /* When watch dog timer is enabled, a write to STAT will load the + * contents of CNT to STAT + */ + R7 = 0x0000(z); +#if defined(CONFIG_BF561) + P0.h = (WDOGA_STAT >> 16); + P0.l = (WDOGA_STAT & 0xFFFF); +#else + P0.h = (WDOG_STAT >> 16); + P0.l = (WDOG_STAT & 0xFFFF); +#endif + [P0] = R7; + SSYNC; + JUMP .LSKIP_WRITE_TO_STAT; + +ENTRY(_program_wdog_timer) + [--SP] = ( R7:0, P5:0 ); +#if defined(CONFIG_BF561) + P0.h = (WDOGA_CNT >> 16); + P0.l = (WDOGA_CNT & 0xFFFF); +#else + P0.h = (WDOG_CNT >> 16); + P0.l = (WDOG_CNT & 0xFFFF); +#endif + [P0] = R0; + SSYNC; + +#if defined(CONFIG_BF561) + P0.h = (WDOGA_CTL >> 16); + P0.l = (WDOGA_CTL & 0xFFFF); +#else + P0.h = (WDOG_CTL >> 16); + P0.l = (WDOG_CTL & 0xFFFF); +#endif + R7 = W[P0](Z); + CC = BITTST(R7,1); + if !CC JUMP .LWRITE_TO_STAT; + CC = BITTST(R7,2); + if !CC JUMP .LWRITE_TO_STAT; + +.LSKIP_WRITE_TO_STAT: +#if defined(CONFIG_BF561) + P0.h = (WDOGA_CTL >> 16); + P0.l = (WDOGA_CTL & 0xFFFF); +#else + P0.h = (WDOG_CTL >> 16); + P0.l = (WDOG_CTL & 0xFFFF); +#endif + R7 = W[P0](Z); + BITCLR(R7,1); /* Enable GP event */ + BITSET(R7,2); + W[P0] = R7.L; + SSYNC; + NOP; + + R7 = W[P0](Z); + BITCLR(R7,4); /* Enable the wdog counter */ + W[P0] = R7.L; + SSYNC; + + ( R7:0, P5:0 ) = [SP++]; + RTS; + +ENTRY(_clear_wdog_wakeup_evt) + [--SP] = ( R7:0, P5:0 ); + +#if defined(CONFIG_BF561) + P0.h = (WDOGA_CTL >> 16); + P0.l = (WDOGA_CTL & 0xFFFF); +#else + P0.h = (WDOG_CTL >> 16); + P0.l = (WDOG_CTL & 0xFFFF); +#endif + R7 = 
0x0AD6(Z); + W[P0] = R7.L; + SSYNC; + + R7 = W[P0](Z); + BITSET(R7,15); + W[P0] = R7.L; + SSYNC; + + R7 = W[P0](Z); + BITSET(R7,1); + BITSET(R7,2); + W[P0] = R7.L; + SSYNC; + + ( R7:0, P5:0 ) = [SP++]; + RTS; + +ENTRY(_disable_wdog_timer) + [--SP] = ( R7:0, P5:0 ); +#if defined(CONFIG_BF561) + P0.h = (WDOGA_CTL >> 16); + P0.l = (WDOGA_CTL & 0xFFFF); +#else + P0.h = (WDOG_CTL >> 16); + P0.l = (WDOG_CTL & 0xFFFF); +#endif + R7 = 0xAD6(Z); + W[P0] = R7.L; + SSYNC; + ( R7:0, P5:0 ) = [SP++]; + RTS; + +#if !defined(CONFIG_BF561) + +.section .l1.text + +ENTRY(_sleep_mode) + [--SP] = ( R7:0, P5:0 ); + [--SP] = RETS; + + call _set_sic_iwr; + + R0 = 0xFFFF (Z); + call _set_rtc_istat + + P0.H = hi(PLL_CTL); + P0.L = lo(PLL_CTL); + R1 = W[P0](z); + BITSET (R1, 3); + W[P0] = R1.L; + + CLI R2; + SSYNC; + IDLE; + STI R2; + + call _test_pll_locked; + + R0 = IWR_ENABLE(0); + call _set_sic_iwr; + + P0.H = hi(PLL_CTL); + P0.L = lo(PLL_CTL); + R7 = w[p0](z); + BITCLR (R7, 3); + BITCLR (R7, 5); + w[p0] = R7.L; + IDLE; + call _test_pll_locked; + + RETS = [SP++]; + ( R7:0, P5:0 ) = [SP++]; + RTS; + +ENTRY(_hibernate_mode) + [--SP] = ( R7:0, P5:0 ); + [--SP] = RETS; + + call _set_sic_iwr; + + R0 = 0xFFFF (Z); + call _set_rtc_istat + + P0.H = hi(VR_CTL); + P0.L = lo(VR_CTL); + R1 = W[P0](z); + BITSET (R1, 8); + BITCLR (R1, 0); + BITCLR (R1, 1); + W[P0] = R1.L; + SSYNC; + + CLI R2; + IDLE; + + /* Actually, adding anything may not be necessary...SDRAM contents + * are lost + */ + +ENTRY(_deep_sleep) + [--SP] = ( R7:0, P5:0 ); + [--SP] = RETS; + + CLI R4; + + call _set_sic_iwr; + + call _set_sdram_srfs; + + /* Clear all the interrupts,bits sticky */ + R0 = 0xFFFF (Z); + call _set_rtc_istat + + P0.H = hi(PLL_CTL); + P0.L = lo(PLL_CTL); + R0 = W[P0](z); + BITSET (R0, 5); + W[P0] = R0.L; + + call _test_pll_locked; + + SSYNC; + IDLE; + + call _unset_sdram_srfs; + + call _test_pll_locked; + + R0 = IWR_ENABLE(0); + call _set_sic_iwr; + + P0.H = hi(PLL_CTL); + P0.L = lo(PLL_CTL); + R0 = w[p0](z); + BITCLR (R0, 3); + BITCLR (R0, 5); + BITCLR (R0, 8); + w[p0] = R0; + IDLE; + call _test_pll_locked; + + STI R4; + + RETS = [SP++]; + ( R7:0, P5:0 ) = [SP++]; + RTS; + +ENTRY(_sleep_deeper) + [--SP] = ( R7:0, P5:0 ); + [--SP] = RETS; + + CLI R4; + + P3 = R0; + R0 = IWR_ENABLE(0); + call _set_sic_iwr; + call _set_sdram_srfs; + + /* Clear all the interrupts,bits sticky */ + R0 = 0xFFFF (Z); + call _set_rtc_istat + + P0.H = hi(PLL_DIV); + P0.L = lo(PLL_DIV); + R6 = W[P0](z); + R0.L = 0xF; + W[P0] = R0.l; + + P0.H = hi(PLL_CTL); + P0.L = lo(PLL_CTL); + R5 = W[P0](z); + R0.L = (MIN_VC/CONFIG_CLKIN_HZ) << 9; + W[P0] = R0.l; + + SSYNC; + IDLE; + + call _test_pll_locked; + + P0.H = hi(VR_CTL); + P0.L = lo(VR_CTL); + R7 = W[P0](z); + R1 = 0x6; + R1 <<= 16; + R2 = 0x0404(Z); + R1 = R1|R2; + + R2 = DEPOSIT(R7, R1); + W[P0] = R2; + + SSYNC; + IDLE; + + call _test_pll_locked; + + P0.H = hi(PLL_CTL); + P0.L = lo(PLL_CTL); + R0 = W[P0](z); + BITSET (R0, 3); + W[P0] = R0.L; + + R0 = P3; + call _set_sic_iwr; + + SSYNC; + IDLE; + + call _test_pll_locked; + + R0 = IWR_ENABLE(0); + call _set_sic_iwr; + + P0.H = hi(VR_CTL); + P0.L = lo(VR_CTL); + W[P0]= R7; + + SSYNC; + IDLE; + + call _test_pll_locked; + + P0.H = hi(PLL_DIV); + P0.L = lo(PLL_DIV); + W[P0]= R6; + + P0.H = hi(PLL_CTL); + P0.L = lo(PLL_CTL); + w[p0] = R5; + IDLE; + call _test_pll_locked; + + call _unset_sdram_srfs; + + STI R4; + + RETS = [SP++]; + ( R7:0, P5:0 ) = [SP++]; + RTS; + +ENTRY(_set_sdram_srfs) + /* set the sdram to self refresh mode */ + P0.H = hi(EBIU_SDGCTL); + P0.L = 
lo(EBIU_SDGCTL); + R2 = [P0]; + R3.H = hi(SRFS); + R3.L = lo(SRFS); + R2 = R2|R3; + [P0] = R2; + ssync; + RTS; + +ENTRY(_unset_sdram_srfs) + /* set the sdram out of self refresh mode */ + P0.H = hi(EBIU_SDGCTL); + P0.L = lo(EBIU_SDGCTL); + R2 = [P0]; + R3.H = hi(SRFS); + R3.L = lo(SRFS); + R3 = ~R3; + R2 = R2&R3; + [P0] = R2; + ssync; + RTS; + +ENTRY(_set_sic_iwr) + P0.H = hi(SIC_IWR); + P0.L = lo(SIC_IWR); + [P0] = R0; + SSYNC; + RTS; + +ENTRY(_set_rtc_istat) + P0.H = hi(RTC_ISTAT); + P0.L = lo(RTC_ISTAT); + w[P0] = R0.L; + SSYNC; + RTS; + +ENTRY(_test_pll_locked) + P0.H = hi(PLL_STAT); + P0.L = lo(PLL_STAT); +1: + R0 = W[P0] (Z); + CC = BITTST(R0,5); + IF !CC JUMP 1b; + RTS; +#endif diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S new file mode 100644 index 000000000000..8eb0a9023482 --- /dev/null +++ b/arch/blackfin/mach-common/entry.S @@ -0,0 +1,1207 @@ +/* + * File: arch/blackfin/mach-common/entry.S + * Based on: + * Author: Linus Torvalds + * + * Created: ? + * Description: contains the system-call and fault low-level handling routines. + * This also contains the timer-interrupt handler, as well as all + * interrupts and faults that can result in a task-switch. + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* + * 25-Dec-2004 - LG Soft India + * 1. Fix in return_from_int, to make sure any pending + * system call in ILAT for this process to get + * executed, otherwise in case context switch happens, + * system call of first process (i.e in ILAT) will be + * carried forward to the switched process. + * 2. Removed Constant references for the following + * a. IPEND + * b. EXCAUSE mask + * c. PAGE Mask + */ + +/* + * NOTE: This code handles signal-recognition, which happens every time + * after a timer-interrupt and after each system call. + */ + + +#include +#include +#include +#include +#include /* TIF_NEED_RESCHED */ +#include + +#include + +#ifdef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE + /* + * TODO: this should be proper save/restore, but for now + * we'll just cheat and use 0x1/0x13 + */ +# define DEBUG_START_HWTRACE \ + P5.l = LO(TBUFCTL); \ + P5.h = HI(TBUFCTL); \ + R7 = 0x13; \ + [P5] = R7; +# define DEBUG_STOP_HWTRACE \ + P5.l = LO(TBUFCTL); \ + P5.h = HI(TBUFCTL); \ + R7 = 0x01; \ + [P5] = R7; +#else +# define DEBUG_START_HWTRACE +# define DEBUG_STOP_HWTRACE +#endif + +#ifdef CONFIG_EXCPT_IRQ_SYSC_L1 +.section .l1.text +#else +.text +#endif + +/* Slightly simplified and streamlined entry point for CPLB misses. + * This one does not lower the level to IRQ5, and thus can be used to + * patch up CPLB misses on the kernel stack. 
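+ *
+ * _ex_dcplb falls through into _ex_icplb, which pops the scratch
+ * registers pushed by _trap, saves the full context with SAVE_ALL_SYS,
+ * calls __cplb_hdr, and returns with RTX after restoring everything.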
+ */ +ENTRY(_ex_dcplb) +#if defined(ANOMALY_05000261) + /* + * Work around an anomaly: if we see a new DCPLB fault, return + * without doing anything. Then, if we get the same fault again, + * handle it. + */ + p5.l = _last_cplb_fault_retx; + p5.h = _last_cplb_fault_retx; + r7 = [p5]; + r6 = retx; + [p5] = r6; + cc = r6 == r7; + if !cc jump _return_from_exception; + /* fall through */ +#endif + +ENTRY(_ex_icplb) + (R7:6,P5:4) = [sp++]; + ASTAT = [sp++]; + SAVE_ALL_SYS + call __cplb_hdr; + DEBUG_START_HWTRACE + RESTORE_ALL_SYS + SP = RETN; + rtx; + +ENTRY(_ex_spinlock) + /* Transform this into a syscall - twiddle the syscall vector. */ + p5.l = lo(EVT15); + p5.h = hi(EVT15); + r7.l = _spinlock_bh; + r7.h = _spinlock_bh; + [p5] = r7; + csync; + /* Fall through. */ + +ENTRY(_ex_syscall) + DEBUG_START_HWTRACE + (R7:6,P5:4) = [sp++]; + ASTAT = [sp++]; + raise 15; /* invoked by TRAP #0, for sys call */ + sp = retn; + rtx + +ENTRY(_spinlock_bh) + SAVE_ALL_SYS + /* To end up here, vector 15 was changed - so we have to change it + * back. + */ + p0.l = lo(EVT15); + p0.h = hi(EVT15); + p1.l = _evt_system_call; + p1.h = _evt_system_call; + [p0] = p1; + csync; + r0 = [sp + PT_R0]; + sp += -12; + call _sys_bfin_spinlock; + sp += 12; + [SP + PT_R0] = R0; + RESTORE_ALL_SYS + rti; + +ENTRY(_ex_soft_bp) + r7 = retx; + r7 += -2; + retx = r7; + jump.s _ex_trap_c; + +ENTRY(_ex_single_step) + r7 = retx; + r6 = reti; + cc = r7 == r6; + if cc jump _return_from_exception + r7 = syscfg; + bitclr (r7, 0); + syscfg = R7; + + p5.l = lo(IPEND); + p5.h = hi(IPEND); + r6 = [p5]; + cc = bittst(r6, 5); + if !cc jump _ex_trap_c; + p4.l = lo(EVT5); + p4.h = hi(EVT5); + r6.h = _exception_to_level5; + r6.l = _exception_to_level5; + r7 = [p4]; + cc = r6 == r7; + if !cc jump _ex_trap_c; + +_return_from_exception: + DEBUG_START_HWTRACE + (R7:6,P5:4) = [sp++]; + ASTAT = [sp++]; + sp = retn; + rtx; + +ENTRY(_handle_bad_cplb) + /* To get here, we just tried and failed to change a CPLB + * so, handle things in trap_c (C code), by lowering to + * IRQ5, just like we normally do. Since this is not a + * "normal" return path, we have to do a lot of stuff to + * the stack to get ready so we can fall through - we + * need to make a CPLB exception look like a normal exception + */ + + DEBUG_START_HWTRACE + RESTORE_ALL_SYS + [--sp] = ASTAT; + [--sp] = (R7:6, P5:4); + +ENTRY(_ex_trap_c) + /* Call C code (trap_c) to handle the exception, which most + * likely involves sending a signal to the current process. + * To avoid double faults, lower our priority to IRQ5 first. + */ + P5.h = _exception_to_level5; + P5.l = _exception_to_level5; + p4.l = lo(EVT5); + p4.h = hi(EVT5); + [p4] = p5; + csync; + + /* Disable all interrupts, but make sure level 5 is enabled so + * we can switch to that level. Save the old mask. */ + cli r6; + p4.l = _excpt_saved_imask; + p4.h = _excpt_saved_imask; + [p4] = r6; + r6 = 0x3f; + sti r6; + + /* Save the excause into a circular buffer, in case the instruction + * which caused this exception causes others. + */ + P5.l = _in_ptr_excause; + P5.h = _in_ptr_excause; + R7 = [P5]; + R7 += 4; + R6 = 0xF; + R7 = R7 & R6; + [P5] = R7; + R6.l = _excause_circ_buf; + R6.h = _excause_circ_buf; + R7 = R7 + R6; + p5 = R7; + R6 = SEQSTAT; + [P5] = R6; + + DEBUG_START_HWTRACE + (R7:6,P5:4) = [sp++]; + ASTAT = [sp++]; + SP = RETN; + raise 5; + rtx; + +ENTRY(_exception_to_level5) + SAVE_ALL_SYS + + /* Restore interrupt mask. We haven't pushed RETI, so this + * doesn't enable interrupts until we return from this handler.
*/ + p4.l = _excpt_saved_imask; + p4.h = _excpt_saved_imask; + r6 = [p4]; + sti r6; + + /* Restore the hardware error vector. */ + P5.h = _evt_ivhw; + P5.l = _evt_ivhw; + p4.l = lo(EVT5); + p4.h = hi(EVT5); + [p4] = p5; + csync; + + p2.l = lo(IPEND); + p2.h = hi(IPEND); + csync; + r0 = [p2]; /* Read current IPEND */ + [sp + PT_IPEND] = r0; /* Store IPEND */ + + /* Pop the excause from the circular buffer and push it on the stack + * (in the right place - if you change the location of SEQSTAT, you + * must change this offset. + */ +.L_excep_to_5_again: + P5.l = _out_ptr_excause; + P5.h = _out_ptr_excause; + R7 = [P5]; + R7 += 4; + R6 = 0xF; + R7 = R7 & R6; + [P5] = R7; + R6.l = _excause_circ_buf; + R6.h = _excause_circ_buf; + R7 = R7 + R6; + P5 = R7; + R1 = [P5]; + [SP + 8] = r1; + + r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */ + SP += -12; + call _trap_c; + SP += 12; + + /* See if anything else is in the exception buffer + * if there is, process it + */ + P5.l = _out_ptr_excause; + P5.h = _out_ptr_excause; + P4.l = _in_ptr_excause; + P4.h = _in_ptr_excause; + R6 = [P5]; + R7 = [P4]; + CC = R6 == R7; + if ! CC JUMP .L_excep_to_5_again + + call _ret_from_exception; + RESTORE_ALL_SYS + rti; + +ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/ + /* Since the kernel stack can be anywhere, it's not guaranteed to be + * covered by a CPLB. Switch to an exception stack; use RETN as a + * scratch register (for want of a better option). + */ + retn = sp; + sp.l = _exception_stack_top; + sp.h = _exception_stack_top; + /* Try to deal with syscalls quickly. */ + [--sp] = ASTAT; + [--sp] = (R7:6, P5:4); + DEBUG_STOP_HWTRACE + r7 = SEQSTAT; /* reason code is in bit 5:0 */ + r6.l = lo(SEQSTAT_EXCAUSE); + r6.h = hi(SEQSTAT_EXCAUSE); + r7 = r7 & r6; + p5.h = _extable; + p5.l = _extable; + p4 = r7; + p5 = p5 + (p4 << 2); + p4 = [p5]; + jump (p4); + +.Lbadsys: + r7 = -ENOSYS; /* signextending enough */ + [sp + PT_R0] = r7; /* return value from system call */ + jump .Lsyscall_really_exit; + +ENTRY(_kernel_execve) + link SIZEOF_PTREGS; + p0 = sp; + r3 = SIZEOF_PTREGS / 4; + r4 = 0(x); +0: + [p0++] = r4; + r3 += -1; + cc = r3 == 0; + if !cc jump 0b (bp); + + p0 = sp; + sp += -16; + [sp + 12] = p0; + call _do_execve; + SP += 16; + cc = r0 == 0; + if ! cc jump 1f; + /* Success. Copy our temporary pt_regs to the top of the kernel + * stack and do a normal exception return. + */ + r1 = sp; + r0 = (-KERNEL_STACK_SIZE) (x); + r1 = r1 & r0; + p2 = r1; + p3 = [p2]; + r0 = KERNEL_STACK_SIZE - 4 (z); + p1 = r0; + p1 = p1 + p2; + + p0 = fp; + r4 = [p0--]; + r3 = SIZEOF_PTREGS / 4; +0: + r4 = [p0--]; + [p1--] = r4; + r3 += -1; + cc = r3 == 0; + if ! cc jump 0b (bp); + + r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z); + p1 = r0; + p1 = p1 + p2; + sp = p1; + r0 = syscfg; + [SP + PT_SYSCFG] = r0; + [p3 + (TASK_THREAD + THREAD_KSP)] = sp; + + RESTORE_CONTEXT; + rti; +1: + unlink; + rts; + +ENTRY(_system_call) + /* Store IPEND */ + p2.l = lo(IPEND); + p2.h = hi(IPEND); + csync; + r0 = [p2]; + [sp + PT_IPEND] = r0; + + /* Store RETS for now */ + r0 = rets; + [sp + PT_RESERVED] = r0; + /* Set the stack for the current process */ + r7 = sp; + r6.l = lo(ALIGN_PAGE_MASK); + r6.h = hi(ALIGN_PAGE_MASK); + r7 = r7 & r6; /* thread_info */ + p2 = r7; + p2 = [p2]; + + [p2+(TASK_THREAD+THREAD_KSP)] = sp; + + /* Check the System Call */ + r7 = __NR_syscall; + /* System call number is passed in P0 */ + r6 = p0; + cc = r6 < r7; + if ! 
cc jump .Lbadsys; + + /* are we tracing syscalls?*/ + r7 = sp; + r6.l = lo(ALIGN_PAGE_MASK); + r6.h = hi(ALIGN_PAGE_MASK); + r7 = r7 & r6; + p2 = r7; + r7 = [p2+TI_FLAGS]; + CC = BITTST(r7,TIF_SYSCALL_TRACE); + if CC JUMP _sys_trace; + + /* Execute the appropriate system call */ + + p4 = p0; + p5.l = _sys_call_table; + p5.h = _sys_call_table; + p5 = p5 + (p4 << 2); + r0 = [sp + PT_R0]; + r1 = [sp + PT_R1]; + r2 = [sp + PT_R2]; + p5 = [p5]; + + [--sp] = r5; + [--sp] = r4; + [--sp] = r3; + SP += -12; + call (p5); + SP += 24; + [sp + PT_R0] = r0; + +.Lresume_userspace: + r7 = sp; + r4.l = lo(ALIGN_PAGE_MASK); + r4.h = hi(ALIGN_PAGE_MASK); + r7 = r7 & r4; /* thread_info->flags */ + p5 = r7; +.Lresume_userspace_1: + /* Disable interrupts. */ + [--sp] = reti; + reti = [sp++]; + + r7 = [p5 + TI_FLAGS]; + r4.l = lo(_TIF_WORK_MASK); + r4.h = hi(_TIF_WORK_MASK); + r7 = r7 & r4; + +.Lsyscall_resched: + cc = BITTST(r7, TIF_NEED_RESCHED); + if !cc jump .Lsyscall_sigpending; + + /* Reenable interrupts. */ + [--sp] = reti; + r0 = [sp++]; + + SP += -12; + call _schedule; + SP += 12; + + jump .Lresume_userspace_1; + +.Lsyscall_sigpending: + cc = BITTST(r7, TIF_RESTORE_SIGMASK); + if cc jump .Lsyscall_do_signals; + cc = BITTST(r7, TIF_SIGPENDING); + if !cc jump .Lsyscall_really_exit; +.Lsyscall_do_signals: + /* Reenable interrupts. */ + [--sp] = reti; + r0 = [sp++]; + + r0 = sp; + SP += -12; + call _do_signal; + SP += 12; + +.Lsyscall_really_exit: + r5 = [sp + PT_RESERVED]; + rets = r5; + rts; + +_sys_trace: + call _syscall_trace; + + /* Execute the appropriate system call */ + + p4 = [SP + PT_P0]; + p5.l = _sys_call_table; + p5.h = _sys_call_table; + p5 = p5 + (p4 << 2); + r0 = [sp + PT_R0]; + r1 = [sp + PT_R1]; + r2 = [sp + PT_R2]; + r3 = [sp + PT_R3]; + r4 = [sp + PT_R4]; + r5 = [sp + PT_R5]; + p5 = [p5]; + + [--sp] = r5; + [--sp] = r4; + [--sp] = r3; + SP += -12; + call (p5); + SP += 24; + [sp + PT_R0] = r0; + + call _syscall_trace; + jump .Lresume_userspace; + +ENTRY(_resume) + /* + * Beware - when entering resume, prev (the current task) is + * in r0, next (the new task) is in r1. + */ + p0 = r0; + p1 = r1; + [--sp] = rets; + [--sp] = fp; + [--sp] = (r7:4, p5:3); + + /* save usp */ + p2 = usp; + [p0+(TASK_THREAD+THREAD_USP)] = p2; + + /* save current kernel stack pointer */ + [p0+(TASK_THREAD+THREAD_KSP)] = sp; + + /* save program counter */ + r1.l = _new_old_task; + r1.h = _new_old_task; + [p0+(TASK_THREAD+THREAD_PC)] = r1; + + /* restore the kernel stack pointer */ + sp = [p1+(TASK_THREAD+THREAD_KSP)]; + + /* restore user stack pointer */ + p0 = [p1+(TASK_THREAD+THREAD_USP)]; + usp = p0; + + /* restore pc */ + p0 = [p1+(TASK_THREAD+THREAD_PC)]; + jump (p0); + + /* + * Following code actually lands up in a new (old) task. + */ + +_new_old_task: + (r7:4, p5:3) = [sp++]; + fp = [sp++]; + rets = [sp++]; + + /* + * When we come out of resume, r0 carries "old" task, becuase we are + * in "new" task. 
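+ * (r0 is effectively resume()'s return value: the code running in the new task receives a pointer to the task that was just switched away from.)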
+ */ + rts; + +ENTRY(_ret_from_exception) + p2.l = lo(IPEND); + p2.h = hi(IPEND); + + csync; + r0 = [p2]; + [sp + PT_IPEND] = r0; + +1: + r1 = 0x37(Z); + r2 = ~r1; + r2.h = 0; + r0 = r2 & r0; + cc = r0 == 0; + if !cc jump 4f; /* if not return to user mode, get out */ + + /* Make sure any pending system call or deferred exception + * return in ILAT for this process to get executed, otherwise + * in case context switch happens, system call of + * first process (i.e in ILAT) will be carried + * forward to the switched process + */ + + p2.l = lo(ILAT); + p2.h = hi(ILAT); + r0 = [p2]; + r1 = (EVT_IVG14 | EVT_IVG15) (z); + r0 = r0 & r1; + cc = r0 == 0; + if !cc jump 5f; + + /* Set the stack for the current process */ + r7 = sp; + r4.l = lo(ALIGN_PAGE_MASK); + r4.h = hi(ALIGN_PAGE_MASK); + r7 = r7 & r4; /* thread_info->flags */ + p5 = r7; + r7 = [p5 + TI_FLAGS]; + r4.l = lo(_TIF_WORK_MASK); + r4.h = hi(_TIF_WORK_MASK); + r7 = r7 & r4; + cc = r7 == 0; + if cc jump 4f; + + p0.l = lo(EVT15); + p0.h = hi(EVT15); + p1.l = _schedule_and_signal; + p1.h = _schedule_and_signal; + [p0] = p1; + csync; + raise 15; /* raise evt14 to do signal or reschedule */ +4: + r0 = syscfg; + bitclr(r0, 0); + syscfg = r0; +5: + rts; + +ENTRY(_return_from_int) + /* If someone else already raised IRQ 15, do nothing. */ + csync; + p2.l = lo(ILAT); + p2.h = hi(ILAT); + r0 = [p2]; + cc = bittst (r0, EVT_IVG15_P); + if cc jump 2f; + + /* if not return to user mode, get out */ + p2.l = lo(IPEND); + p2.h = hi(IPEND); + r0 = [p2]; + r1 = 0x17(Z); + r2 = ~r1; + r2.h = 0; + r0 = r2 & r0; + r1 = 1; + r1 = r0 - r1; + r2 = r0 & r1; + cc = r2 == 0; + if !cc jump 2f; + + /* Lower the interrupt level to 15. */ + p0.l = lo(EVT15); + p0.h = hi(EVT15); + p1.l = _schedule_and_signal_from_int; + p1.h = _schedule_and_signal_from_int; + [p0] = p1; + csync; +#if defined(ANOMALY_05000281) + r0.l = lo(CONFIG_BOOT_LOAD); + r0.h = hi(CONFIG_BOOT_LOAD); + reti = r0; +#endif + r0 = 0x801f (z); + STI r0; + raise 15; /* raise evt15 to do signal or reschedule */ + rti; +2: + rts; + +ENTRY(_lower_to_irq14) +#if defined(ANOMALY_05000281) + r0.l = lo(CONFIG_BOOT_LOAD); + r0.h = hi(CONFIG_BOOT_LOAD); + reti = r0; +#endif + r0 = 0x401f; + sti r0; + raise 14; + rti; +ENTRY(_evt14_softirq) +#ifdef CONFIG_DEBUG_HWERR + r0 = 0x3f; + sti r0; +#else + cli r0; +#endif + [--sp] = RETI; + SP += 4; + rts; + +_schedule_and_signal_from_int: + /* To end up here, vector 15 was changed - so we have to change it + * back. + */ + p0.l = lo(EVT15); + p0.h = hi(EVT15); + p1.l = _evt_system_call; + p1.h = _evt_system_call; + [p0] = p1; + csync; + p1 = rets; + [sp + PT_RESERVED] = p1; + + p0.l = _irq_flags; + p0.h = _irq_flags; + r0 = [p0]; + sti r0; + + jump.s .Lresume_userspace; + +_schedule_and_signal: + SAVE_CONTEXT_SYSCALL + /* To end up here, vector 15 was changed - so we have to change it + * back. + */ + p0.l = lo(EVT15); + p0.h = hi(EVT15); + p1.l = _evt_system_call; + p1.h = _evt_system_call; + [p0] = p1; + csync; + p0.l = 1f; + p0.h = 1f; + [sp + PT_RESERVED] = P0; + call .Lresume_userspace; +1: + RESTORE_CONTEXT + rti; + +/* Make sure when we start, that the circular buffer is initialized properly + * R0 and P0 are call clobbered, so we can use them here. + */ +ENTRY(_init_exception_buff) + r0 = 0; + p0.h = _in_ptr_excause; + p0.l = _in_ptr_excause; + [p0] = r0; + p0.h = _out_ptr_excause; + p0.l = _out_ptr_excause; + [p0] = r0; + rts; + +/* + * Put these in the kernel data section - that should always be covered by + * a CPLB. 
This is needed to ensure we don't get double fault conditions + */ + +#ifdef CONFIG_SYSCALL_TAB_L1 +.section .l1.data +#else +.data +#endif +ALIGN +_extable: + /* entry for each EXCAUSE[5:0] + * This table bmust be in sync with the table in ./kernel/traps.c + * EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 to be user defined + */ + .long _ex_syscall; /* 0x00 - User Defined - Linux Syscall */ + .long _ex_soft_bp /* 0x01 - User Defined - Software breakpoint */ + .long _ex_trap_c /* 0x02 - User Defined */ + .long _ex_trap_c /* 0x03 - User Defined - Atomic test and set service */ + .long _ex_spinlock /* 0x04 - User Defined */ + .long _ex_trap_c /* 0x05 - User Defined */ + .long _ex_trap_c /* 0x06 - User Defined */ + .long _ex_trap_c /* 0x07 - User Defined */ + .long _ex_trap_c /* 0x08 - User Defined */ + .long _ex_trap_c /* 0x09 - User Defined */ + .long _ex_trap_c /* 0x0A - User Defined */ + .long _ex_trap_c /* 0x0B - User Defined */ + .long _ex_trap_c /* 0x0C - User Defined */ + .long _ex_trap_c /* 0x0D - User Defined */ + .long _ex_trap_c /* 0x0E - User Defined */ + .long _ex_trap_c /* 0x0F - User Defined */ + .long _ex_single_step /* 0x10 - HW Single step */ + .long _ex_trap_c /* 0x11 - Trace Buffer Full */ + .long _ex_trap_c /* 0x12 - Reserved */ + .long _ex_trap_c /* 0x13 - Reserved */ + .long _ex_trap_c /* 0x14 - Reserved */ + .long _ex_trap_c /* 0x15 - Reserved */ + .long _ex_trap_c /* 0x16 - Reserved */ + .long _ex_trap_c /* 0x17 - Reserved */ + .long _ex_trap_c /* 0x18 - Reserved */ + .long _ex_trap_c /* 0x19 - Reserved */ + .long _ex_trap_c /* 0x1A - Reserved */ + .long _ex_trap_c /* 0x1B - Reserved */ + .long _ex_trap_c /* 0x1C - Reserved */ + .long _ex_trap_c /* 0x1D - Reserved */ + .long _ex_trap_c /* 0x1E - Reserved */ + .long _ex_trap_c /* 0x1F - Reserved */ + .long _ex_trap_c /* 0x20 - Reserved */ + .long _ex_trap_c /* 0x21 - Undefined Instruction */ + .long _ex_trap_c /* 0x22 - Illegal Instruction Combination */ + .long _ex_dcplb /* 0x23 - Data CPLB Protection Violation */ + .long _ex_trap_c /* 0x24 - Data access misaligned */ + .long _ex_trap_c /* 0x25 - Unrecoverable Event */ + .long _ex_dcplb /* 0x26 - Data CPLB Miss */ + .long _ex_trap_c /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero */ + .long _ex_trap_c /* 0x28 - Emulation Watchpoint */ + .long _ex_trap_c /* 0x29 - Instruction fetch access error (535 only) */ + .long _ex_trap_c /* 0x2A - Instruction fetch misaligned */ + .long _ex_icplb /* 0x2B - Instruction CPLB protection Violation */ + .long _ex_icplb /* 0x2C - Instruction CPLB miss */ + .long _ex_trap_c /* 0x2D - Instruction CPLB Multiple Hits */ + .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */ + .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */ + .long _ex_trap_c /* 0x2F - Reserved */ + .long _ex_trap_c /* 0x30 - Reserved */ + .long _ex_trap_c /* 0x31 - Reserved */ + .long _ex_trap_c /* 0x32 - Reserved */ + .long _ex_trap_c /* 0x33 - Reserved */ + .long _ex_trap_c /* 0x34 - Reserved */ + .long _ex_trap_c /* 0x35 - Reserved */ + .long _ex_trap_c /* 0x36 - Reserved */ + .long _ex_trap_c /* 0x37 - Reserved */ + .long _ex_trap_c /* 0x38 - Reserved */ + .long _ex_trap_c /* 0x39 - Reserved */ + .long _ex_trap_c /* 0x3A - Reserved */ + .long _ex_trap_c /* 0x3B - Reserved */ + .long _ex_trap_c /* 0x3C - Reserved */ + .long _ex_trap_c /* 0x3D - Reserved */ + .long _ex_trap_c /* 0x3E - Reserved */ + .long _ex_trap_c /* 0x3F - Reserved */ + +ALIGN +ENTRY(_sys_call_table) + .long _sys_ni_syscall /* 0 - old "setup()" system 
call*/ + .long _sys_exit + .long _sys_fork + .long _sys_read + .long _sys_write + .long _sys_open /* 5 */ + .long _sys_close + .long _sys_ni_syscall /* old waitpid */ + .long _sys_creat + .long _sys_link + .long _sys_unlink /* 10 */ + .long _sys_execve + .long _sys_chdir + .long _sys_time + .long _sys_mknod + .long _sys_chmod /* 15 */ + .long _sys_chown /* chown16 */ + .long _sys_ni_syscall /* old break syscall holder */ + .long _sys_ni_syscall /* old stat */ + .long _sys_lseek + .long _sys_getpid /* 20 */ + .long _sys_mount + .long _sys_ni_syscall /* old umount */ + .long _sys_setuid + .long _sys_getuid + .long _sys_stime /* 25 */ + .long _sys_ptrace + .long _sys_alarm + .long _sys_ni_syscall /* old fstat */ + .long _sys_pause + .long _sys_ni_syscall /* old utime */ /* 30 */ + .long _sys_ni_syscall /* old stty syscall holder */ + .long _sys_ni_syscall /* old gtty syscall holder */ + .long _sys_access + .long _sys_nice + .long _sys_ni_syscall /* 35 */ /* old ftime syscall holder */ + .long _sys_sync + .long _sys_kill + .long _sys_rename + .long _sys_mkdir + .long _sys_rmdir /* 40 */ + .long _sys_dup + .long _sys_pipe + .long _sys_times + .long _sys_ni_syscall /* old prof syscall holder */ + .long _sys_brk /* 45 */ + .long _sys_setgid + .long _sys_getgid + .long _sys_ni_syscall /* old sys_signal */ + .long _sys_geteuid /* geteuid16 */ + .long _sys_getegid /* getegid16 */ /* 50 */ + .long _sys_acct + .long _sys_umount /* recycled never used phys() */ + .long _sys_ni_syscall /* old lock syscall holder */ + .long _sys_ioctl + .long _sys_fcntl /* 55 */ + .long _sys_ni_syscall /* old mpx syscall holder */ + .long _sys_setpgid + .long _sys_ni_syscall /* old ulimit syscall holder */ + .long _sys_ni_syscall /* old old uname */ + .long _sys_umask /* 60 */ + .long _sys_chroot + .long _sys_ustat + .long _sys_dup2 + .long _sys_getppid + .long _sys_getpgrp /* 65 */ + .long _sys_setsid + .long _sys_ni_syscall /* old sys_sigaction */ + .long _sys_sgetmask + .long _sys_ssetmask + .long _sys_setreuid /* setreuid16 */ /* 70 */ + .long _sys_setregid /* setregid16 */ + .long _sys_ni_syscall /* old sys_sigsuspend */ + .long _sys_ni_syscall /* old sys_sigpending */ + .long _sys_sethostname + .long _sys_setrlimit /* 75 */ + .long _sys_ni_syscall /* old getrlimit */ + .long _sys_getrusage + .long _sys_gettimeofday + .long _sys_settimeofday + .long _sys_getgroups /* getgroups16 */ /* 80 */ + .long _sys_setgroups /* setgroups16 */ + .long _sys_ni_syscall /* old_select */ + .long _sys_symlink + .long _sys_ni_syscall /* old lstat */ + .long _sys_readlink /* 85 */ + .long _sys_uselib + .long _sys_ni_syscall /* sys_swapon */ + .long _sys_reboot + .long _sys_ni_syscall /* old_readdir */ + .long _sys_ni_syscall /* sys_mmap */ /* 90 */ + .long _sys_munmap + .long _sys_truncate + .long _sys_ftruncate + .long _sys_fchmod + .long _sys_fchown /* fchown16 */ /* 95 */ + .long _sys_getpriority + .long _sys_setpriority + .long _sys_ni_syscall /* old profil syscall holder */ + .long _sys_statfs + .long _sys_fstatfs /* 100 */ + .long _sys_ni_syscall + .long _sys_ni_syscall /* old sys_socketcall */ + .long _sys_syslog + .long _sys_setitimer + .long _sys_getitimer /* 105 */ + .long _sys_newstat + .long _sys_newlstat + .long _sys_newfstat + .long _sys_ni_syscall /* old uname */ + .long _sys_ni_syscall /* iopl for i386 */ /* 110 */ + .long _sys_vhangup + .long _sys_ni_syscall /* obsolete idle() syscall */ + .long _sys_ni_syscall /* vm86old for i386 */ + .long _sys_wait4 + .long _sys_ni_syscall /* 115 */ /* sys_swapoff */ + .long 
_sys_sysinfo + .long _sys_ni_syscall /* old sys_ipc */ + .long _sys_fsync + .long _sys_ni_syscall /* old sys_sigreturn */ + .long _sys_clone /* 120 */ + .long _sys_setdomainname + .long _sys_newuname + .long _sys_ni_syscall /* old sys_modify_ldt */ + .long _sys_adjtimex + .long _sys_ni_syscall /* 125 */ /* sys_mprotect */ + .long _sys_ni_syscall /* old sys_sigprocmask */ + .long _sys_ni_syscall /* old "creat_module" */ + .long _sys_init_module + .long _sys_delete_module + .long _sys_ni_syscall /* 130: old "get_kernel_syms" */ + .long _sys_quotactl + .long _sys_getpgid + .long _sys_fchdir + .long _sys_bdflush + .long _sys_ni_syscall /* 135 */ /* sys_sysfs */ + .long _sys_personality + .long _sys_ni_syscall /* for afs_syscall */ + .long _sys_setfsuid /* setfsuid16 */ + .long _sys_setfsgid /* setfsgid16 */ + .long _sys_llseek /* 140 */ + .long _sys_getdents + .long _sys_ni_syscall /* sys_select */ + .long _sys_flock + .long _sys_ni_syscall /* sys_msync */ + .long _sys_readv /* 145 */ + .long _sys_writev + .long _sys_getsid + .long _sys_fdatasync + .long _sys_sysctl + .long _sys_ni_syscall /* 150 */ /* sys_mlock */ + .long _sys_ni_syscall /* sys_munlock */ + .long _sys_ni_syscall /* sys_mlockall */ + .long _sys_ni_syscall /* sys_munlockall */ + .long _sys_sched_setparam + .long _sys_sched_getparam /* 155 */ + .long _sys_sched_setscheduler + .long _sys_sched_getscheduler + .long _sys_sched_yield + .long _sys_sched_get_priority_max + .long _sys_sched_get_priority_min /* 160 */ + .long _sys_sched_rr_get_interval + .long _sys_nanosleep + .long _sys_ni_syscall /* sys_mremap */ + .long _sys_setresuid /* setresuid16 */ + .long _sys_getresuid /* getresuid16 */ /* 165 */ + .long _sys_ni_syscall /* for vm86 */ + .long _sys_ni_syscall /* old "query_module" */ + .long _sys_ni_syscall /* sys_poll */ + .long _sys_ni_syscall /* sys_nfsservctl */ + .long _sys_setresgid /* setresgid16 */ /* 170 */ + .long _sys_getresgid /* getresgid16 */ + .long _sys_prctl + .long _sys_rt_sigreturn + .long _sys_rt_sigaction + .long _sys_rt_sigprocmask /* 175 */ + .long _sys_rt_sigpending + .long _sys_rt_sigtimedwait + .long _sys_rt_sigqueueinfo + .long _sys_rt_sigsuspend + .long _sys_pread64 /* 180 */ + .long _sys_pwrite64 + .long _sys_lchown /* lchown16 */ + .long _sys_getcwd + .long _sys_capget + .long _sys_capset /* 185 */ + .long _sys_sigaltstack + .long _sys_sendfile + .long _sys_ni_syscall /* streams1 */ + .long _sys_ni_syscall /* streams2 */ + .long _sys_vfork /* 190 */ + .long _sys_getrlimit + .long _sys_mmap2 + .long _sys_truncate64 + .long _sys_ftruncate64 + .long _sys_stat64 /* 195 */ + .long _sys_lstat64 + .long _sys_fstat64 + .long _sys_chown + .long _sys_getuid + .long _sys_getgid /* 200 */ + .long _sys_geteuid + .long _sys_getegid + .long _sys_setreuid + .long _sys_setregid + .long _sys_getgroups /* 205 */ + .long _sys_setgroups + .long _sys_fchown + .long _sys_setresuid + .long _sys_getresuid + .long _sys_setresgid /* 210 */ + .long _sys_getresgid + .long _sys_lchown + .long _sys_setuid + .long _sys_setgid + .long _sys_setfsuid /* 215 */ + .long _sys_setfsgid + .long _sys_pivot_root + .long _sys_ni_syscall /* sys_mincore */ + .long _sys_ni_syscall /* sys_madvise */ + .long _sys_getdents64 /* 220 */ + .long _sys_fcntl64 + .long _sys_ni_syscall /* reserved for TUX */ + .long _sys_ni_syscall + .long _sys_gettid + .long _sys_ni_syscall /* 225 */ /* sys_readahead */ + .long _sys_setxattr + .long _sys_lsetxattr + .long _sys_fsetxattr + .long _sys_getxattr + .long _sys_lgetxattr /* 230 */ + .long _sys_fgetxattr + 
.long _sys_listxattr + .long _sys_llistxattr + .long _sys_flistxattr + .long _sys_removexattr /* 235 */ + .long _sys_lremovexattr + .long _sys_fremovexattr + .long _sys_tkill + .long _sys_sendfile64 + .long _sys_futex /* 240 */ + .long _sys_sched_setaffinity + .long _sys_sched_getaffinity + .long _sys_ni_syscall /* sys_set_thread_area */ + .long _sys_ni_syscall /* sys_get_thread_area */ + .long _sys_io_setup /* 245 */ + .long _sys_io_destroy + .long _sys_io_getevents + .long _sys_io_submit + .long _sys_io_cancel + .long _sys_ni_syscall /* 250 */ /* sys_alloc_hugepages */ + .long _sys_ni_syscall /* sys_freec_hugepages */ + .long _sys_exit_group + .long _sys_lookup_dcookie + .long _sys_bfin_spinlock + .long _sys_epoll_create /* 255 */ + .long _sys_epoll_ctl + .long _sys_epoll_wait + .long _sys_ni_syscall /* remap_file_pages */ + .long _sys_set_tid_address + .long _sys_timer_create /* 260 */ + .long _sys_timer_settime + .long _sys_timer_gettime + .long _sys_timer_getoverrun + .long _sys_timer_delete + .long _sys_clock_settime /* 265 */ + .long _sys_clock_gettime + .long _sys_clock_getres + .long _sys_clock_nanosleep + .long _sys_statfs64 + .long _sys_fstatfs64 /* 270 */ + .long _sys_tgkill + .long _sys_utimes + .long _sys_fadvise64_64 + .long _sys_ni_syscall /* vserver */ + .long _sys_ni_syscall /* 275, mbind */ + .long _sys_ni_syscall /* get_mempolicy */ + .long _sys_ni_syscall /* set_mempolicy */ + .long _sys_mq_open + .long _sys_mq_unlink + .long _sys_mq_timedsend /* 280 */ + .long _sys_mq_timedreceive + .long _sys_mq_notify + .long _sys_mq_getsetattr + .long _sys_ni_syscall /* kexec_load */ + .long _sys_waitid /* 285 */ + .long _sys_add_key + .long _sys_request_key + .long _sys_keyctl + .long _sys_ioprio_set + .long _sys_ioprio_get /* 290 */ + .long _sys_inotify_init + .long _sys_inotify_add_watch + .long _sys_inotify_rm_watch + .long _sys_ni_syscall /* migrate_pages */ + .long _sys_openat /* 295 */ + .long _sys_mkdirat + .long _sys_mknodat + .long _sys_fchownat + .long _sys_futimesat + .long _sys_fstatat64 /* 300 */ + .long _sys_unlinkat + .long _sys_renameat + .long _sys_linkat + .long _sys_symlinkat + .long _sys_readlinkat /* 305 */ + .long _sys_fchmodat + .long _sys_faccessat + .long _sys_pselect6 + .long _sys_ppoll + .long _sys_unshare /* 310 */ + .long _sys_sram_alloc + .long _sys_sram_free + .long _sys_dma_memcpy + .long _sys_accept + .long _sys_bind /* 315 */ + .long _sys_connect + .long _sys_getpeername + .long _sys_getsockname + .long _sys_getsockopt + .long _sys_listen /* 320 */ + .long _sys_recv + .long _sys_recvfrom + .long _sys_recvmsg + .long _sys_send + .long _sys_sendmsg /* 325 */ + .long _sys_sendto + .long _sys_setsockopt + .long _sys_shutdown + .long _sys_socket + .long _sys_socketpair /* 330 */ + .long _sys_semctl + .long _sys_semget + .long _sys_semop + .long _sys_msgctl + .long _sys_msgget /* 335 */ + .long _sys_msgrcv + .long _sys_msgsnd + .long _sys_shmat + .long _sys_shmctl + .long _sys_shmdt /* 340 */ + .long _sys_shmget + .rept NR_syscalls-(.-_sys_call_table)/4 + .long _sys_ni_syscall + .endr +_excpt_saved_imask: + .long 0; + +_exception_stack: + .rept 1024 + .long 0; + .endr +_exception_stack_top: + +#if defined(ANOMALY_05000261) +/* Used by the assembly entry point to work around an anomaly. */ +_last_cplb_fault_retx: + .long 0; +#endif +/* + * Single instructions can have multiple faults, which need to be + * handled by traps.c, in irq5. 
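+ * (The circular buffer below, _excause_circ_buf with _in_ptr_excause and _out_ptr_excause as write/read indices, holds up to four pending causes.)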
We store the exception cause to ensure + * we don't miss a double fault condition + */ +ENTRY(_in_ptr_excause) + .long 0; +ENTRY(_out_ptr_excause) + .long 0; +ALIGN +ENTRY(_excause_circ_buf) + .rept 4 + .long 0 + .endr diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S new file mode 100644 index 000000000000..dd45664f0d02 --- /dev/null +++ b/arch/blackfin/mach-common/interrupt.S @@ -0,0 +1,253 @@ +/* + * File: arch/blackfin/mach-common/interrupt.S + * Based on: + * Author: D. Jeff Dionne + * Kenneth Albanowski + * + * Created: ? + * Description: Interrupt Entries + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_I_ENTRY_L1 +.section .l1.text +#else +.text +#endif + +.align 4 /* just in case */ + +/* + * initial interrupt handlers + */ + +#ifndef CONFIG_KGDB + /* interrupt routine for emulation - 0 */ + /* Currently used only if GDB stub is not in - invalid */ + /* gdb-stub set the evt itself */ + /* save registers for post-mortem only */ +ENTRY(_evt_emulation) + SAVE_ALL_SYS +#ifdef CONFIG_FRAME_POINTER + fp = 0; +#endif + r0 = IRQ_EMU; + r1 = sp; + SP += -12; + call _irq_panic; + SP += 12; + /* - GDB stub fills this in by itself (if defined) */ + rte; +#endif + +/* Common interrupt entry code. First we do CLI, then push + * RETI, to keep interrupts disabled, but to allow this state to be changed + * by local_bh_enable. + * R0 contains the interrupt number, while R1 may contain the value of IPEND, + * or garbage if IPEND won't be needed by the ISR. */ +__common_int_entry: + [--sp] = fp; + [--sp] = usp; + + [--sp] = i0; + [--sp] = i1; + [--sp] = i2; + [--sp] = i3; + + [--sp] = m0; + [--sp] = m1; + [--sp] = m2; + [--sp] = m3; + + [--sp] = l0; + [--sp] = l1; + [--sp] = l2; + [--sp] = l3; + + [--sp] = b0; + [--sp] = b1; + [--sp] = b2; + [--sp] = b3; + [--sp] = a0.x; + [--sp] = a0.w; + [--sp] = a1.x; + [--sp] = a1.w; + + [--sp] = LC0; + [--sp] = LC1; + [--sp] = LT0; + [--sp] = LT1; + [--sp] = LB0; + [--sp] = LB1; + + [--sp] = ASTAT; + + [--sp] = r0; /* Skip reserved */ + [--sp] = RETS; + r2 = RETI; + [--sp] = r2; + [--sp] = RETX; + [--sp] = RETN; + [--sp] = RETE; + [--sp] = SEQSTAT; + [--sp] = r1; /* IPEND - R1 may or may not be set up before jumping here. */ + + /* Switch to other method of keeping interrupts disabled. */ +#ifdef CONFIG_DEBUG_HWERR + r1 = 0x3f; + sti r1; +#else + cli r1; +#endif + [--sp] = RETI; /* orig_pc */ + /* Clear all L registers. 
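+ * (L0-L3 are the DAG length registers used for circular addressing; they must be zero here so ordinary C pointer arithmetic does not silently wrap.)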
*/ + r1 = 0 (x); + l0 = r1; + l1 = r1; + l2 = r1; + l3 = r1; +#ifdef CONFIG_FRAME_POINTER + fp = 0; +#endif + +#ifdef ANOMALY_05000283 + cc = r7 == r7; + p5.h = 0xffc0; + p5.l = 0x0014; + if cc jump 1f; + r7.l = W[p5]; +1: +#endif + r1 = sp; + SP += -12; + call _do_irq; + SP += 12; + call _return_from_int; +.Lcommon_restore_context: + RESTORE_CONTEXT + rti; + +/* interrupt routine for ivhw - 5 */ +ENTRY(_evt_ivhw) + SAVE_CONTEXT +#ifdef CONFIG_FRAME_POINTER + fp = 0; +#endif +#ifdef ANOMALY_05000283 + cc = r7 == r7; + p5.h = 0xffc0; + p5.l = 0x0014; + if cc jump 1f; + r7.l = W[p5]; +1: +#endif + p0.l = lo(TBUFCTL); + p0.h = hi(TBUFCTL); + r0 = 1; + [p0] = r0; + r0 = IRQ_HWERR; + r1 = sp; + +#ifdef CONFIG_HARDWARE_PM + r7 = SEQSTAT; + r7 = r7 >>> 0xe; + r6 = 0x1F; + r7 = r7 & r6; + r5 = 0x12; + cc = r7 == r5; + if cc jump .Lcall_do_ovf; /* deal with performance counter overflow */ +#endif + + SP += -12; + call _irq_panic; + SP += 12; + rti; +#ifdef CONFIG_HARDWARE_PM +.Lcall_do_ovf: + + SP += -12; + call _pm_overflow; + SP += 12; + + jump .Lcommon_restore_context; +#endif + +/* interrupt routine for evt2 - 2. This is NMI. */ +ENTRY(_evt_evt2) + SAVE_CONTEXT +#ifdef CONFIG_FRAME_POINTER + fp = 0; +#endif +#ifdef ANOMALY_05000283 + cc = r7 == r7; + p5.h = 0xffc0; + p5.l = 0x0014; + if cc jump 1f; + r7.l = W[p5]; +1: +#endif + r0 = IRQ_NMI; + r1 = sp; + SP += -12; + call _asm_do_IRQ; + SP += 12; + RESTORE_CONTEXT + rtn; + +/* interrupt routine for core timer - 6 */ +ENTRY(_evt_timer) + TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P) + +/* interrupt routine for evt7 - 7 */ +ENTRY(_evt_evt7) + INTERRUPT_ENTRY(EVT_IVG7_P) +ENTRY(_evt_evt8) + INTERRUPT_ENTRY(EVT_IVG8_P) +ENTRY(_evt_evt9) + INTERRUPT_ENTRY(EVT_IVG9_P) +ENTRY(_evt_evt10) + INTERRUPT_ENTRY(EVT_IVG10_P) +ENTRY(_evt_evt11) + INTERRUPT_ENTRY(EVT_IVG11_P) +ENTRY(_evt_evt12) + INTERRUPT_ENTRY(EVT_IVG12_P) +ENTRY(_evt_evt13) + INTERRUPT_ENTRY(EVT_IVG13_P) + + + /* interrupt routine for system_call - 15 */ +ENTRY(_evt_system_call) + SAVE_CONTEXT_SYSCALL +#ifdef CONFIG_FRAME_POINTER + fp = 0; +#endif + call _system_call; + jump .Lcommon_restore_context; diff --git a/arch/blackfin/mach-common/ints-priority-dc.c b/arch/blackfin/mach-common/ints-priority-dc.c new file mode 100644 index 000000000000..f3cf07036c2a --- /dev/null +++ b/arch/blackfin/mach-common/ints-priority-dc.c @@ -0,0 +1,476 @@ +/* + * File: arch/blackfin/mach-common/ints-priority-dc.c + * Based on: + * Author: + * + * Created: ? + * Description: Set up the interupt priorities + * + * Modified: + * 1996 Roman Zippel + * 1999 D. Jeff Dionne + * 2000-2001 Lineo, Inc. D. Jefff Dionne + * 2002 Arcturus Networks Inc. MaTed + * 2003 Metrowerks/Motorola + * 2003 Bas Vermeulen + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#ifdef CONFIG_KGDB +#include +#endif +#include +#include +#include +#include + +/* + * NOTES: + * - we have separated the physical Hardware interrupt from the + * levels that the LINUX kernel sees (see the description in irq.h) + * - + */ + +unsigned long irq_flags = 0; + +/* The number of spurious interrupts */ +atomic_t num_spurious; + +struct ivgx { + /* irq number for request_irq, available in mach-bf561/irq.h */ + int irqno; + /* corresponding bit in the SICA_ISR0 register */ + int isrflag0; + /* corresponding bit in the SICA_ISR1 register */ + int isrflag1; +} ivg_table[NR_PERI_INTS]; + +struct ivg_slice { + /* position of first irq in ivg_table for given ivg */ + struct ivgx *ifirst; + struct ivgx *istop; +} ivg7_13[IVG13 - IVG7 + 1]; + +static void search_IAR(void); + +/* + * Search SIC_IAR and fill tables with the irqvalues + * and their positions in the SIC_ISR register. + */ +static void __init search_IAR(void) +{ + unsigned ivg, irq_pos = 0; + for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) { + int irqn; + + ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos]; + + for (irqn = 0; irqn < NR_PERI_INTS; irqn++) { + int iar_shift = (irqn & 7) * 4; + if (ivg == + (0xf & + bfin_read32((unsigned long *)SICA_IAR0 + + (irqn >> 3)) >> iar_shift)) { + ivg_table[irq_pos].irqno = IVG7 + irqn; + ivg_table[irq_pos].isrflag0 = + (irqn < 32 ? (1 << irqn) : 0); + ivg_table[irq_pos].isrflag1 = + (irqn < 32 ? 0 : (1 << (irqn - 32))); + ivg7_13[ivg].istop++; + irq_pos++; + } + } + } +} + +/* + * This is for BF561 internal IRQs + */ + +static void ack_noop(unsigned int irq) +{ + /* Dummy function. */ +} + +static void bf561_core_mask_irq(unsigned int irq) +{ + irq_flags &= ~(1 << irq); + if (!irqs_disabled()) + local_irq_enable(); +} + +static void bf561_core_unmask_irq(unsigned int irq) +{ + irq_flags |= 1 << irq; + /* + * If interrupts are enabled, IMASK must contain the same value + * as irq_flags. Make sure that invariant holds. If interrupts + * are currently disabled we need not do anything; one of the + * callers will take care of setting IMASK to the proper value + * when reenabling interrupts. + * local_irq_enable just does "STI irq_flags", so it's exactly + * what we need. 
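+ * (irq_flags is the image of IMASK that STI loads, so this unmask takes effect immediately only when interrupts are currently enabled; otherwise it is deferred to the next local_irq_enable().)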
+ */ + if (!irqs_disabled()) + local_irq_enable(); + return; +} + +static void bf561_internal_mask_irq(unsigned int irq) +{ + unsigned long irq_mask; + if ((irq - (IRQ_CORETMR + 1)) < 32) { + irq_mask = (1 << (irq - (IRQ_CORETMR + 1))); + bfin_write_SICA_IMASK0(bfin_read_SICA_IMASK0() & ~irq_mask); + } else { + irq_mask = (1 << (irq - (IRQ_CORETMR + 1) - 32)); + bfin_write_SICA_IMASK1(bfin_read_SICA_IMASK1() & ~irq_mask); + } +} + +static void bf561_internal_unmask_irq(unsigned int irq) +{ + unsigned long irq_mask; + + if ((irq - (IRQ_CORETMR + 1)) < 32) { + irq_mask = (1 << (irq - (IRQ_CORETMR + 1))); + bfin_write_SICA_IMASK0(bfin_read_SICA_IMASK0() | irq_mask); + } else { + irq_mask = (1 << (irq - (IRQ_CORETMR + 1) - 32)); + bfin_write_SICA_IMASK1(bfin_read_SICA_IMASK1() | irq_mask); + } + SSYNC(); +} + +static struct irq_chip bf561_core_irqchip = { + .ack = ack_noop, + .mask = bf561_core_mask_irq, + .unmask = bf561_core_unmask_irq, +}; + +static struct irq_chip bf561_internal_irqchip = { + .ack = ack_noop, + .mask = bf561_internal_mask_irq, + .unmask = bf561_internal_unmask_irq, +}; + +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO +static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)]; +static unsigned short gpio_edge_triggered[gpio_bank(MAX_BLACKFIN_GPIOS)]; + +static void bf561_gpio_ack_irq(unsigned int irq) +{ + u16 gpionr = irq - IRQ_PF0; + + if(gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) { + set_gpio_data(gpionr, 0); + SSYNC(); + } +} + +static void bf561_gpio_mask_ack_irq(unsigned int irq) +{ + u16 gpionr = irq - IRQ_PF0; + + if(gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) { + set_gpio_data(gpionr, 0); + SSYNC(); + } + + set_gpio_maska(gpionr, 0); + SSYNC(); +} + +static void bf561_gpio_mask_irq(unsigned int irq) +{ + set_gpio_maska(irq - IRQ_PF0, 0); + SSYNC(); +} + +static void bf561_gpio_unmask_irq(unsigned int irq) +{ + set_gpio_maska(irq - IRQ_PF0, 1); + SSYNC(); +} + +static unsigned int bf561_gpio_irq_startup(unsigned int irq) +{ + unsigned int ret; + u16 gpionr = irq - IRQ_PF0; + + if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) { + + ret = gpio_request(gpionr, NULL); + if(ret) + return ret; + + } + + gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr); + bf561_gpio_unmask_irq(irq); + + return ret; + +} + +static void bf561_gpio_irq_shutdown(unsigned int irq) +{ + bf561_gpio_mask_irq(irq); + gpio_free(irq - IRQ_PF0); + gpio_enabled[gpio_bank(irq - IRQ_PF0)] &= ~gpio_bit(irq - IRQ_PF0); +} + +static int bf561_gpio_irq_type(unsigned int irq, unsigned int type) +{ + + unsigned int ret; + u16 gpionr = irq - IRQ_PF0; + + + if (type == IRQ_TYPE_PROBE) { + /* only probe unenabled GPIO interrupt lines */ + if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr)) + return 0; + type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; + + } + + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | + IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { + + if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) { + + ret = gpio_request(gpionr, NULL); + if(ret) + return ret; + + } + + gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr); + } else { + gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr); + return 0; + } + + + set_gpio_dir(gpionr, 0); + set_gpio_inen(gpionr, 1); + + + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { + gpio_edge_triggered[gpio_bank(gpionr)] |= gpio_bit(gpionr); + set_gpio_edge(gpionr, 1); + } else { + set_gpio_edge(gpionr, 0); + gpio_edge_triggered[gpio_bank(gpionr)] &= ~gpio_bit(gpionr); + } + + if ((type & 
(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + set_gpio_both(gpionr, 1); + else + set_gpio_both(gpionr, 0); + + if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW))) + set_gpio_polar(gpionr, 1); /* low or falling edge denoted by one */ + else + set_gpio_polar(gpionr, 0); /* high or rising edge denoted by zero */ + + SSYNC(); + + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + set_irq_handler(irq, handle_edge_irq); + else + set_irq_handler(irq, handle_level_irq); + + return 0; +} + +static struct irq_chip bf561_gpio_irqchip = { + .ack = bf561_gpio_ack_irq, + .mask = bf561_gpio_mask_irq, + .mask_ack = bf561_gpio_mask_ack_irq, + .unmask = bf561_gpio_unmask_irq, + .set_type = bf561_gpio_irq_type, + .startup = bf561_gpio_irq_startup, + .shutdown = bf561_gpio_irq_shutdown +}; + +static void bf561_demux_gpio_irq(unsigned int inta_irq, + struct irq_desc *intb_desc) +{ + int irq, flag_d, mask; + u16 gpio; + + switch (inta_irq) { + case IRQ_PROG0_INTA: + irq = IRQ_PF0; + break; + case IRQ_PROG1_INTA: + irq = IRQ_PF16; + break; + case IRQ_PROG2_INTA: + irq = IRQ_PF32; + break; + default: + dump_stack(); + return; + } + + gpio = irq - IRQ_PF0; + + flag_d = get_gpiop_data(gpio); + mask = flag_d & (gpio_enabled[gpio_bank(gpio)] & + get_gpiop_maska(gpio)); + + do { + if (mask & 1) { + struct irq_desc *desc = irq_desc + irq; + desc->handle_irq(irq, desc); + } + irq++; + mask >>= 1; + } while (mask); + + +} + +#endif /* CONFIG_IRQCHIP_DEMUX_GPIO */ + +/* + * This function should be called during kernel startup to initialize + * the BFin IRQ handling routines. + */ +int __init init_arch_irq(void) +{ + int irq; + unsigned long ilat = 0; + /* Disable all the peripheral intrs - page 4-29 HW Ref manual */ + bfin_write_SICA_IMASK0(SIC_UNMASK_ALL); + bfin_write_SICA_IMASK1(SIC_UNMASK_ALL); + SSYNC(); + + local_irq_disable(); + + init_exception_buff(); + +#ifndef CONFIG_KGDB + bfin_write_EVT0(evt_emulation); +#endif + bfin_write_EVT2(evt_evt2); + bfin_write_EVT3(trap); + bfin_write_EVT5(evt_ivhw); + bfin_write_EVT6(evt_timer); + bfin_write_EVT7(evt_evt7); + bfin_write_EVT8(evt_evt8); + bfin_write_EVT9(evt_evt9); + bfin_write_EVT10(evt_evt10); + bfin_write_EVT11(evt_evt11); + bfin_write_EVT12(evt_evt12); + bfin_write_EVT13(evt_evt13); + bfin_write_EVT14(evt14_softirq); + bfin_write_EVT15(evt_system_call); + CSYNC(); + + for (irq = 0; irq < SYS_IRQS; irq++) { + if (irq <= IRQ_CORETMR) + set_irq_chip(irq, &bf561_core_irqchip); + else + set_irq_chip(irq, &bf561_internal_irqchip); +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO + if ((irq != IRQ_PROG0_INTA) && + (irq != IRQ_PROG1_INTA) && (irq != IRQ_PROG2_INTA)) { +#endif + set_irq_handler(irq, handle_simple_irq); +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO + } else { + set_irq_chained_handler(irq, bf561_demux_gpio_irq); + } +#endif + + } + +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO + for (irq = IRQ_PF0; irq <= IRQ_PF47; irq++) { + set_irq_chip(irq, &bf561_gpio_irqchip); + /* if configured as edge, then will be changed to do_edge_IRQ */ + set_irq_handler(irq, handle_level_irq); + } +#endif + bfin_write_IMASK(0); + CSYNC(); + ilat = bfin_read_ILAT(); + CSYNC(); + bfin_write_ILAT(ilat); + CSYNC(); + + printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n"); + /* IMASK=xxx is equivalent to STI xx or irq_flags=xx, + * local_irq_enable() + */ + program_IAR(); + /* Therefore it's better to setup IARs before interrupts enabled */ + search_IAR(); + + /* Enable interrupts IVG7-15 */ + irq_flags = irq_flags | IMASK_IVG15 | 
+ IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | + IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; + + return 0; +} + +#ifdef CONFIG_DO_IRQ_L1 +void do_irq(int vec, struct pt_regs *fp)__attribute__((l1_text)); +#endif + +void do_irq(int vec, struct pt_regs *fp) +{ + if (vec == EVT_IVTMR_P) { + vec = IRQ_CORETMR; + } else { + struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst; + struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop; + unsigned long sic_status0, sic_status1; + + SSYNC(); + sic_status0 = bfin_read_SICA_IMASK0() & bfin_read_SICA_ISR0(); + sic_status1 = bfin_read_SICA_IMASK1() & bfin_read_SICA_ISR1(); + + for (;; ivg++) { + if (ivg >= ivg_stop) { + atomic_inc(&num_spurious); + return; + } else if ((sic_status0 & ivg->isrflag0) || + (sic_status1 & ivg->isrflag1)) + break; + } + vec = ivg->irqno; + } + asm_do_IRQ(vec, fp); + +#ifdef CONFIG_KGDB + kgdb_process_breakpoint(); +#endif +} diff --git a/arch/blackfin/mach-common/ints-priority-sc.c b/arch/blackfin/mach-common/ints-priority-sc.c new file mode 100644 index 000000000000..34b62288ec3c --- /dev/null +++ b/arch/blackfin/mach-common/ints-priority-sc.c @@ -0,0 +1,577 @@ +/* + * File: arch/blackfin/mach-common/ints-priority-sc.c + * Based on: + * Author: + * + * Created: ? + * Description: Set up the interupt priorities + * + * Modified: + * 1996 Roman Zippel + * 1999 D. Jeff Dionne + * 2000-2001 Lineo, Inc. D. Jefff Dionne + * 2002 Arcturus Networks Inc. MaTed + * 2003 Metrowerks/Motorola + * 2003 Bas Vermeulen + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#ifdef CONFIG_KGDB +#include +#endif +#include +#include +#include +#include + +#ifdef BF537_FAMILY +# define BF537_GENERIC_ERROR_INT_DEMUX +#else +# undef BF537_GENERIC_ERROR_INT_DEMUX +#endif + +/* + * NOTES: + * - we have separated the physical Hardware interrupt from the + * levels that the LINUX kernel sees (see the description in irq.h) + * - + */ + +unsigned long irq_flags = 0; + +/* The number of spurious interrupts */ +atomic_t num_spurious; + +struct ivgx { + /* irq number for request_irq, available in mach-bf533/irq.h */ + int irqno; + /* corresponding bit in the SIC_ISR register */ + int isrflag; +} ivg_table[NR_PERI_INTS]; + +struct ivg_slice { + /* position of first irq in ivg_table for given ivg */ + struct ivgx *ifirst; + struct ivgx *istop; +} ivg7_13[IVG13 - IVG7 + 1]; + +static void search_IAR(void); + +/* + * Search SIC_IAR and fill tables with the irqvalues + * and their positions in the SIC_ISR register. 
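+ * (Each SIC_IAR word packs eight 4-bit fields: for peripheral interrupt irqn, word (irqn >> 3) after SIC_IAR0, bits [(irqn & 7)*4 + 3:(irqn & 7)*4], gives the assigned IVG level stored as an offset from IVG7 - exactly the value the loop below compares against ivg.)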
+ */ +static void __init search_IAR(void) +{ + unsigned ivg, irq_pos = 0; + for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) { + int irqn; + + ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = + &ivg_table[irq_pos]; + + for (irqn = 0; irqn < NR_PERI_INTS; irqn++) { + int iar_shift = (irqn & 7) * 4; + if (ivg == + (0xf & + bfin_read32((unsigned long *) SIC_IAR0 + + (irqn >> 3)) >> iar_shift)) { + ivg_table[irq_pos].irqno = IVG7 + irqn; + ivg_table[irq_pos].isrflag = 1 << irqn; + ivg7_13[ivg].istop++; + irq_pos++; + } + } + } +} + +/* + * This is for BF533 internal IRQs + */ + +static void ack_noop(unsigned int irq) +{ + /* Dummy function. */ +} + +static void bfin_core_mask_irq(unsigned int irq) +{ + irq_flags &= ~(1 << irq); + if (!irqs_disabled()) + local_irq_enable(); +} + +static void bfin_core_unmask_irq(unsigned int irq) +{ + irq_flags |= 1 << irq; + /* + * If interrupts are enabled, IMASK must contain the same value + * as irq_flags. Make sure that invariant holds. If interrupts + * are currently disabled we need not do anything; one of the + * callers will take care of setting IMASK to the proper value + * when reenabling interrupts. + * local_irq_enable just does "STI irq_flags", so it's exactly + * what we need. + */ + if (!irqs_disabled()) + local_irq_enable(); + return; +} + +static void bfin_internal_mask_irq(unsigned int irq) +{ + bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & + ~(1 << (irq - (IRQ_CORETMR + 1)))); + SSYNC(); +} + +static void bfin_internal_unmask_irq(unsigned int irq) +{ + bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | + (1 << (irq - (IRQ_CORETMR + 1)))); + SSYNC(); +} + +static struct irq_chip bfin_core_irqchip = { + .ack = ack_noop, + .mask = bfin_core_mask_irq, + .unmask = bfin_core_unmask_irq, +}; + +static struct irq_chip bfin_internal_irqchip = { + .ack = ack_noop, + .mask = bfin_internal_mask_irq, + .unmask = bfin_internal_unmask_irq, +}; + +#ifdef BF537_GENERIC_ERROR_INT_DEMUX +static int error_int_mask; + +static void bfin_generic_error_ack_irq(unsigned int irq) +{ + +} + +static void bfin_generic_error_mask_irq(unsigned int irq) +{ + error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR)); + + if (!error_int_mask) { + local_irq_disable(); + bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & + ~(1 << + (IRQ_GENERIC_ERROR - + (IRQ_CORETMR + 1)))); + SSYNC(); + local_irq_enable(); + } +} + +static void bfin_generic_error_unmask_irq(unsigned int irq) +{ + local_irq_disable(); + bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 1 << + (IRQ_GENERIC_ERROR - (IRQ_CORETMR + 1))); + SSYNC(); + local_irq_enable(); + + error_int_mask |= 1L << (irq - IRQ_PPI_ERROR); +} + +static struct irq_chip bfin_generic_error_irqchip = { + .ack = bfin_generic_error_ack_irq, + .mask = bfin_generic_error_mask_irq, + .unmask = bfin_generic_error_unmask_irq, +}; + +static void bfin_demux_error_irq(unsigned int int_err_irq, + struct irq_desc *intb_desc) +{ + int irq = 0; + + SSYNC(); + +#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) + if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK) + irq = IRQ_MAC_ERROR; + else +#endif + if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK) + irq = IRQ_SPORT0_ERROR; + else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK) + irq = IRQ_SPORT1_ERROR; + else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK) + irq = IRQ_PPI_ERROR; + else if (bfin_read_CAN_GIF() & CAN_ERR_MASK) + irq = IRQ_CAN_ERROR; + else if (bfin_read_SPI_STAT() & SPI_ERR_MASK) + irq = IRQ_SPI_ERROR; + else if ((bfin_read_UART0_IIR() & UART_ERR_MASK_STAT1) && + (bfin_read_UART0_IIR() & UART_ERR_MASK_STAT0)) + irq = IRQ_UART0_ERROR; + 
else if ((bfin_read_UART1_IIR() & UART_ERR_MASK_STAT1) && + (bfin_read_UART1_IIR() & UART_ERR_MASK_STAT0)) + irq = IRQ_UART1_ERROR; + + if (irq) { + if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR))) { + struct irq_desc *desc = irq_desc + irq; + desc->handle_irq(irq, desc); + } else { + + switch (irq) { + case IRQ_PPI_ERROR: + bfin_write_PPI_STATUS(PPI_ERR_MASK); + break; +#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) + case IRQ_MAC_ERROR: + bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK); + break; +#endif + case IRQ_SPORT0_ERROR: + bfin_write_SPORT0_STAT(SPORT_ERR_MASK); + break; + + case IRQ_SPORT1_ERROR: + bfin_write_SPORT1_STAT(SPORT_ERR_MASK); + break; + + case IRQ_CAN_ERROR: + bfin_write_CAN_GIS(CAN_ERR_MASK); + break; + + case IRQ_SPI_ERROR: + bfin_write_SPI_STAT(SPI_ERR_MASK); + break; + + default: + break; + } + + pr_debug("IRQ %d:" + " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n", + irq); + } + } else + printk(KERN_ERR + "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR" + " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n", + __FUNCTION__, __FILE__, __LINE__); + + +} +#endif /* BF537_GENERIC_ERROR_INT_DEMUX */ + +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO + +static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)]; +static unsigned short gpio_edge_triggered[gpio_bank(MAX_BLACKFIN_GPIOS)]; + +static void bfin_gpio_ack_irq(unsigned int irq) +{ + u16 gpionr = irq - IRQ_PF0; + + if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) { + set_gpio_data(gpionr, 0); + SSYNC(); + } +} + +static void bfin_gpio_mask_ack_irq(unsigned int irq) +{ + u16 gpionr = irq - IRQ_PF0; + + if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) { + set_gpio_data(gpionr, 0); + SSYNC(); + } + + set_gpio_maska(gpionr, 0); + SSYNC(); +} + +static void bfin_gpio_mask_irq(unsigned int irq) +{ + set_gpio_maska(irq - IRQ_PF0, 0); + SSYNC(); +} + +static void bfin_gpio_unmask_irq(unsigned int irq) +{ + set_gpio_maska(irq - IRQ_PF0, 1); + SSYNC(); +} + +static unsigned int bfin_gpio_irq_startup(unsigned int irq) +{ + unsigned int ret; + u16 gpionr = irq - IRQ_PF0; + + if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) { + ret = gpio_request(gpionr, NULL); + if (ret) + return ret; + } + + gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr); + bfin_gpio_unmask_irq(irq); + + return ret; +} + +static void bfin_gpio_irq_shutdown(unsigned int irq) +{ + bfin_gpio_mask_irq(irq); + gpio_free(irq - IRQ_PF0); + gpio_enabled[gpio_bank(irq - IRQ_PF0)] &= ~gpio_bit(irq - IRQ_PF0); +} + +static int bfin_gpio_irq_type(unsigned int irq, unsigned int type) +{ + + unsigned int ret; + u16 gpionr = irq - IRQ_PF0; + + if (type == IRQ_TYPE_PROBE) { + /* only probe unenabled GPIO interrupt lines */ + if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr)) + return 0; + type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; + } + + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | + IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + { + if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) { + ret = gpio_request(gpionr, NULL); + if (ret) + return ret; + } + + gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr); + } else { + gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr); + return 0; + } + + set_gpio_dir(gpionr, 0); + set_gpio_inen(gpionr, 1); + + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { + gpio_edge_triggered[gpio_bank(gpionr)] |= gpio_bit(gpionr); + set_gpio_edge(gpionr, 1); + } else { + set_gpio_edge(gpionr, 0); + gpio_edge_triggered[gpio_bank(gpionr)] &= ~gpio_bit(gpionr); + } + + if ((type & 
(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + set_gpio_both(gpionr, 1); + else + set_gpio_both(gpionr, 0); + + if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW))) + set_gpio_polar(gpionr, 1); /* low or falling edge denoted by one */ + else + set_gpio_polar(gpionr, 0); /* high or rising edge denoted by zero */ + + SSYNC(); + + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + set_irq_handler(irq, handle_edge_irq); + else + set_irq_handler(irq, handle_level_irq); + + return 0; +} + + +static struct irq_chip bfin_gpio_irqchip = { + .ack = bfin_gpio_ack_irq, + .mask = bfin_gpio_mask_irq, + .mask_ack = bfin_gpio_mask_ack_irq, + .unmask = bfin_gpio_unmask_irq, + .set_type = bfin_gpio_irq_type, + .startup = bfin_gpio_irq_startup, + .shutdown = bfin_gpio_irq_shutdown +}; + +static void bfin_demux_gpio_irq(unsigned int intb_irq, + struct irq_desc *intb_desc) +{ + u16 i; + + for (i = 0; i < MAX_BLACKFIN_GPIOS; i+=16) { + int irq = IRQ_PF0 + i; + int flag_d = get_gpiop_data(i); + int mask = + flag_d & (gpio_enabled[gpio_bank(i)] & + get_gpiop_maska(i)); + + while (mask) { + if (mask & 1) { + struct irq_desc *desc = irq_desc + irq; + desc->handle_irq(irq, desc); + } + irq++; + mask >>= 1; + } + } +} + +#endif /* CONFIG_IRQCHIP_DEMUX_GPIO */ + +/* + * This function should be called during kernel startup to initialize + * the BFin IRQ handling routines. + */ +int __init init_arch_irq(void) +{ + int irq; + unsigned long ilat = 0; + /* Disable all the peripheral intrs - page 4-29 HW Ref manual */ + bfin_write_SIC_IMASK(SIC_UNMASK_ALL); + SSYNC(); + + local_irq_disable(); + +#ifndef CONFIG_KGDB + bfin_write_EVT0(evt_emulation); +#endif + bfin_write_EVT2(evt_evt2); + bfin_write_EVT3(trap); + bfin_write_EVT5(evt_ivhw); + bfin_write_EVT6(evt_timer); + bfin_write_EVT7(evt_evt7); + bfin_write_EVT8(evt_evt8); + bfin_write_EVT9(evt_evt9); + bfin_write_EVT10(evt_evt10); + bfin_write_EVT11(evt_evt11); + bfin_write_EVT12(evt_evt12); + bfin_write_EVT13(evt_evt13); + bfin_write_EVT14(evt14_softirq); + bfin_write_EVT15(evt_system_call); + CSYNC(); + + for (irq = 0; irq < SYS_IRQS; irq++) { + if (irq <= IRQ_CORETMR) + set_irq_chip(irq, &bfin_core_irqchip); + else + set_irq_chip(irq, &bfin_internal_irqchip); +#ifdef BF537_GENERIC_ERROR_INT_DEMUX + if (irq != IRQ_GENERIC_ERROR) { +#endif + +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO + if ((irq != IRQ_PROG_INTA) /*PORT F & G MASK_A Interrupt*/ +# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) + && (irq != IRQ_MAC_RX) /*PORT H MASK_A Interrupt*/ +# endif + ) { +#endif + set_irq_handler(irq, handle_simple_irq); +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO + } else { + set_irq_chained_handler(irq, + bfin_demux_gpio_irq); + } +#endif + +#ifdef BF537_GENERIC_ERROR_INT_DEMUX + } else { + set_irq_handler(irq, bfin_demux_error_irq); + } +#endif + } +#ifdef BF537_GENERIC_ERROR_INT_DEMUX + for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) { + set_irq_chip(irq, &bfin_generic_error_irqchip); + set_irq_handler(irq, handle_level_irq); + } +#endif + +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO + for (irq = IRQ_PF0; irq < NR_IRQS; irq++) { + set_irq_chip(irq, &bfin_gpio_irqchip); + /* if configured as edge, then will be changed to do_edge_IRQ */ + set_irq_handler(irq, handle_level_irq); + } +#endif + bfin_write_IMASK(0); + CSYNC(); + ilat = bfin_read_ILAT(); + CSYNC(); + bfin_write_ILAT(ilat); + CSYNC(); + + printk(KERN_INFO + "Configuring Blackfin Priority Driven Interrupts\n"); + /* IMASK=xxx 
is equivalent to STI xx or irq_flags=xx, + * local_irq_enable() + */ + program_IAR(); + /* Therefore it's better to setup IARs before interrupts enabled */ + search_IAR(); + + /* Enable interrupts IVG7-15 */ + irq_flags = irq_flags | IMASK_IVG15 | + IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | + IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | + IMASK_IVGHW; + + return 0; +} + +#ifdef CONFIG_DO_IRQ_L1 +void do_irq(int vec, struct pt_regs *fp)__attribute__((l1_text)); +#endif + +void do_irq(int vec, struct pt_regs *fp) +{ + if (vec == EVT_IVTMR_P) { + vec = IRQ_CORETMR; + } else { + struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst; + struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop; + unsigned long sic_status; + + SSYNC(); + sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR(); + + for (;; ivg++) { + if (ivg >= ivg_stop) { + atomic_inc(&num_spurious); + return; + } else if (sic_status & ivg->isrflag) + break; + } + vec = ivg->irqno; + } + asm_do_IRQ(vec, fp); + +#ifdef CONFIG_KGDB + kgdb_process_breakpoint(); +#endif +} diff --git a/arch/blackfin/mach-common/irqpanic.c b/arch/blackfin/mach-common/irqpanic.c new file mode 100644 index 000000000000..f05e3dadaf33 --- /dev/null +++ b/arch/blackfin/mach-common/irqpanic.c @@ -0,0 +1,194 @@ +/* + * File: arch/blackfin/mach-common/irqpanic.c + * Based on: + * Author: + * + * Created: ? + * Description: panic kernel with dump information + * + * Modified: rgetz - added cache checking code 14Feb06 + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include "../oprofile/op_blackfin.h" + +#ifdef CONFIG_DEBUG_ICACHE_CHECK +#define L1_ICACHE_START 0xffa10000 +#define L1_ICACHE_END 0xffa13fff +void irq_panic(int reason, struct pt_regs *regs) __attribute__ ((l1_text)); +#endif + +/* + * irq_panic - calls panic with string setup + */ +asmlinkage void irq_panic(int reason, struct pt_regs *regs) +{ + int sig = 0; + siginfo_t info; + +#ifdef CONFIG_DEBUG_ICACHE_CHECK + unsigned int cmd, tag, ca, cache_hi, cache_lo, *pa; + unsigned short i, j, die; + unsigned int bad[10][6]; + + /* check entire cache for coherency + * Since printk is in cacheable memory, + * don't call it until you have checked everything + */ + + die = 0; + i = 0; + + /* check icache */ + + for (ca = L1_ICACHE_START; ca <= L1_ICACHE_END && i < 10; ca += 32) { + + /* Grab various address bits for the itest_cmd fields */ + cmd = (((ca & 0x3000) << 4) | /* ca[13:12] for SBNK[1:0] */ + ((ca & 0x0c00) << 16) | /* ca[11:10] for WAYSEL[1:0] */ + ((ca & 0x3f8)) | /* ca[09:03] for SET[4:0] and DW[1:0] */ + 0); /* Access Tag, Read access */ + + SSYNC(); + bfin_write_ITEST_COMMAND(cmd); + SSYNC(); + tag = bfin_read_ITEST_DATA0(); + SSYNC(); + + /* if tag is marked as valid, check it */ + if (tag & 1) { + /* The icache is arranged in 4 groups of 64-bits */ + for (j = 0; j < 32; j += 8) { + cmd = ((((ca + j) & 0x3000) << 4) | /* ca[13:12] for SBNK[1:0] */ + (((ca + j) & 0x0c00) << 16) | /* ca[11:10] for WAYSEL[1:0] */ + (((ca + j) & 0x3f8)) | /* ca[09:03] for SET[4:0] and DW[1:0] */ + 4); /* Access Data, Read access */ + + SSYNC(); + bfin_write_ITEST_COMMAND(cmd); + SSYNC(); + + cache_hi = bfin_read_ITEST_DATA1(); + cache_lo = bfin_read_ITEST_DATA0(); + + pa = ((unsigned int *)((tag & 0xffffcc00) | + ((ca + j) & ~(0xffffcc00)))); + + /* + * Debugging this, enable + * + * printk("addr: %08x %08x%08x | %08x%08x\n", + * ((unsigned int *)((tag & 0xffffcc00) | ((ca+j) & ~(0xffffcc00)))), + * cache_hi, cache_lo, *(pa+1), *pa); + */ + + if (cache_hi != *(pa + 1) || cache_lo != *pa) { + /* Since icache is not working, stay out of it, by not printing */ + die = 1; + bad[i][0] = (ca + j); + bad[i][1] = cache_hi; + bad[i][2] = cache_lo; + bad[i][3] = ((tag & 0xffffcc00) | + ((ca + j) & ~(0xffffcc00))); + bad[i][4] = *(pa + 1); + bad[i][5] = *(pa); + i++; + } + } + } + } + if (die) { + printk(KERN_EMERG "icache coherency error\n"); + for (j = 0; j <= i; j++) { + printk(KERN_EMERG + "cache address : %08x cache value : %08x%08x\n", + bad[j][0], bad[j][1], bad[j][2]); + printk(KERN_EMERG + "physical address: %08x SDRAM value : %08x%08x\n", + bad[j][3], bad[j][4], bad[j][5]); + } + panic("icache coherency error"); + } else { + printk(KERN_EMERG "icache checked, and OK\n"); + } +#endif + + printk(KERN_EMERG "\n"); + printk(KERN_EMERG "Exception: IRQ 0x%x entered\n", reason); + printk(KERN_EMERG " code=[0x%08lx], stack frame=0x%08lx, " + " bad PC=0x%08lx\n", + (unsigned long)regs->seqstat, + (unsigned long)regs, + (unsigned long)regs->pc); + if (reason == 0x5) { + printk(KERN_EMERG "----------- HARDWARE ERROR -----------\n"); + + /* There is only need to check for Hardware Errors, since other + * EXCEPTIONS are handled in TRAPS.c (MH) + */ + switch (regs->seqstat & SEQSTAT_HWERRCAUSE) { + case 
(SEQSTAT_HWERRCAUSE_SYSTEM_MMR): /* System MMR Error */ + info.si_code = BUS_ADRALN; + sig = SIGBUS; + printk(KERN_EMERG HWC_x2); + break; + case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR): /* External Memory Addressing Error */ + info.si_code = BUS_ADRERR; + sig = SIGBUS; + printk(KERN_EMERG HWC_x3); + break; + case (SEQSTAT_HWERRCAUSE_PERF_FLOW): /* Performance Monitor Overflow */ + printk(KERN_EMERG HWC_x12); + break; + case (SEQSTAT_HWERRCAUSE_RAISE_5): /* RAISE 5 instruction */ + printk(KERN_EMERG HWC_x18); + break; + default: /* Reserved */ + printk(KERN_EMERG HWC_default); + break; + } + } + + regs->ipend = bfin_read_IPEND(); + dump_bfin_regs(regs, (void *)regs->pc); + if (0 == (info.si_signo = sig) || 0 == user_mode(regs)) /* in kernelspace */ + panic("Unhandled IRQ or exceptions!\n"); + else { /* in userspace */ + info.si_errno = 0; + info.si_addr = (void *)regs->pc; + force_sig_info(sig, &info, current); + } +} + +#ifdef CONFIG_HARDWARE_PM +/* + * call the handler of Performance overflow + */ +asmlinkage void pm_overflow(int irq, struct pt_regs *regs) +{ + pm_overflow_handler(irq, regs); +} +#endif diff --git a/arch/blackfin/mach-common/lock.S b/arch/blackfin/mach-common/lock.S new file mode 100644 index 000000000000..2cbb15b33925 --- /dev/null +++ b/arch/blackfin/mach-common/lock.S @@ -0,0 +1,204 @@ +/* + * File: arch/blackfin/mach-common/lock.S + * Based on: + * Author: LG Soft India + * + * Created: ? + * Description: kernel locks + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +.text + +#ifdef CONFIG_BLKFIN_CACHE_LOCK + +/* When you come here, it is assumed that + * R0 - Which way to be locked + */ + +ENTRY(_cache_grab_lock) + + [--SP]=( R7:0,P5:0 ); + + P1.H = (IMEM_CONTROL >> 16); + P1.L = (IMEM_CONTROL & 0xFFFF); + P5.H = (ICPLB_ADDR0 >> 16); + P5.L = (ICPLB_ADDR0 & 0xFFFF); + P4.H = (ICPLB_DATA0 >> 16); + P4.L = (ICPLB_DATA0 & 0xFFFF); + R7 = R0; + + /* If the code of interest already resides in the cache + * invalidate the entire cache itself. + * invalidate_entire_icache; + */ + + SP += -12; + [--SP] = RETS; + CALL _invalidate_entire_icache; + RETS = [SP++]; + SP += 12; + + /* Disable the Interrupts*/ + + CLI R3; + +.LLOCK_WAY: + + /* Way0 - 0xFFA133E0 + * Way1 - 0xFFA137E0 + * Way2 - 0xFFA13BE0 Total Way Size = 4K + * Way3 - 0xFFA13FE0 + */ + + /* Procedure Ex. 
-Set the locks for other ways by setting ILOC[3:1] + * Only Way0 of the instruction cache can now be + * replaced by a new code + */ + + R5 = R7; + CC = BITTST(R7,0); + IF CC JUMP .LCLEAR1; + R7 = 0; + BITSET(R7,0); + JUMP .LDONE1; + +.LCLEAR1: + R7 = 0; + BITCLR(R7,0); +.LDONE1: R4 = R7 << 3; + R7 = [P1]; + R7 = R7 | R4; + SSYNC; /* SSYNC required writing to IMEM_CONTROL. */ + .align 8; + [P1] = R7; + SSYNC; + + R7 = R5; + CC = BITTST(R7,1); + IF CC JUMP .LCLEAR2; + R7 = 0; + BITSET(R7,1); + JUMP .LDONE2; + +.LCLEAR2: + R7 = 0; + BITCLR(R7,1); +.LDONE2: R4 = R7 << 3; + R7 = [P1]; + R7 = R7 | R4; + SSYNC; /* SSYNC required writing to IMEM_CONTROL. */ + .align 8; + [P1] = R7; + SSYNC; + + R7 = R5; + CC = BITTST(R7,2); + IF CC JUMP .LCLEAR3; + R7 = 0; + BITSET(R7,2); + JUMP .LDONE3; +.LCLEAR3: + R7 = 0; + BITCLR(R7,2); +.LDONE3: R4 = R7 << 3; + R7 = [P1]; + R7 = R7 | R4; + SSYNC; /* SSYNC required writing to IMEM_CONTROL. */ + .align 8; + [P1] = R7; + SSYNC; + + + R7 = R5; + CC = BITTST(R7,3); + IF CC JUMP .LCLEAR4; + R7 = 0; + BITSET(R7,3); + JUMP .LDONE4; +.LCLEAR4: + R7 = 0; + BITCLR(R7,3); +.LDONE4: R4 = R7 << 3; + R7 = [P1]; + R7 = R7 | R4; + SSYNC; /* SSYNC required writing to IMEM_CONTROL. */ + .align 8; + [P1] = R7; + SSYNC; + + STI R3; + + ( R7:0,P5:0 ) = [SP++]; + + RTS; + +/* After the execution of critical code, the code is now locked into + * the cache way. Now we need to set ILOC. + * + * R0 - Which way to be locked + */ + +ENTRY(_cache_lock) + + [--SP]=( R7:0,P5:0 ); + + P1.H = (IMEM_CONTROL >> 16); + P1.L = (IMEM_CONTROL & 0xFFFF); + + /* Disable the Interrupts*/ + CLI R3; + + R7 = [P1]; + R2 = 0xFFFFFF87 (X); + R7 = R7 & R2; + R0 = R0 << 3; + R7 = R0 | R7; + SSYNC; /* SSYNC required writing to IMEM_CONTROL. */ + .align 8; + [P1] = R7; + SSYNC; + /* Renable the Interrupts */ + STI R3; + + ( R7:0,P5:0 ) = [SP++]; + RTS; + +#endif /* BLKFIN_CACHE_LOCK */ + +/* Return the ILOC bits of IMEM_CONTROL + */ + +ENTRY(_read_iloc) + + P1.H = (IMEM_CONTROL >> 16); + P1.L = (IMEM_CONTROL & 0xFFFF); + R1 = 0xF; + R0 = [P1]; + R0 = R0 >> 3; + R0 = R0 & R1; + + RTS; diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c new file mode 100644 index 000000000000..deb27272c658 --- /dev/null +++ b/arch/blackfin/mach-common/pm.c @@ -0,0 +1,181 @@ +/* + * File: arch/blackfin/mach-common/pm.c + * Based on: arm/mach-omap/pm.c + * Author: Cliff Brake Copyright (c) 2001 + * + * Created: 2001 + * Description: Power management for the bfin + * + * Modified: Nicolas Pitre - PXA250 support + * Copyright (c) 2002 Monta Vista Software, Inc. + * David Singleton - OMAP1510 + * Copyright (c) 2002 Monta Vista Software, Inc. + * Dirk Behme - OMAP1510/1610 + * Copyright 2004 + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
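The three routines in lock.S are meant to be driven from C in a grab/execute/lock sequence: _cache_grab_lock locks the ways you do not want disturbed so that new fetches can only fill the target way, the critical code is then executed once so its lines land in that way, and _cache_lock finally sets ILOC for it so it is never replaced. A rough sketch of that calling pattern; the C prototypes, the way-mask argument and critical_code() are assumptions made for illustration, not declarations from this patch:

#include <linux/kernel.h>

/* Assumed C-level prototypes for the assembly entry points in lock.S. */
extern void cache_grab_lock(int way_mask);
extern void cache_lock(int way_mask);
extern int read_iloc(void);

extern void critical_code(void);	/* hypothetical routine to keep resident */

static void lock_critical_code(void)
{
	cache_grab_lock(0x1);	/* leave only way 0 open for new fills */
	critical_code();	/* run once so its lines are fetched into way 0 */
	cache_lock(0x1);	/* set ILOC for way 0 so it is never evicted */
	pr_debug("ILOC bits now 0x%x\n", read_iloc());
}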
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +#include +#include +#include + + +#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_H +#define WAKEUP_TYPE PM_WAKE_HIGH +#endif + +#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_L +#define WAKEUP_TYPE PM_WAKE_LOW +#endif + +#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_F +#define WAKEUP_TYPE PM_WAKE_FALLING +#endif + +#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_R +#define WAKEUP_TYPE PM_WAKE_RISING +#endif + +#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_B +#define WAKEUP_TYPE PM_WAKE_BOTH_EDGES +#endif + +void bfin_pm_suspend_standby_enter(void) +{ +#ifdef CONFIG_PM_WAKEUP_BY_GPIO + gpio_pm_wakeup_request(CONFIG_PM_WAKEUP_GPIO_NUMBER, WAKEUP_TYPE); +#endif + +#if defined(CONFIG_PM_WAKEUP_BY_GPIO) || defined(CONFIG_PM_WAKEUP_GPIO_API) + { + u32 flags; + + local_irq_save(flags); + + sleep_deeper(gpio_pm_setup()); /*Goto Sleep*/ + + gpio_pm_restore(); + + bfin_write_SIC_IWR(IWR_ENABLE_ALL); + + local_irq_restore(flags); + } +#endif + +#if defined(CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR) + sleep_deeper(CONFIG_PM_WAKEUP_SIC_IWR); + bfin_write_SIC_IWR(IWR_ENABLE_ALL); +#endif /* CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR */ +} + + +/* + * bfin_pm_prepare - Do preliminary suspend work. + * @state: suspend state we're entering. + * + */ +static int bfin_pm_prepare(suspend_state_t state) +{ + int error = 0; + + switch (state) { + case PM_SUSPEND_STANDBY: + break; + case PM_SUSPEND_MEM: + return -ENOTSUPP; + + case PM_SUSPEND_DISK: + return -ENOTSUPP; + + default: + return -EINVAL; + } + + return error; +} + +/* + * bfin_pm_enter - Actually enter a sleep state. + * @state: State we're entering. + * + */ +static int bfin_pm_enter(suspend_state_t state) +{ + switch (state) { + case PM_SUSPEND_STANDBY: + bfin_pm_suspend_standby_enter(); + break; + case PM_SUSPEND_MEM: + return -ENOTSUPP; + + case PM_SUSPEND_DISK: + return -ENOTSUPP; + + default: + return -EINVAL; + } + + return 0; +} + +/* + * bfin_pm_finish - Finish up suspend sequence. + * @state: State we're coming out of. + * + * This is called after we wake back up (or if entering the sleep state + * failed). + */ +static int bfin_pm_finish(suspend_state_t state) +{ + switch (state) { + case PM_SUSPEND_STANDBY: + break; + + case PM_SUSPEND_MEM: + return -ENOTSUPP; + + case PM_SUSPEND_DISK: + return -ENOTSUPP; + + default: + return -EINVAL; + } + + return 0; +} + +struct pm_ops bfin_pm_ops = { + .pm_disk_mode = PM_DISK_PLATFORM, + .prepare = bfin_pm_prepare, + .enter = bfin_pm_enter, + .finish = bfin_pm_finish, +}; + +static int __init bfin_pm_init(void) +{ + pm_set_ops(&bfin_pm_ops); + return 0; +} + +__initcall(bfin_pm_init); diff --git a/arch/blackfin/mm/Makefile b/arch/blackfin/mm/Makefile new file mode 100644 index 000000000000..2a7202ce01fd --- /dev/null +++ b/arch/blackfin/mm/Makefile @@ -0,0 +1,5 @@ +# +# arch/blackfin/mm/Makefile +# + +obj-y := blackfin_sram.o init.o diff --git a/arch/blackfin/mm/blackfin_sram.c b/arch/blackfin/mm/blackfin_sram.c new file mode 100644 index 000000000000..dd0c6501c424 --- /dev/null +++ b/arch/blackfin/mm/blackfin_sram.c @@ -0,0 +1,540 @@ +/* + * File: arch/blackfin/mm/blackfin_sram.c + * Based on: + * Author: + * + * Created: + * Description: SRAM driver for Blackfin ADSP-BF5xx + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "blackfin_sram.h" + +spinlock_t l1sram_lock, l1_data_sram_lock, l1_inst_sram_lock; + +#if CONFIG_L1_MAX_PIECE < 16 +#undef CONFIG_L1_MAX_PIECE +#define CONFIG_L1_MAX_PIECE 16 +#endif + +#if CONFIG_L1_MAX_PIECE > 1024 +#undef CONFIG_L1_MAX_PIECE +#define CONFIG_L1_MAX_PIECE 1024 +#endif + +#define SRAM_SLT_NULL 0 +#define SRAM_SLT_FREE 1 +#define SRAM_SLT_ALLOCATED 2 + +/* the data structure for L1 scratchpad and DATA SRAM */ +struct l1_sram_piece { + void *paddr; + int size; + int flag; +}; + +static struct l1_sram_piece l1_ssram[CONFIG_L1_MAX_PIECE]; + +#if L1_DATA_A_LENGTH != 0 +static struct l1_sram_piece l1_data_A_sram[CONFIG_L1_MAX_PIECE]; +#endif + +#if L1_DATA_B_LENGTH != 0 +static struct l1_sram_piece l1_data_B_sram[CONFIG_L1_MAX_PIECE]; +#endif + +#if L1_CODE_LENGTH != 0 +static struct l1_sram_piece l1_inst_sram[CONFIG_L1_MAX_PIECE]; +#endif + +/* L1 Scratchpad SRAM initialization function */ +void l1sram_init(void) +{ + printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n", + L1_SCRATCH_LENGTH >> 10); + + memset(&l1_ssram, 0x00, sizeof(l1_ssram)); + l1_ssram[0].paddr = (void*)L1_SCRATCH_START; + l1_ssram[0].size = L1_SCRATCH_LENGTH; + l1_ssram[0].flag = SRAM_SLT_FREE; + + /* mutex initialize */ + spin_lock_init(&l1sram_lock); +} + +void l1_data_sram_init(void) +{ +#if L1_DATA_A_LENGTH != 0 + printk(KERN_INFO "Blackfin DATA_A SRAM: %d KB\n", + L1_DATA_A_LENGTH >> 10); + + memset(&l1_data_A_sram, 0x00, sizeof(l1_data_A_sram)); + l1_data_A_sram[0].paddr = (void*)L1_DATA_A_START + + (_ebss_l1 - _sdata_l1); + l1_data_A_sram[0].size = L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1); + l1_data_A_sram[0].flag = SRAM_SLT_FREE; +#endif +#if L1_DATA_B_LENGTH != 0 + printk(KERN_INFO "Blackfin DATA_B SRAM: %d KB\n", + L1_DATA_B_LENGTH >> 10); + + memset(&l1_data_B_sram, 0x00, sizeof(l1_data_B_sram)); + l1_data_B_sram[0].paddr = (void*)L1_DATA_B_START; + l1_data_B_sram[0].size = L1_DATA_B_LENGTH; + l1_data_B_sram[0].flag = SRAM_SLT_FREE; +#endif + + /* mutex initialize */ + spin_lock_init(&l1_data_sram_lock); +} + +void l1_inst_sram_init(void) +{ +#if L1_CODE_LENGTH != 0 + printk(KERN_INFO "Blackfin Instruction SRAM: %d KB\n", + L1_CODE_LENGTH >> 10); + + memset(&l1_inst_sram, 0x00, sizeof(l1_inst_sram)); + l1_inst_sram[0].paddr = (void*)L1_CODE_START + (_etext_l1 - _stext_l1); + l1_inst_sram[0].size = L1_CODE_LENGTH - (_etext_l1 - _stext_l1); + l1_inst_sram[0].flag = SRAM_SLT_FREE; +#endif + + /* mutex initialize */ + spin_lock_init(&l1_inst_sram_lock); +} + +/* L1 memory allocate function */ +static void *_l1_sram_alloc(size_t size, struct 
l1_sram_piece *pfree, int count) +{ + int i, index = 0; + void *addr = NULL; + + if (size <= 0) + return NULL; + + /* Align the size */ + size = (size + 3) & ~3; + + /* not use the good method to match the best slot !!! */ + /* search an available memeory slot */ + for (i = 0; i < count; i++) { + if ((pfree[i].flag == SRAM_SLT_FREE) + && (pfree[i].size >= size)) { + addr = pfree[i].paddr; + pfree[i].flag = SRAM_SLT_ALLOCATED; + index = i; + break; + } + } + if (i >= count) + return NULL; + + /* updated the NULL memeory slot !!! */ + if (pfree[i].size > size) { + for (i = 0; i < count; i++) { + if (pfree[i].flag == SRAM_SLT_NULL) { + pfree[i].flag = SRAM_SLT_FREE; + pfree[i].paddr = addr + size; + pfree[i].size = pfree[index].size - size; + pfree[index].size = size; + break; + } + } + } + + return addr; +} + +/* Allocate the largest available block. */ +static void *_l1_sram_alloc_max(struct l1_sram_piece *pfree, int count, + unsigned long *psize) +{ + unsigned long best = 0; + int i, index = -1; + void *addr = NULL; + + /* search an available memeory slot */ + for (i = 0; i < count; i++) { + if (pfree[i].flag == SRAM_SLT_FREE && pfree[i].size > best) { + addr = pfree[i].paddr; + index = i; + best = pfree[i].size; + } + } + if (index < 0) + return NULL; + *psize = best; + + pfree[index].flag = SRAM_SLT_ALLOCATED; + return addr; +} + +/* L1 memory free function */ +static int _l1_sram_free(const void *addr, + struct l1_sram_piece *pfree, int count) +{ + int i, index = 0; + + /* search the relevant memory slot */ + for (i = 0; i < count; i++) { + if (pfree[i].paddr == addr) { + if (pfree[i].flag != SRAM_SLT_ALLOCATED) { + /* error log */ + return -1; + } + index = i; + break; + } + } + if (i >= count) + return -1; + + pfree[index].flag = SRAM_SLT_FREE; + + /* link the next address slot */ + for (i = 0; i < count; i++) { + if (((pfree[index].paddr + pfree[index].size) == pfree[i].paddr) + && (pfree[i].flag == SRAM_SLT_FREE)) { + pfree[i].flag = SRAM_SLT_NULL; + pfree[index].size += pfree[i].size; + pfree[index].flag = SRAM_SLT_FREE; + break; + } + } + + /* link the last address slot */ + for (i = 0; i < count; i++) { + if (((pfree[i].paddr + pfree[i].size) == pfree[index].paddr) && + (pfree[i].flag == SRAM_SLT_FREE)) { + pfree[index].flag = SRAM_SLT_NULL; + pfree[i].size += pfree[index].size; + break; + } + } + + return 0; +} + +int sram_free(const void *addr) +{ + if (0) {} +#if L1_CODE_LENGTH != 0 + else if (addr >= (void *)L1_CODE_START + && addr < (void *)(L1_CODE_START + L1_CODE_LENGTH)) + return l1_inst_sram_free(addr); +#endif +#if L1_DATA_A_LENGTH != 0 + else if (addr >= (void *)L1_DATA_A_START + && addr < (void *)(L1_DATA_A_START + L1_DATA_A_LENGTH)) + return l1_data_A_sram_free(addr); +#endif +#if L1_DATA_B_LENGTH != 0 + else if (addr >= (void *)L1_DATA_B_START + && addr < (void *)(L1_DATA_B_START + L1_DATA_B_LENGTH)) + return l1_data_B_sram_free(addr); +#endif + else + return -1; +} +EXPORT_SYMBOL(sram_free); + +void *l1_data_A_sram_alloc(size_t size) +{ + unsigned flags; + void *addr = NULL; + + /* add mutex operation */ + spin_lock_irqsave(&l1_data_sram_lock, flags); + +#if L1_DATA_A_LENGTH != 0 + addr = _l1_sram_alloc(size, l1_data_A_sram, ARRAY_SIZE(l1_data_A_sram)); +#endif + + /* add mutex operation */ + spin_unlock_irqrestore(&l1_data_sram_lock, flags); + + pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n", + (long unsigned int)addr, size); + + return addr; +} +EXPORT_SYMBOL(l1_data_A_sram_alloc); + +int l1_data_A_sram_free(const void *addr) +{ + unsigned 
flags; + int ret; + + /* add mutex operation */ + spin_lock_irqsave(&l1_data_sram_lock, flags); + +#if L1_DATA_A_LENGTH != 0 + ret = _l1_sram_free(addr, + l1_data_A_sram, ARRAY_SIZE(l1_data_A_sram)); +#else + ret = -1; +#endif + + /* add mutex operation */ + spin_unlock_irqrestore(&l1_data_sram_lock, flags); + + return ret; +} +EXPORT_SYMBOL(l1_data_A_sram_free); + +void *l1_data_B_sram_alloc(size_t size) +{ +#if L1_DATA_B_LENGTH != 0 + unsigned flags; + void *addr; + + /* add mutex operation */ + spin_lock_irqsave(&l1_data_sram_lock, flags); + + addr = _l1_sram_alloc(size, l1_data_B_sram, ARRAY_SIZE(l1_data_B_sram)); + + /* add mutex operation */ + spin_unlock_irqrestore(&l1_data_sram_lock, flags); + + pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n", + (long unsigned int)addr, size); + + return addr; +#else + return NULL; +#endif +} +EXPORT_SYMBOL(l1_data_B_sram_alloc); + +int l1_data_B_sram_free(const void *addr) +{ +#if L1_DATA_B_LENGTH != 0 + unsigned flags; + int ret; + + /* add mutex operation */ + spin_lock_irqsave(&l1_data_sram_lock, flags); + + ret = _l1_sram_free(addr, l1_data_B_sram, ARRAY_SIZE(l1_data_B_sram)); + + /* add mutex operation */ + spin_unlock_irqrestore(&l1_data_sram_lock, flags); + + return ret; +#else + return -1; +#endif +} +EXPORT_SYMBOL(l1_data_B_sram_free); + +void *l1_data_sram_alloc(size_t size) +{ + void *addr = l1_data_A_sram_alloc(size); + + if (!addr) + addr = l1_data_B_sram_alloc(size); + + return addr; +} +EXPORT_SYMBOL(l1_data_sram_alloc); + +void *l1_data_sram_zalloc(size_t size) +{ + void *addr = l1_data_sram_alloc(size); + + if (addr) + memset(addr, 0x00, size); + + return addr; +} +EXPORT_SYMBOL(l1_data_sram_zalloc); + +int l1_data_sram_free(const void *addr) +{ + int ret; + ret = l1_data_A_sram_free(addr); + if (ret == -1) + ret = l1_data_B_sram_free(addr); + return ret; +} +EXPORT_SYMBOL(l1_data_sram_free); + +void *l1_inst_sram_alloc(size_t size) +{ +#if L1_DATA_A_LENGTH != 0 + unsigned flags; + void *addr; + + /* add mutex operation */ + spin_lock_irqsave(&l1_inst_sram_lock, flags); + + addr = _l1_sram_alloc(size, l1_inst_sram, ARRAY_SIZE(l1_inst_sram)); + + /* add mutex operation */ + spin_unlock_irqrestore(&l1_inst_sram_lock, flags); + + pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n", + (long unsigned int)addr, size); + + return addr; +#else + return NULL; +#endif +} +EXPORT_SYMBOL(l1_inst_sram_alloc); + +int l1_inst_sram_free(const void *addr) +{ +#if L1_CODE_LENGTH != 0 + unsigned flags; + int ret; + + /* add mutex operation */ + spin_lock_irqsave(&l1_inst_sram_lock, flags); + + ret = _l1_sram_free(addr, l1_inst_sram, ARRAY_SIZE(l1_inst_sram)); + + /* add mutex operation */ + spin_unlock_irqrestore(&l1_inst_sram_lock, flags); + + return ret; +#else + return -1; +#endif +} +EXPORT_SYMBOL(l1_inst_sram_free); + +/* L1 Scratchpad memory allocate function */ +void *l1sram_alloc(size_t size) +{ + unsigned flags; + void *addr; + + /* add mutex operation */ + spin_lock_irqsave(&l1sram_lock, flags); + + addr = _l1_sram_alloc(size, l1_ssram, ARRAY_SIZE(l1_ssram)); + + /* add mutex operation */ + spin_unlock_irqrestore(&l1sram_lock, flags); + + return addr; +} + +/* L1 Scratchpad memory allocate function */ +void *l1sram_alloc_max(size_t *psize) +{ + unsigned flags; + void *addr; + + /* add mutex operation */ + spin_lock_irqsave(&l1sram_lock, flags); + + addr = _l1_sram_alloc_max(l1_ssram, ARRAY_SIZE(l1_ssram), psize); + + /* add mutex operation */ + spin_unlock_irqrestore(&l1sram_lock, flags); + + return 
addr; +} + +/* L1 Scratchpad memory free function */ +int l1sram_free(const void *addr) +{ + unsigned flags; + int ret; + + /* add mutex operation */ + spin_lock_irqsave(&l1sram_lock, flags); + + ret = _l1_sram_free(addr, l1_ssram, ARRAY_SIZE(l1_ssram)); + + /* add mutex operation */ + spin_unlock_irqrestore(&l1sram_lock, flags); + + return ret; +} + +int sram_free_with_lsl(const void *addr) +{ + struct sram_list_struct *lsl, **tmp; + struct mm_struct *mm = current->mm; + + for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next) + if ((*tmp)->addr == addr) + goto found; + return -1; +found: + lsl = *tmp; + sram_free(addr); + *tmp = lsl->next; + kfree(lsl); + + return 0; +} +EXPORT_SYMBOL(sram_free_with_lsl); + +void *sram_alloc_with_lsl(size_t size, unsigned long flags) +{ + void *addr = NULL; + struct sram_list_struct *lsl = NULL; + struct mm_struct *mm = current->mm; + + lsl = kmalloc(sizeof(struct sram_list_struct), GFP_KERNEL); + if (!lsl) + return NULL; + memset(lsl, 0, sizeof(*lsl)); + + if (flags & L1_INST_SRAM) + addr = l1_inst_sram_alloc(size); + + if (addr == NULL && (flags & L1_DATA_A_SRAM)) + addr = l1_data_A_sram_alloc(size); + + if (addr == NULL && (flags & L1_DATA_B_SRAM)) + addr = l1_data_B_sram_alloc(size); + + if (addr == NULL) { + kfree(lsl); + return NULL; + } + lsl->addr = addr; + lsl->length = size; + lsl->next = mm->context.sram_list; + mm->context.sram_list = lsl; + return addr; +} +EXPORT_SYMBOL(sram_alloc_with_lsl); diff --git a/arch/blackfin/mm/blackfin_sram.h b/arch/blackfin/mm/blackfin_sram.h new file mode 100644 index 000000000000..0fb73b78dd60 --- /dev/null +++ b/arch/blackfin/mm/blackfin_sram.h @@ -0,0 +1,38 @@ +/* + * File: arch/blackfin/mm/blackfin_sram.h + * Based on: arch/blackfin/mm/blackfin_sram.c + * Author: Mike Frysinger + * + * Created: Aug 2006 + * Description: Local prototypes meant for internal use only + * + * Modified: + * Copyright 2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __BLACKFIN_SRAM_H__ +#define __BLACKFIN_SRAM_H__ + +extern void l1sram_init(void); +extern void l1_inst_sram_init(void); +extern void l1_data_sram_init(void); +extern void *l1sram_alloc(size_t); + +#endif diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c new file mode 100644 index 000000000000..73f72abed432 --- /dev/null +++ b/arch/blackfin/mm/init.c @@ -0,0 +1,208 @@ +/* + * File: arch/blackfin/mm/init.c + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include "blackfin_sram.h" + +/* + * BAD_PAGE is the page that is used for page faults when linux + * is out-of-memory. Older versions of linux just did a + * do_exit(), but using this instead means there is less risk + * for a process dying in kernel mode, possibly leaving a inode + * unused etc.. + * + * BAD_PAGETABLE is the accompanying page-table: it is initialized + * to point to BAD_PAGE entries. + * + * ZERO_PAGE is a special page that is used for zero-initialized + * data and COW. + */ +static unsigned long empty_bad_page_table; + +static unsigned long empty_bad_page; + +unsigned long empty_zero_page; + +void show_mem(void) +{ + unsigned long i; + int free = 0, total = 0, reserved = 0, shared = 0; + + int cached = 0; + printk(KERN_INFO "Mem-info:\n"); + show_free_areas(); + i = max_mapnr; + while (i-- > 0) { + total++; + if (PageReserved(mem_map + i)) + reserved++; + else if (PageSwapCache(mem_map + i)) + cached++; + else if (!page_count(mem_map + i)) + free++; + else + shared += page_count(mem_map + i) - 1; + } + printk(KERN_INFO "%d pages of RAM\n", total); + printk(KERN_INFO "%d free pages\n", free); + printk(KERN_INFO "%d reserved pages\n", reserved); + printk(KERN_INFO "%d pages shared\n", shared); + printk(KERN_INFO "%d pages swap cached\n", cached); +} + +/* + * paging_init() continues the virtual memory environment setup which + * was begun by the code in arch/head.S. + * The parameters are pointers to where to stick the starting and ending + * addresses of available kernel virtual memory. 
+ */ +void paging_init(void) +{ + /* + * make sure start_mem is page aligned, otherwise bootmem and + * page_alloc get different views og the world + */ + unsigned long end_mem = memory_end & PAGE_MASK; + + pr_debug("start_mem is %#lx virtual_end is %#lx\n", PAGE_ALIGN(memory_start), end_mem); + + /* + * initialize the bad page table and bad page to point + * to a couple of allocated pages + */ + empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); + empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); + empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); + memset((void *)empty_zero_page, 0, PAGE_SIZE); + + /* + * Set up SFC/DFC registers (user data space) + */ + set_fs(KERNEL_DS); + + pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n", + PAGE_ALIGN(memory_start), end_mem); + + { + unsigned long zones_size[MAX_NR_ZONES] = { 0, }; + + zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT; +#ifdef CONFIG_HIGHMEM + zones_size[ZONE_HIGHMEM] = 0; +#endif + free_area_init(zones_size); + } +} + +void mem_init(void) +{ + unsigned int codek = 0, datak = 0, initk = 0; + unsigned long tmp; + unsigned int len = _ramend - _rambase; + unsigned long start_mem = memory_start; + unsigned long end_mem = memory_end; + + end_mem &= PAGE_MASK; + high_memory = (void *)end_mem; + + start_mem = PAGE_ALIGN(start_mem); + max_mapnr = num_physpages = MAP_NR(high_memory); + printk(KERN_INFO "Physical pages: %lx\n", num_physpages); + + /* This will put all memory onto the freelists. */ + totalram_pages = free_all_bootmem(); + + codek = (_etext - _stext) >> 10; + datak = (__bss_stop - __bss_start) >> 10; + initk = (__init_end - __init_begin) >> 10; + + tmp = nr_free_pages() << PAGE_SHIFT; + printk(KERN_INFO + "Memory available: %luk/%uk RAM, (%uk init code, %uk kernel code, %uk data, %uk dma)\n", + tmp >> 10, len >> 10, initk, codek, datak, DMA_UNCACHED_REGION >> 10); + + /* Initialize the blackfin L1 Memory. */ + l1sram_init(); + l1_data_sram_init(); + l1_inst_sram_init(); + + /* Allocate this once; never free it. We assume this gives us a + pointer to the start of L1 scratchpad memory; panic if it + doesn't. */ + tmp = (unsigned long)l1sram_alloc(sizeof(struct l1_scratch_task_info)); + if (tmp != (unsigned long)L1_SCRATCH_TASK_INFO) { + printk(KERN_EMERG "mem_init(): Did not get the right address from l1sram_alloc: %08lx != %08lx\n", + tmp, (unsigned long)L1_SCRATCH_TASK_INFO); + panic("No L1, time to give up\n"); + } +} + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + int pages = 0; + for (; start < end; start += PAGE_SIZE) { + ClearPageReserved(virt_to_page(start)); + init_page_count(virt_to_page(start)); + free_page(start); + totalram_pages++; + pages++; + } + printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages); +} +#endif + +void free_initmem(void) +{ +#ifdef CONFIG_RAMKERNEL + unsigned long addr; +/* + * the following code should be cool even if these sections + * are not page aligned. 
+ */ + addr = PAGE_ALIGN((unsigned long)(__init_begin)); + /* next to check that the page we free is not a partial page */ + for (; addr + PAGE_SIZE < (unsigned long)(__init_end); + addr += PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + init_page_count(virt_to_page(addr)); + free_page(addr); + totalram_pages++; + } + printk(KERN_NOTICE + "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n", + (addr - PAGE_ALIGN((long)__init_begin)) >> 10, + (int)(PAGE_ALIGN((unsigned long)(__init_begin))), + (int)(addr - PAGE_SIZE)); +#endif +} diff --git a/arch/blackfin/oprofile/Kconfig b/arch/blackfin/oprofile/Kconfig new file mode 100644 index 000000000000..0a2fd999c941 --- /dev/null +++ b/arch/blackfin/oprofile/Kconfig @@ -0,0 +1,29 @@ +menu "Profiling support" +depends on EXPERIMENTAL + +config PROFILING + bool "Profiling support (EXPERIMENTAL)" + help + Say Y here to enable the extended profiling support mechanisms used + by profilers such as OProfile. + +config OPROFILE + tristate "OProfile system profiling (EXPERIMENTAL)" + depends on PROFILING + help + OProfile is a profiling system capable of profiling the + whole system, include the kernel, kernel modules, libraries, + and applications. + + If unsure, say N. + +config HARDWARE_PM + tristate "Hardware Performance Monitor Profiling" + depends on PROFILING + help + take use of hardware performance monitor to profiling the kernel + and application. + + If unsure, say N. + +endmenu diff --git a/arch/blackfin/oprofile/Makefile b/arch/blackfin/oprofile/Makefile new file mode 100644 index 000000000000..634e300d67e2 --- /dev/null +++ b/arch/blackfin/oprofile/Makefile @@ -0,0 +1,14 @@ +# +# arch/blackfin/oprofile/Makefile +# + +obj-$(CONFIG_OPROFILE) += oprofile.o + +DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \ + oprof.o cpu_buffer.o buffer_sync.o \ + event_buffer.o oprofile_files.o \ + oprofilefs.o oprofile_stats.o \ + timer_int.o ) + +oprofile-y := $(DRIVER_OBJS) common.o +oprofile-$(CONFIG_HARDWARE_PM) += op_model_bf533.o diff --git a/arch/blackfin/oprofile/common.c b/arch/blackfin/oprofile/common.c new file mode 100644 index 000000000000..009a1700c854 --- /dev/null +++ b/arch/blackfin/oprofile/common.c @@ -0,0 +1,168 @@ +/* + * File: arch/blackfin/oprofile/common.c + * Based on: arch/alpha/oprofile/common.c + * Author: Anton Blanchard + * + * Created: + * Description: + * + * Modified: + * Copyright (C) 2004 Anton Blanchard , IBM + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "op_blackfin.h" + +#define BFIN_533_ID 0xE5040003 +#define BFIN_537_ID 0xE5040002 + +static int pfmon_enabled; +static struct mutex pfmon_lock; + +struct op_bfin533_model *model; + +struct op_counter_config ctr[OP_MAX_COUNTER]; + +static int op_bfin_setup(void) +{ + int ret; + + /* Pre-compute the values to stuff in the hardware registers. */ + spin_lock(&oprofilefs_lock); + ret = model->reg_setup(ctr); + spin_unlock(&oprofilefs_lock); + + return ret; +} + +static void op_bfin_shutdown(void) +{ +#if 0 + /* what is the difference between shutdown and stop? */ +#endif +} + +static int op_bfin_start(void) +{ + int ret = -EBUSY; + + printk(KERN_INFO "KSDBG:in %s\n", __FUNCTION__); + mutex_lock(&pfmon_lock); + if (!pfmon_enabled) { + ret = model->start(ctr); + pfmon_enabled = !ret; + } + mutex_unlock(&pfmon_lock); + + return ret; +} + +static void op_bfin_stop(void) +{ + mutex_lock(&pfmon_lock); + if (pfmon_enabled) { + model->stop(); + pfmon_enabled = 0; + } + mutex_unlock(&pfmon_lock); +} + +static int op_bfin_create_files(struct super_block *sb, struct dentry *root) +{ + int i; + + for (i = 0; i < model->num_counters; ++i) { + struct dentry *dir; + char buf[3]; + printk(KERN_INFO "Oprofile: creating files... \n"); + + snprintf(buf, sizeof buf, "%d", i); + dir = oprofilefs_mkdir(sb, root, buf); + + oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); + oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); + oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); + /* + * We dont support per counter user/kernel selection, but + * we leave the entries because userspace expects them + */ + oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); + oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); + oprofilefs_create_ulong(sb, dir, "unit_mask", + &ctr[i].unit_mask); + } + + return 0; +} +int __init oprofile_arch_init(struct oprofile_operations *ops) +{ +#ifdef CONFIG_HARDWARE_PM + unsigned int dspid; + + mutex_init(&pfmon_lock); + + dspid = bfin_read_DSPID(); + + printk(KERN_INFO "Oprofile got the cpu id is 0x%x. \n", dspid); + + switch (dspid) { + case BFIN_533_ID: + model = &op_model_bfin533; + model->num_counters = 2; + break; + case BFIN_537_ID: + model = &op_model_bfin533; + model->num_counters = 2; + break; + default: + return -ENODEV; + } + + ops->cpu_type = model->name; + ops->create_files = op_bfin_create_files; + ops->setup = op_bfin_setup; + ops->shutdown = op_bfin_shutdown; + ops->start = op_bfin_start; + ops->stop = op_bfin_stop; + + printk(KERN_INFO "oprofile: using %s performance monitoring.\n", + ops->cpu_type); + + return 0; +#else + return -1; +#endif +} + +void oprofile_arch_exit(void) +{ +} diff --git a/arch/blackfin/oprofile/op_blackfin.h b/arch/blackfin/oprofile/op_blackfin.h new file mode 100644 index 000000000000..f88f446c814f --- /dev/null +++ b/arch/blackfin/oprofile/op_blackfin.h @@ -0,0 +1,98 @@ +/* + * File: arch/blackfin/oprofile/op_blackfin.h + * Based on: + * Author: Anton Blanchard + * + * Created: + * Description: + * + * Modified: + * Copyright (C) 2004 Anton Blanchard , IBM + * Copyright 2004-2006 Analog Devices Inc. 
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef OP_BLACKFIN_H +#define OP_BLACKFIN_H 1 + +#define OP_MAX_COUNTER 2 + +#include + +/* Per-counter configuration as set via oprofilefs. */ +struct op_counter_config { + unsigned long valid; + unsigned long enabled; + unsigned long event; + unsigned long count; + unsigned long kernel; + unsigned long user; + unsigned long unit_mask; +}; + +/* System-wide configuration as set via oprofilefs. */ +struct op_system_config { + unsigned long enable_kernel; + unsigned long enable_user; +}; + +/* Per-arch configuration */ +struct op_bfin533_model { + int (*reg_setup) (struct op_counter_config *); + int (*start) (struct op_counter_config *); + void (*stop) (void); + int num_counters; + char *name; +}; + +extern struct op_bfin533_model op_model_bfin533; + +static inline unsigned int ctr_read(void) +{ + unsigned int tmp; + + tmp = bfin_read_PFCTL(); + __builtin_bfin_csync(); + + return tmp; +} + +static inline void ctr_write(unsigned int val) +{ + bfin_write_PFCTL(val); + __builtin_bfin_csync(); +} + +static inline void count_read(unsigned int *count) +{ + count[0] = bfin_read_PFCNTR0(); + count[1] = bfin_read_PFCNTR1(); + __builtin_bfin_csync(); +} + +static inline void count_write(unsigned int *count) +{ + bfin_write_PFCNTR0(count[0]); + bfin_write_PFCNTR1(count[1]); + __builtin_bfin_csync(); +} + +extern int pm_overflow_handler(int irq, struct pt_regs *regs); + +#endif diff --git a/arch/blackfin/oprofile/op_model_bf533.c b/arch/blackfin/oprofile/op_model_bf533.c new file mode 100644 index 000000000000..b7a20a006b49 --- /dev/null +++ b/arch/blackfin/oprofile/op_model_bf533.c @@ -0,0 +1,161 @@ +/* + * File: arch/blackfin/oprofile/op_model_bf533.c + * Based on: + * Author: Anton Blanchard + * + * Created: + * Description: + * + * Modified: + * Copyright (C) 2004 Anton Blanchard , IBM + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "op_blackfin.h" + +#define PM_ENABLE 0x01; +#define PM_CTL1_ENABLE 0x18 +#define PM_CTL0_ENABLE 0xC000 +#define COUNT_EDGE_ONLY 0x3000000 + +static int oprofile_running; + +static unsigned curr_pfctl, curr_count[2]; + +static int bfin533_reg_setup(struct op_counter_config *ctr) +{ + unsigned int pfctl = ctr_read(); + unsigned int count[2]; + + /* set Blackfin perf monitor regs with ctr */ + if (ctr[0].enabled) { + pfctl |= (PM_CTL0_ENABLE | ((char)ctr[0].event << 5)); + count[0] = 0xFFFFFFFF - ctr[0].count; + curr_count[0] = count[0]; + } + if (ctr[1].enabled) { + pfctl |= (PM_CTL1_ENABLE | ((char)ctr[1].event << 16)); + count[1] = 0xFFFFFFFF - ctr[1].count; + curr_count[1] = count[1]; + } + + pr_debug("ctr[0].enabled=%d,ctr[1].enabled=%d,ctr[0].event<<5=0x%x,ctr[1].event<<16=0x%x\n", ctr[0].enabled, ctr[1].enabled, ctr[0].event << 5, ctr[1].event << 16); + pfctl |= COUNT_EDGE_ONLY; + curr_pfctl = pfctl; + + pr_debug("write 0x%x to pfctl\n", pfctl); + ctr_write(pfctl); + count_write(count); + + return 0; +} + +static int bfin533_start(struct op_counter_config *ctr) +{ + unsigned int pfctl = ctr_read(); + + pfctl |= PM_ENABLE; + curr_pfctl = pfctl; + + ctr_write(pfctl); + + oprofile_running = 1; + pr_debug("start oprofile counter \n"); + + return 0; +} + +static void bfin533_stop(void) +{ + int pfctl; + + pfctl = ctr_read(); + pfctl &= ~PM_ENABLE; + /* freeze counters */ + ctr_write(pfctl); + + oprofile_running = 0; + pr_debug("stop oprofile counter \n"); +} + +static int get_kernel(void) +{ + int ipend, is_kernel; + + ipend = bfin_read_IPEND(); + + /* test bit 15 */ + is_kernel = ((ipend & 0x8000) != 0); + + return is_kernel; +} + +int pm_overflow_handler(int irq, struct pt_regs *regs) +{ + int is_kernel; + int i, cpu; + unsigned int pc, pfctl; + unsigned int count[2]; + + pr_debug("get interrupt in %s\n", __FUNCTION__); + if (oprofile_running == 0) { + pr_debug("error: entering interrupt when oprofile is stopped.\n\r"); + return -1; + } + + is_kernel = get_kernel(); + cpu = smp_processor_id(); + pc = regs->pc; + pfctl = ctr_read(); + + /* read the two event counter regs */ + count_read(count); + + /* if the counter overflows, add sample to oprofile buffer */ + for (i = 0; i < 2; ++i) { + if (oprofile_running) { + oprofile_add_sample(regs, i); + } + } + + /* reset the perfmon counter */ + ctr_write(curr_pfctl); + count_write(curr_count); + return 0; +} + +struct op_bfin533_model op_model_bfin533 = { + .reg_setup = bfin533_reg_setup, + .start = bfin533_start, + .stop = bfin533_stop, + .num_counters = 2, + .name = "blackfin/bf533" +}; diff --git a/arch/blackfin/oprofile/timer_int.c b/arch/blackfin/oprofile/timer_int.c new file mode 100644 index 000000000000..8fba16c846c9 --- /dev/null +++ b/arch/blackfin/oprofile/timer_int.c @@ -0,0 +1,74 @@ +/* + * File: arch/blackfin/oprofile/timer_int.c + * Based on: + * Author: Michael Kang + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
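The preload arithmetic in bfin533_reg_setup() is what turns the user-visible sample count into an overflow interrupt: each enabled counter is seeded with 0xFFFFFFFF - count, so roughly count events later it wraps, pm_overflow_handler() adds a sample for the interrupted PC, and the saved curr_count value is written back to rearm it. A small worked illustration (the numbers are chosen only as an example):

/* oprofilefs: counter 0 configured for a sample every 100000 events
 *   ctr[0].count = 100000                      (0x000186A0)
 *   preload      = 0xFFFFFFFF - 0x000186A0     = 0xFFFE795F
 * PFCNTR0 starts at 0xFFFE795F, overflows after roughly 100000
 * counted events, pm_overflow_handler() records one sample and
 * writes 0xFFFE795F back so the next sample period starts over.
 */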
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include + +#include + +static void enable_sys_timer0() +{ +} +static void disable_sys_timer0() +{ +} + +static irqreturn_t sys_timer0_int_handler(int irq, void *dev_id, + struct pt_regs *regs) +{ + oprofile_add_sample(regs, 0); + return IRQ_HANDLED; +} + +static int sys_timer0_start(void) +{ + enable_sys_timer0(); + return request_irq(IVG11, sys_timer0_int_handler, 0, "sys_timer0", NULL); +} + +static void sys_timer0_stop(void) +{ + disable_sys_timer(); +} + +int __init sys_timer0_init(struct oprofile_operations *ops) +{ + extern int nmi_active; + + if (nmi_active <= 0) + return -ENODEV; + + ops->start = timer_start; + ops->stop = timer_stop; + ops->cpu_type = "timer"; + printk(KERN_INFO "oprofile: using NMI timer interrupt.\n"); + return 0; +} diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index f3d3d81eb7e9..74c64409ddbc 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt @@ -26,7 +26,7 @@ config BINFMT_ELF config BINFMT_ELF_FDPIC bool "Kernel support for FDPIC ELF binaries" default y - depends on FRV + depends on (FRV || BLACKFIN) help ELF FDPIC binaries are based on ELF, but allow the individual load segments of a binary to be located in memory independently of each diff --git a/include/asm-blackfin/Kbuild b/include/asm-blackfin/Kbuild new file mode 100644 index 000000000000..c68e1680da01 --- /dev/null +++ b/include/asm-blackfin/Kbuild @@ -0,0 +1 @@ +include include/asm-generic/Kbuild.asm diff --git a/include/asm-blackfin/a.out.h b/include/asm-blackfin/a.out.h new file mode 100644 index 000000000000..d37a6849bf74 --- /dev/null +++ b/include/asm-blackfin/a.out.h @@ -0,0 +1,25 @@ +#ifndef __BFIN_A_OUT_H__ +#define __BFIN_A_OUT_H__ + +struct exec { + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ + unsigned a_text; /* length of text, in bytes */ + unsigned a_data; /* length of data, in bytes */ + unsigned a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned a_syms; /* length of symbol table data in file, in bytes */ + unsigned a_entry; /* start address */ + unsigned a_trsize; /* length of relocation info for text, in bytes */ + unsigned a_drsize; /* length of relocation info for data, in bytes */ +}; + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#ifdef __KERNEL__ + +#define STACK_TOP TASK_SIZE + +#endif + +#endif /* __BFIN_A_OUT_H__ */ diff --git a/include/asm-blackfin/atomic.h b/include/asm-blackfin/atomic.h new file mode 100644 index 000000000000..7cf508718605 --- /dev/null +++ b/include/asm-blackfin/atomic.h @@ -0,0 +1,144 @@ +#ifndef __ARCH_BLACKFIN_ATOMIC__ +#define __ARCH_BLACKFIN_ATOMIC__ + +#include /* local_irq_XXX() */ + +/* + * 
Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + * + * Generally we do not concern about SMP BFIN systems, so we don't have + * to deal with that. + * + * Tony Kou (tonyko@lineo.ca) Lineo Inc. 2001 + */ + +typedef struct { + int counter; +} atomic_t; +#define ATOMIC_INIT(i) { (i) } + +#define atomic_read(v) ((v)->counter) +#define atomic_set(v, i) (((v)->counter) = i) + +static __inline__ void atomic_add(int i, atomic_t * v) +{ + long flags; + + local_irq_save(flags); + v->counter += i; + local_irq_restore(flags); +} + +static __inline__ void atomic_sub(int i, atomic_t * v) +{ + long flags; + + local_irq_save(flags); + v->counter -= i; + local_irq_restore(flags); + +} + +static inline int atomic_add_return(int i, atomic_t * v) +{ + int __temp = 0; + long flags; + + local_irq_save(flags); + v->counter += i; + __temp = v->counter; + local_irq_restore(flags); + + + return __temp; +} + +#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) +static inline int atomic_sub_return(int i, atomic_t * v) +{ + int __temp = 0; + long flags; + + local_irq_save(flags); + v->counter -= i; + __temp = v->counter; + local_irq_restore(flags); + + return __temp; +} + +static __inline__ void atomic_inc(volatile atomic_t * v) +{ + long flags; + + local_irq_save(flags); + v->counter++; + local_irq_restore(flags); +} + +#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + +static __inline__ void atomic_dec(volatile atomic_t * v) +{ + long flags; + + local_irq_save(flags); + v->counter--; + local_irq_restore(flags); +} + +static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v) +{ + long flags; + + local_irq_save(flags); + v->counter &= ~mask; + local_irq_restore(flags); +} + +static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v) +{ + long flags; + + local_irq_save(flags); + v->counter |= mask; + local_irq_restore(flags); +} + +/* Atomic operations are already serializing */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#define atomic_dec_return(v) atomic_sub_return(1,(v)) +#define atomic_inc_return(v) atomic_add_return(1,(v)) + +/* + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. 
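Because this port only targets uniprocessor Blackfin parts, the atomic helpers simply bracket plain read-modify-write sequences with local_irq_save()/local_irq_restore(); disabling interrupts on the single core is enough to make them atomic. Callers use the standard atomic_t interface, for example a reference count (a hypothetical sketch, not taken from this patch):

#include <linux/kernel.h>
#include <asm/atomic.h>

static atomic_t example_users = ATOMIC_INIT(0);

static void example_get(void)
{
	atomic_inc(&example_users);
}

static void example_put(void)
{
	/* atomic_dec_and_test() returns true for the final reference */
	if (atomic_dec_and_test(&example_users))
		pr_debug("last user gone, safe to tear down\n");
}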
+ */ +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) + +#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) +#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) + +#include + +#endif /* __ARCH_BLACKFIN_ATOMIC __ */ diff --git a/include/asm-blackfin/auxvec.h b/include/asm-blackfin/auxvec.h new file mode 100644 index 000000000000..215506cd87b7 --- /dev/null +++ b/include/asm-blackfin/auxvec.h @@ -0,0 +1,4 @@ +#ifndef __ASMBFIN_AUXVEC_H +#define __ASMBFIN_AUXVEC_H + +#endif diff --git a/include/asm-blackfin/bf5xx_timers.h b/include/asm-blackfin/bf5xx_timers.h new file mode 100644 index 000000000000..86c770321b61 --- /dev/null +++ b/include/asm-blackfin/bf5xx_timers.h @@ -0,0 +1,209 @@ +/* + * include/asm/bf5xx_timers.h + * + * This file contains the major Data structures and constants + * used for General Purpose Timer Implementation in BF5xx + * + * Copyright (C) 2005 John DeHority + * Copyright (C) 2006 Hella Aglaia GmbH (awe@aglaia-gmbh.de) + * + */ + +#ifndef _BLACKFIN_TIMERS_H_ +#define _BLACKFIN_TIMERS_H_ + +#undef MAX_BLACKFIN_GPTIMERS +/* + * BF537: 8 timers: + */ +#if defined(CONFIG_BF537) +# define MAX_BLACKFIN_GPTIMERS 8 +# define TIMER0_GROUP_REG TIMER_ENABLE +#endif +/* + * BF561: 12 timers: + */ +#if defined(CONFIG_BF561) +# define MAX_BLACKFIN_GPTIMERS 12 +# define TIMER0_GROUP_REG TMRS8_ENABLE +# define TIMER8_GROUP_REG TMRS4_ENABLE +#endif +/* + * All others: 3 timers: + */ +#if !defined(MAX_BLACKFIN_GPTIMERS) +# define MAX_BLACKFIN_GPTIMERS 3 +# define TIMER0_GROUP_REG TIMER_ENABLE +#endif + +#define BLACKFIN_GPTIMER_IDMASK ((1UL << MAX_BLACKFIN_GPTIMERS) - 1) +#define BFIN_TIMER_OCTET(x) ((x) >> 3) + +/* used in masks for timer_enable() and timer_disable() */ +#define TIMER0bit 0x0001 /* 0001b */ +#define TIMER1bit 0x0002 /* 0010b */ +#define TIMER2bit 0x0004 /* 0100b */ + +#if (MAX_BLACKFIN_GPTIMERS > 3) +# define TIMER3bit 0x0008 +# define TIMER4bit 0x0010 +# define TIMER5bit 0x0020 +# define TIMER6bit 0x0040 +# define TIMER7bit 0x0080 +#endif + +#if (MAX_BLACKFIN_GPTIMERS > 8) +# define TIMER8bit 0x0100 +# define TIMER9bit 0x0200 +# define TIMER10bit 0x0400 +# define TIMER11bit 0x0800 +#endif + +#define TIMER0_id 0 +#define TIMER1_id 1 +#define TIMER2_id 2 + +#if (MAX_BLACKFIN_GPTIMERS > 3) +# define TIMER3_id 3 +# define TIMER4_id 4 +# define TIMER5_id 5 +# define TIMER6_id 6 +# define TIMER7_id 7 +#endif + +#if (MAX_BLACKFIN_GPTIMERS > 8) +# define TIMER8_id 8 +# define TIMER9_id 9 +# define TIMER10_id 10 +# define TIMER11_id 11 +#endif + +/* associated timers for ppi framesync: */ + +#if defined(CONFIG_BF561) +# define FS0_1_TIMER_ID TIMER8_id +# define FS0_2_TIMER_ID TIMER9_id +# define FS1_1_TIMER_ID TIMER10_id +# define FS1_2_TIMER_ID TIMER11_id +# define FS0_1_TIMER_BIT TIMER8bit +# define FS0_2_TIMER_BIT TIMER9bit +# define FS1_1_TIMER_BIT TIMER10bit +# define FS1_2_TIMER_BIT TIMER11bit +# undef FS1_TIMER_ID +# undef FS2_TIMER_ID +# undef FS1_TIMER_BIT +# undef FS2_TIMER_BIT +#else +# define FS1_TIMER_ID TIMER0_id +# define FS2_TIMER_ID TIMER1_id +# define FS1_TIMER_BIT TIMER0bit +# define FS2_TIMER_BIT TIMER1bit +#endif + +/* +** Timer Configuration Register Bits +*/ +#define TIMER_ERR 0xC000 +#define TIMER_ERR_OVFL 0x4000 +#define TIMER_ERR_PROG_PER 0x8000 +#define TIMER_ERR_PROG_PW 0xC000 +#define TIMER_EMU_RUN 0x0200 +#define TIMER_TOGGLE_HI 0x0100 +#define TIMER_CLK_SEL 0x0080 +#define TIMER_OUT_DIS 0x0040 +#define TIMER_TIN_SEL 0x0020 +#define TIMER_IRQ_ENA 0x0010 +#define TIMER_PERIOD_CNT 0x0008 +#define 
TIMER_PULSE_HI 0x0004 +#define TIMER_MODE 0x0003 +#define TIMER_MODE_PWM 0x0001 +#define TIMER_MODE_WDTH 0x0002 +#define TIMER_MODE_EXT_CLK 0x0003 + +/* +** Timer Status Register Bits +*/ +#define TIMER_STATUS_TIMIL0 0x0001 +#define TIMER_STATUS_TIMIL1 0x0002 +#define TIMER_STATUS_TIMIL2 0x0004 +#if (MAX_BLACKFIN_GPTIMERS > 3) +# define TIMER_STATUS_TIMIL3 0x00000008 +# define TIMER_STATUS_TIMIL4 0x00010000 +# define TIMER_STATUS_TIMIL5 0x00020000 +# define TIMER_STATUS_TIMIL6 0x00040000 +# define TIMER_STATUS_TIMIL7 0x00080000 +# if (MAX_BLACKFIN_GPTIMERS > 8) +# define TIMER_STATUS_TIMIL8 0x0001 +# define TIMER_STATUS_TIMIL9 0x0002 +# define TIMER_STATUS_TIMIL10 0x0004 +# define TIMER_STATUS_TIMIL11 0x0008 +# endif +# define TIMER_STATUS_INTR 0x000F000F +#else +# define TIMER_STATUS_INTR 0x0007 /* any timer interrupt */ +#endif + +#define TIMER_STATUS_TOVF0 0x0010 /* timer 0 overflow error */ +#define TIMER_STATUS_TOVF1 0x0020 +#define TIMER_STATUS_TOVF2 0x0040 +#if (MAX_BLACKFIN_GPTIMERS > 3) +# define TIMER_STATUS_TOVF3 0x00000080 +# define TIMER_STATUS_TOVF4 0x00100000 +# define TIMER_STATUS_TOVF5 0x00200000 +# define TIMER_STATUS_TOVF6 0x00400000 +# define TIMER_STATUS_TOVF7 0x00800000 +# if (MAX_BLACKFIN_GPTIMERS > 8) +# define TIMER_STATUS_TOVF8 0x0010 +# define TIMER_STATUS_TOVF9 0x0020 +# define TIMER_STATUS_TOVF10 0x0040 +# define TIMER_STATUS_TOVF11 0x0080 +# endif +# define TIMER_STATUS_OFLOW 0x00F000F0 +#else +# define TIMER_STATUS_OFLOW 0x0070 /* any timer overflow */ +#endif + +/* +** Timer Slave Enable Status : write 1 to clear +*/ +#define TIMER_STATUS_TRUN0 0x1000 +#define TIMER_STATUS_TRUN1 0x2000 +#define TIMER_STATUS_TRUN2 0x4000 +#if (MAX_BLACKFIN_GPTIMERS > 3) +# define TIMER_STATUS_TRUN3 0x00008000 +# define TIMER_STATUS_TRUN4 0x10000000 +# define TIMER_STATUS_TRUN5 0x20000000 +# define TIMER_STATUS_TRUN6 0x40000000 +# define TIMER_STATUS_TRUN7 0x80000000 +# define TIMER_STATUS_TRUN 0xF000F000 +# if (MAX_BLACKFIN_GPTIMERS > 8) +# define TIMER_STATUS_TRUN8 0x1000 +# define TIMER_STATUS_TRUN9 0x2000 +# define TIMER_STATUS_TRUN10 0x4000 +# define TIMER_STATUS_TRUN11 0x8000 +# endif +#else +# define TIMER_STATUS_TRUN 0x7000 +#endif + +/******************************************************************************* +* GP_TIMER API's +*******************************************************************************/ + +void set_gptimer_pwidth (int timer_id, int width); +int get_gptimer_pwidth (int timer_id); +void set_gptimer_period (int timer_id, int period); +int get_gptimer_period (int timer_id); +int get_gptimer_count (int timer_id); +short get_gptimer_intr (int timer_id); +void set_gptimer_config (int timer_id, short config); +short get_gptimer_config (int timer_id); +void set_gptimer_pulse_hi (int timer_id); +void clear_gptimer_pulse_hi(int timer_id); +void enable_gptimers (short mask); +void disable_gptimers (short mask); +short get_enabled_timers (void); +int get_gptimer_status (int octet); +void set_gptimer_status (int octet, int value); + +#endif diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h new file mode 100644 index 000000000000..e37f81609fc3 --- /dev/null +++ b/include/asm-blackfin/bfin-global.h @@ -0,0 +1,120 @@ +/* + * File: include/asm-blackfin/bfin-global.h + * Based on: + * Author: * + * Created: + * Description: Global extern defines for blackfin + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
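A sketch of driving one general-purpose timer as a PWM output with the API declared in bf5xx_timers.h above; using TIMER0 and a 50% duty cycle are arbitrary illustration choices, and the period is assumed to be counted in SCLK cycles.

#include <asm/bf5xx_timers.h>

/* Illustrative helper: run TIMER0 as a free-running PWM output. */
static void example_pwm_start(int period_cycles)
{
	set_gptimer_config(TIMER0_id, TIMER_MODE_PWM	/* PWM_OUT mode */
				    | TIMER_PERIOD_CNT	/* count the full period */
				    | TIMER_PULSE_HI);	/* active-high pulse */
	set_gptimer_period(TIMER0_id, period_cycles);
	set_gptimer_pwidth(TIMER0_id, period_cycles / 2);	/* ~50% duty */
	enable_gptimers(TIMER0bit);	/* enable/disable take a bit mask */
}

static void example_pwm_stop(void)
{
	disable_gptimers(TIMER0bit);
}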
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _BFIN_GLOBAL_H_ +#define _BFIN_GLOBAL_H_ + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +#if defined(CONFIG_DMA_UNCACHED_2M) +# define DMA_UNCACHED_REGION (2 * 1024 * 1024) +#elif defined(CONFIG_DMA_UNCACHED_1M) +# define DMA_UNCACHED_REGION (1024 * 1024) +#else +# define DMA_UNCACHED_REGION (0) +#endif + +extern unsigned long get_cclk(void); +extern unsigned long get_sclk(void); + +extern void dump_thread(struct pt_regs *regs, struct user *dump); +extern void dump_bfin_regs(struct pt_regs *fp, void *retaddr); +extern void dump_bfin_trace_buffer(void); + +extern int init_arch_irq(void); +extern void bfin_reset(void); +extern void _cplb_hdr(void); +/* Blackfin cache functions */ +extern void bfin_icache_init(void); +extern void bfin_dcache_init(void); +extern int read_iloc(void); +extern int bfin_console_init(void); +extern asmlinkage void lower_to_irq14(void); +extern void init_dma(void); +extern void program_IAR(void); +extern void evt14_softirq(void); +extern asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs); +extern void bfin_gpio_interrupt_setup(int irq, int irq_pfx, int type); + +extern void *l1_data_A_sram_alloc(size_t); +extern void *l1_data_B_sram_alloc(size_t); +extern void *l1_inst_sram_alloc(size_t); +extern void *l1_data_sram_alloc(size_t); +extern void *l1_data_sram_zalloc(size_t); +extern int l1_data_A_sram_free(const void*); +extern int l1_data_B_sram_free(const void*); +extern int l1_inst_sram_free(const void*); +extern int l1_data_sram_free(const void*); +extern int sram_free(const void*); + +#define L1_INST_SRAM 0x00000001 +#define L1_DATA_A_SRAM 0x00000002 +#define L1_DATA_B_SRAM 0x00000004 +#define L1_DATA_SRAM 0x00000006 +extern void *sram_alloc_with_lsl(size_t, unsigned long); +extern int sram_free_with_lsl(const void*); + +extern void led_on(int); +extern void led_off(int); +extern void led_toggle(int); +extern void led_disp_num(int); +extern void led_toggle_num(int); +extern void init_leds(void); + +extern char *bfin_board_name __attribute__ ((weak)); +extern unsigned long wall_jiffies; +extern unsigned long ipdt_table[]; +extern unsigned long dpdt_table[]; +extern unsigned long icplb_table[]; +extern unsigned long dcplb_table[]; + +extern unsigned long ipdt_swapcount_table[]; +extern unsigned long dpdt_swapcount_table[]; + +extern unsigned long table_start, table_end; + +extern struct file_operations dpmc_fops; +extern char _start; +extern unsigned long _ramstart, _ramend, _rambase; +extern unsigned long memory_start, memory_end, physical_mem_end; +extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], + _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _ebss_b_l1[]; + +#ifdef 
CONFIG_MTD_UCLINUX +extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; +#endif + +#endif + +#endif /* _BLACKFIN_H_ */ diff --git a/include/asm-blackfin/bfin5xx_spi.h b/include/asm-blackfin/bfin5xx_spi.h new file mode 100644 index 000000000000..95c1c952e7c1 --- /dev/null +++ b/include/asm-blackfin/bfin5xx_spi.h @@ -0,0 +1,170 @@ +/************************************************************ +* +* Copyright (C) 2004, Analog Devices. All Rights Reserved +* +* FILE bfin5xx_spi.h +* PROGRAMMER(S): Luke Yang (Analog Devices Inc.) +* +* +* DATE OF CREATION: March. 10th 2006 +* +* SYNOPSIS: +* +* DESCRIPTION: header file for SPI controller driver for Blackfin5xx. +************************************************************** + +* MODIFICATION HISTORY: +* March 10, 2006 bfin5xx_spi.h Created. (Luke Yang) + +************************************************************/ + +#ifndef _SPI_CHANNEL_H_ +#define _SPI_CHANNEL_H_ + +#define SPI0_REGBASE 0xffc00500 + +#define SPI_READ 0 +#define SPI_WRITE 1 + +#define SPI_CTRL_OFF 0x0 +#define SPI_FLAG_OFF 0x4 +#define SPI_STAT_OFF 0x8 +#define SPI_TXBUFF_OFF 0xc +#define SPI_RXBUFF_OFF 0x10 +#define SPI_BAUD_OFF 0x14 +#define SPI_SHAW_OFF 0x18 + +#define CMD_SPI_OUT_ENABLE 1 +#define CMD_SPI_SET_BAUDRATE 2 +#define CMD_SPI_SET_POLAR 3 +#define CMD_SPI_SET_PHASE 4 +#define CMD_SPI_SET_MASTER 5 +#define CMD_SPI_SET_SENDOPT 6 +#define CMD_SPI_SET_RECVOPT 7 +#define CMD_SPI_SET_ORDER 8 +#define CMD_SPI_SET_LENGTH16 9 +#define CMD_SPI_GET_STAT 11 +#define CMD_SPI_GET_CFG 12 +#define CMD_SPI_SET_CSAVAIL 13 +#define CMD_SPI_SET_CSHIGH 14 /* CS unavail */ +#define CMD_SPI_SET_CSLOW 15 /* CS avail */ +#define CMD_SPI_MISO_ENABLE 16 +#define CMD_SPI_SET_CSENABLE 17 +#define CMD_SPI_SET_CSDISABLE 18 + +#define CMD_SPI_SET_TRIGGER_MODE 19 +#define CMD_SPI_SET_TRIGGER_SENSE 20 +#define CMD_SPI_SET_TRIGGER_EDGE 21 +#define CMD_SPI_SET_TRIGGER_LEVEL 22 + +#define CMD_SPI_SET_TIME_SPS 23 +#define CMD_SPI_SET_TIME_SAMPLES 24 +#define CMD_SPI_GET_SYSTEMCLOCK 25 + +#define CMD_SPI_SET_WRITECONTINUOUS 26 +#define CMD_SPI_SET_SKFS 27 + +#define CMD_SPI_GET_ALLCONFIG 32 /* For debug */ + +#define SPI_DEFAULT_BARD 0x0100 + +#define SPI0_IRQ_NUM IRQ_SPI +#define SPI_ERR_TRIG -1 + +#define BIT_CTL_ENABLE 0x4000 +#define BIT_CTL_OPENDRAIN 0x2000 +#define BIT_CTL_MASTER 0x1000 +#define BIT_CTL_POLAR 0x0800 +#define BIT_CTL_PHASE 0x0400 +#define BIT_CTL_BITORDER 0x0200 +#define BIT_CTL_WORDSIZE 0x0100 +#define BIT_CTL_MISOENABLE 0x0020 +#define BIT_CTL_RXMOD 0x0000 +#define BIT_CTL_TXMOD 0x0001 +#define BIT_CTL_TIMOD_DMA_TX 0x0003 +#define BIT_CTL_TIMOD_DMA_RX 0x0002 +#define BIT_CTL_SENDOPT 0x0004 +#define BIT_CTL_TIMOD 0x0003 + +#define BIT_STAT_SPIF 0x0001 +#define BIT_STAT_MODF 0x0002 +#define BIT_STAT_TXE 0x0004 +#define BIT_STAT_TXS 0x0008 +#define BIT_STAT_RBSY 0x0010 +#define BIT_STAT_RXS 0x0020 +#define BIT_STAT_TXCOL 0x0040 +#define BIT_STAT_CLR 0xFFFF + +#define BIT_STU_SENDOVER 0x0001 +#define BIT_STU_RECVFULL 0x0020 + +#define CFG_SPI_ENABLE 1 +#define CFG_SPI_DISABLE 0 + +#define CFG_SPI_OUTENABLE 1 +#define CFG_SPI_OUTDISABLE 0 + +#define CFG_SPI_ACTLOW 1 +#define CFG_SPI_ACTHIGH 0 + +#define CFG_SPI_PHASESTART 1 +#define CFG_SPI_PHASEMID 0 + +#define CFG_SPI_MASTER 1 +#define CFG_SPI_SLAVE 0 + +#define CFG_SPI_SENELAST 0 +#define CFG_SPI_SENDZERO 1 + +#define CFG_SPI_RCVFLUSH 1 +#define CFG_SPI_RCVDISCARD 0 + +#define CFG_SPI_LSBFIRST 1 +#define CFG_SPI_MSBFIRST 0 + +#define CFG_SPI_WORDSIZE16 1 +#define CFG_SPI_WORDSIZE8 0 + +#define CFG_SPI_MISOENABLE 1 
+#define CFG_SPI_MISODISABLE 0 + +#define CFG_SPI_READ 0x00 +#define CFG_SPI_WRITE 0x01 +#define CFG_SPI_DMAREAD 0x02 +#define CFG_SPI_DMAWRITE 0x03 + +#define CFG_SPI_CSCLEARALL 0 +#define CFG_SPI_CHIPSEL1 1 +#define CFG_SPI_CHIPSEL2 2 +#define CFG_SPI_CHIPSEL3 3 +#define CFG_SPI_CHIPSEL4 4 +#define CFG_SPI_CHIPSEL5 5 +#define CFG_SPI_CHIPSEL6 6 +#define CFG_SPI_CHIPSEL7 7 + +#define CFG_SPI_CS1VALUE 1 +#define CFG_SPI_CS2VALUE 2 +#define CFG_SPI_CS3VALUE 3 +#define CFG_SPI_CS4VALUE 4 +#define CFG_SPI_CS5VALUE 5 +#define CFG_SPI_CS6VALUE 6 +#define CFG_SPI_CS7VALUE 7 + +/* device.platform_data for SSP controller devices */ +struct bfin5xx_spi_master { + u16 num_chipselect; + u8 enable_dma; +}; + +/* spi_board_info.controller_data for SPI slave devices, + * copied to spi_device.platform_data ... mostly for dma tuning + */ +struct bfin5xx_spi_chip { + u16 ctl_reg; + u8 enable_dma; + u8 bits_per_word; + u8 cs_change_per_word; + u8 cs_chg_udelay; +}; + +#endif /* _SPI_CHANNEL_H_ */ diff --git a/include/asm-blackfin/bfin_simple_timer.h b/include/asm-blackfin/bfin_simple_timer.h new file mode 100644 index 000000000000..fccbb595464a --- /dev/null +++ b/include/asm-blackfin/bfin_simple_timer.h @@ -0,0 +1,13 @@ +#ifndef _bfin_simple_timer_h_ +#define _bfin_simple_timer_h_ + +#include + +#define BFIN_SIMPLE_TIMER_IOCTL_MAGIC 't' + +#define BFIN_SIMPLE_TIMER_SET_PERIOD _IO (BFIN_SIMPLE_TIMER_IOCTL_MAGIC, 2) +#define BFIN_SIMPLE_TIMER_START _IO (BFIN_SIMPLE_TIMER_IOCTL_MAGIC, 6) +#define BFIN_SIMPLE_TIMER_STOP _IO (BFIN_SIMPLE_TIMER_IOCTL_MAGIC, 8) +#define BFIN_SIMPLE_TIMER_READ _IO (BFIN_SIMPLE_TIMER_IOCTL_MAGIC, 10) + +#endif diff --git a/include/asm-blackfin/bfin_sport.h b/include/asm-blackfin/bfin_sport.h new file mode 100644 index 000000000000..c76ed8def302 --- /dev/null +++ b/include/asm-blackfin/bfin_sport.h @@ -0,0 +1,175 @@ +/* + * File: include/asm-blackfin/bfin_sport.h + * Based on: + * Author: Roy Huang (roy.huang@analog.com) + * + * Created: Thu Aug. 24 2006 + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
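A board-file sketch for the SPI platform data declared in bfin5xx_spi.h above; the slave name, clock rate, bus and chip-select numbers are made-up example values, and registration through spi_register_board_info() comes from the generic SPI core, not from this header.

#include <linux/spi/spi.h>
#include <asm/bfin5xx_spi.h>

/* Controller-wide settings, passed as device.platform_data. */
static struct bfin5xx_spi_master example_spi_master_info = {
	.num_chipselect = 8,
	.enable_dma     = 1,
};

/* Per-slave tuning, attached via spi_board_info.controller_data. */
static struct bfin5xx_spi_chip example_flash_chip_info = {
	.enable_dma    = 0,
	.bits_per_word = 8,
};

static struct spi_board_info example_spi_board_info[] = {
	{
		.modalias        = "m25p80",	/* example slave driver name */
		.max_speed_hz    = 20000000,
		.bus_num         = 0,
		.chip_select     = 1,
		.controller_data = &example_flash_chip_info,
	},
};

/* Typically called from the board's init code:
 * spi_register_board_info(example_spi_board_info,
 *                         ARRAY_SIZE(example_spi_board_info));
 */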
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __BFIN_SPORT_H__ +#define __BFIN_SPORT_H__ + +#define SPORT_MAJOR 237 +#define SPORT_NR_DEVS 2 + +/* Sport mode: it can be set to TDM, i2s or others */ +#define NORM_MODE 0x0 +#define TDM_MODE 0x1 +#define I2S_MODE 0x2 + +/* Data format, normal, a-law or u-law */ +#define NORM_FORMAT 0x0 +#define ALAW_FORMAT 0x2 +#define ULAW_FORMAT 0x3 +struct sport_register; + +/* Function driver which use sport must initialize the structure */ +struct sport_config { + /*TDM (multichannels), I2S or other mode */ + unsigned int mode:3; + + /* if TDM mode is selected, channels must be set */ + int channels; /* Must be in 8 units */ + unsigned int frame_delay:4; /* Delay between frame sync pulse and first bit */ + + /* I2S mode */ + unsigned int right_first:1; /* Right stereo channel first */ + + /* In mormal mode, the following item need to be set */ + unsigned int lsb_first:1; /* order of transmit or receive data */ + unsigned int fsync:1; /* Frame sync required */ + unsigned int data_indep:1; /* data independent frame sync generated */ + unsigned int act_low:1; /* Active low TFS */ + unsigned int late_fsync:1; /* Late frame sync */ + unsigned int tckfe:1; + unsigned int sec_en:1; /* Secondary side enabled */ + + /* Choose clock source */ + unsigned int int_clk:1; /* Internal or external clock */ + + /* If external clock is used, the following fields are ignored */ + int serial_clk; + int fsync_clk; + + unsigned int data_format:2; /*Normal, u-law or a-law */ + + int word_len; /* How length of the word in bits, 3-32 bits */ + int dma_enabled; +}; + +struct sport_register { + unsigned short tcr1; + unsigned short reserved0; + unsigned short tcr2; + unsigned short reserved1; + unsigned short tclkdiv; + unsigned short reserved2; + unsigned short tfsdiv; + unsigned short reserved3; + unsigned long tx; + unsigned long reserved_l0; + unsigned long rx; + unsigned long reserved_l1; + unsigned short rcr1; + unsigned short reserved4; + unsigned short rcr2; + unsigned short reserved5; + unsigned short rclkdiv; + unsigned short reserved6; + unsigned short rfsdiv; + unsigned short reserved7; + unsigned short stat; + unsigned short reserved8; + unsigned short chnl; + unsigned short reserved9; + unsigned short mcmc1; + unsigned short reserved10; + unsigned short mcmc2; + unsigned short reserved11; + unsigned long mtcs0; + unsigned long mtcs1; + unsigned long mtcs2; + unsigned long mtcs3; + unsigned long mrcs0; + unsigned long mrcs1; + unsigned long mrcs2; + unsigned long mrcs3; +}; + +#define SPORT_IOC_MAGIC 'P' +#define SPORT_IOC_CONFIG _IOWR('P', 0x01, struct sport_config) + +/* Test purpose */ +#define ENABLE_AD73311 _IOWR('P', 0x02, int) + +struct sport_dev { + struct cdev cdev; /* Char device structure */ + + int sport_num; + + int dma_rx_chan; + int dma_tx_chan; + + int rx_irq; + unsigned char *rx_buf; /* Buffer store the received data */ + int rx_len; /* How many bytes will be received */ + int rx_received; /* How many bytes has been received */ + + int tx_irq; + const unsigned char *tx_buf; + int tx_len; + int tx_sent; + + int sport_err_irq; + + struct mutex mutex; /* mutual exclusion semaphore */ + struct task_struct *task; + + wait_queue_head_t waitq; + int wait_con; + struct sport_register *regs; + struct sport_config config; +}; + +#define SPORT_TCR1 0 
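A user-space sketch of handing the sport_config structure above to the SPORT_IOC_CONFIG ioctl for I2S operation; the /dev/sport0 node name and the clock figures are assumptions for illustration only.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/bfin_sport.h>

int example_sport_open_i2s(void)
{
	struct sport_config cfg = {
		.mode        = I2S_MODE,
		.word_len    = 16,		/* 16-bit samples */
		.right_first = 0,		/* left channel first */
		.int_clk     = 1,		/* SPORT generates the clocks */
		.serial_clk  = 1411200,		/* example bit clock (Hz) */
		.fsync_clk   = 44100,		/* example frame sync (Hz) */
		.data_format = NORM_FORMAT,
		.dma_enabled = 1,
	};
	int fd = open("/dev/sport0", O_RDWR);	/* node name is an assumption */

	if (fd < 0)
		return -1;
	if (ioctl(fd, SPORT_IOC_CONFIG, &cfg) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* ready for read()/write() of sample data */
}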
+#define SPORT_TCR2 1 +#define SPORT_TCLKDIV 2 +#define SPORT_TFSDIV 3 +#define SPORT_RCR1 8 +#define SPORT_RCR2 9 +#define SPORT_RCLKDIV 10 +#define SPORT_RFSDIV 11 +#define SPORT_CHANNEL 13 +#define SPORT_MCMC1 14 +#define SPORT_MCMC2 15 +#define SPORT_MTCS0 16 +#define SPORT_MTCS1 17 +#define SPORT_MTCS2 18 +#define SPORT_MTCS3 19 +#define SPORT_MRCS0 20 +#define SPORT_MRCS1 21 +#define SPORT_MRCS2 22 +#define SPORT_MRCS3 23 + +#endif /*__BFIN_SPORT_H__*/ diff --git a/include/asm-blackfin/bitops.h b/include/asm-blackfin/bitops.h new file mode 100644 index 000000000000..27c2d0e48e1b --- /dev/null +++ b/include/asm-blackfin/bitops.h @@ -0,0 +1,213 @@ +#ifndef _BLACKFIN_BITOPS_H +#define _BLACKFIN_BITOPS_H + +/* + * Copyright 1992, Linus Torvalds. + */ + +#include +#include /* swab32 */ +#include /* save_flags */ + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +static __inline__ void set_bit(int nr, volatile unsigned long *addr) +{ + int *a = (int *)addr; + int mask; + unsigned long flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + *a |= mask; + local_irq_restore(flags); +} + +static __inline__ void __set_bit(int nr, volatile unsigned long *addr) +{ + int *a = (int *)addr; + int mask; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + *a |= mask; +} + +/* + * clear_bit() doesn't provide any barrier for the compiler. + */ +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +static __inline__ void clear_bit(int nr, volatile unsigned long *addr) +{ + int *a = (int *)addr; + int mask; + unsigned long flags; + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + *a &= ~mask; + local_irq_restore(flags); +} + +static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) +{ + int *a = (int *)addr; + int mask; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + *a &= ~mask; +} + +static __inline__ void change_bit(int nr, volatile unsigned long *addr) +{ + int mask, flags; + unsigned long *ADDR = (unsigned long *)addr; + + ADDR += nr >> 5; + mask = 1 << (nr & 31); + local_irq_save(flags); + *ADDR ^= mask; + local_irq_restore(flags); +} + +static __inline__ void __change_bit(int nr, volatile unsigned long *addr) +{ + int mask; + unsigned long *ADDR = (unsigned long *)addr; + + ADDR += nr >> 5; + mask = 1 << (nr & 31); + *ADDR ^= mask; +} + +static __inline__ int test_and_set_bit(int nr, void *addr) +{ + int mask, retval; + volatile unsigned int *a = (volatile unsigned int *)addr; + unsigned long flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + retval = (mask & *a) != 0; + *a |= mask; + local_irq_restore(flags); + + return retval; +} + +static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr) +{ + int mask, retval; + volatile unsigned int *a = (volatile unsigned int *)addr; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + retval = (mask & *a) != 0; + *a |= mask; + return retval; +} + +static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + int mask, retval; + volatile unsigned int *a = (volatile unsigned int *)addr; + unsigned long flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + retval = (mask & *a) != 0; + *a &= ~mask; + local_irq_restore(flags); + + return retval; +} + +static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + int mask, retval; + volatile unsigned int *a = (volatile unsigned int *)addr; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + retval = 
(mask & *a) != 0; + *a &= ~mask; + return retval; +} + +static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr) +{ + int mask, retval; + volatile unsigned int *a = (volatile unsigned int *)addr; + unsigned long flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + retval = (mask & *a) != 0; + *a ^= mask; + local_irq_restore(flags); + return retval; +} + +static __inline__ int __test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + int mask, retval; + volatile unsigned int *a = (volatile unsigned int *)addr; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + retval = (mask & *a) != 0; + *a ^= mask; + return retval; +} + +/* + * This routine doesn't need to be atomic. + */ +static __inline__ int __constant_test_bit(int nr, const void *addr) +{ + return ((1UL << (nr & 31)) & + (((const volatile unsigned int *)addr)[nr >> 5])) != 0; +} + +static __inline__ int __test_bit(int nr, const void *addr) +{ + int *a = (int *)addr; + int mask; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + return ((mask & *a) != 0); +} + +#define test_bit(nr,addr) \ +(__builtin_constant_p(nr) ? \ + __constant_test_bit((nr),(addr)) : \ + __test_bit((nr),(addr))) + +#include +#include + +#include +#include + +#include + +#endif /* __KERNEL__ */ + +#include +#include + +#endif /* _BLACKFIN_BITOPS_H */ diff --git a/include/asm-blackfin/blackfin.h b/include/asm-blackfin/blackfin.h new file mode 100644 index 000000000000..14e58de73973 --- /dev/null +++ b/include/asm-blackfin/blackfin.h @@ -0,0 +1,81 @@ +/* + * Common header file for blackfin family of processors. + * + */ + +#ifndef _BLACKFIN_H_ +#define _BLACKFIN_H_ + +#include +#include +#include + +#ifndef __ASSEMBLY__ + +/* SSYNC implementation for C file */ +#if defined(ANOMALY_05000312) && defined(ANOMALY_05000244) +static inline void SSYNC (void) +{ + int _tmp; + __asm__ __volatile__ ("cli %0;\n\t" + "nop;nop;\n\t" + "ssync;\n\t" + "sti %0;\n\t" + :"=d"(_tmp):); +} +#elif defined(ANOMALY_05000312) && !defined(ANOMALY_05000244) +static inline void SSYNC (void) +{ + int _tmp; + __asm__ __volatile__ ("cli %0;\n\t" + "ssync;\n\t" + "sti %0;\n\t" + :"=d"(_tmp):); +} +#elif !defined(ANOMALY_05000312) && defined(ANOMALY_05000244) +static inline void SSYNC (void) +{ + __builtin_bfin_ssync(); +} +#elif !defined(ANOMALY_05000312) && !defined(ANOMALY_05000244) +static inline void SSYNC (void) +{ + __asm__ __volatile__ ("ssync;\n\t"); +} +#endif + +/* CSYNC implementation for C file */ +#if defined(ANOMALY_05000312) && defined(ANOMALY_05000244) +static inline void CSYNC (void) +{ + int _tmp; + __asm__ __volatile__ ("cli %0;\n\t" + "nop;nop;\n\t" + "csync;\n\t" + "sti %0;\n\t" + :"=d"(_tmp):); +} +#elif defined(ANOMALY_05000312) && !defined(ANOMALY_05000244) +static inline void CSYNC (void) +{ + int _tmp; + __asm__ __volatile__ ("cli %0;\n\t" + "csync;\n\t" + "sti %0;\n\t" + :"=d"(_tmp):); +} +#elif !defined(ANOMALY_05000312) && defined(ANOMALY_05000244) +static inline void CSYNC (void) +{ + __builtin_bfin_csync(); +} +#elif !defined(ANOMALY_05000312) && !defined(ANOMALY_05000244) +static inline void CSYNC (void) +{ + __asm__ __volatile__ ("csync;\n\t"); +} +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* _BLACKFIN_H_ */ diff --git a/include/asm-blackfin/bug.h b/include/asm-blackfin/bug.h new file mode 100644 index 000000000000..41e53b29f167 --- /dev/null +++ b/include/asm-blackfin/bug.h @@ -0,0 +1,4 @@ +#ifndef _BLACKFIN_BUG_H +#define _BLACKFIN_BUG_H +#include +#endif diff --git a/include/asm-blackfin/bugs.h 
b/include/asm-blackfin/bugs.h new file mode 100644 index 000000000000..9093c9c1fb81 --- /dev/null +++ b/include/asm-blackfin/bugs.h @@ -0,0 +1,16 @@ +/* + * include/asm-blackfin/bugs.h + * + * Copyright (C) 1994 Linus Torvalds + */ + +/* + * This is included by init/main.c to check for architecture-dependent bugs. + * + * Needs: + * void check_bugs(void); + */ + +static void check_bugs(void) +{ +} diff --git a/include/asm-blackfin/byteorder.h b/include/asm-blackfin/byteorder.h new file mode 100644 index 000000000000..6a673d42da18 --- /dev/null +++ b/include/asm-blackfin/byteorder.h @@ -0,0 +1,48 @@ +#ifndef _BLACKFIN_BYTEORDER_H +#define _BLACKFIN_BYTEORDER_H + +#include +#include + +#ifdef __GNUC__ + +static __inline__ __attribute_const__ __u32 ___arch__swahb32(__u32 xx) +{ + __u32 tmp; + __asm__("%1 = %0 >> 8 (V);\n\t" + "%0 = %0 << 8 (V);\n\t" + "%0 = %0 | %1;\n\t" + : "+d"(xx), "=&d"(tmp)); + return xx; +} + +static __inline__ __attribute_const__ __u32 ___arch__swahw32(__u32 xx) +{ + __u32 rv; + __asm__("%0 = PACK(%1.L, %1.H);\n\t": "=d"(rv): "d"(xx)); + return rv; +} + +#define __arch__swahb32(x) ___arch__swahb32(x) +#define __arch__swahw32(x) ___arch__swahw32(x) +#define __arch__swab32(x) ___arch__swahb32(___arch__swahw32(x)) + +static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 xx) +{ + __u32 xw = xx; + __asm__("%0 <<= 8;\n %0.L = %0.L + %0.H (NS);\n": "+d"(xw)); + return (__u16)xw; +} + +#define __arch__swab16(x) ___arch__swab16(x) + +#endif + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) +# define __BYTEORDER_HAS_U64__ +# define __SWAB_64_THRU_32__ +#endif + +#include + +#endif /* _BLACKFIN_BYTEORDER_H */ diff --git a/include/asm-blackfin/cache.h b/include/asm-blackfin/cache.h new file mode 100644 index 000000000000..023d72133b5a --- /dev/null +++ b/include/asm-blackfin/cache.h @@ -0,0 +1,29 @@ +/* + * include/asm-blackfin/cache.h + */ +#ifndef __ARCH_BLACKFIN_CACHE_H +#define __ARCH_BLACKFIN_CACHE_H + +/* + * Bytes per L1 cache line + * Blackfin loads 32 bytes for cache + */ +#define L1_CACHE_SHIFT 5 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define SMP_CACHE_BYTES L1_CACHE_BYTES + +/* + * Put cacheline_aliged data to L1 data memory + */ +#ifdef CONFIG_CACHELINE_ALIGNED_L1 +#define __cacheline_aligned \ + __attribute__((__aligned__(L1_CACHE_BYTES), \ + __section__(".data_l1.cacheline_aligned"))) +#endif + +/* + * largest L1 which this arch supports + */ +#define L1_CACHE_SHIFT_MAX 5 + +#endif diff --git a/include/asm-blackfin/cacheflush.h b/include/asm-blackfin/cacheflush.h new file mode 100644 index 000000000000..e5e000de3c36 --- /dev/null +++ b/include/asm-blackfin/cacheflush.h @@ -0,0 +1,90 @@ +/* + * File: include/asm-blackfin/cacheflush.h + * Based on: include/asm-m68knommu/cacheflush.h + * Author: LG Soft India + * Copyright (C) 2004 Analog Devices Inc. + * Created: Tue Sep 21 2004 + * Description: Blackfin low-level cache routines adapted from the i386 + * and PPC versions by Greg Ungerer (gerg@snapgear.com) + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _BLACKFIN_CACHEFLUSH_H +#define _BLACKFIN_CACHEFLUSH_H + +#include + +extern void blackfin_icache_dcache_flush_range(unsigned int, unsigned int); +extern void blackfin_icache_flush_range(unsigned int, unsigned int); +extern void blackfin_dcache_flush_range(unsigned int, unsigned int); +extern void blackfin_dcache_invalidate_range(unsigned int, unsigned int); +extern void blackfin_dflush_page(void *); + +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +static inline void flush_icache_range(unsigned start, unsigned end) +{ +#if defined(CONFIG_BLKFIN_DCACHE) && defined(CONFIG_BLKFIN_CACHE) + +# if defined(CONFIG_BLKFIN_WT) + blackfin_icache_flush_range((start), (end)); +# else + blackfin_icache_dcache_flush_range((start), (end)); +# endif + +#else + +# if defined(CONFIG_BLKFIN_CACHE) + blackfin_icache_flush_range((start), (end)); +# endif +# if defined(CONFIG_BLKFIN_DCACHE) + blackfin_dcache_flush_range((start), (end)); +# endif + +#endif +} + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +do { memcpy(dst, src, len); \ + flush_icache_range ((unsigned) (dst), (unsigned) (dst) + (len)); \ +} while (0) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len) + +#if defined(CONFIG_BLKFIN_DCACHE) +# define invalidate_dcache_range(start,end) blackfin_dcache_invalidate_range((start), (end)) +#else +# define invalidate_dcache_range(start,end) do { } while (0) +#endif +#if defined(CONFIG_BLKFIN_DCACHE) && defined(CONFIG_BLKFIN_WB) +# define flush_dcache_range(start,end) blackfin_dcache_flush_range((start), (end)) +# define flush_dcache_page(page) blackfin_dflush_page(page_address(page)) +#else +# define flush_dcache_range(start,end) do { } while (0) +# define flush_dcache_page(page) do { } while (0) +#endif + +#endif /* _BLACKFIN_CACHEFLUSH_H */ diff --git a/include/asm-blackfin/checksum.h b/include/asm-blackfin/checksum.h new file mode 100644 index 000000000000..2638f2586d2f --- /dev/null +++ b/include/asm-blackfin/checksum.h @@ -0,0 +1,101 @@ +#ifndef _BFIN_CHECKSUM_H +#define _BFIN_CHECKSUM_H + +/* + * MODIFIED FOR BFIN April 30, 2001 akbar.hussain@lineo.com + * + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ + +unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst, + int len, int sum); + +/* + * the same as 
csum_partial_copy, but copies from user space. + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ + +extern unsigned int csum_partial_copy_from_user(const unsigned char *src, + unsigned char *dst, int len, + int sum, int *csum_err); + +#define csum_partial_copy_nocheck(src, dst, len, sum) \ + csum_partial_copy((src), (dst), (len), (sum)) + +unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl); + +/* + * Fold a partial checksum + */ + +static inline unsigned int csum_fold(unsigned int sum) +{ + while (sum >> 16) + sum = (sum & 0xffff) + (sum >> 16); + return ((~(sum << 16)) >> 16); +} + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ + +static inline unsigned int +csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len, + unsigned short proto, unsigned int sum) +{ + + __asm__ ("%0 = %0 + %1;\n\t" + "CC = AC0;\n\t" + "if !CC jump 4;\n\t" + "%0 = %0 + %4;\n\t" + "%0 = %0 + %2;\n\t" + "CC = AC0;\n\t" + "if !CC jump 4;\n\t" + "%0 = %0 + %4;\n\t" + "%0 = %0 + %3;\n\t" + "CC = AC0;\n\t" + "if !CC jump 4;\n\t" + "%0 = %0 + %4;\n\t" + "NOP;\n\t" + : "=d" (sum) + : "d" (daddr), "d" (saddr), "d" ((ntohs(len)<<16)+proto*256), "d" (1), "0"(sum)); + + return (sum); +} + +static inline unsigned short int +csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len, + unsigned short proto, unsigned int sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +extern unsigned short ip_compute_csum(const unsigned char *buff, int len); + +#endif /* _BFIN_CHECKSUM_H */ diff --git a/include/asm-blackfin/cplb.h b/include/asm-blackfin/cplb.h new file mode 100644 index 000000000000..e0dd56bfa4c7 --- /dev/null +++ b/include/asm-blackfin/cplb.h @@ -0,0 +1,51 @@ +/************************************************************************ + * + * cplb.h + * + * (c) Copyright 2002-2003 Analog Devices, Inc. All rights reserved. + * + ************************************************************************/ + +/* Defines necessary for cplb initialisation routines. */ + +#ifndef _CPLB_H +#define _CPLB_H + +# include + +#define CPLB_ENABLE_ICACHE_P 0 +#define CPLB_ENABLE_DCACHE_P 1 +#define CPLB_ENABLE_DCACHE2_P 2 +#define CPLB_ENABLE_CPLBS_P 3 /* Deprecated! */ +#define CPLB_ENABLE_ICPLBS_P 4 +#define CPLB_ENABLE_DCPLBS_P 5 + +#define CPLB_ENABLE_ICACHE (1< +#include + +#define INITIAL_T 0x1 +#define SWITCH_T 0x2 +#define I_CPLB 0x4 +#define D_CPLB 0x8 + +#define IN_KERNEL 1 + +enum +{ZERO_P, L1I_MEM, L1D_MEM, SDRAM_KERN , SDRAM_RAM_MTD, SDRAM_DMAZ, RES_MEM, ASYNC_MEM, L2_MEM}; + +struct cplb_desc { + u32 start; /* start address */ + u32 end; /* end address */ + u32 psize; /* prefered size if any otherwise 1MB or 4MB*/ + u16 attr;/* attributes */ + u16 i_conf;/* I-CPLB DATA */ + u16 d_conf;/* D-CPLB DATA */ + u16 valid;/* valid */ + const s8 name[30];/* name */ +}; + +struct cplb_tab { + u_long *tab; + u16 pos; + u16 size; +}; + +u_long icplb_table[MAX_CPLBS+1]; +u_long dcplb_table[MAX_CPLBS+1]; + +/* Till here we are discussing about the static memory management model. + * However, the operating envoronments commonly define more CPLB + * descriptors to cover the entire addressable memory than will fit into + * the available on-chip 16 CPLB MMRs. 
When this happens, the below table + * will be used which will hold all the potentially required CPLB descriptors + * + * This is how Page descriptor Table is implemented in uClinux/Blackfin. + */ + +#ifdef CONFIG_CPLB_SWITCH_TAB_L1 +u_long ipdt_table[MAX_SWITCH_I_CPLBS+1]__attribute__((l1_data)); +u_long dpdt_table[MAX_SWITCH_D_CPLBS+1]__attribute__((l1_data)); + +#ifdef CONFIG_CPLB_INFO +u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS]__attribute__((l1_data)); +u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS]__attribute__((l1_data)); +#endif /* CONFIG_CPLB_INFO */ + +#else + +u_long ipdt_table[MAX_SWITCH_I_CPLBS+1]; +u_long dpdt_table[MAX_SWITCH_D_CPLBS+1]; + +#ifdef CONFIG_CPLB_INFO +u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS]; +u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS]; +#endif /* CONFIG_CPLB_INFO */ + +#endif /*CONFIG_CPLB_SWITCH_TAB_L1*/ + +struct s_cplb { + struct cplb_tab init_i; + struct cplb_tab init_d; + struct cplb_tab switch_i; + struct cplb_tab switch_d; +}; + +#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE) +static struct cplb_desc cplb_data[] = { + { + .start = 0, + .end = SIZE_4K, + .psize = SIZE_4K, + .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB, + .i_conf = SDRAM_OOPS, + .d_conf = SDRAM_OOPS, +#if defined(CONFIG_DEBUG_HUNT_FOR_ZERO) + .valid = 1, +#else + .valid = 0, +#endif + .name = "ZERO Pointer Saveguard", + }, + { + .start = L1_CODE_START, + .end = L1_CODE_START + L1_CODE_LENGTH, + .psize = SIZE_4M, + .attr = INITIAL_T | SWITCH_T | I_CPLB, + .i_conf = L1_IMEMORY, + .d_conf = 0, + .valid = 1, + .name = "L1 I-Memory", + }, + { + .start = L1_DATA_A_START, + .end = L1_DATA_B_START + L1_DATA_B_LENGTH, + .psize = SIZE_4M, + .attr = INITIAL_T | SWITCH_T | D_CPLB, + .i_conf = 0, + .d_conf = L1_DMEMORY, +#if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0)) + .valid = 1, +#else + .valid = 0, +#endif + .name = "L1 D-Memory", + }, + { + .start = 0, + .end = 0, /* dynamic */ + .psize = 0, + .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB, + .i_conf = SDRAM_IGENERIC, + .d_conf = SDRAM_DGENERIC, + .valid = 1, + .name = "SDRAM Kernel", + }, + { + .start = 0, /* dynamic */ + .end = 0, /* dynamic */ + .psize = 0, + .attr = INITIAL_T | SWITCH_T | D_CPLB, + .i_conf = SDRAM_IGENERIC, + .d_conf = SDRAM_DNON_CHBL, + .valid = 1, + .name = "SDRAM RAM MTD", + }, + { + .start = 0, /* dynamic */ + .end = 0, /* dynamic */ + .psize = SIZE_1M, + .attr = INITIAL_T | SWITCH_T | D_CPLB, + .d_conf = SDRAM_DNON_CHBL, + .valid = 1,//(DMA_UNCACHED_REGION > 0), + .name = "SDRAM Uncached DMA ZONE", + }, + { + .start = 0, /* dynamic */ + .end = 0, /* dynamic */ + .psize = 0, + .attr = SWITCH_T | D_CPLB, + .i_conf = 0, /* dynamic */ + .d_conf = 0, /* dynamic */ + .valid = 1, + .name = "SDRAM Reserved Memory", + }, + { + .start = ASYNC_BANK0_BASE, + .end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE, + .psize = 0, + .attr = SWITCH_T | D_CPLB, + .d_conf = SDRAM_EBIU, + .valid = 1, + .name = "ASYNC Memory", + }, + { +#if defined(CONFIG_BF561) + .start = L2_SRAM, + .end = L2_SRAM_END, + .psize = SIZE_1M, + .attr = SWITCH_T | D_CPLB, + .i_conf = L2_MEMORY, + .d_conf = L2_MEMORY, + .valid = 1, +#else + .valid = 0, +#endif + .name = "L2 Memory", + } +}; +#endif diff --git a/include/asm-blackfin/cpumask.h b/include/asm-blackfin/cpumask.h new file mode 100644 index 000000000000..b20a8e9012cb --- /dev/null +++ b/include/asm-blackfin/cpumask.h @@ -0,0 +1,6 @@ +#ifndef _ASM_BLACKFIN_CPUMASK_H +#define _ASM_BLACKFIN_CPUMASK_H + +#include + +#endif /* _ASM_BLACKFIN_CPUMASK_H */ diff --git 
a/include/asm-blackfin/cputime.h b/include/asm-blackfin/cputime.h new file mode 100644 index 000000000000..2b19705f9885 --- /dev/null +++ b/include/asm-blackfin/cputime.h @@ -0,0 +1,6 @@ +#ifndef __BLACKFIN_CPUTIME_H +#define __BLACKFIN_CPUTIME_H + +#include + +#endif /* __BLACKFIN_CPUTIME_H */ diff --git a/include/asm-blackfin/current.h b/include/asm-blackfin/current.h new file mode 100644 index 000000000000..31918d29122c --- /dev/null +++ b/include/asm-blackfin/current.h @@ -0,0 +1,23 @@ +#ifndef _BLACKFIN_CURRENT_H +#define _BLACKFIN_CURRENT_H +/* + * current.h + * (C) Copyright 2000, Lineo, David McCullough + * + * rather than dedicate a register (as the m68k source does), we + * just keep a global, we should probably just change it all to be + * current and lose _current_task. + */ +#include + +struct task_struct; + +static inline struct task_struct *get_current(void) __attribute__ ((__const__)); +static inline struct task_struct *get_current(void) +{ + return (current_thread_info()->task); +} + +#define current (get_current()) + +#endif /* _BLACKFIN_CURRENT_H */ diff --git a/include/asm-blackfin/delay.h b/include/asm-blackfin/delay.h new file mode 100644 index 000000000000..52e7a10d7ff8 --- /dev/null +++ b/include/asm-blackfin/delay.h @@ -0,0 +1,44 @@ +#ifndef _BLACKFIN_DELAY_H +#define _BLACKFIN_DELAY_H + +static inline void __delay(unsigned long loops) +{ + +/* FIXME: Currently the assembler doesn't recognize Loop Register Clobbers, + uncomment this as soon those are implemented */ +/* + __asm__ __volatile__ ( "\t LSETUP (1f,1f) LC0= %0\n\t" + "1:\t NOP;\n\t" + : :"a" (loops) + : "LT0","LB0","LC0"); + +*/ + + __asm__ __volatile__("[--SP] = LC0;\n\t" + "[--SP] = LT0;\n\t" + "[--SP] = LB0;\n\t" + "LSETUP (1f,1f) LC0 = %0;\n\t" + "1:\t NOP;\n\t" + "LB0 = [SP++];\n\t" + "LT0 = [SP++];\n\t" + "LC0 = [SP++];\n" + : + :"a" (loops)); +} + +#include /* needed for HZ */ + +/* + * Use only for very small delays ( < 1 msec). Should probably use a + * lookup table, really, as the multiplications take much too long with + * short delays. 
This is a "reasonable" implementation, though (and the + * first constant multiplications gets optimized away if the delay is + * a constant) + */ +static inline void udelay(unsigned long usecs) +{ + extern unsigned long loops_per_jiffy; + __delay(usecs * loops_per_jiffy / (1000000 / HZ)); +} + +#endif /* defined(_BLACKFIN_DELAY_H) */ diff --git a/include/asm-blackfin/device.h b/include/asm-blackfin/device.h new file mode 100644 index 000000000000..d8f9872b0e2d --- /dev/null +++ b/include/asm-blackfin/device.h @@ -0,0 +1,7 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#include + diff --git a/include/asm-blackfin/div64.h b/include/asm-blackfin/div64.h new file mode 100644 index 000000000000..6cd978cefb28 --- /dev/null +++ b/include/asm-blackfin/div64.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-blackfin/dma-mapping.h b/include/asm-blackfin/dma-mapping.h new file mode 100644 index 000000000000..7a77d7fe3a33 --- /dev/null +++ b/include/asm-blackfin/dma-mapping.h @@ -0,0 +1,66 @@ +#ifndef _BLACKFIN_DMA_MAPPING_H +#define _BLACKFIN_DMA_MAPPING_H + +#include + +void dma_alloc_init(unsigned long start, unsigned long end); +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); +void dma_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle); + +/* + * Now for the API extensions over the pci_ one + */ +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) + +/* + * Map a single buffer of the indicated size for DMA in streaming mode. + * The 32-bit bus address to use is returned. + * + * Once the device is given the dma address, the device owns this memory + * until either pci_unmap_single or pci_dma_sync_single is performed. + */ +extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction direction); + +/* + * Unmap a single streaming mode DMA translation. The dma_addr and size + * must match what was provided for in a previous pci_map_single call. All + * other usages are undefined. + * + * After this call, reads by the cpu to the buffer are guarenteed to see + * whatever the device wrote there. + */ +extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction); + +/* + * Map a set of buffers described by scatterlist in streaming + * mode for DMA. This is the scather-gather version of the + * above pci_map_single interface. Here the scatter gather list + * elements are each tagged with the appropriate dma address + * and length. They are obtained via sg_dma_{address,length}(SG). + * + * NOTE: An implementation may be able to use a smaller number of + * DMA address/length pairs than there are SG table elements. + * (for example via virtual mapping capabilities) + * The routine returns the number of addr/length pairs actually + * used, at most nents. + * + * Device ownership issues as mentioned above for pci_map_single are + * the same here. + */ +extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction); + +/* + * Unmap a set of streaming mode DMA translations. + * Again, cpu read rules concerning calls here are the same as for + * pci_unmap_single() above. 
+ */ +extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nhwentries, enum dma_data_direction direction); + +#endif /* _BLACKFIN_DMA_MAPPING_H */ diff --git a/include/asm-blackfin/dma.h b/include/asm-blackfin/dma.h new file mode 100644 index 000000000000..be0d913e5516 --- /dev/null +++ b/include/asm-blackfin/dma.h @@ -0,0 +1,188 @@ +/* + * File: include/asm-blackfin/simple_bf533_dma.h + * Based on: none - original work + * Author: LG Soft India + * Copyright (C) 2004-2005 Analog Devices Inc. + * Created: Tue Sep 21 2004 + * Description: This file contains the major Data structures and constants + * used for DMA Implementation in BF533 + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _BLACKFIN_DMA_H_ +#define _BLACKFIN_DMA_H_ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define MAX_DMA_ADDRESS PAGE_OFFSET + +/***************************************************************************** +* Generic DMA Declarations +* +****************************************************************************/ +enum dma_chan_status { + DMA_CHANNEL_FREE, + DMA_CHANNEL_REQUESTED, + DMA_CHANNEL_ENABLED, +}; + +/*------------------------- + * config reg bits value + *-------------------------*/ +#define DATA_SIZE_8 0 +#define DATA_SIZE_16 1 +#define DATA_SIZE_32 2 + +#define DMA_FLOW_STOP 0 +#define DMA_FLOW_AUTO 1 +#define DMA_FLOW_ARRAY 4 +#define DMA_FLOW_SMALL 6 +#define DMA_FLOW_LARGE 7 + +#define DIMENSION_LINEAR 0 +#define DIMENSION_2D 1 + +#define DIR_READ 0 +#define DIR_WRITE 1 + +#define INTR_DISABLE 0 +#define INTR_ON_BUF 2 +#define INTR_ON_ROW 3 + +struct dmasg { + unsigned long next_desc_addr; + unsigned long start_addr; + unsigned short cfg; + unsigned short x_count; + short x_modify; + unsigned short y_count; + short y_modify; +} __attribute__((packed)); + +struct dma_register { + unsigned long next_desc_ptr; /* DMA Next Descriptor Pointer register */ + unsigned long start_addr; /* DMA Start address register */ + + unsigned short cfg; /* DMA Configuration register */ + unsigned short dummy1; /* DMA Configuration register */ + + unsigned long reserved; + + unsigned short x_count; /* DMA x_count register */ + unsigned short dummy2; + + short x_modify; /* DMA x_modify register */ + unsigned short dummy3; + + unsigned short y_count; /* DMA y_count register */ + unsigned short dummy4; + + short y_modify; /* DMA y_modify register */ + unsigned short dummy5; + + unsigned long curr_desc_ptr; /* DMA Current Descriptor Pointer + register */ + unsigned short curr_addr_ptr_lo; /* DMA Current Address Pointer + register */ + unsigned short curr_addr_ptr_hi; /* DMA Current Address Pointer + register */ + unsigned short irq_status; /* DMA irq status register */ + 
unsigned short dummy6; + + unsigned short peripheral_map; /* DMA peripheral map register */ + unsigned short dummy7; + + unsigned short curr_x_count; /* DMA Current x-count register */ + unsigned short dummy8; + + unsigned long reserved2; + + unsigned short curr_y_count; /* DMA Current y-count register */ + unsigned short dummy9; + + unsigned long reserved3; + +}; + +typedef irqreturn_t(*dma_interrupt_t) (int irq, void *dev_id); + +struct dma_channel { + struct mutex dmalock; + char *device_id; + enum dma_chan_status chan_status; + struct dma_register *regs; + struct dmasg *sg; /* large mode descriptor */ + unsigned int ctrl_num; /* controller number */ + dma_interrupt_t irq_callback; + void *data; + unsigned int dma_enable_flag; + unsigned int loopback_flag; +}; + +/******************************************************************************* +* DMA API's +*******************************************************************************/ +/* functions to set register mode */ +void set_dma_start_addr(unsigned int channel, unsigned long addr); +void set_dma_next_desc_addr(unsigned int channel, unsigned long addr); +void set_dma_x_count(unsigned int channel, unsigned short x_count); +void set_dma_x_modify(unsigned int channel, short x_modify); +void set_dma_y_count(unsigned int channel, unsigned short y_count); +void set_dma_y_modify(unsigned int channel, short y_modify); +void set_dma_config(unsigned int channel, unsigned short config); +unsigned short set_bfin_dma_config(char direction, char flow_mode, + char intr_mode, char dma_mode, char width); + +/* get curr status for polling */ +unsigned short get_dma_curr_irqstat(unsigned int channel); +unsigned short get_dma_curr_xcount(unsigned int channel); +unsigned short get_dma_curr_ycount(unsigned int channel); + +/* set large DMA mode descriptor */ +void set_dma_sg(unsigned int channel, struct dmasg *sg, int nr_sg); + +/* check if current channel is in use */ +int dma_channel_active(unsigned int channel); + +/* common functions must be called in any mode */ +void free_dma(unsigned int channel); +int dma_channel_active(unsigned int channel); /* check if a channel is in use */ +void disable_dma(unsigned int channel); +void enable_dma(unsigned int channel); +int request_dma(unsigned int channel, char *device_id); +int set_dma_callback(unsigned int channel, dma_interrupt_t callback, + void *data); +void dma_disable_irq(unsigned int channel); +void dma_enable_irq(unsigned int channel); +void clear_dma_irqstat(unsigned int channel); +void *dma_memcpy(void *dest, const void *src, size_t count); +void *safe_dma_memcpy(void *dest, const void *src, size_t count); + +#endif diff --git a/include/asm-blackfin/dpmc.h b/include/asm-blackfin/dpmc.h new file mode 100644 index 000000000000..f162edb23033 --- /dev/null +++ b/include/asm-blackfin/dpmc.h @@ -0,0 +1,70 @@ +/* + * include/asm-blackfin/dpmc.h - Miscellaneous IOCTL commands for Dynamic Power + * Management Controller Driver. + * Copyright (C) 2004 Analog Device Inc. 
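A single-shot receive using the channel API declared in dma.h above; EXAMPLE_DMA_CHANNEL is a placeholder channel number and the callback body is only illustrative — which peripheral maps to which channel is a board/part decision outside this header.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <asm/dma.h>

#define EXAMPLE_DMA_CHANNEL 3	/* placeholder channel number */

static irqreturn_t example_dma_done(int irq, void *dev_id)
{
	clear_dma_irqstat(EXAMPLE_DMA_CHANNEL);	/* acknowledge the interrupt */
	/* ... wake up whoever is waiting for the buffer ... */
	return IRQ_HANDLED;
}

static int example_dma_rx(void *buf, unsigned short nwords)
{
	if (request_dma(EXAMPLE_DMA_CHANNEL, "example") < 0)
		return -EBUSY;

	set_dma_callback(EXAMPLE_DMA_CHANNEL, example_dma_done, NULL);
	set_dma_start_addr(EXAMPLE_DMA_CHANNEL, (unsigned long)buf);
	set_dma_x_count(EXAMPLE_DMA_CHANNEL, nwords);
	set_dma_x_modify(EXAMPLE_DMA_CHANNEL, 2);	/* 16-bit strides */
	set_dma_config(EXAMPLE_DMA_CHANNEL,
		       set_bfin_dma_config(DIR_WRITE,		/* memory is written */
					   DMA_FLOW_STOP,	/* one-shot transfer */
					   INTR_ON_BUF,		/* irq when buffer done */
					   DIMENSION_LINEAR,
					   DATA_SIZE_16));
	enable_dma(EXAMPLE_DMA_CHANNEL);
	return 0;
}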
+ * + */ +#ifndef _BLACKFIN_DPMC_H_ +#define _BLACKFIN_DPMC_H_ + +#define SLEEP_MODE 1 +#define DEEP_SLEEP_MODE 2 +#define ACTIVE_PLL_DISABLED 3 +#define FULLON_MODE 4 +#define ACTIVE_PLL_ENABLED 5 +#define HIBERNATE_MODE 6 + +#define IOCTL_FULL_ON_MODE _IO('s', 0xA0) +#define IOCTL_ACTIVE_MODE _IO('s', 0xA1) +#define IOCTL_SLEEP_MODE _IO('s', 0xA2) +#define IOCTL_DEEP_SLEEP_MODE _IO('s', 0xA3) +#define IOCTL_HIBERNATE_MODE _IO('s', 0xA4) +#define IOCTL_CHANGE_FREQUENCY _IOW('s', 0xA5, unsigned long) +#define IOCTL_CHANGE_VOLTAGE _IOW('s', 0xA6, unsigned long) +#define IOCTL_SET_CCLK _IOW('s', 0xA7, unsigned long) +#define IOCTL_SET_SCLK _IOW('s', 0xA8, unsigned long) +#define IOCTL_GET_PLLSTATUS _IOW('s', 0xA9, unsigned long) +#define IOCTL_GET_CORECLOCK _IOW('s', 0xAA, unsigned long) +#define IOCTL_GET_SYSTEMCLOCK _IOW('s', 0xAB, unsigned long) +#define IOCTL_GET_VCO _IOW('s', 0xAC, unsigned long) +#define IOCTL_DISABLE_WDOG_TIMER _IO('s', 0xAD) +#define IOCTL_UNMASK_WDOG_WAKEUP_EVENT _IO('s',0xAE) +#define IOCTL_PROGRAM_WDOG_TIMER _IOW('s',0xAF,unsigned long) +#define IOCTL_CLEAR_WDOG_WAKEUP_EVENT _IO('s',0xB0) +#define IOCTL_SLEEP_DEEPER_MODE _IO('s',0xB1) + +#define DPMC_MINOR 254 + +#define ON 0 +#define OFF 1 + +#ifdef __KERNEL__ + +unsigned long calc_volt(void); +int calc_vlev(int vlt); +unsigned long change_voltage(unsigned long volt); +int calc_msel(int vco_hz); +unsigned long change_frequency(unsigned long vco_mhz); +int set_pll_div(unsigned short sel, unsigned char flag); +int get_vco(void); +unsigned long change_system_clock(unsigned long clock); +unsigned long change_core_clock(unsigned long clock); +unsigned long get_pll_status(void); +void change_baud(int baud); +void fullon_mode(void); +void active_mode(void); +void sleep_mode(u32 sic_iwr); +void deep_sleep(u32 sic_iwr); +void hibernate_mode(u32 sic_iwr); +void sleep_deeper(u32 sic_iwr); +void program_wdog_timer(unsigned long); +void unmask_wdog_wakeup_evt(void); +void clear_wdog_wakeup_evt(void); +void disable_wdog_timer(void); + +extern unsigned long get_cclk(void); +extern unsigned long get_sclk(void); + +#endif /* __KERNEL__ */ + +#endif /*_BLACKFIN_DPMC_H_*/ diff --git a/include/asm-blackfin/elf.h b/include/asm-blackfin/elf.h new file mode 100644 index 000000000000..5264b5536a70 --- /dev/null +++ b/include/asm-blackfin/elf.h @@ -0,0 +1,127 @@ +/* Changes made by LG Soft Oct 2004*/ + +#ifndef __ASMBFIN_ELF_H +#define __ASMBFIN_ELF_H + +/* + * ELF register definitions.. + */ + +#include +#include + +/* Processor specific flags for the ELF header e_flags field. */ +#define EF_BFIN_PIC 0x00000001 /* -fpic */ +#define EF_BFIN_FDPIC 0x00000002 /* -mfdpic */ +#define EF_BFIN_CODE_IN_L1 0x00000010 /* --code-in-l1 */ +#define EF_BFIN_DATA_IN_L1 0x00000020 /* --data-in-l1 */ + +typedef unsigned long elf_greg_t; + +#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef struct user_bfinfp_struct elf_fpregset_t; +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_BLACKFIN) + +#define elf_check_fdpic(x) ((x)->e_flags & EF_BFIN_FDPIC /* && !((x)->e_flags & EF_FRV_NON_PIC_RELOCS) */) +#define elf_check_const_displacement(x) ((x)->e_flags & EF_BFIN_PIC) + +/* EM_BLACKFIN defined in linux/elf.h */ + +/* + * These are used to set parameters in the core dumps. 
+ */ +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_BLACKFIN + +#define ELF_PLAT_INIT(_r) _r->p1 = 0 + +#define ELF_FDPIC_PLAT_INIT(_regs, _exec_map_addr, _interp_map_addr, _dynamic_addr) \ +do { \ + _regs->r7 = 0; \ + _regs->p0 = _exec_map_addr; \ + _regs->p1 = _interp_map_addr; \ + _regs->p2 = _dynamic_addr; \ +} while(0) + +#define USE_ELF_CORE_DUMP +#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC +#define ELF_EXEC_PAGESIZE 4096 + +#define R_unused0 0 /* relocation type 0 is not defined */ +#define R_pcrel5m2 1 /*LSETUP part a */ +#define R_unused1 2 /* relocation type 2 is not defined */ +#define R_pcrel10 3 /* type 3, if cc jump */ +#define R_pcrel12_jump 4 /* type 4, jump */ +#define R_rimm16 5 /* type 0x5, rN = */ +#define R_luimm16 6 /* # 0x6, preg.l= Load imm 16 to lower half */ +#define R_huimm16 7 /* # 0x7, preg.h= Load imm 16 to upper half */ +#define R_pcrel12_jump_s 8 /* # 0x8 jump.s */ +#define R_pcrel24_jump_x 9 /* # 0x9 jump.x */ +#define R_pcrel24 10 /* # 0xa call , not expandable */ +#define R_unusedb 11 /* # 0xb not generated */ +#define R_unusedc 12 /* # 0xc not used */ +#define R_pcrel24_jump_l 13 /*0xd jump.l */ +#define R_pcrel24_call_x 14 /* 0xE, call.x if is above 24 bit limit call through P1 */ +#define R_var_eq_symb 15 /* 0xf, linker should treat it same as 0x12 */ +#define R_byte_data 16 /* 0x10, .byte var = symbol */ +#define R_byte2_data 17 /* 0x11, .byte2 var = symbol */ +#define R_byte4_data 18 /* 0x12, .byte4 var = symbol and .var var=symbol */ +#define R_pcrel11 19 /* 0x13, lsetup part b */ +#define R_unused14 20 /* 0x14, undefined */ +#define R_unused15 21 /* not generated by VDSP 3.5 */ + +/* arithmetic relocations */ +#define R_push 0xE0 +#define R_const 0xE1 +#define R_add 0xE2 +#define R_sub 0xE3 +#define R_mult 0xE4 +#define R_div 0xE5 +#define R_mod 0xE6 +#define R_lshift 0xE7 +#define R_rshift 0xE8 +#define R_and 0xE9 +#define R_or 0xEA +#define R_xor 0xEB +#define R_land 0xEC +#define R_lor 0xED +#define R_len 0xEE +#define R_neg 0xEF +#define R_comp 0xF0 +#define R_page 0xF1 +#define R_hwpage 0xF2 +#define R_addr 0xF3 + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE 0xD0000000UL + +#define ELF_CORE_COPY_REGS(pr_reg, regs) \ + memcpy((char *) &pr_reg, (char *)regs, \ + sizeof(struct pt_regs)); + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. 
*/ + +#define ELF_PLATFORM (NULL) + +#ifdef __KERNEL__ +#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) +#endif + +#endif diff --git a/include/asm-blackfin/emergency-restart.h b/include/asm-blackfin/emergency-restart.h new file mode 100644 index 000000000000..27f6c785d103 --- /dev/null +++ b/include/asm-blackfin/emergency-restart.h @@ -0,0 +1,6 @@ +#ifndef _ASM_EMERGENCY_RESTART_H +#define _ASM_EMERGENCY_RESTART_H + +#include + +#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/include/asm-blackfin/entry.h b/include/asm-blackfin/entry.h new file mode 100644 index 000000000000..562c6d3a3232 --- /dev/null +++ b/include/asm-blackfin/entry.h @@ -0,0 +1,56 @@ +#ifndef __BFIN_ENTRY_H +#define __BFIN_ENTRY_H + +#include +#include + +#ifdef __ASSEMBLY__ + +#define LFLUSH_I_AND_D 0x00000808 +#define LSIGTRAP 5 + +/* process bits for task_struct.flags */ +#define PF_TRACESYS_OFF 3 +#define PF_TRACESYS_BIT 5 +#define PF_PTRACED_OFF 3 +#define PF_PTRACED_BIT 4 +#define PF_DTRACE_OFF 1 +#define PF_DTRACE_BIT 5 + +/* This one is used for exceptions, emulation, and NMI. It doesn't push + RETI and doesn't do cli. */ +#define SAVE_ALL_SYS save_context_no_interrupts +/* This is used for all normal interrupts. It saves a minimum of registers + to the stack, loads the IRQ number, and jumps to common code. */ +#define INTERRUPT_ENTRY(N) \ + [--sp] = SYSCFG; \ + \ + [--sp] = P0; /*orig_p0*/ \ + [--sp] = R0; /*orig_r0*/ \ + [--sp] = (R7:0,P5:0); \ + R0 = (N); \ + jump __common_int_entry; + +/* For timer interrupts, we need to save IPEND, since the user_mode + macro accesses it to determine where to account time. */ +#define TIMER_INTERRUPT_ENTRY(N) \ + [--sp] = SYSCFG; \ + \ + [--sp] = P0; /*orig_p0*/ \ + [--sp] = R0; /*orig_r0*/ \ + [--sp] = (R7:0,P5:0); \ + p0.l = lo(IPEND); \ + p0.h = hi(IPEND); \ + r1 = [p0]; \ + R0 = (N); \ + jump __common_int_entry; + +/* This one pushes RETI without using CLI. Interrupts are enabled. 
*/ +#define SAVE_CONTEXT_SYSCALL save_context_syscall +#define SAVE_CONTEXT save_context_with_interrupts + +#define RESTORE_ALL_SYS restore_context_no_interrupts +#define RESTORE_CONTEXT restore_context_with_interrupts + +#endif /* __ASSEMBLY__ */ +#endif /* __BFIN_ENTRY_H */ diff --git a/include/asm-blackfin/errno.h b/include/asm-blackfin/errno.h new file mode 100644 index 000000000000..164e4f39bb57 --- /dev/null +++ b/include/asm-blackfin/errno.h @@ -0,0 +1,6 @@ +#ifndef _BFIN_ERRNO_H +#define _BFIN_ERRNO_H + +#include + +#endif /* _BFIN_ERRNO_H */ diff --git a/include/asm-blackfin/fcntl.h b/include/asm-blackfin/fcntl.h new file mode 100644 index 000000000000..9c4037127857 --- /dev/null +++ b/include/asm-blackfin/fcntl.h @@ -0,0 +1,13 @@ +#ifndef _BFIN_FCNTL_H +#define _BFIN_FCNTL_H + +/* open/fcntl - O_SYNC is only implemented on blocks devices and on files + located on an ext2 file system */ +#define O_DIRECTORY 040000 /* must be a directory */ +#define O_NOFOLLOW 0100000 /* don't follow links */ +#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ +#define O_LARGEFILE 0400000 + +#include + +#endif diff --git a/include/asm-blackfin/flat.h b/include/asm-blackfin/flat.h new file mode 100644 index 000000000000..e70074e05f4e --- /dev/null +++ b/include/asm-blackfin/flat.h @@ -0,0 +1,58 @@ +/* + * include/asm-blackfin/flat.h -- uClinux flat-format executables + * + * Copyright (C) 2003, + * + */ + +#ifndef __BLACKFIN_FLAT_H__ +#define __BLACKFIN_FLAT_H__ + +#include + +#define flat_stack_align(sp) /* nothing needed */ +#define flat_argvp_envp_on_stack() 0 +#define flat_old_ram_flag(flags) (flags) + +extern unsigned long bfin_get_addr_from_rp (unsigned long *ptr, + unsigned long relval, + unsigned long flags, + unsigned long *persistent); + +extern void bfin_put_addr_at_rp(unsigned long *ptr, unsigned long addr, + unsigned long relval); + +/* The amount by which a relocation can exceed the program image limits + without being regarded as an error. */ + +#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) + +#define flat_get_addr_from_rp(rp, relval, flags, persistent) \ + bfin_get_addr_from_rp(rp, relval, flags, persistent) +#define flat_put_addr_at_rp(rp, val, relval) \ + bfin_put_addr_at_rp(rp, val, relval) + +/* Convert a relocation entry into an address. */ +static inline unsigned long +flat_get_relocate_addr (unsigned long relval) +{ + return relval & 0x03ffffff; /* Mask out top 6 bits */ +} + +static inline int flat_set_persistent(unsigned long relval, + unsigned long *persistent) +{ + int type = (relval >> 26) & 7; + if (type == 3) { + *persistent = relval << 16; + return 1; + } + return 0; +} + +static inline int flat_addr_absolute(unsigned long relval) +{ + return (relval & (1 << 29)) != 0; +} + +#endif /* __BLACKFIN_FLAT_H__ */ diff --git a/include/asm-blackfin/futex.h b/include/asm-blackfin/futex.h new file mode 100644 index 000000000000..6a332a9f099c --- /dev/null +++ b/include/asm-blackfin/futex.h @@ -0,0 +1,6 @@ +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#include + +#endif diff --git a/include/asm-blackfin/gpio.h b/include/asm-blackfin/gpio.h new file mode 100644 index 000000000000..d16fe3cd6135 --- /dev/null +++ b/include/asm-blackfin/gpio.h @@ -0,0 +1,367 @@ +/* + * File: arch/blackfin/kernel/bfin_gpio.h + * Based on: + * Author: Michael Hennerich (hennerich@blackfin.uclinux.org) + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* +* Number BF537/6/4 BF561 BF533/2/1 +* +* GPIO_0 PF0 PF0 PF0 +* GPIO_1 PF1 PF1 PF1 +* GPIO_2 PF2 PF2 PF2 +* GPIO_3 PF3 PF3 PF3 +* GPIO_4 PF4 PF4 PF4 +* GPIO_5 PF5 PF5 PF5 +* GPIO_6 PF6 PF6 PF6 +* GPIO_7 PF7 PF7 PF7 +* GPIO_8 PF8 PF8 PF8 +* GPIO_9 PF9 PF9 PF9 +* GPIO_10 PF10 PF10 PF10 +* GPIO_11 PF11 PF11 PF11 +* GPIO_12 PF12 PF12 PF12 +* GPIO_13 PF13 PF13 PF13 +* GPIO_14 PF14 PF14 PF14 +* GPIO_15 PF15 PF15 PF15 +* GPIO_16 PG0 PF16 +* GPIO_17 PG1 PF17 +* GPIO_18 PG2 PF18 +* GPIO_19 PG3 PF19 +* GPIO_20 PG4 PF20 +* GPIO_21 PG5 PF21 +* GPIO_22 PG6 PF22 +* GPIO_23 PG7 PF23 +* GPIO_24 PG8 PF24 +* GPIO_25 PG9 PF25 +* GPIO_26 PG10 PF26 +* GPIO_27 PG11 PF27 +* GPIO_28 PG12 PF28 +* GPIO_29 PG13 PF29 +* GPIO_30 PG14 PF30 +* GPIO_31 PG15 PF31 +* GPIO_32 PH0 PF32 +* GPIO_33 PH1 PF33 +* GPIO_34 PH2 PF34 +* GPIO_35 PH3 PF35 +* GPIO_36 PH4 PF36 +* GPIO_37 PH5 PF37 +* GPIO_38 PH6 PF38 +* GPIO_39 PH7 PF39 +* GPIO_40 PH8 PF40 +* GPIO_41 PH9 PF41 +* GPIO_42 PH10 PF42 +* GPIO_43 PH11 PF43 +* GPIO_44 PH12 PF44 +* GPIO_45 PH13 PF45 +* GPIO_46 PH14 PF46 +* GPIO_47 PH15 PF47 +*/ + +#ifndef __ARCH_BLACKFIN_GPIO_H__ +#define __ARCH_BLACKFIN_GPIO_H__ + +#define gpio_bank(x) ((x) >> 4) +#define gpio_bit(x) (1<<((x) & 0xF)) +#define gpio_sub_n(x) ((x) & 0xF) + +#define GPIO_BANKSIZE 16 + +#define GPIO_0 0 +#define GPIO_1 1 +#define GPIO_2 2 +#define GPIO_3 3 +#define GPIO_4 4 +#define GPIO_5 5 +#define GPIO_6 6 +#define GPIO_7 7 +#define GPIO_8 8 +#define GPIO_9 9 +#define GPIO_10 10 +#define GPIO_11 11 +#define GPIO_12 12 +#define GPIO_13 13 +#define GPIO_14 14 +#define GPIO_15 15 +#define GPIO_16 16 +#define GPIO_17 17 +#define GPIO_18 18 +#define GPIO_19 19 +#define GPIO_20 20 +#define GPIO_21 21 +#define GPIO_22 22 +#define GPIO_23 23 +#define GPIO_24 24 +#define GPIO_25 25 +#define GPIO_26 26 +#define GPIO_27 27 +#define GPIO_28 28 +#define GPIO_29 29 +#define GPIO_30 30 +#define GPIO_31 31 +#define GPIO_32 32 +#define GPIO_33 33 +#define GPIO_34 34 +#define GPIO_35 35 +#define GPIO_36 36 +#define GPIO_37 37 +#define GPIO_38 38 +#define GPIO_39 39 +#define GPIO_40 40 +#define GPIO_41 41 +#define GPIO_42 42 +#define GPIO_43 43 +#define GPIO_44 44 +#define GPIO_45 45 +#define GPIO_46 46 +#define GPIO_47 47 + + +#define PERIPHERAL_USAGE 1 +#define GPIO_USAGE 0 + +#ifdef BF533_FAMILY +#define MAX_BLACKFIN_GPIOS 16 +#endif + +#ifdef BF537_FAMILY +#define MAX_BLACKFIN_GPIOS 48 +#define PORT_F 0 +#define PORT_G 1 +#define PORT_H 2 +#define PORT_J 3 + +#define GPIO_PF0 0 +#define GPIO_PF1 1 +#define GPIO_PF2 2 +#define GPIO_PF3 3 +#define GPIO_PF4 4 +#define GPIO_PF5 5 +#define GPIO_PF6 6 +#define GPIO_PF7 7 +#define GPIO_PF8 8 +#define GPIO_PF9 9 +#define GPIO_PF10 10 +#define GPIO_PF11 11 +#define GPIO_PF12 12 
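/*
 * Editorial aside, not part of the original patch: a quick worked example of
 * the helper macros defined near the top of this header, using GPIO_PF12
 * (linear GPIO number 12, defined just above):
 *
 *     gpio_bank(12)  == 12 >> 4        == 0       -> first 16-pin bank (PORT_F here)
 *     gpio_bit(12)   == 1 << (12 & 15) == 0x1000  -> mask for that pin within its bank
 *     gpio_sub_n(12) == 12 & 15        == 12      -> bit position within the bank
 *
 * So a linear GPIO number always splits into a bank of GPIO_BANKSIZE pins
 * plus a single bit inside that bank's 16-bit registers.
 */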
+#define GPIO_PF13 13 +#define GPIO_PF14 14 +#define GPIO_PF15 15 +#define GPIO_PG0 16 +#define GPIO_PG1 17 +#define GPIO_PG2 18 +#define GPIO_PG3 19 +#define GPIO_PG4 20 +#define GPIO_PG5 21 +#define GPIO_PG6 22 +#define GPIO_PG7 23 +#define GPIO_PG8 24 +#define GPIO_PG9 25 +#define GPIO_PG10 26 +#define GPIO_PG11 27 +#define GPIO_PG12 28 +#define GPIO_PG13 29 +#define GPIO_PG14 30 +#define GPIO_PG15 31 +#define GPIO_PH0 32 +#define GPIO_PH1 33 +#define GPIO_PH2 34 +#define GPIO_PH3 35 +#define GPIO_PH4 36 +#define GPIO_PH5 37 +#define GPIO_PH6 38 +#define GPIO_PH7 39 +#define GPIO_PH8 40 +#define GPIO_PH9 41 +#define GPIO_PH10 42 +#define GPIO_PH11 43 +#define GPIO_PH12 44 +#define GPIO_PH13 45 +#define GPIO_PH14 46 +#define GPIO_PH15 47 + +#endif + +#ifdef BF561_FAMILY +#define MAX_BLACKFIN_GPIOS 48 +#define PORT_FIO0 0 +#define PORT_FIO1 1 +#define PORT_FIO2 2 +#endif + +#ifndef __ASSEMBLY__ + +/*********************************************************** +* +* FUNCTIONS: Blackfin General Purpose Ports Access Functions +* +* INPUTS/OUTPUTS: +* gpio - GPIO Number between 0 and MAX_BLACKFIN_GPIOS +* +* +* DESCRIPTION: These functions abstract direct register access +* to Blackfin processor General Purpose +* Ports Regsiters +* +* CAUTION: These functions do not belong to the GPIO Driver API +************************************************************* +* MODIFICATION HISTORY : +**************************************************************/ + +void set_gpio_dir(unsigned short, unsigned short); +void set_gpio_inen(unsigned short, unsigned short); +void set_gpio_polar(unsigned short, unsigned short); +void set_gpio_edge(unsigned short, unsigned short); +void set_gpio_both(unsigned short, unsigned short); +void set_gpio_data(unsigned short, unsigned short); +void set_gpio_maska(unsigned short, unsigned short); +void set_gpio_maskb(unsigned short, unsigned short); +void set_gpio_toggle(unsigned short); +void set_gpiop_dir(unsigned short, unsigned short); +void set_gpiop_inen(unsigned short, unsigned short); +void set_gpiop_polar(unsigned short, unsigned short); +void set_gpiop_edge(unsigned short, unsigned short); +void set_gpiop_both(unsigned short, unsigned short); +void set_gpiop_data(unsigned short, unsigned short); +void set_gpiop_maska(unsigned short, unsigned short); +void set_gpiop_maskb(unsigned short, unsigned short); +unsigned short get_gpio_dir(unsigned short); +unsigned short get_gpio_inen(unsigned short); +unsigned short get_gpio_polar(unsigned short); +unsigned short get_gpio_edge(unsigned short); +unsigned short get_gpio_both(unsigned short); +unsigned short get_gpio_maska(unsigned short); +unsigned short get_gpio_maskb(unsigned short); +unsigned short get_gpio_data(unsigned short); +unsigned short get_gpiop_dir(unsigned short); +unsigned short get_gpiop_inen(unsigned short); +unsigned short get_gpiop_polar(unsigned short); +unsigned short get_gpiop_edge(unsigned short); +unsigned short get_gpiop_both(unsigned short); +unsigned short get_gpiop_maska(unsigned short); +unsigned short get_gpiop_maskb(unsigned short); +unsigned short get_gpiop_data(unsigned short); + +struct gpio_port_t { + unsigned short data; + unsigned short dummy1; + unsigned short data_clear; + unsigned short dummy2; + unsigned short data_set; + unsigned short dummy3; + unsigned short toggle; + unsigned short dummy4; + unsigned short maska; + unsigned short dummy5; + unsigned short maska_clear; + unsigned short dummy6; + unsigned short maska_set; + unsigned short dummy7; + unsigned short maska_toggle; + 
unsigned short dummy8; + unsigned short maskb; + unsigned short dummy9; + unsigned short maskb_clear; + unsigned short dummy10; + unsigned short maskb_set; + unsigned short dummy11; + unsigned short maskb_toggle; + unsigned short dummy12; + unsigned short dir; + unsigned short dummy13; + unsigned short polar; + unsigned short dummy14; + unsigned short edge; + unsigned short dummy15; + unsigned short both; + unsigned short dummy16; + unsigned short inen; +}; + +#ifdef CONFIG_PM +#define PM_WAKE_RISING 0x1 +#define PM_WAKE_FALLING 0x2 +#define PM_WAKE_HIGH 0x4 +#define PM_WAKE_LOW 0x8 +#define PM_WAKE_BOTH_EDGES (PM_WAKE_RISING | PM_WAKE_FALLING) + +int gpio_pm_wakeup_request(unsigned short gpio, unsigned char type); +void gpio_pm_wakeup_free(unsigned short gpio); +unsigned int gpio_pm_setup(void); +void gpio_pm_restore(void); + +struct gpio_port_s { + unsigned short data; + unsigned short data_clear; + unsigned short data_set; + unsigned short toggle; + unsigned short maska; + unsigned short maska_clear; + unsigned short maska_set; + unsigned short maska_toggle; + unsigned short maskb; + unsigned short maskb_clear; + unsigned short maskb_set; + unsigned short maskb_toggle; + unsigned short dir; + unsigned short polar; + unsigned short edge; + unsigned short both; + unsigned short inen; + + unsigned short fer; +}; +#endif /*CONFIG_PM*/ + +/*********************************************************** +* +* FUNCTIONS: Blackfin GPIO Driver +* +* INPUTS/OUTPUTS: +* gpio - GPIO Number between 0 and MAX_BLACKFIN_GPIOS +* +* +* DESCRIPTION: Blackfin GPIO Driver API +* +* CAUTION: +************************************************************* +* MODIFICATION HISTORY : +**************************************************************/ + +int gpio_request(unsigned short, const char *); +void gpio_free(unsigned short); + +void gpio_set_value(unsigned short gpio, unsigned short arg); +unsigned short gpio_get_value(unsigned short gpio); + +#define gpio_get_value(gpio) get_gpio_data(gpio) +#define gpio_set_value(gpio, value) set_gpio_data(gpio, value) + +void gpio_direction_input(unsigned short gpio); +void gpio_direction_output(unsigned short gpio); + +#endif /* __ASSEMBLY__ */ + +#endif /* __ARCH_BLACKFIN_GPIO_H__ */ diff --git a/include/asm-blackfin/hardirq.h b/include/asm-blackfin/hardirq.h new file mode 100644 index 000000000000..0cab0d35badc --- /dev/null +++ b/include/asm-blackfin/hardirq.h @@ -0,0 +1,41 @@ +#ifndef __BFIN_HARDIRQ_H +#define __BFIN_HARDIRQ_H + +#include +#include +#include + +typedef struct { + unsigned int __softirq_pending; + unsigned int __syscall_count; + struct task_struct *__ksoftirqd_task; +} ____cacheline_aligned irq_cpustat_t; + +#include /* Standard mappings for irq_cpustat_t above */ + +/* + * We put the hardirq and softirq counter into the preemption + * counter. The bitmask has the following meaning: + * + * - bits 0-7 are the preemption count (max preemption depth: 256) + * - bits 8-15 are the softirq count (max # of softirqs: 256) + * - bits 16-23 are the hardirq count (max # of hardirqs: 256) + * + * - ( bit 26 is the PREEMPT_ACTIVE flag. ) + * + * PREEMPT_MASK: 0x000000ff + * HARDIRQ_MASK: 0x0000ff00 + * SOFTIRQ_MASK: 0x00ff0000 + */ + +#define HARDIRQ_BITS 8 + +#ifdef NR_IRQS +# if (1 << HARDIRQ_BITS) < NR_IRQS +# error HARDIRQ_BITS is too low! 
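/*
 * Editorial sketch, not part of the original patch: the preempt_count layout
 * documented above lets the generic kernel code build its masks purely from
 * shifts of HARDIRQ_BITS; the real definitions live in linux/hardirq.h, not
 * in this header.  Roughly:
 *
 *     PREEMPT_MASK = ((1 << 8) - 1) << 0              == 0x000000ff
 *     SOFTIRQ_MASK = ((1 << 8) - 1) << 8              == 0x0000ff00
 *     HARDIRQ_MASK = ((1 << HARDIRQ_BITS) - 1) << 16  == 0x00ff0000
 *
 * so in_irq() reduces to (preempt_count() & HARDIRQ_MASK) != 0, and the check
 * directly above makes sure the 8-bit hardirq field could count every IRQ
 * line nested once without overflowing.
 */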
+# endif +#endif + +#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 + +#endif diff --git a/include/asm-blackfin/hw_irq.h b/include/asm-blackfin/hw_irq.h new file mode 100644 index 000000000000..5b51eaec012c --- /dev/null +++ b/include/asm-blackfin/hw_irq.h @@ -0,0 +1,6 @@ +#ifndef __ASM_BFIN_HW_IRQ_H +#define __ASM_BFIN_HW_IRQ_H + +/* Dummy include. */ + +#endif diff --git a/include/asm-blackfin/ide.h b/include/asm-blackfin/ide.h new file mode 100644 index 000000000000..41b2db46a168 --- /dev/null +++ b/include/asm-blackfin/ide.h @@ -0,0 +1,32 @@ +/****************************************************************************/ + +/* + * linux/include/asm-blackfin/ide.h + * + * Copyright (C) 1994-1996 Linus Torvalds & authors + * Copyright (C) 2001 Lineo Inc., davidm@snapgear.com + * Copyright (C) 2002 Greg Ungerer (gerg@snapgear.com) + * Copyright (C) 2002 Yoshinori Sato (ysato@users.sourceforge.jp) + * Copyright (C) 2005 Hennerich Michael (hennerich@blackfin.uclinux.org) + */ + +/****************************************************************************/ +#ifndef _BLACKFIN_IDE_H +#define _BLACKFIN_IDE_H +/****************************************************************************/ +#ifdef __KERNEL__ +/****************************************************************************/ + +#define MAX_HWIFS 1 + +/* Legacy ... BLK_DEV_IDECS */ +#define IDE_ARCH_OBSOLETE_INIT +#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ + + +#include + +/****************************************************************************/ +#endif /* __KERNEL__ */ +#endif /* _BLACKFIN_IDE_H */ +/****************************************************************************/ diff --git a/include/asm-blackfin/io.h b/include/asm-blackfin/io.h new file mode 100644 index 000000000000..7e6995e80d97 --- /dev/null +++ b/include/asm-blackfin/io.h @@ -0,0 +1,207 @@ +#ifndef _BFIN_IO_H +#define _BFIN_IO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#endif +#include + +/* + * These are for ISA/PCI shared memory _only_ and should never be used + * on any other type of memory, including Zorro memory. They are meant to + * access the bus in the bus byte order which is little-endian!. + * + * readX/writeX() are used to access memory mapped devices. On some + * architectures the memory mapped IO stuff needs to be accessed + * differently. On the bfin architecture, we just read/write the + * memory location directly. 
+ */ +#ifndef __ASSEMBLY__ + +static inline unsigned char readb(void __iomem *addr) +{ + unsigned int val; + int tmp; + + __asm__ __volatile__ ("cli %1;\n\t" + "NOP; NOP; SSYNC;\n\t" + "%0 = b [%2] (z);\n\t" + "sti %1;\n\t" + : "=d"(val), "=d"(tmp): "a"(addr) + ); + + return (unsigned char) val; +} + +static inline unsigned short readw(void __iomem *addr) +{ + unsigned int val; + int tmp; + + __asm__ __volatile__ ("cli %1;\n\t" + "NOP; NOP; SSYNC;\n\t" + "%0 = w [%2] (z);\n\t" + "sti %1;\n\t" + : "=d"(val), "=d"(tmp): "a"(addr) + ); + + return (unsigned short) val; +} + +static inline unsigned int readl(void __iomem *addr) +{ + unsigned int val; + int tmp; + + __asm__ __volatile__ ("cli %1;\n\t" + "NOP; NOP; SSYNC;\n\t" + "%0 = [%2];\n\t" + "sti %1;\n\t" + : "=d"(val), "=d"(tmp): "a"(addr) + ); + return val; +} + +#endif /* __ASSEMBLY__ */ + +#define writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b)) +#define writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b)) +#define writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b)) + +#define __raw_readb readb +#define __raw_readw readw +#define __raw_readl readl +#define __raw_writeb writeb +#define __raw_writew writew +#define __raw_writel writel +#define memset_io(a,b,c) memset((void *)(a),(b),(c)) +#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) +#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) + +#define inb(addr) readb(addr) +#define inw(addr) readw(addr) +#define inl(addr) readl(addr) +#define outb(x,addr) ((void) writeb(x,addr)) +#define outw(x,addr) ((void) writew(x,addr)) +#define outl(x,addr) ((void) writel(x,addr)) + +#define inb_p(addr) inb(addr) +#define inw_p(addr) inw(addr) +#define inl_p(addr) inl(addr) +#define outb_p(x,addr) outb(x,addr) +#define outw_p(x,addr) outw(x,addr) +#define outl_p(x,addr) outl(x,addr) + +#define ioread8_rep(a,d,c) insb(a,d,c) +#define ioread16_rep(a,d,c) insw(a,d,c) +#define ioread32_rep(a,d,c) insl(a,d,c) +#define iowrite8_rep(a,s,c) outsb(a,s,c) +#define iowrite16_rep(a,s,c) outsw(a,s,c) +#define iowrite32_rep(a,s,c) outsl(a,s,c) + +#define ioread8(X) readb(X) +#define ioread16(X) readw(X) +#define ioread32(X) readl(X) +#define iowrite8(val,X) writeb(val,X) +#define iowrite16(val,X) writew(val,X) +#define iowrite32(val,X) writel(val,X) + +#define IO_SPACE_LIMIT 0xffffffff + +/* Values for nocacheflag and cmode */ +#define IOMAP_NOCACHE_SER 1 + +#ifndef __ASSEMBLY__ + +extern void outsb(void __iomem *port, const void *addr, unsigned long count); +extern void outsw(void __iomem *port, const void *addr, unsigned long count); +extern void outsl(void __iomem *port, const void *addr, unsigned long count); + +extern void insb(const void __iomem *port, void *addr, unsigned long count); +extern void insw(const void __iomem *port, void *addr, unsigned long count); +extern void insl(const void __iomem *port, void *addr, unsigned long count); + +/* + * Map some physical address range into the kernel address space. + */ +static inline void __iomem *__ioremap(unsigned long physaddr, unsigned long size, + int cacheflag) +{ + return (void __iomem *)physaddr; +} + +/* + * Unmap a ioremap()ed region again + */ +static inline void iounmap(void *addr) +{ +} + +/* + * __iounmap unmaps nearly everything, so be careful + * it doesn't free currently pointer/page tables anymore but it + * wans't used anyway and might be added later. + */ +static inline void __iounmap(void *addr, unsigned long size) +{ +} + +/* + * Set new cache mode for some kernel address space. 
+ * The caller must push data for that range itself, if such data may already + * be in the cache. + */ +static inline void kernel_set_cachemode(void *addr, unsigned long size, + int cmode) +{ +} + +static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); +} +static inline void __iomem *ioremap_nocache(unsigned long physaddr, + unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); +} + +extern void blkfin_inv_cache_all(void); + +#endif + +#define ioport_map(port, nr) ((void __iomem*)(port)) +#define ioport_unmap(addr) + +#define dma_cache_inv(_start,_size) do { blkfin_inv_cache_all();} while (0) +#define dma_cache_wback(_start,_size) do { } while (0) +#define dma_cache_wback_inv(_start,_size) do { blkfin_inv_cache_all();} while (0) + +/* Pages to physical address... */ +#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT) +#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT) + +#define mm_ptov(vaddr) ((void *) (vaddr)) +#define mm_vtop(vaddr) ((unsigned long) (vaddr)) +#define phys_to_virt(vaddr) ((void *) (vaddr)) +#define virt_to_phys(vaddr) ((unsigned long) (vaddr)) + +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +#endif /* __KERNEL__ */ + +#endif /* _BFIN_IO_H */ diff --git a/include/asm-blackfin/ioctl.h b/include/asm-blackfin/ioctl.h new file mode 100644 index 000000000000..b279fe06dfe5 --- /dev/null +++ b/include/asm-blackfin/ioctl.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-blackfin/ioctls.h b/include/asm-blackfin/ioctls.h new file mode 100644 index 000000000000..8356204151db --- /dev/null +++ b/include/asm-blackfin/ioctls.h @@ -0,0 +1,82 @@ +#ifndef __ARCH_BFIN_IOCTLS_H__ +#define __ARCH_BFIN_IOCTLS_H__ + +#include + +/* 0x54 is just a magic number to make these relatively unique ('T') */ + +#define TCGETS 0x5401 +#define TCSETS 0x5402 +#define TCSETSW 0x5403 +#define TCSETSF 0x5404 +#define TCGETA 0x5405 +#define TCSETA 0x5406 +#define TCSETAW 0x5407 +#define TCSETAF 0x5408 +#define TCSBRK 0x5409 +#define TCXONC 0x540A +#define TCFLSH 0x540B +#define TIOCEXCL 0x540C +#define TIOCNXCL 0x540D +#define TIOCSCTTY 0x540E +#define TIOCGPGRP 0x540F +#define TIOCSPGRP 0x5410 +#define TIOCOUTQ 0x5411 +#define TIOCSTI 0x5412 +#define TIOCGWINSZ 0x5413 +#define TIOCSWINSZ 0x5414 +#define TIOCMGET 0x5415 +#define TIOCMBIS 0x5416 +#define TIOCMBIC 0x5417 +#define TIOCMSET 0x5418 +#define TIOCGSOFTCAR 0x5419 +#define TIOCSSOFTCAR 0x541A +#define FIONREAD 0x541B +#define TIOCINQ FIONREAD +#define TIOCLINUX 0x541C +#define TIOCCONS 0x541D +#define TIOCGSERIAL 0x541E +#define TIOCSSERIAL 0x541F +#define TIOCPKT 0x5420 +#define FIONBIO 0x5421 +#define TIOCNOTTY 0x5422 +#define TIOCSETD 0x5423 +#define TIOCGETD 0x5424 +#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ + +#define FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/
+#define FIOCLEX 0x5451
+#define FIOASYNC 0x5452
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+#define FIOQSIZE 0x545E
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#endif /* __ARCH_BFIN_IOCTLS_H__ */
diff --git a/include/asm-blackfin/ipc.h b/include/asm-blackfin/ipc.h
new file mode 100644
index 000000000000..a46e3d9c2a3f
--- /dev/null
+++ b/include/asm-blackfin/ipc.h
@@ -0,0 +1 @@
+#include
diff --git a/include/asm-blackfin/ipcbuf.h b/include/asm-blackfin/ipcbuf.h
new file mode 100644
index 000000000000..8f0899cdf4d2
--- /dev/null
+++ b/include/asm-blackfin/ipcbuf.h
@@ -0,0 +1,30 @@
+/* Changes originated from m68k version. Lineo Inc. May 2001 */
+
+#ifndef __BFIN_IPCBUF_H__
+#define __BFIN_IPCBUF_H__
+
+/*
+ * The user_ipc_perm structure for m68k architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm {
+	__kernel_key_t key;
+	__kernel_uid32_t uid;
+	__kernel_gid32_t gid;
+	__kernel_uid32_t cuid;
+	__kernel_gid32_t cgid;
+	__kernel_mode_t mode;
+	unsigned short __pad1;
+	unsigned short seq;
+	unsigned short __pad2;
+	unsigned long __unused1;
+	unsigned long __unused2;
+};
+
+#endif /* __BFIN_IPCBUF_H__ */
diff --git a/include/asm-blackfin/irq.h b/include/asm-blackfin/irq.h
new file mode 100644
index 000000000000..65480dab244e
--- /dev/null
+++ b/include/asm-blackfin/irq.h
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Changed by HuTao Apr 18, 2003
+ *
+ * Copyright was missing when I got the code so took from MIPS arch ...MaTed---
+ * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000, 2001 by Ralf Baechle
+ *
+ * Adapted for BlackFin (ADI) by Ted Ma
+ * Copyright (c) 2002 Arcturus Networks Inc. (www.arcturusnetworks.com)
+ * Copyright (c) 2002 Lineo, Inc.
+ */
+
+#ifndef _BFIN_IRQ_H_
+#define _BFIN_IRQ_H_
+
+#include
+#include
+
+/*******************************************************************************
+ ***** INTRODUCTION ***********
+ * On the Blackfin, the interrupt structure allows remapping of the hardware
+ * levels.
+ * - I'm going to assume that the H/W level is going to stay at the default
+ * settings. If someone wants to go through and abstract this out, feel free
+ * to mod the interrupt numbering scheme.
+ * - I'm abstracting the interrupts so that uClinux does not know anything
+ * about the H/W levels. If you want to change the H/W AND keep the abstracted
+ * levels that uClinux sees, you should be able to do most of it here.
+ * - I've left the "abstract" numbering sparce in case someone wants to pull the + * interrupts apart (just the TX/RX for the various devices) + *******************************************************************************/ + +/* SYS_IRQS and NR_IRQS are defined in */ + +/* + * Machine specific interrupt sources. + * + * Adding an interrupt service routine for a source with this bit + * set indicates a special machine specific interrupt source. + * The machine specific files define these sources. + * + * The IRQ_MACHSPEC bit is now gone - the only thing it did was to + * introduce unnecessary overhead. + * + * All interrupt handling is actually machine specific so it is better + * to use function pointers, as used by the Sparc port, and select the + * interrupt handling functions when initializing the kernel. This way + * we save some unnecessary overhead at run-time. + * 01/11/97 - Jes + */ + +extern void ack_bad_irq(unsigned int irq); + +static __inline__ int irq_canonicalize(int irq) +{ + return irq; +} + +/* count of spurious interrupts */ +/* extern volatile unsigned int num_spurious; */ + +#ifndef NO_IRQ +#define NO_IRQ ((unsigned int)(-1)) +#endif + +#endif /* _BFIN_IRQ_H_ */ diff --git a/include/asm-blackfin/irq_handler.h b/include/asm-blackfin/irq_handler.h new file mode 100644 index 000000000000..d830f0a49a1c --- /dev/null +++ b/include/asm-blackfin/irq_handler.h @@ -0,0 +1,22 @@ +#ifndef _IRQ_HANDLER_H +#define _IRQ_HANDLER_H + +/* BASE LEVEL interrupt handler routines */ +asmlinkage void evt_emulation(void); +asmlinkage void evt_exception(void); +asmlinkage void trap(void); +asmlinkage void evt_ivhw(void); +asmlinkage void evt_timer(void); +asmlinkage void evt_evt2(void); +asmlinkage void evt_evt7(void); +asmlinkage void evt_evt8(void); +asmlinkage void evt_evt9(void); +asmlinkage void evt_evt10(void); +asmlinkage void evt_evt11(void); +asmlinkage void evt_evt12(void); +asmlinkage void evt_evt13(void); +asmlinkage void evt_soft_int1(void); +asmlinkage void evt_system_call(void); +asmlinkage void init_exception_buff(void); + +#endif diff --git a/include/asm-blackfin/irq_regs.h b/include/asm-blackfin/irq_regs.h new file mode 100644 index 000000000000..3dd9c0b70270 --- /dev/null +++ b/include/asm-blackfin/irq_regs.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-blackfin/kdebug.h b/include/asm-blackfin/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-blackfin/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-blackfin/kmap_types.h b/include/asm-blackfin/kmap_types.h new file mode 100644 index 000000000000..e215f7104974 --- /dev/null +++ b/include/asm-blackfin/kmap_types.h @@ -0,0 +1,21 @@ +#ifndef _ASM_KMAP_TYPES_H +#define _ASM_KMAP_TYPES_H + +enum km_type { + KM_BOUNCE_READ, + KM_SKB_SUNRPC_DATA, + KM_SKB_DATA_SOFTIRQ, + KM_USER0, + KM_USER1, + KM_BIO_SRC_IRQ, + KM_BIO_DST_IRQ, + KM_PTE0, + KM_PTE1, + KM_IRQ0, + KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, + KM_TYPE_NR +}; + +#endif diff --git a/include/asm-blackfin/l1layout.h b/include/asm-blackfin/l1layout.h new file mode 100644 index 000000000000..c13ded777828 --- /dev/null +++ b/include/asm-blackfin/l1layout.h @@ -0,0 +1,31 @@ +/* + * l1layout.h + * Defines a layout of L1 scratchpad memory that userspace can rely on. + */ + +#ifndef _L1LAYOUT_H_ +#define _L1LAYOUT_H_ + +#include + +#ifndef __ASSEMBLY__ + +/* Data that is "mapped" into the process VM at the start of the L1 scratch + memory, so that each process can access it at a fixed address. Used for + stack checking. 
*/ +struct l1_scratch_task_info +{ + /* Points to the start of the stack. */ + void *stack_start; + /* Not updated by the kernel; a user process can modify this to + keep track of the lowest address of the stack pointer during its + runtime. */ + void *lowest_sp; +}; + +/* A pointer to the structure in memory. */ +#define L1_SCRATCH_TASK_INFO ((struct l1_scratch_task_info *)L1_SCRATCH_START) + +#endif + +#endif diff --git a/include/asm-blackfin/linkage.h b/include/asm-blackfin/linkage.h new file mode 100644 index 000000000000..5a822bb790f7 --- /dev/null +++ b/include/asm-blackfin/linkage.h @@ -0,0 +1,7 @@ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#define __ALIGN .align 4 +#define __ALIGN_STR ".align 4" + +#endif diff --git a/include/asm-blackfin/local.h b/include/asm-blackfin/local.h new file mode 100644 index 000000000000..75afffbc6421 --- /dev/null +++ b/include/asm-blackfin/local.h @@ -0,0 +1,6 @@ +#ifndef __BLACKFIN_LOCAL_H +#define __BLACKFIN_LOCAL_H + +#include + +#endif /* __BLACKFIN_LOCAL_H */ diff --git a/include/asm-blackfin/mach-bf533/anomaly.h b/include/asm-blackfin/mach-bf533/anomaly.h new file mode 100644 index 000000000000..a84d3909345e --- /dev/null +++ b/include/asm-blackfin/mach-bf533/anomaly.h @@ -0,0 +1,175 @@ +/* + * File: include/asm-blackfin/mach-bf533/anomaly.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* This file shoule be up to date with: + * - Revision U, May 17, 2006; ADSP-BF533 Blackfin Processor Anomaly List + * - Revision Y, May 17, 2006; ADSP-BF532 Blackfin Processor Anomaly List + * - Revision T, May 17, 2006; ADSP-BF531 Blackfin Processor Anomaly List + */ + +#ifndef _MACH_ANOMALY_H_ +#define _MACH_ANOMALY_H_ + +/* We do not support 0.1 or 0.2 silicon - sorry */ +#if (defined(CONFIG_BF_REV_0_1) || defined(CONFIG_BF_REV_0_2)) +#error Kernel will not work on BF533 Version 0.1 or 0.2 +#endif + +/* Issues that are common to 0.5, 0.4, and 0.3 silicon */ +#if (defined(CONFIG_BF_REV_0_5) || defined(CONFIG_BF_REV_0_4) || defined(CONFIG_BF_REV_0_3)) +#define ANOMALY_05000074 /* A multi issue instruction with dsp32shiftimm in + slot1 and store of a P register in slot 2 is not + supported */ +#define ANOMALY_05000105 /* Watchpoint Status Register (WPSTAT) bits are set on + every corresponding match */ +#define ANOMALY_05000119 /* DMA_RUN bit is not valid after a Peripheral Receive + Channel DMA stops */ +#define ANOMALY_05000122 /* Rx.H can not be used to access 16-bit System MMR + registers. 
*/ +#define ANOMALY_05000166 /* PPI Data Lengths Between 8 and 16 do not zero out + upper bits*/ +#define ANOMALY_05000167 /* Turning Serial Ports on With External Frame Syncs */ +#define ANOMALY_05000180 /* PPI_DELAY not functional in PPI modes with 0 frame + syncs */ +#define ANOMALY_05000208 /* VSTAT status bit in PLL_STAT register is not + functional */ +#define ANOMALY_05000219 /* NMI event at boot time results in unpredictable + state */ +#define ANOMALY_05000229 /* SPI Slave Boot Mode modifies registers */ +#define ANOMALY_05000272 /* Certain data cache write through modes fail for + VDDint <=0.9V */ +#define ANOMALY_05000273 /* Writes to Synchronous SDRAM memory may be lost */ +#define ANOMALY_05000277 /* Writes to a flag data register one SCLK cycle after + an edge is detected may clear interrupt */ +#define ANOMALY_05000278 /* Disabling Peripherals with DMA running may cause + DMA system instability */ +#define ANOMALY_05000281 /* False Hardware Error Exception when ISR context is + not restored */ +#define ANOMALY_05000282 /* Memory DMA corruption with 32-bit data and traffic + control */ +#define ANOMALY_05000283 /* A system MMR write is stalled indefinitely when + killed in a particular stage*/ +#define ANOMALY_05000312 /* Errors when SSYNC, CSYNC, or loads to LT, LB and LC + registers are interrupted */ +#define ANOMALY_05000311 /* Erroneous flag pin operations under specific sequences*/ + +#endif + +/* These issues only occur on 0.3 or 0.4 BF533 */ +#if (defined(CONFIG_BF_REV_0_4) || defined(CONFIG_BF_REV_0_3)) +#define ANOMALY_05000099 /* UART Line Status Register (UART_LSR) bits are not + updated at the same time. */ +#define ANOMALY_05000158 /* Boot fails when data cache enabled: Data from a Data + Cache Fill can be corrupted after or during + Instruction DMA if certain core stalls exist */ +#define ANOMALY_05000179 /* PPI_COUNT cannot be programmed to 0 in General + Purpose TX or RX modes */ +#define ANOMALY_05000198 /* Failing SYSTEM MMR accesses when stalled by + preceding memory read */ +#define ANOMALY_05000200 /* SPORT TFS and DT are incorrectly driven during + inactive channels in certain conditions */ +#define ANOMALY_05000202 /* Possible infinite stall with specific dual dag + situation */ +#define ANOMALY_05000215 /* UART TX Interrupt masked erroneously */ +#define ANOMALY_05000225 /* Incorrect pulse-width of UART start-bit */ +#define ANOMALY_05000227 /* Scratchpad memory bank reads may return incorrect + data*/ +#define ANOMALY_05000230 /* UART Receiver is less robust against Baudrate + Differences in certain Conditions */ +#define ANOMALY_05000231 /* UART STB bit incorrectly affects receiver setting */ +#define ANOMALY_05000242 /* DF bit in PLL_CTL register does not respond to + hardware reset */ +#define ANOMALY_05000244 /* With instruction cache enabled, a CSYNC or SSYNC or + IDLE around a Change of Control causes + unpredictable results */ +#define ANOMALY_05000245 /* Spurious Hardware Error from an access in the + shadow of a conditional branch */ +#define ANOMALY_05000246 /* Data CPLB's should prevent spurious hardware + errors */ +#define ANOMALY_05000253 /* Maximum external clock speed for Timers */ +#define ANOMALY_05000255 /* Entering Hibernate Mode with RTC Seconds event + interrupt not functional */ +#define ANOMALY_05000257 /* An interrupt or exception during short Hardware + loops may cause the instruction fetch unit to + malfunction */ +#define ANOMALY_05000258 /* Instruction Cache is corrupted when bit 9 and 12 of + the ICPLB Data registers 
differ */ +#define ANOMALY_05000260 /* ICPLB_STATUS MMR register may be corrupted */ +#define ANOMALY_05000261 /* DCPLB_FAULT_ADDR MMR register may be corrupted */ +#define ANOMALY_05000262 /* Stores to data cache may be lost */ +#define ANOMALY_05000263 /* Hardware loop corrupted when taking an ICPLB exception */ +#define ANOMALY_05000264 /* A Sync instruction (CSYNC, SSYNC) or an IDLE + instruction will cause an infinite stall in the + second to last instruction in a hardware loop */ +#define ANOMALY_05000265 /* Sensitivity to noise with slow input edge rates on + SPORT external receive and transmit clocks. */ +#define ANOMALY_05000269 /* High I/O activity causes the output voltage of the + internal voltage regulator (VDDint) to increase. */ +#define ANOMALY_05000270 /* High I/O activity causes the output voltage of the + internal voltage regulator (VDDint) to decrease */ +#endif + +/* These issues are only on 0.4 silicon */ +#if (defined(CONFIG_BF_REV_0_4)) +#define ANOMALY_05000234 /* Incorrect Revision Number in DSPID Register */ +#define ANOMALY_05000250 /* Incorrect Bit-Shift of Data Word in Multichannel + (TDM) */ +#endif + +/* These issues are only on 0.3 silicon */ +#if defined(CONFIG_BF_REV_0_3) +#define ANOMALY_05000183 /* Timer Pin limitations for PPI TX Modes with + External Frame Syncs */ +#define ANOMALY_05000189 /* False Protection Exceptions caused by Speculative + Instruction or Data Fetches, or by Fetches at the + boundary of reserved memory space */ +#define ANOMALY_05000193 /* False Flag Pin Interrupts on Edge Sensitive Inputs + when polarity setting is changed */ +#define ANOMALY_05000194 /* Sport Restarting in specific modes may cause data + corruption */ +#define ANOMALY_05000199 /* DMA current address shows wrong value during carry + fix */ +#define ANOMALY_05000201 /* Receive frame sync not ignored during active + frames in sport MCM */ +#define ANOMALY_05000203 /* Specific sequence that can cause DMA error or DMA + stopping */ +#if defined(CONFIG_BF533) +#define ANOMALY_05000204 /* Incorrect data read with write-through cache and + allocate cache lines on reads only mode */ +#endif /* CONFIG_BF533 */ +#define ANOMALY_05000207 /* Recovery from "brown-out" condition */ +#define ANOMALY_05000209 /* Speed-Path in computational unit affects certain + instructions */ +#define ANOMALY_05000233 /* PPI_FS3 is not driven in 2 or 3 internal Frame + Sync Transmit Mode */ +#define ANOMALY_05000271 /* Spontaneous reset of Internal Voltage Regulator */ +#endif + +#endif /* _MACH_ANOMALY_H_ */ diff --git a/include/asm-blackfin/mach-bf533/bf533.h b/include/asm-blackfin/mach-bf533/bf533.h new file mode 100644 index 000000000000..185fc1284858 --- /dev/null +++ b/include/asm-blackfin/mach-bf533/bf533.h @@ -0,0 +1,306 @@ +/* + * File: include/asm-blackfin/mach-bf533/bf533.h + * Based on: + * Author: + * + * Created: + * Description: SYSTEM MMR REGISTER AND MEMORY MAP FOR ADSP-BF561 + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __MACH_BF533_H__ +#define __MACH_BF533_H__ + +#define SUPPORTED_REVID 2 + +#define OFFSET_(x) ((x) & 0x0000FFFF) + +/*some misc defines*/ +#define IMASK_IVG15 0x8000 +#define IMASK_IVG14 0x4000 +#define IMASK_IVG13 0x2000 +#define IMASK_IVG12 0x1000 + +#define IMASK_IVG11 0x0800 +#define IMASK_IVG10 0x0400 +#define IMASK_IVG9 0x0200 +#define IMASK_IVG8 0x0100 + +#define IMASK_IVG7 0x0080 +#define IMASK_IVGTMR 0x0040 +#define IMASK_IVGHW 0x0020 + +/***************************/ + + +#define BLKFIN_DSUBBANKS 4 +#define BLKFIN_DWAYS 2 +#define BLKFIN_DLINES 64 +#define BLKFIN_ISUBBANKS 4 +#define BLKFIN_IWAYS 4 +#define BLKFIN_ILINES 32 + +#define WAY0_L 0x1 +#define WAY1_L 0x2 +#define WAY01_L 0x3 +#define WAY2_L 0x4 +#define WAY02_L 0x5 +#define WAY12_L 0x6 +#define WAY012_L 0x7 + +#define WAY3_L 0x8 +#define WAY03_L 0x9 +#define WAY13_L 0xA +#define WAY013_L 0xB + +#define WAY32_L 0xC +#define WAY320_L 0xD +#define WAY321_L 0xE +#define WAYALL_L 0xF + +#define DMC_ENABLE (2<<2) /*yes, 2, not 1 */ + +/* IAR0 BIT FIELDS*/ +#define RTC_ERROR_BIT 0x0FFFFFFF +#define UART_ERROR_BIT 0xF0FFFFFF +#define SPORT1_ERROR_BIT 0xFF0FFFFF +#define SPI_ERROR_BIT 0xFFF0FFFF +#define SPORT0_ERROR_BIT 0xFFFF0FFF +#define PPI_ERROR_BIT 0xFFFFF0FF +#define DMA_ERROR_BIT 0xFFFFFF0F +#define PLLWAKE_ERROR_BIT 0xFFFFFFFF + +/* IAR1 BIT FIELDS*/ +#define DMA7_UARTTX_BIT 0x0FFFFFFF +#define DMA6_UARTRX_BIT 0xF0FFFFFF +#define DMA5_SPI_BIT 0xFF0FFFFF +#define DMA4_SPORT1TX_BIT 0xFFF0FFFF +#define DMA3_SPORT1RX_BIT 0xFFFF0FFF +#define DMA2_SPORT0TX_BIT 0xFFFFF0FF +#define DMA1_SPORT0RX_BIT 0xFFFFFF0F +#define DMA0_PPI_BIT 0xFFFFFFFF + +/* IAR2 BIT FIELDS*/ +#define WDTIMER_BIT 0x0FFFFFFF +#define MEMDMA1_BIT 0xF0FFFFFF +#define MEMDMA0_BIT 0xFF0FFFFF +#define PFB_BIT 0xFFF0FFFF +#define PFA_BIT 0xFFFF0FFF +#define TIMER2_BIT 0xFFFFF0FF +#define TIMER1_BIT 0xFFFFFF0F +#define TIMER0_BIT 0xFFFFFFFF + +/********************************* EBIU Settings ************************************/ +#define AMBCTL0VAL ((CONFIG_BANK_1 << 16) | CONFIG_BANK_0) +#define AMBCTL1VAL ((CONFIG_BANK_3 << 16) | CONFIG_BANK_2) + +#ifdef CONFIG_C_AMBEN_ALL +#define V_AMBEN AMBEN_ALL +#endif +#ifdef CONFIG_C_AMBEN +#define V_AMBEN 0x0 +#endif +#ifdef CONFIG_C_AMBEN_B0 +#define V_AMBEN AMBEN_B0 +#endif +#ifdef CONFIG_C_AMBEN_B0_B1 +#define V_AMBEN AMBEN_B0_B1 +#endif +#ifdef CONFIG_C_AMBEN_B0_B1_B2 +#define V_AMBEN AMBEN_B0_B1_B2 +#endif +#ifdef CONFIG_C_AMCKEN +#define V_AMCKEN AMCKEN +#else +#define V_AMCKEN 0x0 +#endif +#ifdef CONFIG_C_CDPRIO +#define V_CDPRIO 0x100 +#else +#define V_CDPRIO 0x0 +#endif + +#define AMGCTLVAL (V_AMBEN | V_AMCKEN | V_CDPRIO) + +#define MAX_VC 650000000 +#define MIN_VC 50000000 + +#ifdef CONFIG_BFIN_KERNEL_CLOCK +/********************************PLL Settings **************************************/ +#if (CONFIG_VCO_MULT < 0) +#error "VCO Multiplier is less than 0. Please select a different value" +#endif + +#if (CONFIG_VCO_MULT == 0) +#error "VCO Multiplier should be greater than 0. Please select a different value" +#endif + +#if (CONFIG_VCO_MULT > 64) +#error "VCO Multiplier is more than 64. 
Please select a different value" +#endif + +#ifndef CONFIG_CLKIN_HALF +#define CONFIG_VCO_HZ (CONFIG_CLKIN_HZ * CONFIG_VCO_MULT) +#else +#define CONFIG_VCO_HZ ((CONFIG_CLKIN_HZ * CONFIG_VCO_MULT)/2) +#endif + +#ifndef CONFIG_PLL_BYPASS +#define CONFIG_CCLK_HZ (CONFIG_VCO_HZ/CONFIG_CCLK_DIV) +#define CONFIG_SCLK_HZ (CONFIG_VCO_HZ/CONFIG_SCLK_DIV) +#else +#define CONFIG_CCLK_HZ CONFIG_CLKIN_HZ +#define CONFIG_SCLK_HZ CONFIG_CLKIN_HZ +#endif + +#if (CONFIG_SCLK_DIV < 1) +#error "SCLK DIV cannot be less than 1 or more than 15. Please select a proper value" +#endif + +#if (CONFIG_SCLK_DIV > 15) +#error "SCLK DIV cannot be less than 1 or more than 15. Please select a proper value" +#endif + +#if (CONFIG_CCLK_DIV != 1) +#if (CONFIG_CCLK_DIV != 2) +#if (CONFIG_CCLK_DIV != 4) +#if (CONFIG_CCLK_DIV != 8) +#error "CCLK DIV can be 1,2,4 or 8 only. Please select a proper value" +#endif +#endif +#endif +#endif + +#if (CONFIG_VCO_HZ > MAX_VC) +#error "VCO selected is more than maximum value. Please change the VCO multipler" +#endif + +#if (CONFIG_SCLK_HZ > 133000000) +#error "Sclk value selected is more than maximum. Please select a proper value for SCLK multiplier" +#endif + +#if (CONFIG_SCLK_HZ < 27000000) +#error "Sclk value selected is less than minimum. Please select a proper value for SCLK multiplier" +#endif + +#if (CONFIG_SCLK_HZ > CONFIG_CCLK_HZ) +#if (CONFIG_SCLK_HZ != CONFIG_CLKIN_HZ) +#if (CONFIG_CCLK_HZ != CONFIG_CLKIN_HZ) +#error "Please select sclk less than cclk" +#endif +#endif +#endif + +#if (CONFIG_CCLK_DIV == 1) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV1 +#endif +#if (CONFIG_CCLK_DIV == 2) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV2 +#endif +#if (CONFIG_CCLK_DIV == 4) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV4 +#endif +#if (CONFIG_CCLK_DIV == 8) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV8 +#endif +#ifndef CONFIG_CCLK_ACT_DIV +#define CONFIG_CCLK_ACT_DIV CONFIG_CCLK_DIV_not_defined_properly +#endif + +#if defined(ANOMALY_05000273) && (CONFIG_CCLK_DIV == 1) +#error ANOMALY 05000273, please make sure CCLK is at least 2x SCLK +#endif + +#endif /* CONFIG_BFIN_KERNEL_CLOCK */ + +#ifdef CONFIG_BF533 +#define CPU "BF533" +#define CPUID 0x027a5000 +#endif +#ifdef CONFIG_BF532 +#define CPU "BF532" +#define CPUID 0x0275A000 +#endif +#ifdef CONFIG_BF531 +#define CPU "BF531" +#define CPUID 0x027a5000 +#endif +#ifndef CPU +#define CPU "UNKNOWN" +#define CPUID 0x0 +#endif + +#if (CONFIG_MEM_SIZE % 4) +#error "SDRAM mem size must be multible of 4MB" +#endif + +#define SDRAM_IGENERIC (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | CPLB_PORTPRIO) +#define SDRAM_IKERNEL (SDRAM_IGENERIC | CPLB_LOCK) +#define L1_IMEMORY ( CPLB_USER_RD | CPLB_VALID | CPLB_LOCK) +#define SDRAM_INON_CHBL ( CPLB_USER_RD | CPLB_VALID) + +/*Use the menuconfig cache policy here - CONFIG_BLKFIN_WT/CONFIG_BLKFIN_WB*/ + +#define ANOMALY_05000158_WORKAROUND 0x200 +#ifdef CONFIG_BLKFIN_WB /*Write Back Policy */ +#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_DIRTY \ + | CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND) +#else /*Write Through */ +#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_DIRTY \ + | CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND) +#endif + +#define L1_DMEMORY (CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY) +#define SDRAM_DNON_CHBL (CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_DIRTY) +#define SDRAM_EBIU (CPLB_SUPV_WR | CPLB_USER_WR 
| CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_DIRTY)
+#define SDRAM_OOPS (CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY)
+
+#define SIZE_1K 0x00000400 /* 1K */
+#define SIZE_4K 0x00001000 /* 4K */
+#define SIZE_1M 0x00100000 /* 1M */
+#define SIZE_4M 0x00400000 /* 4M */
+
+#define MAX_CPLBS (16 * 2)
+
+/*
+* Number of required data CPLB switchtable entries
+* MEMSIZE / 4 (we mostly install 4M page size CPLBs)
+* approx 16 for smaller 1MB page size CPLBs for alignment purposes
+* 1 for L1 Data Memory
+* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
+* 1 for ASYNC Memory
+*/
+
+
+#define MAX_SWITCH_D_CPLBS (((CONFIG_MEM_SIZE / 4) + 16 + 1 + 1 + 1) * 2)
+
+/*
+* Number of required instruction CPLB switchtable entries
+* MEMSIZE / 4 (we mostly install 4M page size CPLBs)
+* approx 12 for smaller 1MB page size CPLBs for alignment purposes
+* 1 for L1 Instruction Memory
+* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
+*/
+
+#define MAX_SWITCH_I_CPLBS (((CONFIG_MEM_SIZE / 4) + 12 + 1 + 1) * 2)
+
+#endif /* __MACH_BF533_H__ */
diff --git a/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h
new file mode 100644
index 000000000000..23bf76aa3451
--- /dev/null
+++ b/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h
@@ -0,0 +1,108 @@
+#include
+#include
+
+#define NR_PORTS 1
+
+#define OFFSET_THR 0x00 /* Transmit Holding register */
+#define OFFSET_RBR 0x00 /* Receive Buffer register */
+#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
+#define OFFSET_IER 0x04 /* Interrupt Enable Register */
+#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
+#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
+#define OFFSET_LCR 0x0C /* Line Control Register */
+#define OFFSET_MCR 0x10 /* Modem Control Register */
+#define OFFSET_LSR 0x14 /* Line Status Register */
+#define OFFSET_MSR 0x18 /* Modem Status Register */
+#define OFFSET_SCR 0x1C /* SCR Scratch Register */
+#define OFFSET_GCTL 0x24 /* Global Control Register */
+
+#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
+#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
+#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
+#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
+#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
+#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
+#define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR))
+#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
+
+#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v)
+#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
+#define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v)
+#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
+#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
+#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
+
+#ifdef CONFIG_BFIN_UART0_CTSRTS
+# define CONFIG_SERIAL_BFIN_CTSRTS
+# ifndef CONFIG_UART0_CTS_PIN
+# define CONFIG_UART0_CTS_PIN -1
+# endif
+# ifndef CONFIG_UART0_RTS_PIN
+# define CONFIG_UART0_RTS_PIN -1
+# endif
+#endif
+
+struct bfin_serial_port {
+	struct uart_port port;
+	unsigned int old_status;
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	int tx_done;
+	int tx_count;
+	struct circ_buf rx_dma_buf;
+	struct timer_list
rx_dma_timer; + int rx_dma_nrows; + unsigned int tx_dma_channel; + unsigned int rx_dma_channel; + struct work_struct tx_dma_workqueue; +#else + struct work_struct cts_workqueue; +#endif +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + int cts_pin; + int rts_pin; +#endif +}; + +struct bfin_serial_port bfin_serial_ports[NR_PORTS]; +struct bfin_serial_res { + unsigned long uart_base_addr; + int uart_irq; +#ifdef CONFIG_SERIAL_BFIN_DMA + unsigned int uart_tx_dma_channel; + unsigned int uart_rx_dma_channel; +#endif +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + int uart_cts_pin; + int uart_rts_pin; +#endif +}; + +struct bfin_serial_res bfin_serial_resource[] = { + 0xFFC00400, + IRQ_UART_RX, +#ifdef CONFIG_SERIAL_BFIN_DMA + CH_UART_TX, + CH_UART_RX, +#endif +#ifdef CONFIG_BFIN_UART0_CTSRTS + CONFIG_UART0_CTS_PIN, + CONFIG_UART0_RTS_PIN, +#endif +}; + + +int nr_ports = NR_PORTS; +static void bfin_serial_hw_init(struct bfin_serial_port *uart) +{ + +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + if (uart->cts_pin >= 0) { + gpio_request(uart->cts_pin, NULL); + gpio_direction_input(uart->cts_pin); + } + if (uart->rts_pin >= 0) { + gpio_request(uart->rts_pin, NULL); + gpio_direction_input(uart->rts_pin); + } +#endif +} diff --git a/include/asm-blackfin/mach-bf533/blackfin.h b/include/asm-blackfin/mach-bf533/blackfin.h new file mode 100644 index 000000000000..e4384491e972 --- /dev/null +++ b/include/asm-blackfin/mach-bf533/blackfin.h @@ -0,0 +1,45 @@ +/* + * File: include/asm-blackfin/mach-bf533/blackfin.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _MACH_BLACKFIN_H_ +#define _MACH_BLACKFIN_H_ + +#define BF533_FAMILY + +#include "bf533.h" +#include "mem_map.h" +#include "defBF532.h" +#include "anomaly.h" + +#if !(defined(__ASSEMBLY__) || defined(ASSEMBLY)) +#include "cdefBF532.h" +#endif + +#endif /* _MACH_BLACKFIN_H_ */ diff --git a/include/asm-blackfin/mach-bf533/cdefBF532.h b/include/asm-blackfin/mach-bf533/cdefBF532.h new file mode 100644 index 000000000000..1d7c494ceb64 --- /dev/null +++ b/include/asm-blackfin/mach-bf533/cdefBF532.h @@ -0,0 +1,706 @@ +/* + * File: include/asm-blackfin/mach-bf533/cdefBF532.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _CDEF_BF532_H +#define _CDEF_BF532_H +/* +#if !defined(__ADSPLPBLACKFIN__) +#warning cdefBF532.h should only be included for 532 compatible chips. +#endif +*/ +/*include all Core registers and bit definitions*/ +#include "defBF532.h" + +/*include core specific register pointer definitions*/ +#include + +#include + +/* Clock and System Control (0xFFC0 0400-0xFFC0 07FF) */ +#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL) +#define bfin_write_PLL_CTL(val) bfin_write16(PLL_CTL,val) +#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT) +#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT,val) +#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT) +#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT,val) +#define bfin_read_CHIPID() bfin_read32(CHIPID) +#define bfin_read_SWRST() bfin_read16(SWRST) +#define bfin_write_SWRST(val) bfin_write16(SWRST,val) +#define bfin_read_SYSCR() bfin_read16(SYSCR) +#define bfin_write_SYSCR(val) bfin_write16(SYSCR,val) +#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV) +#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV,val) +#define bfin_read_VR_CTL() bfin_read16(VR_CTL) +/* Writing to VR_CTL initiates a PLL relock sequence. */ +static __inline__ void bfin_write_VR_CTL(unsigned int val) +{ + unsigned long flags, iwr; + + bfin_write16(VR_CTL, val); + __builtin_bfin_ssync(); + /* Enable the PLL Wakeup bit in SIC IWR */ + iwr = bfin_read32(SIC_IWR); + /* Only allow PPL Wakeup) */ + bfin_write32(SIC_IWR, IWR_ENABLE(0)); + local_irq_save(flags); + asm("IDLE;"); + local_irq_restore(flags); + bfin_write32(SIC_IWR, iwr); +} + +/* System Interrupt Controller (0xFFC0 0C00-0xFFC0 0FFF) */ +#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0) +#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0,val) +#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1) +#define bfin_write_SIC_IAR1(val) bfin_write32(SIC_IAR1,val) +#define bfin_read_SIC_IAR2() bfin_read32(SIC_IAR2) +#define bfin_write_SIC_IAR2(val) bfin_write32(SIC_IAR2,val) +#define bfin_read_SIC_IAR3() bfin_read32(SIC_IAR3) +#define bfin_write_SIC_IAR3(val) bfin_write32(SIC_IAR3,val) +#define bfin_read_SIC_IMASK() bfin_read32(SIC_IMASK) +#define bfin_write_SIC_IMASK(val) bfin_write32(SIC_IMASK,val) +#define bfin_read_SIC_ISR() bfin_read32(SIC_ISR) +#define bfin_write_SIC_ISR(val) bfin_write32(SIC_ISR,val) +#define bfin_read_SIC_IWR() bfin_read32(SIC_IWR) +#define bfin_write_SIC_IWR(val) bfin_write32(SIC_IWR,val) + +/* Watchdog Timer (0xFFC0 1000-0xFFC0 13FF) */ +#define bfin_read_WDOG_CTL() bfin_read16(WDOG_CTL) +#define bfin_write_WDOG_CTL(val) bfin_write16(WDOG_CTL,val) +#define bfin_read_WDOG_CNT() bfin_read32(WDOG_CNT) +#define bfin_write_WDOG_CNT(val) bfin_write32(WDOG_CNT,val) +#define bfin_read_WDOG_STAT() bfin_read32(WDOG_STAT) +#define bfin_write_WDOG_STAT(val) bfin_write32(WDOG_STAT,val) + +/* Real Time Clock (0xFFC0 1400-0xFFC0 17FF) */ +#define bfin_read_RTC_STAT() bfin_read32(RTC_STAT) +#define bfin_write_RTC_STAT(val) bfin_write32(RTC_STAT,val) +#define bfin_read_RTC_ICTL() bfin_read16(RTC_ICTL) +#define bfin_write_RTC_ICTL(val) bfin_write16(RTC_ICTL,val) +#define bfin_read_RTC_ISTAT() bfin_read16(RTC_ISTAT) +#define bfin_write_RTC_ISTAT(val) bfin_write16(RTC_ISTAT,val) +#define 
bfin_read_RTC_SWCNT() bfin_read16(RTC_SWCNT) +#define bfin_write_RTC_SWCNT(val) bfin_write16(RTC_SWCNT,val) +#define bfin_read_RTC_ALARM() bfin_read32(RTC_ALARM) +#define bfin_write_RTC_ALARM(val) bfin_write32(RTC_ALARM,val) +#define bfin_read_RTC_FAST() bfin_read16(RTC_FAST) +#define bfin_write_RTC_FAST(val) bfin_write16(RTC_FAST,val) +#define bfin_read_RTC_PREN() bfin_read16(RTC_PREN) +#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN,val) + +/* General Purpose IO (0xFFC0 2400-0xFFC0 27FF) */ +#define bfin_read_FIO_DIR() bfin_read16(FIO_DIR) +#define bfin_write_FIO_DIR(val) bfin_write16(FIO_DIR,val) +#define bfin_read_FIO_FLAG_C() bfin_read16(FIO_FLAG_C) +#define bfin_write_FIO_FLAG_C(val) bfin_write16(FIO_FLAG_C,val) +#define bfin_read_FIO_FLAG_S() bfin_read16(FIO_FLAG_S) +#define bfin_write_FIO_FLAG_S(val) bfin_write16(FIO_FLAG_S,val) +#define bfin_read_FIO_MASKA_C() bfin_read16(FIO_MASKA_C) +#define bfin_write_FIO_MASKA_C(val) bfin_write16(FIO_MASKA_C,val) +#define bfin_read_FIO_MASKA_S() bfin_read16(FIO_MASKA_S) +#define bfin_write_FIO_MASKA_S(val) bfin_write16(FIO_MASKA_S,val) +#define bfin_read_FIO_MASKB_C() bfin_read16(FIO_MASKB_C) +#define bfin_write_FIO_MASKB_C(val) bfin_write16(FIO_MASKB_C,val) +#define bfin_read_FIO_MASKB_S() bfin_read16(FIO_MASKB_S) +#define bfin_write_FIO_MASKB_S(val) bfin_write16(FIO_MASKB_S,val) +#define bfin_read_FIO_POLAR() bfin_read16(FIO_POLAR) +#define bfin_write_FIO_POLAR(val) bfin_write16(FIO_POLAR,val) +#define bfin_read_FIO_EDGE() bfin_read16(FIO_EDGE) +#define bfin_write_FIO_EDGE(val) bfin_write16(FIO_EDGE,val) +#define bfin_read_FIO_BOTH() bfin_read16(FIO_BOTH) +#define bfin_write_FIO_BOTH(val) bfin_write16(FIO_BOTH,val) +#define bfin_read_FIO_INEN() bfin_read16(FIO_INEN) +#define bfin_write_FIO_INEN(val) bfin_write16(FIO_INEN,val) +#define bfin_read_FIO_FLAG_D() bfin_read16(FIO_FLAG_D) +#define bfin_write_FIO_FLAG_D(val) bfin_write16(FIO_FLAG_D,val) +#define bfin_read_FIO_FLAG_T() bfin_read16(FIO_FLAG_T) +#define bfin_write_FIO_FLAG_T(val) bfin_write16(FIO_FLAG_T,val) +#define bfin_read_FIO_MASKA_D() bfin_read16(FIO_MASKA_D) +#define bfin_write_FIO_MASKA_D(val) bfin_write16(FIO_MASKA_D,val) +#define bfin_read_FIO_MASKA_T() bfin_read16(FIO_MASKA_T) +#define bfin_write_FIO_MASKA_T(val) bfin_write16(FIO_MASKA_T,val) +#define bfin_read_FIO_MASKB_D() bfin_read16(FIO_MASKB_D) +#define bfin_write_FIO_MASKB_D(val) bfin_write16(FIO_MASKB_D,val) +#define bfin_read_FIO_MASKB_T() bfin_read16(FIO_MASKB_T) +#define bfin_write_FIO_MASKB_T(val) bfin_write16(FIO_MASKB_T,val) + +/* DMA Traffic controls */ +#define bfin_read_DMA_TCPER() bfin_read16(DMA_TCPER) +#define bfin_write_DMA_TCPER(val) bfin_write16(DMA_TCPER,val) +#define bfin_read_DMA_TCCNT() bfin_read16(DMA_TCCNT) +#define bfin_write_DMA_TCCNT(val) bfin_write16(DMA_TCCNT,val) +#define bfin_read_DMA_TC_PER() bfin_read16(DMA_TC_PER) +#define bfin_write_DMA_TC_PER(val) bfin_write16(DMA_TC_PER,val) +#define bfin_read_DMA_TC_CNT() bfin_read16(DMA_TC_CNT) +#define bfin_write_DMA_TC_CNT(val) bfin_write16(DMA_TC_CNT,val) + +/* DMA Controller */ +#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG) +#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG,val) +#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_read32(DMA0_NEXT_DESC_PTR) +#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_write32(DMA0_NEXT_DESC_PTR,val) +#define bfin_read_DMA0_START_ADDR() bfin_read32(DMA0_START_ADDR) +#define bfin_write_DMA0_START_ADDR(val) bfin_write32(DMA0_START_ADDR,val) +#define bfin_read_DMA0_X_COUNT() 
bfin_read16(DMA0_X_COUNT) +#define bfin_write_DMA0_X_COUNT(val) bfin_write16(DMA0_X_COUNT,val) +#define bfin_read_DMA0_Y_COUNT() bfin_read16(DMA0_Y_COUNT) +#define bfin_write_DMA0_Y_COUNT(val) bfin_write16(DMA0_Y_COUNT,val) +#define bfin_read_DMA0_X_MODIFY() bfin_read16(DMA0_X_MODIFY) +#define bfin_write_DMA0_X_MODIFY(val) bfin_write16(DMA0_X_MODIFY,val) +#define bfin_read_DMA0_Y_MODIFY() bfin_read16(DMA0_Y_MODIFY) +#define bfin_write_DMA0_Y_MODIFY(val) bfin_write16(DMA0_Y_MODIFY,val) +#define bfin_read_DMA0_CURR_DESC_PTR() bfin_read32(DMA0_CURR_DESC_PTR) +#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_write32(DMA0_CURR_DESC_PTR,val) +#define bfin_read_DMA0_CURR_ADDR() bfin_read32(DMA0_CURR_ADDR) +#define bfin_write_DMA0_CURR_ADDR(val) bfin_write32(DMA0_CURR_ADDR,val) +#define bfin_read_DMA0_CURR_X_COUNT() bfin_read16(DMA0_CURR_X_COUNT) +#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT,val) +#define bfin_read_DMA0_CURR_Y_COUNT() bfin_read16(DMA0_CURR_Y_COUNT) +#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT,val) +#define bfin_read_DMA0_IRQ_STATUS() bfin_read16(DMA0_IRQ_STATUS) +#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS,val) +#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP) +#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP,val) + +#define bfin_read_DMA1_CONFIG() bfin_read16(DMA1_CONFIG) +#define bfin_write_DMA1_CONFIG(val) bfin_write16(DMA1_CONFIG,val) +#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_read32(DMA1_NEXT_DESC_PTR) +#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_write32(DMA1_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_START_ADDR() bfin_read32(DMA1_START_ADDR) +#define bfin_write_DMA1_START_ADDR(val) bfin_write32(DMA1_START_ADDR,val) +#define bfin_read_DMA1_X_COUNT() bfin_read16(DMA1_X_COUNT) +#define bfin_write_DMA1_X_COUNT(val) bfin_write16(DMA1_X_COUNT,val) +#define bfin_read_DMA1_Y_COUNT() bfin_read16(DMA1_Y_COUNT) +#define bfin_write_DMA1_Y_COUNT(val) bfin_write16(DMA1_Y_COUNT,val) +#define bfin_read_DMA1_X_MODIFY() bfin_read16(DMA1_X_MODIFY) +#define bfin_write_DMA1_X_MODIFY(val) bfin_write16(DMA1_X_MODIFY,val) +#define bfin_read_DMA1_Y_MODIFY() bfin_read16(DMA1_Y_MODIFY) +#define bfin_write_DMA1_Y_MODIFY(val) bfin_write16(DMA1_Y_MODIFY,val) +#define bfin_read_DMA1_CURR_DESC_PTR() bfin_read32(DMA1_CURR_DESC_PTR) +#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_write32(DMA1_CURR_DESC_PTR,val) +#define bfin_read_DMA1_CURR_ADDR() bfin_read32(DMA1_CURR_ADDR) +#define bfin_write_DMA1_CURR_ADDR(val) bfin_write32(DMA1_CURR_ADDR,val) +#define bfin_read_DMA1_CURR_X_COUNT() bfin_read16(DMA1_CURR_X_COUNT) +#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT,val) +#define bfin_read_DMA1_CURR_Y_COUNT() bfin_read16(DMA1_CURR_Y_COUNT) +#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT,val) +#define bfin_read_DMA1_IRQ_STATUS() bfin_read16(DMA1_IRQ_STATUS) +#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS,val) +#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP) +#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP,val) + +#define bfin_read_DMA2_CONFIG() bfin_read16(DMA2_CONFIG) +#define bfin_write_DMA2_CONFIG(val) bfin_write16(DMA2_CONFIG,val) +#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_read32(DMA2_NEXT_DESC_PTR) +#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_write32(DMA2_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_START_ADDR() bfin_read32(DMA2_START_ADDR) 
+#define bfin_write_DMA2_START_ADDR(val) bfin_write32(DMA2_START_ADDR,val) +#define bfin_read_DMA2_X_COUNT() bfin_read16(DMA2_X_COUNT) +#define bfin_write_DMA2_X_COUNT(val) bfin_write16(DMA2_X_COUNT,val) +#define bfin_read_DMA2_Y_COUNT() bfin_read16(DMA2_Y_COUNT) +#define bfin_write_DMA2_Y_COUNT(val) bfin_write16(DMA2_Y_COUNT,val) +#define bfin_read_DMA2_X_MODIFY() bfin_read16(DMA2_X_MODIFY) +#define bfin_write_DMA2_X_MODIFY(val) bfin_write16(DMA2_X_MODIFY,val) +#define bfin_read_DMA2_Y_MODIFY() bfin_read16(DMA2_Y_MODIFY) +#define bfin_write_DMA2_Y_MODIFY(val) bfin_write16(DMA2_Y_MODIFY,val) +#define bfin_read_DMA2_CURR_DESC_PTR() bfin_read32(DMA2_CURR_DESC_PTR) +#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_write32(DMA2_CURR_DESC_PTR,val) +#define bfin_read_DMA2_CURR_ADDR() bfin_read32(DMA2_CURR_ADDR) +#define bfin_write_DMA2_CURR_ADDR(val) bfin_write32(DMA2_CURR_ADDR,val) +#define bfin_read_DMA2_CURR_X_COUNT() bfin_read16(DMA2_CURR_X_COUNT) +#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT,val) +#define bfin_read_DMA2_CURR_Y_COUNT() bfin_read16(DMA2_CURR_Y_COUNT) +#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT,val) +#define bfin_read_DMA2_IRQ_STATUS() bfin_read16(DMA2_IRQ_STATUS) +#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS,val) +#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP) +#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP,val) + +#define bfin_read_DMA3_CONFIG() bfin_read16(DMA3_CONFIG) +#define bfin_write_DMA3_CONFIG(val) bfin_write16(DMA3_CONFIG,val) +#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_read32(DMA3_NEXT_DESC_PTR) +#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_write32(DMA3_NEXT_DESC_PTR,val) +#define bfin_read_DMA3_START_ADDR() bfin_read32(DMA3_START_ADDR) +#define bfin_write_DMA3_START_ADDR(val) bfin_write32(DMA3_START_ADDR,val) +#define bfin_read_DMA3_X_COUNT() bfin_read16(DMA3_X_COUNT) +#define bfin_write_DMA3_X_COUNT(val) bfin_write16(DMA3_X_COUNT,val) +#define bfin_read_DMA3_Y_COUNT() bfin_read16(DMA3_Y_COUNT) +#define bfin_write_DMA3_Y_COUNT(val) bfin_write16(DMA3_Y_COUNT,val) +#define bfin_read_DMA3_X_MODIFY() bfin_read16(DMA3_X_MODIFY) +#define bfin_write_DMA3_X_MODIFY(val) bfin_write16(DMA3_X_MODIFY,val) +#define bfin_read_DMA3_Y_MODIFY() bfin_read16(DMA3_Y_MODIFY) +#define bfin_write_DMA3_Y_MODIFY(val) bfin_write16(DMA3_Y_MODIFY,val) +#define bfin_read_DMA3_CURR_DESC_PTR() bfin_read32(DMA3_CURR_DESC_PTR) +#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_write32(DMA3_CURR_DESC_PTR,val) +#define bfin_read_DMA3_CURR_ADDR() bfin_read32(DMA3_CURR_ADDR) +#define bfin_write_DMA3_CURR_ADDR(val) bfin_write32(DMA3_CURR_ADDR,val) +#define bfin_read_DMA3_CURR_X_COUNT() bfin_read16(DMA3_CURR_X_COUNT) +#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT,val) +#define bfin_read_DMA3_CURR_Y_COUNT() bfin_read16(DMA3_CURR_Y_COUNT) +#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT,val) +#define bfin_read_DMA3_IRQ_STATUS() bfin_read16(DMA3_IRQ_STATUS) +#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS,val) +#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP) +#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP,val) + +#define bfin_read_DMA4_CONFIG() bfin_read16(DMA4_CONFIG) +#define bfin_write_DMA4_CONFIG(val) bfin_write16(DMA4_CONFIG,val) +#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_read32(DMA4_NEXT_DESC_PTR) +#define 
bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_write32(DMA4_NEXT_DESC_PTR,val) +#define bfin_read_DMA4_START_ADDR() bfin_read32(DMA4_START_ADDR) +#define bfin_write_DMA4_START_ADDR(val) bfin_write32(DMA4_START_ADDR,val) +#define bfin_read_DMA4_X_COUNT() bfin_read16(DMA4_X_COUNT) +#define bfin_write_DMA4_X_COUNT(val) bfin_write16(DMA4_X_COUNT,val) +#define bfin_read_DMA4_Y_COUNT() bfin_read16(DMA4_Y_COUNT) +#define bfin_write_DMA4_Y_COUNT(val) bfin_write16(DMA4_Y_COUNT,val) +#define bfin_read_DMA4_X_MODIFY() bfin_read16(DMA4_X_MODIFY) +#define bfin_write_DMA4_X_MODIFY(val) bfin_write16(DMA4_X_MODIFY,val) +#define bfin_read_DMA4_Y_MODIFY() bfin_read16(DMA4_Y_MODIFY) +#define bfin_write_DMA4_Y_MODIFY(val) bfin_write16(DMA4_Y_MODIFY,val) +#define bfin_read_DMA4_CURR_DESC_PTR() bfin_read32(DMA4_CURR_DESC_PTR) +#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_write32(DMA4_CURR_DESC_PTR,val) +#define bfin_read_DMA4_CURR_ADDR() bfin_read32(DMA4_CURR_ADDR) +#define bfin_write_DMA4_CURR_ADDR(val) bfin_write32(DMA4_CURR_ADDR,val) +#define bfin_read_DMA4_CURR_X_COUNT() bfin_read16(DMA4_CURR_X_COUNT) +#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT,val) +#define bfin_read_DMA4_CURR_Y_COUNT() bfin_read16(DMA4_CURR_Y_COUNT) +#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT,val) +#define bfin_read_DMA4_IRQ_STATUS() bfin_read16(DMA4_IRQ_STATUS) +#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS,val) +#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP) +#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP,val) + +#define bfin_read_DMA5_CONFIG() bfin_read16(DMA5_CONFIG) +#define bfin_write_DMA5_CONFIG(val) bfin_write16(DMA5_CONFIG,val) +#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_read32(DMA5_NEXT_DESC_PTR) +#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_write32(DMA5_NEXT_DESC_PTR,val) +#define bfin_read_DMA5_START_ADDR() bfin_read32(DMA5_START_ADDR) +#define bfin_write_DMA5_START_ADDR(val) bfin_write32(DMA5_START_ADDR,val) +#define bfin_read_DMA5_X_COUNT() bfin_read16(DMA5_X_COUNT) +#define bfin_write_DMA5_X_COUNT(val) bfin_write16(DMA5_X_COUNT,val) +#define bfin_read_DMA5_Y_COUNT() bfin_read16(DMA5_Y_COUNT) +#define bfin_write_DMA5_Y_COUNT(val) bfin_write16(DMA5_Y_COUNT,val) +#define bfin_read_DMA5_X_MODIFY() bfin_read16(DMA5_X_MODIFY) +#define bfin_write_DMA5_X_MODIFY(val) bfin_write16(DMA5_X_MODIFY,val) +#define bfin_read_DMA5_Y_MODIFY() bfin_read16(DMA5_Y_MODIFY) +#define bfin_write_DMA5_Y_MODIFY(val) bfin_write16(DMA5_Y_MODIFY,val) +#define bfin_read_DMA5_CURR_DESC_PTR() bfin_read32(DMA5_CURR_DESC_PTR) +#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_write32(DMA5_CURR_DESC_PTR,val) +#define bfin_read_DMA5_CURR_ADDR() bfin_read32(DMA5_CURR_ADDR) +#define bfin_write_DMA5_CURR_ADDR(val) bfin_write32(DMA5_CURR_ADDR,val) +#define bfin_read_DMA5_CURR_X_COUNT() bfin_read16(DMA5_CURR_X_COUNT) +#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT,val) +#define bfin_read_DMA5_CURR_Y_COUNT() bfin_read16(DMA5_CURR_Y_COUNT) +#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT,val) +#define bfin_read_DMA5_IRQ_STATUS() bfin_read16(DMA5_IRQ_STATUS) +#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS,val) +#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP) +#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP,val) + +#define bfin_read_DMA6_CONFIG() bfin_read16(DMA6_CONFIG) +#define bfin_write_DMA6_CONFIG(val) 
bfin_write16(DMA6_CONFIG,val) +#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_read32(DMA6_NEXT_DESC_PTR) +#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_write32(DMA6_NEXT_DESC_PTR,val) +#define bfin_read_DMA6_START_ADDR() bfin_read32(DMA6_START_ADDR) +#define bfin_write_DMA6_START_ADDR(val) bfin_write32(DMA6_START_ADDR,val) +#define bfin_read_DMA6_X_COUNT() bfin_read16(DMA6_X_COUNT) +#define bfin_write_DMA6_X_COUNT(val) bfin_write16(DMA6_X_COUNT,val) +#define bfin_read_DMA6_Y_COUNT() bfin_read16(DMA6_Y_COUNT) +#define bfin_write_DMA6_Y_COUNT(val) bfin_write16(DMA6_Y_COUNT,val) +#define bfin_read_DMA6_X_MODIFY() bfin_read16(DMA6_X_MODIFY) +#define bfin_write_DMA6_X_MODIFY(val) bfin_write16(DMA6_X_MODIFY,val) +#define bfin_read_DMA6_Y_MODIFY() bfin_read16(DMA6_Y_MODIFY) +#define bfin_write_DMA6_Y_MODIFY(val) bfin_write16(DMA6_Y_MODIFY,val) +#define bfin_read_DMA6_CURR_DESC_PTR() bfin_read32(DMA6_CURR_DESC_PTR) +#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_write32(DMA6_CURR_DESC_PTR,val) +#define bfin_read_DMA6_CURR_ADDR() bfin_read32(DMA6_CURR_ADDR) +#define bfin_write_DMA6_CURR_ADDR(val) bfin_write32(DMA6_CURR_ADDR,val) +#define bfin_read_DMA6_CURR_X_COUNT() bfin_read16(DMA6_CURR_X_COUNT) +#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT,val) +#define bfin_read_DMA6_CURR_Y_COUNT() bfin_read16(DMA6_CURR_Y_COUNT) +#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT,val) +#define bfin_read_DMA6_IRQ_STATUS() bfin_read16(DMA6_IRQ_STATUS) +#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS,val) +#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP) +#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP,val) + +#define bfin_read_DMA7_CONFIG() bfin_read16(DMA7_CONFIG) +#define bfin_write_DMA7_CONFIG(val) bfin_write16(DMA7_CONFIG,val) +#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_read32(DMA7_NEXT_DESC_PTR) +#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_write32(DMA7_NEXT_DESC_PTR,val) +#define bfin_read_DMA7_START_ADDR() bfin_read32(DMA7_START_ADDR) +#define bfin_write_DMA7_START_ADDR(val) bfin_write32(DMA7_START_ADDR,val) +#define bfin_read_DMA7_X_COUNT() bfin_read16(DMA7_X_COUNT) +#define bfin_write_DMA7_X_COUNT(val) bfin_write16(DMA7_X_COUNT,val) +#define bfin_read_DMA7_Y_COUNT() bfin_read16(DMA7_Y_COUNT) +#define bfin_write_DMA7_Y_COUNT(val) bfin_write16(DMA7_Y_COUNT,val) +#define bfin_read_DMA7_X_MODIFY() bfin_read16(DMA7_X_MODIFY) +#define bfin_write_DMA7_X_MODIFY(val) bfin_write16(DMA7_X_MODIFY,val) +#define bfin_read_DMA7_Y_MODIFY() bfin_read16(DMA7_Y_MODIFY) +#define bfin_write_DMA7_Y_MODIFY(val) bfin_write16(DMA7_Y_MODIFY,val) +#define bfin_read_DMA7_CURR_DESC_PTR() bfin_read32(DMA7_CURR_DESC_PTR) +#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_write32(DMA7_CURR_DESC_PTR,val) +#define bfin_read_DMA7_CURR_ADDR() bfin_read32(DMA7_CURR_ADDR) +#define bfin_write_DMA7_CURR_ADDR(val) bfin_write32(DMA7_CURR_ADDR,val) +#define bfin_read_DMA7_CURR_X_COUNT() bfin_read16(DMA7_CURR_X_COUNT) +#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT,val) +#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT) +#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT,val) +#define bfin_read_DMA7_IRQ_STATUS() bfin_read16(DMA7_IRQ_STATUS) +#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS,val) +#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP) +#define bfin_write_DMA7_PERIPHERAL_MAP(val) 
bfin_write16(DMA7_PERIPHERAL_MAP,val) + +#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG) +#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG,val) +#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR) +#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR,val) +#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR) +#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR,val) +#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT) +#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT,val) +#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT) +#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT,val) +#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY) +#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY,val) +#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY) +#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY,val) +#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR) +#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR,val) +#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR) +#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR,val) +#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT) +#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT,val) +#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT) +#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT,val) +#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS) +#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS,val) +#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP) +#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP,val) + +#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG) +#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG,val) +#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR) +#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR,val) +#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR) +#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR,val) +#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT) +#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT,val) +#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT) +#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT,val) +#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY) +#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY,val) +#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY) +#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY,val) +#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR) +#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR,val) +#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR) +#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR,val) +#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT) +#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT,val) +#define 
bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT) +#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT,val) +#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS) +#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS,val) +#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP) +#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP,val) + +#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG) +#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG,val) +#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR) +#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR,val) +#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR) +#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR,val) +#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT) +#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT,val) +#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT) +#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT,val) +#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY) +#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY,val) +#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY) +#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY,val) +#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR) +#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR,val) +#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR) +#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR,val) +#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT) +#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT,val) +#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT) +#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT,val) +#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS) +#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS,val) +#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP) +#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP,val) + +#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG) +#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG,val) +#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR) +#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR,val) +#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR) +#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR,val) +#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT) +#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT,val) +#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT) +#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT,val) +#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY) +#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY,val) +#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY) +#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY,val) +#define bfin_read_MDMA_S0_CURR_DESC_PTR() 
bfin_read32(MDMA_S0_CURR_DESC_PTR) +#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR,val) +#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR) +#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR,val) +#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT) +#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT,val) +#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT) +#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT,val) +#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS) +#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS,val) +#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP) +#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP,val) + +/* Aysnchronous Memory Controller - External Bus Interface Unit (0xFFC0 3C00-0xFFC0 3FFF) */ +#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL) +#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL,val) +#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0) +#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0,val) +#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1) +#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1,val) + +/* SDRAM Controller External Bus Interface Unit (0xFFC0 4C00-0xFFC0 4FFF) */ +#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL) +#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL,val) +#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC) +#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC,val) +#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT) +#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT,val) +#define bfin_read_EBIU_SDBCTL() bfin_read16(EBIU_SDBCTL) +#define bfin_write_EBIU_SDBCTL(val) bfin_write16(EBIU_SDBCTL,val) + +/* UART Controller */ +#define bfin_read_UART_THR() bfin_read16(UART_THR) +#define bfin_write_UART_THR(val) bfin_write16(UART_THR,val) +#define bfin_read_UART_RBR() bfin_read16(UART_RBR) +#define bfin_write_UART_RBR(val) bfin_write16(UART_RBR,val) +#define bfin_read_UART_DLL() bfin_read16(UART_DLL) +#define bfin_write_UART_DLL(val) bfin_write16(UART_DLL,val) +#define bfin_read_UART_IER() bfin_read16(UART_IER) +#define bfin_write_UART_IER(val) bfin_write16(UART_IER,val) +#define bfin_read_UART_DLH() bfin_read16(UART_DLH) +#define bfin_write_UART_DLH(val) bfin_write16(UART_DLH,val) +#define bfin_read_UART_IIR() bfin_read16(UART_IIR) +#define bfin_write_UART_IIR(val) bfin_write16(UART_IIR,val) +#define bfin_read_UART_LCR() bfin_read16(UART_LCR) +#define bfin_write_UART_LCR(val) bfin_write16(UART_LCR,val) +#define bfin_read_UART_MCR() bfin_read16(UART_MCR) +#define bfin_write_UART_MCR(val) bfin_write16(UART_MCR,val) +#define bfin_read_UART_LSR() bfin_read16(UART_LSR) +#define bfin_write_UART_LSR(val) bfin_write16(UART_LSR,val) +/* +#define UART_MSR +*/ +#define bfin_read_UART_SCR() bfin_read16(UART_SCR) +#define bfin_write_UART_SCR(val) bfin_write16(UART_SCR,val) +#define bfin_read_UART_GCTL() bfin_read16(UART_GCTL) +#define bfin_write_UART_GCTL(val) bfin_write16(UART_GCTL,val) + +/* SPI Controller */ +#define bfin_read_SPI_CTL() bfin_read16(SPI_CTL) +#define bfin_write_SPI_CTL(val) bfin_write16(SPI_CTL,val) +#define bfin_read_SPI_FLG() bfin_read16(SPI_FLG) +#define bfin_write_SPI_FLG(val) bfin_write16(SPI_FLG,val) +#define bfin_read_SPI_STAT() 
bfin_read16(SPI_STAT) +#define bfin_write_SPI_STAT(val) bfin_write16(SPI_STAT,val) +#define bfin_read_SPI_TDBR() bfin_read16(SPI_TDBR) +#define bfin_write_SPI_TDBR(val) bfin_write16(SPI_TDBR,val) +#define bfin_read_SPI_RDBR() bfin_read16(SPI_RDBR) +#define bfin_write_SPI_RDBR(val) bfin_write16(SPI_RDBR,val) +#define bfin_read_SPI_BAUD() bfin_read16(SPI_BAUD) +#define bfin_write_SPI_BAUD(val) bfin_write16(SPI_BAUD,val) +#define bfin_read_SPI_SHADOW() bfin_read16(SPI_SHADOW) +#define bfin_write_SPI_SHADOW(val) bfin_write16(SPI_SHADOW,val) + +/* TIMER 0, 1, 2 Registers */ +#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG) +#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG,val) +#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER) +#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER,val) +#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD) +#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD,val) +#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH) +#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH,val) + +#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG) +#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG,val) +#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER) +#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER,val) +#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD) +#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD,val) +#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH) +#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH,val) + +#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG) +#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG,val) +#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER) +#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER,val) +#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD) +#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD,val) +#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH) +#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH,val) + +#define bfin_read_TIMER_ENABLE() bfin_read16(TIMER_ENABLE) +#define bfin_write_TIMER_ENABLE(val) bfin_write16(TIMER_ENABLE,val) +#define bfin_read_TIMER_DISABLE() bfin_read16(TIMER_DISABLE) +#define bfin_write_TIMER_DISABLE(val) bfin_write16(TIMER_DISABLE,val) +#define bfin_read_TIMER_STATUS() bfin_read16(TIMER_STATUS) +#define bfin_write_TIMER_STATUS(val) bfin_write16(TIMER_STATUS,val) + +/* SPORT0 Controller */ +#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1) +#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1,val) +#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2) +#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2,val) +#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV) +#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV,val) +#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV) +#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV,val) +#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX) +#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX,val) +#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX) +#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX,val) +#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX) +#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX,val) +#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX) +#define 
bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX,val) +#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX) +#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX,val) +#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX) +#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX,val) +#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1) +#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1,val) +#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2) +#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2,val) +#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV) +#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV,val) +#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV) +#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV,val) +#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT) +#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT,val) +#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL) +#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL,val) +#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1) +#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1,val) +#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2) +#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2,val) +#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0) +#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0,val) +#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1) +#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1,val) +#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2) +#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2,val) +#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3) +#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3,val) +#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0) +#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0,val) +#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1) +#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1,val) +#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2) +#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2,val) +#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3) +#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3,val) + +/* SPORT1 Controller */ +#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1) +#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1,val) +#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2) +#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2,val) +#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV) +#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV,val) +#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV) +#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV,val) +#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX) +#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX,val) +#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX) +#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX,val) +#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX) +#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX,val) +#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX) +#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX,val) +#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX) +#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX,val) +#define bfin_read_SPORT1_RX16() 
bfin_read16(SPORT1_RX) +#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX,val) +#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1) +#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1,val) +#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2) +#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2,val) +#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV) +#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV,val) +#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV) +#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV,val) +#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT) +#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT,val) +#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL) +#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL,val) +#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1) +#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1,val) +#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2) +#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2,val) +#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0) +#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0,val) +#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1) +#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1,val) +#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2) +#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2,val) +#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3) +#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3,val) +#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0) +#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0,val) +#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1) +#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1,val) +#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2) +#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2,val) +#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3) +#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3,val) + +/* Parallel Peripheral Interface (PPI) */ +#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL) +#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL,val) +#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS) +#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS,val) +#define bfin_clear_PPI_STATUS() bfin_read_PPI_STATUS() +#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY) +#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY,val) +#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT) +#define bfin_write_PPI_COUNT(val) bfin_write16(PPI_COUNT,val) +#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME) +#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME,val) + +#endif /* _CDEF_BF532_H */ diff --git a/include/asm-blackfin/mach-bf533/defBF532.h b/include/asm-blackfin/mach-bf533/defBF532.h new file mode 100644 index 000000000000..b240a082aa09 --- /dev/null +++ b/include/asm-blackfin/mach-bf533/defBF532.h @@ -0,0 +1,1175 @@ +/************************************************************************ + * + * This file is subject to the terms and conditions of the GNU Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ *
+ * Non-GPL License also available as part of VisualDSP++
+ * http://www.analog.com/processors/resources/crosscore/visualDspDevSoftware.html
+ *
+ * (c) Copyright 2001-2005 Analog Devices, Inc. All rights reserved
+ *
+ * This file under source code control, please send bugs or changes to:
+ * dsptools.support@analog.com
+ *
+ ************************************************************************/
+/*
+ * File: include/asm-blackfin/mach-bf533/defBF532.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/* SYSTEM & MM REGISTER BIT & ADDRESS DEFINITIONS FOR ADSP-BF532 */
+
+#ifndef _DEF_BF532_H
+#define _DEF_BF532_H
+/*
+#if !defined(__ADSPLPBLACKFIN__)
+#warning defBF532.h should only be included for 532 compatible chips
+#endif
+*/
+/* include all Core registers and bit definitions */
+#include
+
+/*********************************************************************************** */
+/* System MMR Register Map */
+/*********************************************************************************** */
+/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
+
+#define PLL_CTL 0xFFC00000 /* PLL Control register (16-bit) */
+#define PLL_DIV 0xFFC00004 /* PLL Divide Register (16-bit) */
+#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register (16-bit) */
+#define PLL_STAT 0xFFC0000C /* PLL Status register (16-bit) */
+#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count register (16-bit) */
+#define CHIPID 0xFFC00014 /* Chip ID Register */
+#define SWRST 0xFFC00100 /* Software Reset Register (16-bit) */
+#define SYSCR 0xFFC00104 /* System Configuration register */
+
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */
+#define SIC_IMASK 0xFFC0010C /* Interrupt Mask Register */
+#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
+#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
+#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
+#define SIC_ISR 0xFFC00120 /* Interrupt Status Register */
+#define SIC_IWR 0xFFC00124 /* Interrupt Wakeup Register */
+
+/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
+#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
+#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
+#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
+
+/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
+#define RTC_STAT 0xFFC00300 /* RTC Status Register */
+#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
+#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
+#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
+#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
+#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */ +#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Register (alternate macro) */ + +/* UART Controller (0xFFC00400 - 0xFFC004FF) */ +#define UART_THR 0xFFC00400 /* Transmit Holding register */ +#define UART_RBR 0xFFC00400 /* Receive Buffer register */ +#define UART_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */ +#define UART_IER 0xFFC00404 /* Interrupt Enable Register */ +#define UART_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */ +#define UART_IIR 0xFFC00408 /* Interrupt Identification Register */ +#define UART_LCR 0xFFC0040C /* Line Control Register */ +#define UART_MCR 0xFFC00410 /* Modem Control Register */ +#define UART_LSR 0xFFC00414 /* Line Status Register */ +#if 0 +#define UART_MSR 0xFFC00418 /* Modem Status Register (UNUSED in ADSP-BF532) */ +#endif +#define UART_SCR 0xFFC0041C /* SCR Scratch Register */ +#define UART_GCTL 0xFFC00424 /* Global Control Register */ + +/* SPI Controller (0xFFC00500 - 0xFFC005FF) */ +#define SPI_CTL 0xFFC00500 /* SPI Control Register */ +#define SPI_FLG 0xFFC00504 /* SPI Flag register */ +#define SPI_STAT 0xFFC00508 /* SPI Status register */ +#define SPI_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */ +#define SPI_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */ +#define SPI_BAUD 0xFFC00514 /* SPI Baud rate Register */ +#define SPI_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */ + +/* TIMER 0, 1, 2 Registers (0xFFC00600 - 0xFFC006FF) */ + +#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */ +#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */ +#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */ +#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */ + +#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */ +#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */ +#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */ +#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */ + +#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */ +#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */ +#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */ +#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */ + +#define TIMER_ENABLE 0xFFC00640 /* Timer Enable Register */ +#define TIMER_DISABLE 0xFFC00644 /* Timer Disable Register */ +#define TIMER_STATUS 0xFFC00648 /* Timer Status Register */ + +/* General Purpose IO (0xFFC00700 - 0xFFC007FF) */ + +#define FIO_FLAG_D 0xFFC00700 /* Flag Mask to directly specify state of pins */ +#define FIO_FLAG_C 0xFFC00704 /* Peripheral Interrupt Flag Register (clear) */ +#define FIO_FLAG_S 0xFFC00708 /* Peripheral Interrupt Flag Register (set) */ +#define FIO_FLAG_T 0xFFC0070C /* Flag Mask to directly toggle state of pins */ +#define FIO_MASKA_D 0xFFC00710 /* Flag Mask Interrupt A Register (set directly) */ +#define FIO_MASKA_C 0xFFC00714 /* Flag Mask Interrupt A Register (clear) */ +#define FIO_MASKA_S 0xFFC00718 /* Flag Mask Interrupt A Register (set) */ +#define FIO_MASKA_T 0xFFC0071C /* Flag Mask Interrupt A Register (toggle) */ +#define FIO_MASKB_D 0xFFC00720 /* Flag Mask Interrupt B Register (set directly) */ +#define FIO_MASKB_C 0xFFC00724 /* Flag Mask Interrupt B Register (clear) */ +#define FIO_MASKB_S 0xFFC00728 /* Flag Mask Interrupt B Register (set) */ +#define FIO_MASKB_T 0xFFC0072C /* Flag Mask Interrupt B Register (toggle) */ +#define FIO_DIR 0xFFC00730 /* Peripheral Flag Direction Register */ +#define FIO_POLAR 
0xFFC00734 /* Flag Source Polarity Register */
+#define FIO_EDGE 0xFFC00738 /* Flag Source Sensitivity Register */
+#define FIO_BOTH 0xFFC0073C /* Flag Set on BOTH Edges Register */
+#define FIO_INEN 0xFFC00740 /* Flag Input Enable Register */
+
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
+#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
+#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
+#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
+#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
+#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
+#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
+#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
+#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
+#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
+#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
+#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
+#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
+#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
+#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
+#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
+#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
+#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
+#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
+#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
+#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
+#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
+
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
+#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
+#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
+#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
+#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
+#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
+#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
+#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
+#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
+#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
+#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
+#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
+#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
+#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
+#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
+#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
+#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
+#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
+#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
+#define SPORT1_MRCS1
0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */ +#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */ +#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */ + +/* Asynchronous Memory Controller - External Bus Interface Unit */ +#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */ +#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */ +#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */ + +/* SDRAM Controller External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */ + +#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */ +#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */ +#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */ +#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */ + +/* DMA Traffic controls */ +#define DMA_TCPER 0xFFC00B0C /* Traffic Control Periods Register */ +#define DMA_TCCNT 0xFFC00B10 /* Traffic Control Current Counts Register */ +#define DMA_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */ +#define DMA_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */ + +/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */ +#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */ +#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */ +#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */ +#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */ +#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */ +#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */ +#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */ +#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */ +#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */ +#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */ +#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */ +#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */ +#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */ + +#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */ +#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */ +#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */ +#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */ +#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */ +#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */ +#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */ +#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */ +#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */ +#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */ +#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */ +#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */ +#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */ + +#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */ +#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */ +#define DMA2_START_ADDR 
0xFFC00C84 /* DMA Channel 2 Start Address Register */ +#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */ +#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */ +#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */ +#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */ +#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */ +#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */ +#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */ +#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */ +#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */ +#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */ + +#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */ +#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */ +#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */ +#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */ +#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */ +#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */ +#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */ +#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */ +#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */ +#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */ +#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */ +#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */ +#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */ + +#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */ +#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */ +#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */ +#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */ +#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */ +#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */ +#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */ +#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */ +#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */ +#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */ +#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */ +#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */ +#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */ + +#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */ +#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */ +#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */ +#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */ +#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */ +#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */ +#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */ +#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */ 
+#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */ +#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */ +#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */ +#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */ +#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */ + +#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */ +#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */ +#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */ +#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */ +#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */ +#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */ +#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */ +#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */ +#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */ +#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */ +#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */ +#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */ +#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */ + +#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */ +#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */ +#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */ +#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */ +#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */ +#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */ +#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */ +#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */ +#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */ +#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */ +#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */ +#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */ +#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */ + +#define MDMA_D1_CONFIG 0xFFC00E88 /* MemDMA Stream 1 Destination Configuration Register */ +#define MDMA_D1_NEXT_DESC_PTR 0xFFC00E80 /* MemDMA Stream 1 Destination Next Descriptor Pointer Register */ +#define MDMA_D1_START_ADDR 0xFFC00E84 /* MemDMA Stream 1 Destination Start Address Register */ +#define MDMA_D1_X_COUNT 0xFFC00E90 /* MemDMA Stream 1 Destination X Count Register */ +#define MDMA_D1_Y_COUNT 0xFFC00E98 /* MemDMA Stream 1 Destination Y Count Register */ +#define MDMA_D1_X_MODIFY 0xFFC00E94 /* MemDMA Stream 1 Destination X Modify Register */ +#define MDMA_D1_Y_MODIFY 0xFFC00E9C /* MemDMA Stream 1 Destination Y Modify Register */ +#define MDMA_D1_CURR_DESC_PTR 0xFFC00EA0 /* MemDMA Stream 1 Destination Current Descriptor Pointer Register */ +#define MDMA_D1_CURR_ADDR 0xFFC00EA4 /* MemDMA Stream 1 Destination Current Address Register */ +#define MDMA_D1_CURR_X_COUNT 0xFFC00EB0 /* MemDMA Stream 1 Destination Current X Count Register */ +#define MDMA_D1_CURR_Y_COUNT 0xFFC00EB8 /* MemDMA Stream 1 Destination Current Y Count Register */ 
+#define MDMA_D1_IRQ_STATUS 0xFFC00EA8 /* MemDMA Stream 1 Destination Interrupt/Status Register */ +#define MDMA_D1_PERIPHERAL_MAP 0xFFC00EAC /* MemDMA Stream 1 Destination Peripheral Map Register */ + +#define MDMA_S1_CONFIG 0xFFC00EC8 /* MemDMA Stream 1 Source Configuration Register */ +#define MDMA_S1_NEXT_DESC_PTR 0xFFC00EC0 /* MemDMA Stream 1 Source Next Descriptor Pointer Register */ +#define MDMA_S1_START_ADDR 0xFFC00EC4 /* MemDMA Stream 1 Source Start Address Register */ +#define MDMA_S1_X_COUNT 0xFFC00ED0 /* MemDMA Stream 1 Source X Count Register */ +#define MDMA_S1_Y_COUNT 0xFFC00ED8 /* MemDMA Stream 1 Source Y Count Register */ +#define MDMA_S1_X_MODIFY 0xFFC00ED4 /* MemDMA Stream 1 Source X Modify Register */ +#define MDMA_S1_Y_MODIFY 0xFFC00EDC /* MemDMA Stream 1 Source Y Modify Register */ +#define MDMA_S1_CURR_DESC_PTR 0xFFC00EE0 /* MemDMA Stream 1 Source Current Descriptor Pointer Register */ +#define MDMA_S1_CURR_ADDR 0xFFC00EE4 /* MemDMA Stream 1 Source Current Address Register */ +#define MDMA_S1_CURR_X_COUNT 0xFFC00EF0 /* MemDMA Stream 1 Source Current X Count Register */ +#define MDMA_S1_CURR_Y_COUNT 0xFFC00EF8 /* MemDMA Stream 1 Source Current Y Count Register */ +#define MDMA_S1_IRQ_STATUS 0xFFC00EE8 /* MemDMA Stream 1 Source Interrupt/Status Register */ +#define MDMA_S1_PERIPHERAL_MAP 0xFFC00EEC /* MemDMA Stream 1 Source Peripheral Map Register */ + +#define MDMA_D0_CONFIG 0xFFC00E08 /* MemDMA Stream 0 Destination Configuration Register */ +#define MDMA_D0_NEXT_DESC_PTR 0xFFC00E00 /* MemDMA Stream 0 Destination Next Descriptor Pointer Register */ +#define MDMA_D0_START_ADDR 0xFFC00E04 /* MemDMA Stream 0 Destination Start Address Register */ +#define MDMA_D0_X_COUNT 0xFFC00E10 /* MemDMA Stream 0 Destination X Count Register */ +#define MDMA_D0_Y_COUNT 0xFFC00E18 /* MemDMA Stream 0 Destination Y Count Register */ +#define MDMA_D0_X_MODIFY 0xFFC00E14 /* MemDMA Stream 0 Destination X Modify Register */ +#define MDMA_D0_Y_MODIFY 0xFFC00E1C /* MemDMA Stream 0 Destination Y Modify Register */ +#define MDMA_D0_CURR_DESC_PTR 0xFFC00E20 /* MemDMA Stream 0 Destination Current Descriptor Pointer Register */ +#define MDMA_D0_CURR_ADDR 0xFFC00E24 /* MemDMA Stream 0 Destination Current Address Register */ +#define MDMA_D0_CURR_X_COUNT 0xFFC00E30 /* MemDMA Stream 0 Destination Current X Count Register */ +#define MDMA_D0_CURR_Y_COUNT 0xFFC00E38 /* MemDMA Stream 0 Destination Current Y Count Register */ +#define MDMA_D0_IRQ_STATUS 0xFFC00E28 /* MemDMA Stream 0 Destination Interrupt/Status Register */ +#define MDMA_D0_PERIPHERAL_MAP 0xFFC00E2C /* MemDMA Stream 0 Destination Peripheral Map Register */ + +#define MDMA_S0_CONFIG 0xFFC00E48 /* MemDMA Stream 0 Source Configuration Register */ +#define MDMA_S0_NEXT_DESC_PTR 0xFFC00E40 /* MemDMA Stream 0 Source Next Descriptor Pointer Register */ +#define MDMA_S0_START_ADDR 0xFFC00E44 /* MemDMA Stream 0 Source Start Address Register */ +#define MDMA_S0_X_COUNT 0xFFC00E50 /* MemDMA Stream 0 Source X Count Register */ +#define MDMA_S0_Y_COUNT 0xFFC00E58 /* MemDMA Stream 0 Source Y Count Register */ +#define MDMA_S0_X_MODIFY 0xFFC00E54 /* MemDMA Stream 0 Source X Modify Register */ +#define MDMA_S0_Y_MODIFY 0xFFC00E5C /* MemDMA Stream 0 Source Y Modify Register */ +#define MDMA_S0_CURR_DESC_PTR 0xFFC00E60 /* MemDMA Stream 0 Source Current Descriptor Pointer Register */ +#define MDMA_S0_CURR_ADDR 0xFFC00E64 /* MemDMA Stream 0 Source Current Address Register */ +#define MDMA_S0_CURR_X_COUNT 0xFFC00E70 /* MemDMA Stream 0 Source Current X Count 
Register */ +#define MDMA_S0_CURR_Y_COUNT 0xFFC00E78 /* MemDMA Stream 0 Source Current Y Count Register */ +#define MDMA_S0_IRQ_STATUS 0xFFC00E68 /* MemDMA Stream 0 Source Interrupt/Status Register */ +#define MDMA_S0_PERIPHERAL_MAP 0xFFC00E6C /* MemDMA Stream 0 Source Peripheral Map Register */ + +/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */ + +#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */ +#define PPI_STATUS 0xFFC01004 /* PPI Status Register */ +#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */ +#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */ +#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */ + +/*********************************************************************************** */ +/* System MMR Register Bits */ +/******************************************************************************* */ + +/* ********************* PLL AND RESET MASKS ************************ */ + +/* PLL_CTL Masks */ +#define PLL_CLKIN 0x00000000 /* Pass CLKIN to PLL */ +#define PLL_CLKIN_DIV2 0x00000001 /* Pass CLKIN/2 to PLL */ +#define PLL_OFF 0x00000002 /* Shut off PLL clocks */ +#define STOPCK_OFF 0x00000008 /* Core clock off */ +#define PDWN 0x00000020 /* Put the PLL in a Deep Sleep state */ +#define BYPASS 0x00000100 /* Bypass the PLL */ + +/* PLL_DIV Masks */ + +#define SCLK_DIV(x) (x) /* SCLK = VCO / x */ + +#define CCLK_DIV1 0x00000000 /* CCLK = VCO / 1 */ +#define CCLK_DIV2 0x00000010 /* CCLK = VCO / 2 */ +#define CCLK_DIV4 0x00000020 /* CCLK = VCO / 4 */ +#define CCLK_DIV8 0x00000030 /* CCLK = VCO / 8 */ + +/* PLL_STAT Masks */ +#define ACTIVE_PLLENABLED 0x0001 /* Processor In Active Mode With PLL Enabled */ +#define FULL_ON 0x0002 /* Processor In Full On Mode */ +#define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */ +#define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */ + +/* CHIPID Masks */ +#define CHIPID_VERSION 0xF0000000 +#define CHIPID_FAMILY 0x0FFFF000 +#define CHIPID_MANUFACTURE 0x00000FFE + +/* SWRST Mask */ +#define SYSTEM_RESET 0x00000007 /* Initiates a system software reset */ + +/* ************* SYSTEM INTERRUPT CONTROLLER MASKS ***************** */ + + /* SIC_IAR0 Masks */ + +#define P0_IVG(x) ((x)-7) /* Peripheral #0 assigned IVG #x */ +#define P1_IVG(x) ((x)-7) << 0x4 /* Peripheral #1 assigned IVG #x */ +#define P2_IVG(x) ((x)-7) << 0x8 /* Peripheral #2 assigned IVG #x */ +#define P3_IVG(x) ((x)-7) << 0xC /* Peripheral #3 assigned IVG #x */ +#define P4_IVG(x) ((x)-7) << 0x10 /* Peripheral #4 assigned IVG #x */ +#define P5_IVG(x) ((x)-7) << 0x14 /* Peripheral #5 assigned IVG #x */ +#define P6_IVG(x) ((x)-7) << 0x18 /* Peripheral #6 assigned IVG #x */ +#define P7_IVG(x) ((x)-7) << 0x1C /* Peripheral #7 assigned IVG #x */ + +/* SIC_IAR1 Masks */ + +#define P8_IVG(x) ((x)-7) /* Peripheral #8 assigned IVG #x */ +#define P9_IVG(x) ((x)-7) << 0x4 /* Peripheral #9 assigned IVG #x */ +#define P10_IVG(x) ((x)-7) << 0x8 /* Peripheral #10 assigned IVG #x */ +#define P11_IVG(x) ((x)-7) << 0xC /* Peripheral #11 assigned IVG #x */ +#define P12_IVG(x) ((x)-7) << 0x10 /* Peripheral #12 assigned IVG #x */ +#define P13_IVG(x) ((x)-7) << 0x14 /* Peripheral #13 assigned IVG #x */ +#define P14_IVG(x) ((x)-7) << 0x18 /* Peripheral #14 assigned IVG #x */ +#define P15_IVG(x) ((x)-7) << 0x1C /* Peripheral #15 assigned IVG #x */ + +/* SIC_IAR2 Masks */ +#define P16_IVG(x) ((x)-7) /* Peripheral #16 assigned IVG #x */ +#define P17_IVG(x) ((x)-7) << 0x4 /* Peripheral #17 assigned IVG #x */ +#define 
P18_IVG(x) ((x)-7) << 0x8 /* Peripheral #18 assigned IVG #x */ +#define P19_IVG(x) ((x)-7) << 0xC /* Peripheral #19 assigned IVG #x */ +#define P20_IVG(x) ((x)-7) << 0x10 /* Peripheral #20 assigned IVG #x */ +#define P21_IVG(x) ((x)-7) << 0x14 /* Peripheral #21 assigned IVG #x */ +#define P22_IVG(x) ((x)-7) << 0x18 /* Peripheral #22 assigned IVG #x */ +#define P23_IVG(x) ((x)-7) << 0x1C /* Peripheral #23 assigned IVG #x */ + +/* SIC_IMASK Masks */ +#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */ +#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */ +#define SIC_MASK(x) (1 << (x)) /* Mask Peripheral #x interrupt */ +#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << (x))) /* Unmask Peripheral #x interrupt */ + +/* SIC_IWR Masks */ +#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */ +#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */ +#define IWR_ENABLE(x) (1 << (x)) /* Wakeup Enable Peripheral #x */ +#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x))) /* Wakeup Disable Peripheral #x */ + +/* ********* WATCHDOG TIMER MASKS ********************8 */ + +/* Watchdog Timer WDOG_CTL Register */ +#define ICTL(x) ((x<<1) & 0x0006) +#define ENABLE_RESET 0x00000000 /* Set Watchdog Timer to generate reset */ +#define ENABLE_NMI 0x00000002 /* Set Watchdog Timer to generate non-maskable interrupt */ +#define ENABLE_GPI 0x00000004 /* Set Watchdog Timer to generate general-purpose interrupt */ +#define DISABLE_EVT 0x00000006 /* Disable Watchdog Timer interrupts */ + +#define TMR_EN 0x0000 +#define TMR_DIS 0x0AD0 +#define TRO 0x8000 + +#define ICTL_P0 0x01 +#define ICTL_P1 0x02 +#define TRO_P 0x0F + +/* ***************************** UART CONTROLLER MASKS ********************** */ + +/* UART_LCR Register */ + +#define DLAB 0x80 +#define SB 0x40 +#define STP 0x20 +#define EPS 0x10 +#define PEN 0x08 +#define STB 0x04 +#define WLS(x) ((x-5) & 0x03) + +#define DLAB_P 0x07 +#define SB_P 0x06 +#define STP_P 0x05 +#define EPS_P 0x04 +#define PEN_P 0x03 +#define STB_P 0x02 +#define WLS_P1 0x01 +#define WLS_P0 0x00 + +/* UART_MCR Register */ +#define LOOP_ENA 0x10 +#define LOOP_ENA_P 0x04 + +/* UART_LSR Register */ +#define TEMT 0x40 +#define THRE 0x20 +#define BI 0x10 +#define FE 0x08 +#define PE 0x04 +#define OE 0x02 +#define DR 0x01 + +#define TEMP_P 0x06 +#define THRE_P 0x05 +#define BI_P 0x04 +#define FE_P 0x03 +#define PE_P 0x02 +#define OE_P 0x01 +#define DR_P 0x00 + +/* UART_IER Register */ +#define ELSI 0x04 +#define ETBEI 0x02 +#define ERBFI 0x01 + +#define ELSI_P 0x02 +#define ETBEI_P 0x01 +#define ERBFI_P 0x00 + +/* UART_IIR Register */ +#define STATUS(x) ((x << 1) & 0x06) +#define NINT 0x01 +#define STATUS_P1 0x02 +#define STATUS_P0 0x01 +#define NINT_P 0x00 +#define IIR_TX_READY 0x02 /* UART_THR empty */ +#define IIR_RX_READY 0x04 /* Receive data ready */ +#define IIR_LINE_CHANGE 0x06 /* Receive line status */ +#define IIR_STATUS 0x06 + +/* UART_GCTL Register */ +#define FFE 0x20 +#define FPE 0x10 +#define RPOLC 0x08 +#define TPOLC 0x04 +#define IREN 0x02 +#define UCEN 0x01 + +#define FFE_P 0x05 +#define FPE_P 0x04 +#define RPOLC_P 0x03 +#define TPOLC_P 0x02 +#define IREN_P 0x01 +#define UCEN_P 0x00 + +/* ********** SERIAL PORT MASKS ********************** */ + +/* SPORTx_TCR1 Masks */ +#define TSPEN 0x0001 /* TX enable */ +#define ITCLK 0x0002 /* Internal TX Clock Select */ +#define TDTYPE 0x000C /* TX Data Formatting Select */ +#define TLSBIT 0x0010 /* TX Bit Order */ +#define ITFS 0x0200 /* Internal TX Frame Sync Select */ +#define 
TFSR 0x0400 /* TX Frame Sync Required Select */ +#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */ +#define LTFS 0x1000 /* Low TX Frame Sync Select */ +#define LATFS 0x2000 /* Late TX Frame Sync Select */ +#define TCKFE 0x4000 /* TX Clock Falling Edge Select */ + +/* SPORTx_TCR2 Masks */ +#define SLEN 0x001F /*TX Word Length */ +#define TXSE 0x0100 /*TX Secondary Enable */ +#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */ +#define TRFST 0x0400 /*TX Right-First Data Order */ + +/* SPORTx_RCR1 Masks */ +#define RSPEN 0x0001 /* RX enable */ +#define IRCLK 0x0002 /* Internal RX Clock Select */ +#define RDTYPE 0x000C /* RX Data Formatting Select */ +#define RULAW 0x0008 /* u-Law enable */ +#define RALAW 0x000C /* A-Law enable */ +#define RLSBIT 0x0010 /* RX Bit Order */ +#define IRFS 0x0200 /* Internal RX Frame Sync Select */ +#define RFSR 0x0400 /* RX Frame Sync Required Select */ +#define LRFS 0x1000 /* Low RX Frame Sync Select */ +#define LARFS 0x2000 /* Late RX Frame Sync Select */ +#define RCKFE 0x4000 /* RX Clock Falling Edge Select */ + +/* SPORTx_RCR2 Masks */ +#define SLEN 0x001F /*RX Word Length */ +#define RXSE 0x0100 /*RX Secondary Enable */ +#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */ +#define RRFST 0x0400 /*Right-First Data Order */ + +/*SPORTx_STAT Masks */ +#define RXNE 0x0001 /*RX FIFO Not Empty Status */ +#define RUVF 0x0002 /*RX Underflow Status */ +#define ROVF 0x0004 /*RX Overflow Status */ +#define TXF 0x0008 /*TX FIFO Full Status */ +#define TUVF 0x0010 /*TX Underflow Status */ +#define TOVF 0x0020 /*TX Overflow Status */ +#define TXHRE 0x0040 /*TX Hold Register Empty */ + +/*SPORTx_MCMC1 Masks */ +#define SP_WSIZE 0x0000F000 /*Multichannel Window Size Field */ +#define SP_WOFF 0x000003FF /*Multichannel Window Offset Field */ + +/*SPORTx_MCMC2 Masks */ +#define MCCRM 0x00000003 /*Multichannel Clock Recovery Mode */ +#define MCDTXPE 0x00000004 /*Multichannel DMA Transmit Packing */ +#define MCDRXPE 0x00000008 /*Multichannel DMA Receive Packing */ +#define MCMEN 0x00000010 /*Multichannel Frame Mode Enable */ +#define FSDR 0x00000080 /*Multichannel Frame Sync to Data Relationship */ +#define MFD 0x0000F000 /*Multichannel Frame Delay */ + +/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */ + +/* PPI_CONTROL Masks */ +#define PORT_EN 0x00000001 /* PPI Port Enable */ +#define PORT_DIR 0x00000002 /* PPI Port Direction */ +#define XFR_TYPE 0x0000000C /* PPI Transfer Type */ +#define PORT_CFG 0x00000030 /* PPI Port Configuration */ +#define FLD_SEL 0x00000040 /* PPI Active Field Select */ +#define PACK_EN 0x00000080 /* PPI Packing Mode */ +#define DMA32 0x00000100 /* PPI 32-bit DMA Enable */ +#define SKIP_EN 0x00000200 /* PPI Skip Element Enable */ +#define SKIP_EO 0x00000400 /* PPI Skip Even/Odd Elements */ +#define DLENGTH 0x00003800 /* PPI Data Length */ +#define DLEN_8 0x0000 /* Data Length = 8 Bits */ +#define DLEN_10 0x0800 /* Data Length = 10 Bits */ +#define DLEN_11 0x1000 /* Data Length = 11 Bits */ +#define DLEN_12 0x1800 /* Data Length = 12 Bits */ +#define DLEN_13 0x2000 /* Data Length = 13 Bits */ +#define DLEN_14 0x2800 /* Data Length = 14 Bits */ +#define DLEN_15 0x3000 /* Data Length = 15 Bits */ +#define DLEN_16 0x3800 /* Data Length = 16 Bits */ +#define DLEN(x) (((x-9) & 0x07) << 11) /* PPI Data Length (only works for x=10-->x=16) */ +#define POL 0x0000C000 /* PPI Signal Polarities */ + +/* PPI_STATUS Masks */ +#define FLD 0x00000400 /* Field Indicator */ +#define FT_ERR 0x00000800 /* Frame Track Error */ 
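PPI data lengths can be programmed either with the fixed DLEN_8 through DLEN_16 values or with the DLEN(x) helper above; as its comment warns, the helper is only valid for x = 10..16, because for x = 8 the subtraction wraps and ((8-9) & 0x07) << 11 produces the 16-bit encoding rather than 0. A compute-only check of that arithmetic (the macros are mirrored from this header; the program itself is illustrative only):

/* Illustrative only: check the DLEN(x) encoding against the fixed values
 * above (constants mirrored from the header; not part of the header). */
#include <assert.h>
#include <stdio.h>

#define DLEN_8   0x0000
#define DLEN_10  0x0800
#define DLEN_16  0x3800
#define DLEN(x)  (((x - 9) & 0x07) << 11)

int main(void)
{
	assert(DLEN(10) == DLEN_10);             /* (1 & 7) << 11 = 0x0800 */
	assert(DLEN(16) == DLEN_16);             /* (7 & 7) << 11 = 0x3800 */
	assert(DLEN(8)  == DLEN_16);             /* wraps: 8-bit mode must use DLEN_8 */
	printf("DLEN(12) = 0x%04X\n", DLEN(12)); /* 0x1800, i.e. DLEN_12 */
	return 0;
}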
+#define OVR 0x00001000 /* FIFO Overflow Error */ +#define UNDR 0x00002000 /* FIFO Underrun Error */ +#define ERR_DET 0x00004000 /* Error Detected Indicator */ +#define ERR_NCOR 0x00008000 /* Error Not Corrected Indicator */ + +/* ********** DMA CONTROLLER MASKS *********************8 */ + +/*DMAx_CONFIG, MDMA_yy_CONFIG Masks */ +#define DMAEN 0x00000001 /* Channel Enable */ +#define WNR 0x00000002 /* Channel Direction (W/R*) */ +#define WDSIZE_8 0x00000000 /* Word Size 8 bits */ +#define WDSIZE_16 0x00000004 /* Word Size 16 bits */ +#define WDSIZE_32 0x00000008 /* Word Size 32 bits */ +#define DMA2D 0x00000010 /* 2D/1D* Mode */ +#define RESTART 0x00000020 /* Restart */ +#define DI_SEL 0x00000040 /* Data Interrupt Select */ +#define DI_EN 0x00000080 /* Data Interrupt Enable */ +#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */ +#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */ +#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */ +#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */ +#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */ +#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */ +#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */ +#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */ +#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */ +#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */ +#define NDSIZE 0x00000900 /* Next Descriptor Size */ +#define DMAFLOW 0x00007000 /* Flow Control */ +#define DMAFLOW_STOP 0x0000 /* Stop Mode */ +#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */ +#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */ +#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */ +#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */ + +#define DMAEN_P 0 /* Channel Enable */ +#define WNR_P 1 /* Channel Direction (W/R*) */ +#define DMA2D_P 4 /* 2D/1D* Mode */ +#define RESTART_P 5 /* Restart */ +#define DI_SEL_P 6 /* Data Interrupt Select */ +#define DI_EN_P 7 /* Data Interrupt Enable */ + +/*DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS Masks */ + +#define DMA_DONE 0x00000001 /* DMA Done Indicator */ +#define DMA_ERR 0x00000002 /* DMA Error Indicator */ +#define DFETCH 0x00000004 /* Descriptor Fetch Indicator */ +#define DMA_RUN 0x00000008 /* DMA Running Indicator */ + +#define DMA_DONE_P 0 /* DMA Done Indicator */ +#define DMA_ERR_P 1 /* DMA Error Indicator */ +#define DFETCH_P 2 /* Descriptor Fetch Indicator */ +#define DMA_RUN_P 3 /* DMA Running Indicator */ + +/*DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */ + +#define CTYPE 0x00000040 /* DMA Channel Type Indicator */ +#define CTYPE_P 6 /* DMA Channel Type Indicator BIT POSITION */ +#define PCAP8 0x00000080 /* DMA 8-bit Operation Indicator */ +#define PCAP16 0x00000100 /* DMA 16-bit Operation Indicator */ +#define PCAP32 0x00000200 /* DMA 32-bit Operation Indicator */ +#define PCAPWR 0x00000400 /* DMA Write Operation Indicator */ +#define PCAPRD 0x00000800 /* DMA Read Operation Indicator */ +#define PMAP 0x00007000 /* DMA Peripheral Map Field */ + +/* ************* GENERAL PURPOSE TIMER MASKS ******************** */ + +/* PWM Timer bit definitions */ + +/* TIMER_ENABLE Register */ +#define TIMEN0 0x0001 +#define TIMEN1 0x0002 +#define TIMEN2 0x0004 + +#define TIMEN0_P 0x00 +#define TIMEN1_P 0x01 +#define TIMEN2_P 0x02 + +/* TIMER_DISABLE Register */ +#define TIMDIS0 0x0001 +#define TIMDIS1 0x0002 +#define TIMDIS2 0x0004 + +#define TIMDIS0_P 0x00 +#define TIMDIS1_P 0x01 +#define TIMDIS2_P 0x02 + +/* TIMER_STATUS Register */ +#define TIMIL0 0x0001 
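The DMAx_CONFIG masks above are intended to be OR-ed together; for instance, a receive channel (DMA writing to memory) moving 16-bit words in autobuffer mode with an interrupt per buffer would use DMAEN | WNR | WDSIZE_16 | DI_EN | DMAFLOW_AUTO. The compute-only sketch below mirrors those masks and is illustrative only, not part of the header; the note about acknowledging DMA_DONE by writing the bit back reflects the usual write-one-to-clear behaviour of DMAx_IRQ_STATUS described in the hardware reference.

/* Illustrative only: compose a DMAx_CONFIG value from the masks above
 * (constants mirrored from the header; not part of the header itself). */
#include <assert.h>
#include <stdio.h>

#define DMAEN        0x00000001  /* Channel Enable */
#define WNR          0x00000002  /* Direction: DMA writes to memory */
#define WDSIZE_16    0x00000004  /* 16-bit words */
#define DI_EN        0x00000080  /* Interrupt when the buffer completes */
#define DMAFLOW_AUTO 0x1000      /* Autobuffer mode */

#define DMA_DONE     0x00000001  /* DMAx_IRQ_STATUS: transfer finished */

int main(void)
{
	unsigned int cfg = DMAEN | WNR | WDSIZE_16 | DI_EN | DMAFLOW_AUTO;

	assert(cfg == 0x1087);
	printf("DMAx_CONFIG for a 16-bit autobuffer RX channel: 0x%04X\n", cfg);

	/* In an interrupt handler, DMA_DONE would be acknowledged by writing
	 * the set bit back to DMAx_IRQ_STATUS (write-one-to-clear). */
	printf("ack mask: 0x%04X\n", DMA_DONE);
	return 0;
}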
+#define TIMIL1 0x0002 +#define TIMIL2 0x0004 +#define TOVL_ERR0 0x0010 +#define TOVL_ERR1 0x0020 +#define TOVL_ERR2 0x0040 +#define TRUN0 0x1000 +#define TRUN1 0x2000 +#define TRUN2 0x4000 + +#define TIMIL0_P 0x00 +#define TIMIL1_P 0x01 +#define TIMIL2_P 0x02 +#define TOVL_ERR0_P 0x04 +#define TOVL_ERR1_P 0x05 +#define TOVL_ERR2_P 0x06 +#define TRUN0_P 0x0C +#define TRUN1_P 0x0D +#define TRUN2_P 0x0E + +/* TIMERx_CONFIG Registers */ +#define PWM_OUT 0x0001 +#define WDTH_CAP 0x0002 +#define EXT_CLK 0x0003 +#define PULSE_HI 0x0004 +#define PERIOD_CNT 0x0008 +#define IRQ_ENA 0x0010 +#define TIN_SEL 0x0020 +#define OUT_DIS 0x0040 +#define CLK_SEL 0x0080 +#define TOGGLE_HI 0x0100 +#define EMU_RUN 0x0200 +#define ERR_TYP(x) ((x & 0x03) << 14) + +#define TMODE_P0 0x00 +#define TMODE_P1 0x01 +#define PULSE_HI_P 0x02 +#define PERIOD_CNT_P 0x03 +#define IRQ_ENA_P 0x04 +#define TIN_SEL_P 0x05 +#define OUT_DIS_P 0x06 +#define CLK_SEL_P 0x07 +#define TOGGLE_HI_P 0x08 +#define EMU_RUN_P 0x09 +#define ERR_TYP_P0 0x0E +#define ERR_TYP_P1 0x0F + +/*/ ****************** PROGRAMMABLE FLAG MASKS ********************* */ + +/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */ +#define PF0 0x0001 +#define PF1 0x0002 +#define PF2 0x0004 +#define PF3 0x0008 +#define PF4 0x0010 +#define PF5 0x0020 +#define PF6 0x0040 +#define PF7 0x0080 +#define PF8 0x0100 +#define PF9 0x0200 +#define PF10 0x0400 +#define PF11 0x0800 +#define PF12 0x1000 +#define PF13 0x2000 +#define PF14 0x4000 +#define PF15 0x8000 + +/* General Purpose IO (0xFFC00700 - 0xFFC007FF) BIT POSITIONS */ +#define PF0_P 0 +#define PF1_P 1 +#define PF2_P 2 +#define PF3_P 3 +#define PF4_P 4 +#define PF5_P 5 +#define PF6_P 6 +#define PF7_P 7 +#define PF8_P 8 +#define PF9_P 9 +#define PF10_P 10 +#define PF11_P 11 +#define PF12_P 12 +#define PF13_P 13 +#define PF14_P 14 +#define PF15_P 15 + +/* *********** SERIAL PERIPHERAL INTERFACE (SPI) MASKS **************** */ + +/* SPI_CTL Masks */ +#define TIMOD 0x00000003 /* Transfer initiation mode and interrupt generation */ +#define SZ 0x00000004 /* Send Zero (=0) or last (=1) word when TDBR empty. */ +#define GM 0x00000008 /* When RDBR full, get more (=1) data or discard (=0) incoming Data */ +#define PSSE 0x00000010 /* Enable (=1) Slave-Select input for Master. */ +#define EMISO 0x00000020 /* Enable (=1) MISO pin as an output. */ +#define SPI_LEN 0x00000100 /* Word length (0 => 8 bits, 1 => 16 bits) */ +#define LSBF 0x00000200 /* Data format (0 => MSB sent/received first 1 => LSB sent/received first) */ +#define CPHA 0x00000400 /* Clock phase (0 => SPICLK starts toggling in middle of xfer, 1 => SPICLK toggles at the beginning of xfer. 
*/ +#define CPOL 0x00000800 /* Clock polarity (0 => active-high, 1 => active-low) */ +#define MSTR 0x00001000 /* Configures SPI as master (=1) or slave (=0) */ +#define WOM 0x00002000 /* Open drain (=1) data output enable (for MOSI and MISO) */ +#define SPE 0x00004000 /* SPI module enable (=1), disable (=0) */ + +/* SPI_FLG Masks */ +#define FLS1 0x00000002 /* Enables (=1) SPI_FLOUT1 as flag output for SPI Slave-select */ +#define FLS2 0x00000004 /* Enables (=1) SPI_FLOUT2 as flag output for SPI Slave-select */ +#define FLS3 0x00000008 /* Enables (=1) SPI_FLOUT3 as flag output for SPI Slave-select */ +#define FLS4 0x00000010 /* Enables (=1) SPI_FLOUT4 as flag output for SPI Slave-select */ +#define FLS5 0x00000020 /* Enables (=1) SPI_FLOUT5 as flag output for SPI Slave-select */ +#define FLS6 0x00000040 /* Enables (=1) SPI_FLOUT6 as flag output for SPI Slave-select */ +#define FLS7 0x00000080 /* Enables (=1) SPI_FLOUT7 as flag output for SPI Slave-select */ +#define FLG1 0x00000200 /* Activates (=0) SPI_FLOUT1 as flag output for SPI Slave-select */ +#define FLG2 0x00000400 /* Activates (=0) SPI_FLOUT2 as flag output for SPI Slave-select */ +#define FLG3 0x00000800 /* Activates (=0) SPI_FLOUT3 as flag output for SPI Slave-select */ +#define FLG4 0x00001000 /* Activates (=0) SPI_FLOUT4 as flag output for SPI Slave-select */ +#define FLG5 0x00002000 /* Activates (=0) SPI_FLOUT5 as flag output for SPI Slave-select */ +#define FLG6 0x00004000 /* Activates (=0) SPI_FLOUT6 as flag output for SPI Slave-select */ +#define FLG7 0x00008000 /* Activates (=0) SPI_FLOUT7 as flag output for SPI Slave-select */ + +/* SPI_FLG Bit Positions */ +#define FLS1_P 0x00000001 /* Enables (=1) SPI_FLOUT1 as flag output for SPI Slave-select */ +#define FLS2_P 0x00000002 /* Enables (=1) SPI_FLOUT2 as flag output for SPI Slave-select */ +#define FLS3_P 0x00000003 /* Enables (=1) SPI_FLOUT3 as flag output for SPI Slave-select */ +#define FLS4_P 0x00000004 /* Enables (=1) SPI_FLOUT4 as flag output for SPI Slave-select */ +#define FLS5_P 0x00000005 /* Enables (=1) SPI_FLOUT5 as flag output for SPI Slave-select */ +#define FLS6_P 0x00000006 /* Enables (=1) SPI_FLOUT6 as flag output for SPI Slave-select */ +#define FLS7_P 0x00000007 /* Enables (=1) SPI_FLOUT7 as flag output for SPI Slave-select */ +#define FLG1_P 0x00000009 /* Activates (=0) SPI_FLOUT1 as flag output for SPI Slave-select */ +#define FLG2_P 0x0000000A /* Activates (=0) SPI_FLOUT2 as flag output for SPI Slave-select */ +#define FLG3_P 0x0000000B /* Activates (=0) SPI_FLOUT3 as flag output for SPI Slave-select */ +#define FLG4_P 0x0000000C /* Activates (=0) SPI_FLOUT4 as flag output for SPI Slave-select */ +#define FLG5_P 0x0000000D /* Activates (=0) SPI_FLOUT5 as flag output for SPI Slave-select */ +#define FLG6_P 0x0000000E /* Activates (=0) SPI_FLOUT6 as flag output for SPI Slave-select */ +#define FLG7_P 0x0000000F /* Activates (=0) SPI_FLOUT7 as flag output for SPI Slave-select */ + +/* SPI_STAT Masks */ +#define SPIF 0x00000001 /* Set (=1) when SPI single-word transfer complete */ +#define MODF 0x00000002 /* Set (=1) in a master device when some other device tries to become master */ +#define TXE 0x00000004 /* Set (=1) when transmission occurs with no new data in SPI_TDBR */ +#define TXS 0x00000008 /* SPI_TDBR Data Buffer Status (0=Empty, 1=Full) */ +#define RBSY 0x00000010 /* Set (=1) when data is received with RDBR full */ +#define RXS 0x00000020 /* SPI_RDBR Data Buffer Status (0=Empty, 1=Full) */ +#define TXCOL 0x00000040 /* When set (=1), corrupt 
data may have been transmitted */ + +/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */ + +/* AMGCTL Masks */ +#define AMCKEN 0x00000001 /* Enable CLKOUT */ +#define AMBEN_B0 0x00000002 /* Enable Asynchronous Memory Bank 0 only */ +#define AMBEN_B0_B1 0x00000004 /* Enable Asynchronous Memory Banks 0 & 1 only */ +#define AMBEN_B0_B1_B2 0x00000006 /* Enable Asynchronous Memory Banks 0, 1, and 2 */ +#define AMBEN_ALL 0x00000008 /* Enable Asynchronous Memory Banks (all) 0, 1, 2, and 3 */ + +/* AMGCTL Bit Positions */ +#define AMCKEN_P 0x00000000 /* Enable CLKOUT */ +#define AMBEN_P0 0x00000001 /* Asynchronous Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */ +#define AMBEN_P1 0x00000002 /* Asynchronous Memory Enable, 010 - banks 0&1 enabled, 011 - banks 0-3 enabled */ +#define AMBEN_P2 0x00000003 /* Asynchronous Memory Enable, 1xx - All banks (bank 0, 1, 2, and 3) enabled */ + +/* AMBCTL0 Masks */ +#define B0RDYEN 0x00000001 /* Bank 0 RDY Enable, 0=disable, 1=enable */ +#define B0RDYPOL 0x00000002 /* Bank 0 RDY Active high, 0=active low, 1=active high */ +#define B0TT_1 0x00000004 /* Bank 0 Transition Time from Read to Write = 1 cycle */ +#define B0TT_2 0x00000008 /* Bank 0 Transition Time from Read to Write = 2 cycles */ +#define B0TT_3 0x0000000C /* Bank 0 Transition Time from Read to Write = 3 cycles */ +#define B0TT_4 0x00000000 /* Bank 0 Transition Time from Read to Write = 4 cycles */ +#define B0ST_1 0x00000010 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */ +#define B0ST_2 0x00000020 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */ +#define B0ST_3 0x00000030 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */ +#define B0ST_4 0x00000000 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */ +#define B0HT_1 0x00000040 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 1 cycle */ +#define B0HT_2 0x00000080 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 2 cycles */ +#define B0HT_3 0x000000C0 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 3 cycles */ +#define B0HT_0 0x00000000 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 0 cycles */ +#define B0RAT_1 0x00000100 /* Bank 0 Read Access Time = 1 cycle */ +#define B0RAT_2 0x00000200 /* Bank 0 Read Access Time = 2 cycles */ +#define B0RAT_3 0x00000300 /* Bank 0 Read Access Time = 3 cycles */ +#define B0RAT_4 0x00000400 /* Bank 0 Read Access Time = 4 cycles */ +#define B0RAT_5 0x00000500 /* Bank 0 Read Access Time = 5 cycles */ +#define B0RAT_6 0x00000600 /* Bank 0 Read Access Time = 6 cycles */ +#define B0RAT_7 0x00000700 /* Bank 0 Read Access Time = 7 cycles */ +#define B0RAT_8 0x00000800 /* Bank 0 Read Access Time = 8 cycles */ +#define B0RAT_9 0x00000900 /* Bank 0 Read Access Time = 9 cycles */ +#define B0RAT_10 0x00000A00 /* Bank 0 Read Access Time = 10 cycles */ +#define B0RAT_11 0x00000B00 /* Bank 0 Read Access Time = 11 cycles */ +#define B0RAT_12 0x00000C00 /* Bank 0 Read Access Time = 12 cycles */ +#define B0RAT_13 0x00000D00 /* Bank 0 Read Access Time = 13 cycles */ +#define B0RAT_14 0x00000E00 /* Bank 0 Read Access Time = 14 cycles */ +#define B0RAT_15 0x00000F00 /* Bank 0 Read Access Time = 15 cycles */ +#define B0WAT_1 0x00001000 /* Bank 0 Write Access Time = 1 cycle */ +#define B0WAT_2 0x00002000 /* Bank 0 Write Access Time = 2 cycles */ +#define B0WAT_3 0x00003000 /* Bank 0 Write Access Time = 3 cycles */ +#define B0WAT_4 
0x00004000 /* Bank 0 Write Access Time = 4 cycles */ +#define B0WAT_5 0x00005000 /* Bank 0 Write Access Time = 5 cycles */ +#define B0WAT_6 0x00006000 /* Bank 0 Write Access Time = 6 cycles */ +#define B0WAT_7 0x00007000 /* Bank 0 Write Access Time = 7 cycles */ +#define B0WAT_8 0x00008000 /* Bank 0 Write Access Time = 8 cycles */ +#define B0WAT_9 0x00009000 /* Bank 0 Write Access Time = 9 cycles */ +#define B0WAT_10 0x0000A000 /* Bank 0 Write Access Time = 10 cycles */ +#define B0WAT_11 0x0000B000 /* Bank 0 Write Access Time = 11 cycles */ +#define B0WAT_12 0x0000C000 /* Bank 0 Write Access Time = 12 cycles */ +#define B0WAT_13 0x0000D000 /* Bank 0 Write Access Time = 13 cycles */ +#define B0WAT_14 0x0000E000 /* Bank 0 Write Access Time = 14 cycles */ +#define B0WAT_15 0x0000F000 /* Bank 0 Write Access Time = 15 cycles */ +#define B1RDYEN 0x00010000 /* Bank 1 RDY enable, 0=disable, 1=enable */ +#define B1RDYPOL 0x00020000 /* Bank 1 RDY Active high, 0=active low, 1=active high */ +#define B1TT_1 0x00040000 /* Bank 1 Transition Time from Read to Write = 1 cycle */ +#define B1TT_2 0x00080000 /* Bank 1 Transition Time from Read to Write = 2 cycles */ +#define B1TT_3 0x000C0000 /* Bank 1 Transition Time from Read to Write = 3 cycles */ +#define B1TT_4 0x00000000 /* Bank 1 Transition Time from Read to Write = 4 cycles */ +#define B1ST_1 0x00100000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */ +#define B1ST_2 0x00200000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */ +#define B1ST_3 0x00300000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */ +#define B1ST_4 0x00000000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */ +#define B1HT_1 0x00400000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */ +#define B1HT_2 0x00800000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */ +#define B1HT_3 0x00C00000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */ +#define B1HT_0 0x00000000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */ +#define B1RAT_1 0x01000000 /* Bank 1 Read Access Time = 1 cycle */ +#define B1RAT_2 0x02000000 /* Bank 1 Read Access Time = 2 cycles */ +#define B1RAT_3 0x03000000 /* Bank 1 Read Access Time = 3 cycles */ +#define B1RAT_4 0x04000000 /* Bank 1 Read Access Time = 4 cycles */ +#define B1RAT_5 0x05000000 /* Bank 1 Read Access Time = 5 cycles */ +#define B1RAT_6 0x06000000 /* Bank 1 Read Access Time = 6 cycles */ +#define B1RAT_7 0x07000000 /* Bank 1 Read Access Time = 7 cycles */ +#define B1RAT_8 0x08000000 /* Bank 1 Read Access Time = 8 cycles */ +#define B1RAT_9 0x09000000 /* Bank 1 Read Access Time = 9 cycles */ +#define B1RAT_10 0x0A000000 /* Bank 1 Read Access Time = 10 cycles */ +#define B1RAT_11 0x0B000000 /* Bank 1 Read Access Time = 11 cycles */ +#define B1RAT_12 0x0C000000 /* Bank 1 Read Access Time = 12 cycles */ +#define B1RAT_13 0x0D000000 /* Bank 1 Read Access Time = 13 cycles */ +#define B1RAT_14 0x0E000000 /* Bank 1 Read Access Time = 14 cycles */ +#define B1RAT_15 0x0F000000 /* Bank 1 Read Access Time = 15 cycles */ +#define B1WAT_1 0x10000000 /* Bank 1 Write Access Time = 1 cycle */ +#define B1WAT_2 0x20000000 /* Bank 1 Write Access Time = 2 cycles */ +#define B1WAT_3 0x30000000 /* Bank 1 Write Access Time = 3 cycles */ +#define B1WAT_4 0x40000000 /* Bank 1 Write Access Time = 4 cycles */ +#define B1WAT_5 0x50000000 /* 
Bank 1 Write Access Time = 5 cycles */ +#define B1WAT_6 0x60000000 /* Bank 1 Write Access Time = 6 cycles */ +#define B1WAT_7 0x70000000 /* Bank 1 Write Access Time = 7 cycles */ +#define B1WAT_8 0x80000000 /* Bank 1 Write Access Time = 8 cycles */ +#define B1WAT_9 0x90000000 /* Bank 1 Write Access Time = 9 cycles */ +#define B1WAT_10 0xA0000000 /* Bank 1 Write Access Time = 10 cycles */ +#define B1WAT_11 0xB0000000 /* Bank 1 Write Access Time = 11 cycles */ +#define B1WAT_12 0xC0000000 /* Bank 1 Write Access Time = 12 cycles */ +#define B1WAT_13 0xD0000000 /* Bank 1 Write Access Time = 13 cycles */ +#define B1WAT_14 0xE0000000 /* Bank 1 Write Access Time = 14 cycles */ +#define B1WAT_15 0xF0000000 /* Bank 1 Write Access Time = 15 cycles */ + +/* AMBCTL1 Masks */ +#define B2RDYEN 0x00000001 /* Bank 2 RDY Enable, 0=disable, 1=enable */ +#define B2RDYPOL 0x00000002 /* Bank 2 RDY Active high, 0=active low, 1=active high */ +#define B2TT_1 0x00000004 /* Bank 2 Transition Time from Read to Write = 1 cycle */ +#define B2TT_2 0x00000008 /* Bank 2 Transition Time from Read to Write = 2 cycles */ +#define B2TT_3 0x0000000C /* Bank 2 Transition Time from Read to Write = 3 cycles */ +#define B2TT_4 0x00000000 /* Bank 2 Transition Time from Read to Write = 4 cycles */ +#define B2ST_1 0x00000010 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */ +#define B2ST_2 0x00000020 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */ +#define B2ST_3 0x00000030 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */ +#define B2ST_4 0x00000000 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */ +#define B2HT_1 0x00000040 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */ +#define B2HT_2 0x00000080 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */ +#define B2HT_3 0x000000C0 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */ +#define B2HT_0 0x00000000 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */ +#define B2RAT_1 0x00000100 /* Bank 2 Read Access Time = 1 cycle */ +#define B2RAT_2 0x00000200 /* Bank 2 Read Access Time = 2 cycles */ +#define B2RAT_3 0x00000300 /* Bank 2 Read Access Time = 3 cycles */ +#define B2RAT_4 0x00000400 /* Bank 2 Read Access Time = 4 cycles */ +#define B2RAT_5 0x00000500 /* Bank 2 Read Access Time = 5 cycles */ +#define B2RAT_6 0x00000600 /* Bank 2 Read Access Time = 6 cycles */ +#define B2RAT_7 0x00000700 /* Bank 2 Read Access Time = 7 cycles */ +#define B2RAT_8 0x00000800 /* Bank 2 Read Access Time = 8 cycles */ +#define B2RAT_9 0x00000900 /* Bank 2 Read Access Time = 9 cycles */ +#define B2RAT_10 0x00000A00 /* Bank 2 Read Access Time = 10 cycles */ +#define B2RAT_11 0x00000B00 /* Bank 2 Read Access Time = 11 cycles */ +#define B2RAT_12 0x00000C00 /* Bank 2 Read Access Time = 12 cycles */ +#define B2RAT_13 0x00000D00 /* Bank 2 Read Access Time = 13 cycles */ +#define B2RAT_14 0x00000E00 /* Bank 2 Read Access Time = 14 cycles */ +#define B2RAT_15 0x00000F00 /* Bank 2 Read Access Time = 15 cycles */ +#define B2WAT_1 0x00001000 /* Bank 2 Write Access Time = 1 cycle */ +#define B2WAT_2 0x00002000 /* Bank 2 Write Access Time = 2 cycles */ +#define B2WAT_3 0x00003000 /* Bank 2 Write Access Time = 3 cycles */ +#define B2WAT_4 0x00004000 /* Bank 2 Write Access Time = 4 cycles */ +#define B2WAT_5 0x00005000 /* Bank 2 Write Access Time = 5 cycles */ +#define B2WAT_6 
0x00006000 /* Bank 2 Write Access Time = 6 cycles */ +#define B2WAT_7 0x00007000 /* Bank 2 Write Access Time = 7 cycles */ +#define B2WAT_8 0x00008000 /* Bank 2 Write Access Time = 8 cycles */ +#define B2WAT_9 0x00009000 /* Bank 2 Write Access Time = 9 cycles */ +#define B2WAT_10 0x0000A000 /* Bank 2 Write Access Time = 10 cycles */ +#define B2WAT_11 0x0000B000 /* Bank 2 Write Access Time = 11 cycles */ +#define B2WAT_12 0x0000C000 /* Bank 2 Write Access Time = 12 cycles */ +#define B2WAT_13 0x0000D000 /* Bank 2 Write Access Time = 13 cycles */ +#define B2WAT_14 0x0000E000 /* Bank 2 Write Access Time = 14 cycles */ +#define B2WAT_15 0x0000F000 /* Bank 2 Write Access Time = 15 cycles */ +#define B3RDYEN 0x00010000 /* Bank 3 RDY enable, 0=disable, 1=enable */ +#define B3RDYPOL 0x00020000 /* Bank 3 RDY Active high, 0=active low, 1=active high */ +#define B3TT_1 0x00040000 /* Bank 3 Transition Time from Read to Write = 1 cycle */ +#define B3TT_2 0x00080000 /* Bank 3 Transition Time from Read to Write = 2 cycles */ +#define B3TT_3 0x000C0000 /* Bank 3 Transition Time from Read to Write = 3 cycles */ +#define B3TT_4 0x00000000 /* Bank 3 Transition Time from Read to Write = 4 cycles */ +#define B3ST_1 0x00100000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */ +#define B3ST_2 0x00200000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */ +#define B3ST_3 0x00300000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */ +#define B3ST_4 0x00000000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */ +#define B3HT_1 0x00400000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */ +#define B3HT_2 0x00800000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */ +#define B3HT_3 0x00C00000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */ +#define B3HT_0 0x00000000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */ +#define B3RAT_1 0x01000000 /* Bank 3 Read Access Time = 1 cycle */ +#define B3RAT_2 0x02000000 /* Bank 3 Read Access Time = 2 cycles */ +#define B3RAT_3 0x03000000 /* Bank 3 Read Access Time = 3 cycles */ +#define B3RAT_4 0x04000000 /* Bank 3 Read Access Time = 4 cycles */ +#define B3RAT_5 0x05000000 /* Bank 3 Read Access Time = 5 cycles */ +#define B3RAT_6 0x06000000 /* Bank 3 Read Access Time = 6 cycles */ +#define B3RAT_7 0x07000000 /* Bank 3 Read Access Time = 7 cycles */ +#define B3RAT_8 0x08000000 /* Bank 3 Read Access Time = 8 cycles */ +#define B3RAT_9 0x09000000 /* Bank 3 Read Access Time = 9 cycles */ +#define B3RAT_10 0x0A000000 /* Bank 3 Read Access Time = 10 cycles */ +#define B3RAT_11 0x0B000000 /* Bank 3 Read Access Time = 11 cycles */ +#define B3RAT_12 0x0C000000 /* Bank 3 Read Access Time = 12 cycles */ +#define B3RAT_13 0x0D000000 /* Bank 3 Read Access Time = 13 cycles */ +#define B3RAT_14 0x0E000000 /* Bank 3 Read Access Time = 14 cycles */ +#define B3RAT_15 0x0F000000 /* Bank 3 Read Access Time = 15 cycles */ +#define B3WAT_1 0x10000000 /* Bank 3 Write Access Time = 1 cycle */ +#define B3WAT_2 0x20000000 /* Bank 3 Write Access Time = 2 cycles */ +#define B3WAT_3 0x30000000 /* Bank 3 Write Access Time = 3 cycles */ +#define B3WAT_4 0x40000000 /* Bank 3 Write Access Time = 4 cycles */ +#define B3WAT_5 0x50000000 /* Bank 3 Write Access Time = 5 cycles */ +#define B3WAT_6 0x60000000 /* Bank 3 Write Access Time = 6 cycles */ +#define B3WAT_7 0x70000000 /* 
Bank 3 Write Access Time = 7 cycles */ +#define B3WAT_8 0x80000000 /* Bank 3 Write Access Time = 8 cycles */ +#define B3WAT_9 0x90000000 /* Bank 3 Write Access Time = 9 cycles */ +#define B3WAT_10 0xA0000000 /* Bank 3 Write Access Time = 10 cycles */ +#define B3WAT_11 0xB0000000 /* Bank 3 Write Access Time = 11 cycles */ +#define B3WAT_12 0xC0000000 /* Bank 3 Write Access Time = 12 cycles */ +#define B3WAT_13 0xD0000000 /* Bank 3 Write Access Time = 13 cycles */ +#define B3WAT_14 0xE0000000 /* Bank 3 Write Access Time = 14 cycles */ +#define B3WAT_15 0xF0000000 /* Bank 3 Write Access Time = 15 cycles */ + +/* ********************** SDRAM CONTROLLER MASKS *************************** */ + +/* SDGCTL Masks */ +#define SCTLE 0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */ +#define CL_2 0x00000008 /* SDRAM CAS latency = 2 cycles */ +#define CL_3 0x0000000C /* SDRAM CAS latency = 3 cycles */ +#define PFE 0x00000010 /* Enable SDRAM prefetch */ +#define PFP 0x00000020 /* Prefetch has priority over AMC requests */ +#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */ +#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */ +#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */ +#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */ +#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */ +#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */ +#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */ +#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */ +#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */ +#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */ +#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */ +#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */ +#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */ +#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */ +#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */ +#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */ +#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */ +#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */ +#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */ +#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */ +#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */ +#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */ +#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */ +#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */ +#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */ +#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */ +#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */ +#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */ +#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */ +#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */ +#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */ +#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */ +#define PUPSD 0x00200000 /*Power-up start delay */ +#define PSM 0x00400000 /* SDRAM power-up sequence = Precharge, mode register set, 8 CBR refresh cycles */ +#define PSS 0x00800000 /* enable SDRAM power-up sequence on next SDRAM access */ +#define SRFS 0x01000000 /* Start SDRAM self-refresh mode */ +#define EBUFE 0x02000000 /* Enable external buffering timing */ +#define FBBRW 0x04000000 /* Fast back-to-back read write enable */ +#define EMREN 0x10000000 /* Extended mode register enable */ +#define TCSR 0x20000000 /* Temp compensated self refresh value 85 deg C */ +#define CDDBG 0x40000000 /* Tristate SDRAM controls during bus grant */ + +/* EBIU_SDBCTL Masks */ +#define EBE 0x00000001 /* Enable SDRAM external bank */ +#define EBSZ_16 0x00000000 /* SDRAM 
external bank size = 16MB */ +#define EBSZ_32 0x00000002 /* SDRAM external bank size = 32MB */ +#define EBSZ_64 0x00000004 /* SDRAM external bank size = 64MB */ +#define EBSZ_128 0x00000006 /* SDRAM external bank size = 128MB */ +#define EBCAW_8 0x00000000 /* SDRAM external bank column address width = 8 bits */ +#define EBCAW_9 0x00000010 /* SDRAM external bank column address width = 9 bits */ +#define EBCAW_10 0x00000020 /* SDRAM external bank column address width = 9 bits */ +#define EBCAW_11 0x00000030 /* SDRAM external bank column address width = 9 bits */ + +/* EBIU_SDSTAT Masks */ +#define SDCI 0x00000001 /* SDRAM controller is idle */ +#define SDSRA 0x00000002 /* SDRAM SDRAM self refresh is active */ +#define SDPUA 0x00000004 /* SDRAM power up active */ +#define SDRS 0x00000008 /* SDRAM is in reset state */ +#define SDEASE 0x00000010 /* SDRAM EAB sticky error status - W1C */ +#define BGSTAT 0x00000020 /* Bus granted */ + +/*VR_CTL Masks*/ +#define WAKE 0x100 +#define VLEV_6 0x60 +#define VLEV_7 0x70 +#define VLEV_8 0x80 +#define VLEV_9 0x90 +#define VLEV_10 0xA0 +#define VLEV_11 0xB0 +#define VLEV_12 0xC0 +#define VLEV_13 0xD0 +#define VLEV_14 0xE0 +#define VLEV_15 0xF0 +#define FREQ_3 0x03 + +#endif /* _DEF_BF532_H */ diff --git a/include/asm-blackfin/mach-bf533/dma.h b/include/asm-blackfin/mach-bf533/dma.h new file mode 100644 index 000000000000..bd9d5e94307d --- /dev/null +++ b/include/asm-blackfin/mach-bf533/dma.h @@ -0,0 +1,54 @@ +/***************************************************************************** +* +* BF-533/2/1 Specific Declarations +* +****************************************************************************/ +/* + * File: include/asm-blackfin/mach-bf533/dma.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _MACH_DMA_H_ +#define _MACH_DMA_H_ + +#define MAX_BLACKFIN_DMA_CHANNEL 12 + +#define CH_PPI 0 +#define CH_SPORT0_RX 1 +#define CH_SPORT0_TX 2 +#define CH_SPORT1_RX 3 +#define CH_SPORT1_TX 4 +#define CH_SPI 5 +#define CH_UART_RX 6 +#define CH_UART_TX 7 +#define CH_MEM_STREAM0_DEST 8 /* TX */ +#define CH_MEM_STREAM0_SRC 9 /* RX */ +#define CH_MEM_STREAM1_DEST 10 /* TX */ +#define CH_MEM_STREAM1_SRC 11 /* RX */ + +#endif diff --git a/include/asm-blackfin/mach-bf533/irq.h b/include/asm-blackfin/mach-bf533/irq.h new file mode 100644 index 000000000000..9879e68e315c --- /dev/null +++ b/include/asm-blackfin/mach-bf533/irq.h @@ -0,0 +1,177 @@ +/* + * File: include/asm-blackfin/mach-bf533/defBF532.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _BF533_IRQ_H_ +#define _BF533_IRQ_H_ + +/* + * Interrupt source definitions + Event Source Core Event Name +Core Emulation ** + Events (highest priority) EMU 0 + Reset RST 1 + NMI NMI 2 + Exception EVX 3 + Reserved -- 4 + Hardware Error IVHW 5 + Core Timer IVTMR 6 * + PLL Wakeup Interrupt IVG7 7 + DMA Error (generic) IVG7 8 + PPI Error Interrupt IVG7 9 + SPORT0 Error Interrupt IVG7 10 + SPORT1 Error Interrupt IVG7 11 + SPI Error Interrupt IVG7 12 + UART Error Interrupt IVG7 13 + RTC Interrupt IVG8 14 + DMA0 Interrupt (PPI) IVG8 15 + DMA1 (SPORT0 RX) IVG9 16 + DMA2 (SPORT0 TX) IVG9 17 + DMA3 (SPORT1 RX) IVG9 18 + DMA4 (SPORT1 TX) IVG9 19 + DMA5 (PPI) IVG10 20 + DMA6 (UART RX) IVG10 21 + DMA7 (UART TX) IVG10 22 + Timer0 IVG11 23 + Timer1 IVG11 24 + Timer2 IVG11 25 + PF Interrupt A IVG12 26 + PF Interrupt B IVG12 27 + DMA8/9 Interrupt IVG13 28 + DMA10/11 Interrupt IVG13 29 + Watchdog Timer IVG13 30 + Software Interrupt 1 IVG14 31 + Software Interrupt 2 -- + (lowest priority) IVG15 32 * + */ +#define SYS_IRQS 32 +#define NR_PERI_INTS 24 + +/* The ABSTRACT IRQ definitions */ +/** the first seven of the following are fixed, the rest you change if you need to **/ +#define IRQ_EMU 0 /*Emulation */ +#define IRQ_RST 1 /*reset */ +#define IRQ_NMI 2 /*Non Maskable */ +#define IRQ_EVX 3 /*Exception */ +#define IRQ_UNUSED 4 /*- unused interrupt*/ +#define IRQ_HWERR 5 /*Hardware Error */ +#define IRQ_CORETMR 6 /*Core timer */ + +#define IRQ_PLL_WAKEUP 7 /*PLL Wakeup Interrupt */ +#define IRQ_DMA_ERROR 8 /*DMA Error (general) */ +#define IRQ_PPI_ERROR 9 /*PPI Error Interrupt */ +#define IRQ_SPORT0_ERROR 10 /*SPORT0 Error Interrupt */ +#define IRQ_SPORT1_ERROR 11 /*SPORT1 Error Interrupt */ +#define IRQ_SPI_ERROR 12 /*SPI Error Interrupt */ +#define IRQ_UART_ERROR 13 /*UART Error Interrupt */ +#define IRQ_RTC 14 /*RTC Interrupt */ +#define IRQ_PPI 15 /*DMA0 Interrupt (PPI) */ +#define IRQ_SPORT0_RX 16 /*DMA1 Interrupt 
(SPORT0 RX) */ +#define IRQ_SPORT0_TX 17 /*DMA2 Interrupt (SPORT0 TX) */ +#define IRQ_SPORT1_RX 18 /*DMA3 Interrupt (SPORT1 RX) */ +#define IRQ_SPORT1_TX 19 /*DMA4 Interrupt (SPORT1 TX) */ +#define IRQ_SPI 20 /*DMA5 Interrupt (SPI) */ +#define IRQ_UART_RX 21 /*DMA6 Interrupt (UART RX) */ +#define IRQ_UART_TX 22 /*DMA7 Interrupt (UART TX) */ +#define IRQ_TMR0 23 /*Timer 0 */ +#define IRQ_TMR1 24 /*Timer 1 */ +#define IRQ_TMR2 25 /*Timer 2 */ +#define IRQ_PROG_INTA 26 /*Programmable Flags A (8) */ +#define IRQ_PROG_INTB 27 /*Programmable Flags B (8) */ +#define IRQ_MEM_DMA0 28 /*DMA8/9 Interrupt (Memory DMA Stream 0) */ +#define IRQ_MEM_DMA1 29 /*DMA10/11 Interrupt (Memory DMA Stream 1) */ +#define IRQ_WATCH 30 /*Watch Dog Timer */ + +#define IRQ_SW_INT1 31 /*Software Int 1 */ +#define IRQ_SW_INT2 32 /*Software Int 2 (reserved for SYSCALL) */ + +#define IRQ_PF0 33 +#define IRQ_PF1 34 +#define IRQ_PF2 35 +#define IRQ_PF3 36 +#define IRQ_PF4 37 +#define IRQ_PF5 38 +#define IRQ_PF6 39 +#define IRQ_PF7 40 +#define IRQ_PF8 41 +#define IRQ_PF9 42 +#define IRQ_PF10 43 +#define IRQ_PF11 44 +#define IRQ_PF12 45 +#define IRQ_PF13 46 +#define IRQ_PF14 47 +#define IRQ_PF15 48 + +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO +#define NR_IRQS (IRQ_PF15+1) +#else +#define NR_IRQS SYS_IRQS +#endif + +#define IVG7 7 +#define IVG8 8 +#define IVG9 9 +#define IVG10 10 +#define IVG11 11 +#define IVG12 12 +#define IVG13 13 +#define IVG14 14 +#define IVG15 15 + +/* IAR0 BIT FIELDS*/ +#define RTC_ERROR_POS 28 +#define UART_ERROR_POS 24 +#define SPORT1_ERROR_POS 20 +#define SPI_ERROR_POS 16 +#define SPORT0_ERROR_POS 12 +#define PPI_ERROR_POS 8 +#define DMA_ERROR_POS 4 +#define PLLWAKE_ERROR_POS 0 + +/* IAR1 BIT FIELDS*/ +#define DMA7_UARTTX_POS 28 +#define DMA6_UARTRX_POS 24 +#define DMA5_SPI_POS 20 +#define DMA4_SPORT1TX_POS 16 +#define DMA3_SPORT1RX_POS 12 +#define DMA2_SPORT0TX_POS 8 +#define DMA1_SPORT0RX_POS 4 +#define DMA0_PPI_POS 0 + +/* IAR2 BIT FIELDS*/ +#define WDTIMER_POS 28 +#define MEMDMA1_POS 24 +#define MEMDMA0_POS 20 +#define PFB_POS 16 +#define PFA_POS 12 +#define TIMER2_POS 8 +#define TIMER1_POS 4 +#define TIMER0_POS 0 + +#endif /* _BF533_IRQ_H_ */ diff --git a/include/asm-blackfin/mach-bf533/mem_init.h b/include/asm-blackfin/mach-bf533/mem_init.h new file mode 100644 index 000000000000..1620dae5254d --- /dev/null +++ b/include/asm-blackfin/mach-bf533/mem_init.h @@ -0,0 +1,316 @@ +/* + * File: include/asm-blackfin/mach-bf533/mem_init.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#if (CONFIG_MEM_MT48LC16M16A2TG_75 || CONFIG_MEM_MT48LC64M4A2FB_7E || CONFIG_MEM_GENERIC_BOARD) +#if (CONFIG_SCLK_HZ > 119402985) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_7 +#define SDRAM_tRAS_num 7 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_6 +#define SDRAM_tRAS_num 6 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 8955223) && (CONFIG_SCLK_HZ <= 104477612) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_5 +#define SDRAM_tRAS_num 5 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_4 +#define SDRAM_tRAS_num 4 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_3 +#define SDRAM_tRAS_num 3 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_4 +#define SDRAM_tRAS_num 3 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_3 +#define SDRAM_tRAS_num 3 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_2 +#define SDRAM_tRAS_num 2 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ <= 29850746) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_1 +#define SDRAM_tRAS_num 1 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#endif + +#if (CONFIG_MEM_MT48LC16M16A2TG_75) + /*SDRAM INFORMATION: */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_MT48LC64M4A2FB_7E) + /*SDRAM INFORMATION: */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_GENERIC_BOARD) + /*SDRAM INFORMATION: Modify this for your board */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_SIZE == 128) +#define SDRAM_SIZE EBSZ_128 +#endif +#if (CONFIG_MEM_SIZE == 64) +#define SDRAM_SIZE EBSZ_64 +#endif +#if (CONFIG_MEM_SIZE == 32) +#define SDRAM_SIZE EBSZ_32 +#endif +#if (CONFIG_MEM_SIZE == 16) +#define SDRAM_SIZE EBSZ_16 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 11) +#define SDRAM_WIDTH EBCAW_11 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 10) +#define SDRAM_WIDTH EBCAW_10 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 9) +#define SDRAM_WIDTH EBCAW_9 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 8) +#define SDRAM_WIDTH EBCAW_8 +#endif + +#define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EBE) + +/* Equation from section 17 (p17-46) of BF533 HRM */ +#define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) + +/* 
Enable SCLK Out */ +#define mem_SDGCTL (SCTLE | SDRAM_CL | SDRAM_tRAS | SDRAM_tRP | SDRAM_tRCD | SDRAM_tWR | PSS) + +#if defined CONFIG_CLKIN_HALF +#define CLKIN_HALF 1 +#else +#define CLKIN_HALF 0 +#endif + +#if defined CONFIG_PLL_BYPASS +#define PLL_BYPASS 1 +#else +#define PLL_BYPASS 0 +#endif + +/***************************************Currently Not Being Used *********************************/ +#define flash_EBIU_AMBCTL_WAT ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 +#define flash_EBIU_AMBCTL_RAT ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 +#define flash_EBIU_AMBCTL_HT ((CONFIG_FLASH_SPEED_BHT * 4) / (4000000000 / CONFIG_SCLK_HZ)) +#define flash_EBIU_AMBCTL_ST ((CONFIG_FLASH_SPEED_BST * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 +#define flash_EBIU_AMBCTL_TT ((CONFIG_FLASH_SPEED_BTT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 + +#if (flash_EBIU_AMBCTL_TT > 3) +#define flash_EBIU_AMBCTL0_TT B0TT_4 +#endif +#if (flash_EBIU_AMBCTL_TT == 3) +#define flash_EBIU_AMBCTL0_TT B0TT_3 +#endif +#if (flash_EBIU_AMBCTL_TT == 2) +#define flash_EBIU_AMBCTL0_TT B0TT_2 +#endif +#if (flash_EBIU_AMBCTL_TT < 2) +#define flash_EBIU_AMBCTL0_TT B0TT_1 +#endif + +#if (flash_EBIU_AMBCTL_ST > 3) +#define flash_EBIU_AMBCTL0_ST B0ST_4 +#endif +#if (flash_EBIU_AMBCTL_ST == 3) +#define flash_EBIU_AMBCTL0_ST B0ST_3 +#endif +#if (flash_EBIU_AMBCTL_ST == 2) +#define flash_EBIU_AMBCTL0_ST B0ST_2 +#endif +#if (flash_EBIU_AMBCTL_ST < 2) +#define flash_EBIU_AMBCTL0_ST B0ST_1 +#endif + +#if (flash_EBIU_AMBCTL_HT > 2) +#define flash_EBIU_AMBCTL0_HT B0HT_3 +#endif +#if (flash_EBIU_AMBCTL_HT == 2) +#define flash_EBIU_AMBCTL0_HT B0HT_2 +#endif +#if (flash_EBIU_AMBCTL_HT == 1) +#define flash_EBIU_AMBCTL0_HT B0HT_1 +#endif +#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0) +#define flash_EBIU_AMBCTL0_HT B0HT_0 +#endif +#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0) +#define flash_EBIU_AMBCTL0_HT B0HT_1 +#endif + +#if (flash_EBIU_AMBCTL_WAT > 14) +#define flash_EBIU_AMBCTL0_WAT B0WAT_15 +#endif +#if (flash_EBIU_AMBCTL_WAT == 14) +#define flash_EBIU_AMBCTL0_WAT B0WAT_14 +#endif +#if (flash_EBIU_AMBCTL_WAT == 13) +#define flash_EBIU_AMBCTL0_WAT B0WAT_13 +#endif +#if (flash_EBIU_AMBCTL_WAT == 12) +#define flash_EBIU_AMBCTL0_WAT B0WAT_12 +#endif +#if (flash_EBIU_AMBCTL_WAT == 11) +#define flash_EBIU_AMBCTL0_WAT B0WAT_11 +#endif +#if (flash_EBIU_AMBCTL_WAT == 10) +#define flash_EBIU_AMBCTL0_WAT B0WAT_10 +#endif +#if (flash_EBIU_AMBCTL_WAT == 9) +#define flash_EBIU_AMBCTL0_WAT B0WAT_9 +#endif +#if (flash_EBIU_AMBCTL_WAT == 8) +#define flash_EBIU_AMBCTL0_WAT B0WAT_8 +#endif +#if (flash_EBIU_AMBCTL_WAT == 7) +#define flash_EBIU_AMBCTL0_WAT B0WAT_7 +#endif +#if (flash_EBIU_AMBCTL_WAT == 6) +#define flash_EBIU_AMBCTL0_WAT B0WAT_6 +#endif +#if (flash_EBIU_AMBCTL_WAT == 5) +#define flash_EBIU_AMBCTL0_WAT B0WAT_5 +#endif +#if (flash_EBIU_AMBCTL_WAT == 4) +#define flash_EBIU_AMBCTL0_WAT B0WAT_4 +#endif +#if (flash_EBIU_AMBCTL_WAT == 3) +#define flash_EBIU_AMBCTL0_WAT B0WAT_3 +#endif +#if (flash_EBIU_AMBCTL_WAT == 2) +#define flash_EBIU_AMBCTL0_WAT B0WAT_2 +#endif +#if (flash_EBIU_AMBCTL_WAT == 1) +#define flash_EBIU_AMBCTL0_WAT B0WAT_1 +#endif + +#if (flash_EBIU_AMBCTL_RAT > 14) +#define flash_EBIU_AMBCTL0_RAT B0RAT_15 +#endif +#if (flash_EBIU_AMBCTL_RAT == 14) +#define flash_EBIU_AMBCTL0_RAT B0RAT_14 +#endif +#if (flash_EBIU_AMBCTL_RAT == 13) +#define flash_EBIU_AMBCTL0_RAT B0RAT_13 +#endif +#if (flash_EBIU_AMBCTL_RAT == 12) +#define flash_EBIU_AMBCTL0_RAT B0RAT_12 
+#endif +#if (flash_EBIU_AMBCTL_RAT == 11) +#define flash_EBIU_AMBCTL0_RAT B0RAT_11 +#endif +#if (flash_EBIU_AMBCTL_RAT == 10) +#define flash_EBIU_AMBCTL0_RAT B0RAT_10 +#endif +#if (flash_EBIU_AMBCTL_RAT == 9) +#define flash_EBIU_AMBCTL0_RAT B0RAT_9 +#endif +#if (flash_EBIU_AMBCTL_RAT == 8) +#define flash_EBIU_AMBCTL0_RAT B0RAT_8 +#endif +#if (flash_EBIU_AMBCTL_RAT == 7) +#define flash_EBIU_AMBCTL0_RAT B0RAT_7 +#endif +#if (flash_EBIU_AMBCTL_RAT == 6) +#define flash_EBIU_AMBCTL0_RAT B0RAT_6 +#endif +#if (flash_EBIU_AMBCTL_RAT == 5) +#define flash_EBIU_AMBCTL0_RAT B0RAT_5 +#endif +#if (flash_EBIU_AMBCTL_RAT == 4) +#define flash_EBIU_AMBCTL0_RAT B0RAT_4 +#endif +#if (flash_EBIU_AMBCTL_RAT == 3) +#define flash_EBIU_AMBCTL0_RAT B0RAT_3 +#endif +#if (flash_EBIU_AMBCTL_RAT == 2) +#define flash_EBIU_AMBCTL0_RAT B0RAT_2 +#endif +#if (flash_EBIU_AMBCTL_RAT == 1) +#define flash_EBIU_AMBCTL0_RAT B0RAT_1 +#endif + +#define flash_EBIU_AMBCTL0 \ + (flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \ + flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN) diff --git a/include/asm-blackfin/mach-bf533/mem_map.h b/include/asm-blackfin/mach-bf533/mem_map.h new file mode 100644 index 000000000000..e84baa3e939d --- /dev/null +++ b/include/asm-blackfin/mach-bf533/mem_map.h @@ -0,0 +1,168 @@ + +/* + * File: include/asm-blackfin/mach-bf533/mem_map.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _MEM_MAP_533_H_ +#define _MEM_MAP_533_H_ + +#define COREMMR_BASE 0xFFE00000 /* Core MMRs */ +#define SYSMMR_BASE 0xFFC00000 /* System MMRs */ + +/* Async Memory Banks */ +#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */ +#define ASYNC_BANK3_SIZE 0x00100000 /* 1M */ +#define ASYNC_BANK2_BASE 0x20200000 /* Async Bank 2 */ +#define ASYNC_BANK2_SIZE 0x00100000 /* 1M */ +#define ASYNC_BANK1_BASE 0x20100000 /* Async Bank 1 */ +#define ASYNC_BANK1_SIZE 0x00100000 /* 1M */ +#define ASYNC_BANK0_BASE 0x20000000 /* Async Bank 0 */ +#define ASYNC_BANK0_SIZE 0x00100000 /* 1M */ + +/* Boot ROM Memory */ + +#define BOOT_ROM_START 0xEF000000 + +/* Level 1 Memory */ + +#ifdef CONFIG_BLKFIN_CACHE +#define BLKFIN_ICACHESIZE (16*1024) +#else +#define BLKFIN_ICACHESIZE (0*1024) +#endif + +/* Memory Map for ADSP-BF533 processors */ + +#ifdef CONFIG_BF533 +#define L1_CODE_START 0xFFA00000 +#define L1_DATA_A_START 0xFF800000 +#define L1_DATA_B_START 0xFF900000 + +#ifdef CONFIG_BLKFIN_CACHE +#define L1_CODE_LENGTH (0x14000 - 0x4000) +#else +#define L1_CODE_LENGTH 0x14000 +#endif + +#ifdef CONFIG_BLKFIN_DCACHE + +#ifdef CONFIG_BLKFIN_DCACHE_BANKA +#define DMEM_CNTR (ACACHE_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x8000 - 0x4000) +#define L1_DATA_B_LENGTH 0x8000 +#define BLKFIN_DCACHESIZE (16*1024) +#define BLKFIN_DSUPBANKS 1 +#else +#define DMEM_CNTR (ACACHE_BCACHE | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x8000 - 0x4000) +#define L1_DATA_B_LENGTH (0x8000 - 0x4000) +#define BLKFIN_DCACHESIZE (32*1024) +#define BLKFIN_DSUPBANKS 2 +#endif + +#else +#define DMEM_CNTR (ASRAM_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH 0x8000 +#define L1_DATA_B_LENGTH 0x8000 +#define BLKFIN_DCACHESIZE (0*1024) +#define BLKFIN_DSUPBANKS 0 +#endif /*CONFIG_BLKFIN_DCACHE*/ +#endif + +/* Memory Map for ADSP-BF532 processors */ + +#ifdef CONFIG_BF532 +#define L1_CODE_START 0xFFA08000 +#define L1_DATA_A_START 0xFF804000 +#define L1_DATA_B_START 0xFF904000 + +#ifdef CONFIG_BLKFIN_CACHE +#define L1_CODE_LENGTH (0xC000 - 0x4000) +#else +#define L1_CODE_LENGTH 0xC000 +#endif + +#ifdef CONFIG_BLKFIN_DCACHE + +#ifdef CONFIG_BLKFIN_DCACHE_BANKA +#define DMEM_CNTR (ACACHE_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x4000 - 0x4000) +#define L1_DATA_B_LENGTH 0x4000 +#define BLKFIN_DCACHESIZE (16*1024) +#define BLKFIN_DSUPBANKS 1 + +#else +#define DMEM_CNTR (ACACHE_BCACHE | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x4000 - 0x4000) +#define L1_DATA_B_LENGTH (0x4000 - 0x4000) +#define BLKFIN_DCACHESIZE (32*1024) +#define BLKFIN_DSUPBANKS 2 +#endif + +#else +#define DMEM_CNTR (ASRAM_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH 0x4000 +#define L1_DATA_B_LENGTH 0x4000 +#define BLKFIN_DCACHESIZE (0*1024) +#define BLKFIN_DSUPBANKS 0 +#endif /*CONFIG_BLKFIN_DCACHE*/ +#endif + +/* Memory Map for ADSP-BF531 processors */ + +#ifdef CONFIG_BF531 +#define L1_CODE_START 0xFFA08000 +#define L1_DATA_A_START 0xFF804000 +#define L1_DATA_B_START 0xFF904000 +#define L1_CODE_LENGTH 0x4000 +#define L1_DATA_B_LENGTH 0x0000 + + +#ifdef CONFIG_BLKFIN_DCACHE +#define DMEM_CNTR (ACACHE_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x4000 - 0x4000) +#define BLKFIN_DCACHESIZE (16*1024) +#define BLKFIN_DSUPBANKS 1 +#else +#define DMEM_CNTR (ASRAM_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH 0x4000 +#define BLKFIN_DCACHESIZE (0*1024) +#define BLKFIN_DSUPBANKS 0 +#endif + +#endif + +/* Scratch Pad Memory */ + +#if defined(CONFIG_BF533) || defined(CONFIG_BF532) || 
defined(CONFIG_BF531) +#define L1_SCRATCH_START 0xFFB00000 +#define L1_SCRATCH_LENGTH 0x1000 +#endif + +#endif /* _MEM_MAP_533_H_ */ diff --git a/include/asm-blackfin/mach-bf537/anomaly.h b/include/asm-blackfin/mach-bf537/anomaly.h new file mode 100644 index 000000000000..7f040f5ba018 --- /dev/null +++ b/include/asm-blackfin/mach-bf537/anomaly.h @@ -0,0 +1,120 @@ + +/* + * File: include/asm-blackfin/mach-bf537/anomaly.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* This file shoule be up to date with: + * - Revision J, June 1, 2006; ADSP-BF537 Blackfin Processor Anomaly List + * - Revision I, June 1, 2006; ADSP-BF536 Blackfin Processor Anomaly List + * - Revision J, June 1, 2006; ADSP-BF534 Blackfin Processor Anomaly List + */ + +#ifndef _MACH_ANOMALY_H_ +#define _MACH_ANOMALY_H_ + +/* We do not support 0.1 silicon - sorry */ +#if (defined(CONFIG_BF_REV_0_1)) +#error Kernel will not work on BF537/6/4 Version 0.1 +#endif + +#if (defined(CONFIG_BF_REV_0_3) || defined(CONFIG_BF_REV_0_2)) +#define ANOMALY_05000074 /* A multi issue instruction with dsp32shiftimm in + slot1 and store of a P register in slot 2 is not + supported */ +#define ANOMALY_05000119 /* DMA_RUN bit is not valid after a Peripheral Receive + Channel DMA stops */ +#define ANOMALY_05000122 /* Rx.H can not be used to access 16-bit System MMR + registers. */ +#define ANOMALY_05000166 /* PPI Data Lengths Between 8 and 16 do not zero out + upper bits*/ +#define ANOMALY_05000180 /* PPI_DELAY not functional in PPI modes with 0 frame + syncs */ +#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) +#define ANOMALY_05000247 /* CLKIN Buffer Output Enable Reset Behavior Is + Changed */ +#endif +#define ANOMALY_05000265 /* Sensitivity to noise with slow input edge rates on + SPORT external receive and transmit clocks. 
*/ +#define ANOMALY_05000272 /* Certain data cache write through modes fail for + VDDint <=0.9V */ +#define ANOMALY_05000273 /* Writes to Synchronous SDRAM memory may be lost */ +#define ANOMALY_05000277 /* Writes to a flag data register one SCLK cycle after + an edge is detected may clear interrupt */ +#define ANOMALY_05000281 /* False Hardware Error Exception when ISR context is + not restored */ +#define ANOMALY_05000282 /* Memory DMA corruption with 32-bit data and traffic + control */ +#define ANOMALY_05000283 /* A system MMR write is stalled indefinitely when + killed in a particular stage*/ +#define ANOMALY_05000312 /* Errors when SSYNC, CSYNC, or loads to LT, LB and LC + registers are interrupted */ +#endif + +#if defined(CONFIG_BF_REV_0_2) +#define ANOMALY_05000244 /* With instruction cache enabled, a CSYNC or SSYNC or + IDLE around a Change of Control causes + unpredictable results */ +#define ANOMALY_05000250 /* Incorrect Bit-Shift of Data Word in Multichannel + (TDM) */ +#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) +#define ANOMALY_05000252 /* EMAC Tx DMA error after an early frame abort */ +#endif +#define ANOMALY_05000253 /* Maximum external clock speed for Timers */ +#define ANOMALY_05000255 /* Entering Hibernate Mode with RTC Seconds event + interrupt not functional */ +#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) +#define ANOMALY_05000256 /* EMAC MDIO input latched on wrong MDC edge */ +#endif +#define ANOMALY_05000257 /* An interrupt or exception during short Hardware + loops may cause the instruction fetch unit to + malfunction */ +#define ANOMALY_05000258 /* Instruction Cache is corrupted when bit 9 and 12 of + the ICPLB Data registers differ */ +#define ANOMALY_05000260 /* ICPLB_STATUS MMR register may be corrupted */ +#define ANOMALY_05000261 /* DCPLB_FAULT_ADDR MMR register may be corrupted */ +#define ANOMALY_05000262 /* Stores to data cache may be lost */ +#define ANOMALY_05000263 /* Hardware loop corrupted when taking an ICPLB exception */ +#define ANOMALY_05000264 /* A Sync instruction (CSYNC, SSYNC) or an IDLE + instruction will cause an infinite stall in the + second to last instruction in a hardware loop */ +#define ANOMALY_05000268 /* Memory DMA error when peripheral DMA is running + and non-zero DEB_TRAFFIC_PERIOD value */ +#define ANOMALY_05000270 /* High I/O activity causes the output voltage of the + internal voltage regulator (VDDint) to decrease */ +#define ANOMALY_05000277 /* Writes to a flag data register one SCLK cycle after + an edge is detected may clear interrupt */ +#define ANOMALY_05000278 /* Disabling Peripherals with DMA running may cause + DMA system instability */ +#define ANOMALY_05000280 /* SPI Master boot mode does not work well with + Atmel Dataflash devices */ + +#endif /* CONFIG_BF_REV_0_2 */ + +#endif /* _MACH_ANOMALY_H_ */ diff --git a/include/asm-blackfin/mach-bf537/bf537.h b/include/asm-blackfin/mach-bf537/bf537.h new file mode 100644 index 000000000000..b8924cd7730c --- /dev/null +++ b/include/asm-blackfin/mach-bf537/bf537.h @@ -0,0 +1,287 @@ +/* + * File: include/asm-blackfin/mach-bf537/bf537.h + * Based on: + * Author: + * + * Created: + * Description: SYSTEM MMR REGISTER AND MEMORY MAP FOR ADSP-BF537 + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __MACH_BF537_H__ +#define __MACH_BF537_H__ + +#define SUPPORTED_REVID 2 + +/* Masks for generic ERROR IRQ demultiplexing used in int-priority-sc.c */ + +#define SPI_ERR_MASK (TXCOL | RBSY | MODF | TXE) /* SPI_STAT */ +#define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF) /* SPORTx_STAT */ +#define PPI_ERR_MASK (0xFFFF & ~FLD) /* PPI_STATUS */ +#define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE) /* EMAC_SYSTAT */ +#define UART_ERR_MASK_STAT1 (0x4) /* UARTx_IIR */ +#define UART_ERR_MASK_STAT0 (0x2) /* UARTx_IIR */ +#define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF) /* CAN_GIF */ + +#define OFFSET_(x) ((x) & 0x0000FFFF) + +/*some misc defines*/ +#define IMASK_IVG15 0x8000 +#define IMASK_IVG14 0x4000 +#define IMASK_IVG13 0x2000 +#define IMASK_IVG12 0x1000 + +#define IMASK_IVG11 0x0800 +#define IMASK_IVG10 0x0400 +#define IMASK_IVG9 0x0200 +#define IMASK_IVG8 0x0100 + +#define IMASK_IVG7 0x0080 +#define IMASK_IVGTMR 0x0040 +#define IMASK_IVGHW 0x0020 + +/***************************/ + + +#define BLKFIN_DSUBBANKS 4 +#define BLKFIN_DWAYS 2 +#define BLKFIN_DLINES 64 +#define BLKFIN_ISUBBANKS 4 +#define BLKFIN_IWAYS 4 +#define BLKFIN_ILINES 32 + +#define WAY0_L 0x1 +#define WAY1_L 0x2 +#define WAY01_L 0x3 +#define WAY2_L 0x4 +#define WAY02_L 0x5 +#define WAY12_L 0x6 +#define WAY012_L 0x7 + +#define WAY3_L 0x8 +#define WAY03_L 0x9 +#define WAY13_L 0xA +#define WAY013_L 0xB + +#define WAY32_L 0xC +#define WAY320_L 0xD +#define WAY321_L 0xE +#define WAYALL_L 0xF + +#define DMC_ENABLE (2<<2) /*yes, 2, not 1 */ + +/********************************* EBIU Settings ************************************/ +#define AMBCTL0VAL ((CONFIG_BANK_1 << 16) | CONFIG_BANK_0) +#define AMBCTL1VAL ((CONFIG_BANK_3 << 16) | CONFIG_BANK_2) + +#ifdef CONFIG_C_AMBEN_ALL +#define V_AMBEN AMBEN_ALL +#endif +#ifdef CONFIG_C_AMBEN +#define V_AMBEN 0x0 +#endif +#ifdef CONFIG_C_AMBEN_B0 +#define V_AMBEN AMBEN_B0 +#endif +#ifdef CONFIG_C_AMBEN_B0_B1 +#define V_AMBEN AMBEN_B0_B1 +#endif +#ifdef CONFIG_C_AMBEN_B0_B1_B2 +#define V_AMBEN AMBEN_B0_B1_B2 +#endif +#ifdef CONFIG_C_AMCKEN +#define V_AMCKEN AMCKEN +#else +#define V_AMCKEN 0x0 +#endif +#ifdef CONFIG_C_CDPRIO +#define V_CDPRIO 0x100 +#else +#define V_CDPRIO 0x0 +#endif + +#define AMGCTLVAL (V_AMBEN | V_AMCKEN | V_CDPRIO) + +#define MAX_VC 650000000 +#define MIN_VC 50000000 + +/********************************PLL Settings **************************************/ +#ifdef CONFIG_BFIN_KERNEL_CLOCK +#if (CONFIG_VCO_MULT < 0) +#error "VCO Multiplier is less than 0. 
Please select a different value" +#endif + +#if (CONFIG_VCO_MULT == 0) +#error "VCO Multiplier should be greater than 0. Please select a different value" +#endif + +#if (CONFIG_VCO_MULT > 64) +#error "VCO Multiplier is more than 64. Please select a different value" +#endif + +#ifndef CONFIG_CLKIN_HALF +#define CONFIG_VCO_HZ (CONFIG_CLKIN_HZ * CONFIG_VCO_MULT) +#else +#define CONFIG_VCO_HZ ((CONFIG_CLKIN_HZ * CONFIG_VCO_MULT)/2) +#endif + +#ifndef CONFIG_PLL_BYPASS +#define CONFIG_CCLK_HZ (CONFIG_VCO_HZ/CONFIG_CCLK_DIV) +#define CONFIG_SCLK_HZ (CONFIG_VCO_HZ/CONFIG_SCLK_DIV) +#else +#define CONFIG_CCLK_HZ CONFIG_CLKIN_HZ +#define CONFIG_SCLK_HZ CONFIG_CLKIN_HZ +#endif + +#if (CONFIG_SCLK_DIV < 1) +#error "SCLK DIV cannot be less than 1 or more than 15. Please select a proper value" +#endif + +#if (CONFIG_SCLK_DIV > 15) +#error "SCLK DIV cannot be less than 1 or more than 15. Please select a proper value" +#endif + +#if (CONFIG_CCLK_DIV != 1) +#if (CONFIG_CCLK_DIV != 2) +#if (CONFIG_CCLK_DIV != 4) +#if (CONFIG_CCLK_DIV != 8) +#error "CCLK DIV can be 1,2,4 or 8 only. Please select a proper value" +#endif +#endif +#endif +#endif + +#if (CONFIG_VCO_HZ > MAX_VC) +#error "VCO selected is more than maximum value. Please change the VCO multipler" +#endif + +#if (CONFIG_SCLK_HZ > 133000000) +#error "Sclk value selected is more than maximum. Please select a proper value for SCLK multiplier" +#endif + +#if (CONFIG_SCLK_HZ < 27000000) +#error "Sclk value selected is less than minimum. Please select a proper value for SCLK multiplier" +#endif + +#if (CONFIG_SCLK_HZ >= CONFIG_CCLK_HZ) +#if (CONFIG_SCLK_HZ != CONFIG_CLKIN_HZ) +#if (CONFIG_CCLK_HZ != CONFIG_CLKIN_HZ) +#error "Please select sclk less than cclk" +#endif +#endif +#endif + +#if (CONFIG_CCLK_DIV == 1) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV1 +#endif +#if (CONFIG_CCLK_DIV == 2) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV2 +#endif +#if (CONFIG_CCLK_DIV == 4) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV4 +#endif +#if (CONFIG_CCLK_DIV == 8) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV8 +#endif +#ifndef CONFIG_CCLK_ACT_DIV +#define CONFIG_CCLK_ACT_DIV CONFIG_CCLK_DIV_not_defined_properly +#endif + +#if defined(ANOMALY_05000273) && (CONFIG_CCLK_DIV == 1) +#error ANOMALY 05000273, please make sure CCLK is at least 2x SCLK +#endif + +#endif /* CONFIG_BFIN_KERNEL_CLOCK */ + +#ifdef CONFIG_BF537 +#define CPU "BF537" +#define CPUID 0x027c8000 +#endif +#ifdef CONFIG_BF536 +#define CPU "BF536" +#define CPUID 0x027c8000 +#endif +#ifdef CONFIG_BF534 +#define CPU "BF534" +#define CPUID 0x027c6000 +#endif +#ifndef CPU +#define CPU "UNKNOWN" +#define CPUID 0x0 +#endif + +#if (CONFIG_MEM_SIZE % 4) +#error "SDRAM mem size must be multible of 4MB" +#endif + +#define SDRAM_IGENERIC (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | CPLB_PORTPRIO) +#define SDRAM_IKERNEL (SDRAM_IGENERIC | CPLB_LOCK) +#define L1_IMEMORY ( CPLB_USER_RD | CPLB_VALID | CPLB_LOCK) +#define SDRAM_INON_CHBL ( CPLB_USER_RD | CPLB_VALID) + +/*Use the menuconfig cache policy here - CONFIG_BLKFIN_WT/CONFIG_BLKFIN_WB*/ + +#define ANOMALY_05000158_WORKAROUND 0x200 +#ifdef CONFIG_BLKFIN_WB /*Write Back Policy */ +#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_DIRTY \ + | CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND) +#else /*Write Through */ +#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW \ + | CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_DIRTY ) +#endif + + +#define L1_DMEMORY (CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | 
CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY ) +#define SDRAM_DNON_CHBL (CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_DIRTY ) +#define SDRAM_EBIU (CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_DIRTY ) +#define SDRAM_OOPS (CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY ) + +#define SIZE_1K 0x00000400 /* 1K */ +#define SIZE_4K 0x00001000 /* 4K */ +#define SIZE_1M 0x00100000 /* 1M */ +#define SIZE_4M 0x00400000 /* 4M */ + +#define MAX_CPLBS (16 * 2) + +/* +* Number of required data CPLB switchtable entries +* MEMSIZE / 4 (we mostly install 4M page size CPLBs +* approx 16 for smaller 1MB page size CPLBs for allignment purposes +* 1 for L1 Data Memory +* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO +* 1 for ASYNC Memory +*/ + + +#define MAX_SWITCH_D_CPLBS (((CONFIG_MEM_SIZE / 4) + 16 + 1 + 1 + 1) * 2) + +/* +* Number of required instruction CPLB switchtable entries +* MEMSIZE / 4 (we mostly install 4M page size CPLBs +* approx 12 for smaller 1MB page size CPLBs for allignment purposes +* 1 for L1 Instruction Memory +* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO +*/ + +#define MAX_SWITCH_I_CPLBS (((CONFIG_MEM_SIZE / 4) + 12 + 1 + 1) * 2) + +#endif /* __MACH_BF537_H__ */ diff --git a/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h new file mode 100644 index 000000000000..8f5d9c4d8d5b --- /dev/null +++ b/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h @@ -0,0 +1,147 @@ +#include +#include + +#define NR_PORTS 2 + +#define OFFSET_THR 0x00 /* Transmit Holding register */ +#define OFFSET_RBR 0x00 /* Receive Buffer register */ +#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */ +#define OFFSET_IER 0x04 /* Interrupt Enable Register */ +#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */ +#define OFFSET_IIR 0x08 /* Interrupt Identification Register */ +#define OFFSET_LCR 0x0C /* Line Control Register */ +#define OFFSET_MCR 0x10 /* Modem Control Register */ +#define OFFSET_LSR 0x14 /* Line Status Register */ +#define OFFSET_MSR 0x18 /* Modem Status Register */ +#define OFFSET_SCR 0x1C /* SCR Scratch Register */ +#define OFFSET_GCTL 0x24 /* Global Control Register */ + +#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR)) +#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL)) +#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER)) +#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH)) +#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR)) +#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) +#define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR)) +#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) + +#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) +#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v) +#define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v) +#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v) +#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v) +#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v) + +#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) +# define CONFIG_SERIAL_BFIN_CTSRTS + +# ifndef CONFIG_UART0_CTS_PIN +# define 
CONFIG_UART0_CTS_PIN -1 +# endif + +# ifndef CONFIG_UART0_RTS_PIN +# define CONFIG_UART0_RTS_PIN -1 +# endif + +# ifndef CONFIG_UART1_CTS_PIN +# define CONFIG_UART1_CTS_PIN -1 +# endif + +# ifndef CONFIG_UART1_RTS_PIN +# define CONFIG_UART1_RTS_PIN -1 +# endif +#endif +/* + * The pin configuration is different from schematic + */ +struct bfin_serial_port { + struct uart_port port; + unsigned int old_status; +#ifdef CONFIG_SERIAL_BFIN_DMA + int tx_done; + int tx_count; + struct circ_buf rx_dma_buf; + struct timer_list rx_dma_timer; + int rx_dma_nrows; + unsigned int tx_dma_channel; + unsigned int rx_dma_channel; + struct work_struct tx_dma_workqueue; +#else + struct work_struct cts_workqueue; +#endif +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + int cts_pin; + int rts_pin; +#endif +}; + +struct bfin_serial_port bfin_serial_ports[NR_PORTS]; +struct bfin_serial_res { + unsigned long uart_base_addr; + int uart_irq; +#ifdef CONFIG_SERIAL_BFIN_DMA + unsigned int uart_tx_dma_channel; + unsigned int uart_rx_dma_channel; +#endif +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + int uart_cts_pin; + int uart_rts_pin; +#endif +}; + +struct bfin_serial_res bfin_serial_resource[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 + { + 0xFFC00400, + IRQ_UART0_RX, +#ifdef CONFIG_SERIAL_BFIN_DMA + CH_UART0_TX, + CH_UART0_RX, +#endif +#ifdef CONFIG_BFIN_UART0_CTSRTS + CONFIG_UART0_CTS_PIN, + CONFIG_UART0_RTS_PIN, +#endif + }, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + { + 0xFFC02000, + IRQ_UART1_RX, +#ifdef CONFIG_SERIAL_BFIN_DMA + CH_UART1_TX, + CH_UART1_RX, +#endif +#ifdef CONFIG_BFIN_UART1_CTSRTS + CONFIG_UART1_CTS_PIN, + CONFIG_UART1_RTS_PIN, +#endif + }, +#endif +}; + +int nr_ports = ARRAY_SIZE(bfin_serial_resource); + +static void bfin_serial_hw_init(struct bfin_serial_port *uart) +{ + unsigned short val; + val = bfin_read16(BFIN_PORT_MUX); + val &= ~(PFDE | PFTE); + bfin_write16(BFIN_PORT_MUX, val); + + val = bfin_read16(PORTF_FER); + val |= 0xF; + bfin_write16(PORTF_FER, val); + +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + if (uart->cts_pin >= 0) { + gpio_request(uart->cts_pin, NULL); + gpio_direction_input(uart->cts_pin); + } + + if (uart->rts_pin >= 0) { + gpio_request(uart->rts_pin, NULL); + gpio_direction_output(uart->rts_pin); + } +#endif +} diff --git a/include/asm-blackfin/mach-bf537/blackfin.h b/include/asm-blackfin/mach-bf537/blackfin.h new file mode 100644 index 000000000000..bbd97051ec9c --- /dev/null +++ b/include/asm-blackfin/mach-bf537/blackfin.h @@ -0,0 +1,430 @@ +/* + * File: include/asm-blackfin/mach-bf537/blackfin.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _MACH_BLACKFIN_H_ +#define _MACH_BLACKFIN_H_ + +#define BF537_FAMILY + +#include "bf537.h" +#include "mem_map.h" +#include "defBF534.h" +#include "anomaly.h" + +#if defined(CONFIG_BF537) || defined(CONFIG_BF536) +#include "defBF537.h" +#endif + +#if !(defined(__ASSEMBLY__) || defined(ASSEMBLY)) +#include "cdefBF534.h" + +/* UART 0*/ +#define bfin_read_UART_THR() bfin_read_UART0_THR() +#define bfin_write_UART_THR(val) bfin_write_UART0_THR(val) +#define bfin_read_UART_RBR() bfin_read_UART0_RBR() +#define bfin_write_UART_RBR(val) bfin_write_UART0_RBR(val) +#define bfin_read_UART_DLL() bfin_read_UART0_DLL() +#define bfin_write_UART_DLL(val) bfin_write_UART0_DLL(val) +#define bfin_read_UART_IER() bfin_read_UART0_IER() +#define bfin_write_UART_IER(val) bfin_write_UART0_IER(val) +#define bfin_read_UART_DLH() bfin_read_UART0_DLH() +#define bfin_write_UART_DLH(val) bfin_write_UART0_DLH(val) +#define bfin_read_UART_IIR() bfin_read_UART0_IIR() +#define bfin_write_UART_IIR(val) bfin_write_UART0_IIR(val) +#define bfin_read_UART_LCR() bfin_read_UART0_LCR() +#define bfin_write_UART_LCR(val) bfin_write_UART0_LCR(val) +#define bfin_read_UART_MCR() bfin_read_UART0_MCR() +#define bfin_write_UART_MCR(val) bfin_write_UART0_MCR(val) +#define bfin_read_UART_LSR() bfin_read_UART0_LSR() +#define bfin_write_UART_LSR(val) bfin_write_UART0_LSR(val) +#define bfin_read_UART_SCR() bfin_read_UART0_SCR() +#define bfin_write_UART_SCR(val) bfin_write_UART0_SCR(val) +#define bfin_read_UART_GCTL() bfin_read_UART0_GCTL() +#define bfin_write_UART_GCTL(val) bfin_write_UART0_GCTL(val) + +#if defined(CONFIG_BF537) || defined(CONFIG_BF536) +#include "cdefBF537.h" +#endif +#endif + +/* MAP used DEFINES from BF533 to BF537 - so we don't need to change them in the driver, kernel, etc. 
*/ + +/* UART_IIR Register */ +#define STATUS(x) ((x << 1) & 0x06) +#define STATUS_P1 0x02 +#define STATUS_P0 0x01 + +/* UART 0*/ + +/* DMA Channnel */ +#define bfin_read_CH_UART_RX() bfin_read_CH_UART0_RX() +#define bfin_write_CH_UART_RX(val) bfin_write_CH_UART0_RX(val) +#define CH_UART_RX CH_UART0_RX +#define bfin_read_CH_UART_TX() bfin_read_CH_UART0_TX() +#define bfin_write_CH_UART_TX(val) bfin_write_CH_UART0_TX(val) +#define CH_UART_TX CH_UART0_TX + +/* System Interrupt Controller */ +#define bfin_read_IRQ_UART_RX() bfin_read_IRQ_UART0_RX() +#define bfin_write_IRQ_UART_RX(val) bfin_write_IRQ_UART0_RX(val) +#define IRQ_UART_RX IRQ_UART0_RX +#define bfin_read_IRQ_UART_TX() bfin_read_IRQ_UART0_TX() +#define bfin_write_IRQ_UART_TX(val) bfin_write_IRQ_UART0_TX(val) +#define IRQ_UART_TX IRQ_UART0_TX +#define bfin_read_IRQ_UART_ERROR() bfin_read_IRQ_UART0_ERROR() +#define bfin_write_IRQ_UART_ERROR(val) bfin_write_IRQ_UART0_ERROR(val) +#define IRQ_UART_ERROR IRQ_UART0_ERROR + +/* MMR Registers*/ +#define bfin_read_UART_THR() bfin_read_UART0_THR() +#define bfin_write_UART_THR(val) bfin_write_UART0_THR(val) +#define UART_THR UART0_THR +#define bfin_read_UART_RBR() bfin_read_UART0_RBR() +#define bfin_write_UART_RBR(val) bfin_write_UART0_RBR(val) +#define UART_RBR UART0_RBR +#define bfin_read_UART_DLL() bfin_read_UART0_DLL() +#define bfin_write_UART_DLL(val) bfin_write_UART0_DLL(val) +#define UART_DLL UART0_DLL +#define bfin_read_UART_IER() bfin_read_UART0_IER() +#define bfin_write_UART_IER(val) bfin_write_UART0_IER(val) +#define UART_IER UART0_IER +#define bfin_read_UART_DLH() bfin_read_UART0_DLH() +#define bfin_write_UART_DLH(val) bfin_write_UART0_DLH(val) +#define UART_DLH UART0_DLH +#define bfin_read_UART_IIR() bfin_read_UART0_IIR() +#define bfin_write_UART_IIR(val) bfin_write_UART0_IIR(val) +#define UART_IIR UART0_IIR +#define bfin_read_UART_LCR() bfin_read_UART0_LCR() +#define bfin_write_UART_LCR(val) bfin_write_UART0_LCR(val) +#define UART_LCR UART0_LCR +#define bfin_read_UART_MCR() bfin_read_UART0_MCR() +#define bfin_write_UART_MCR(val) bfin_write_UART0_MCR(val) +#define UART_MCR UART0_MCR +#define bfin_read_UART_LSR() bfin_read_UART0_LSR() +#define bfin_write_UART_LSR(val) bfin_write_UART0_LSR(val) +#define UART_LSR UART0_LSR +#define bfin_read_UART_SCR() bfin_read_UART0_SCR() +#define bfin_write_UART_SCR(val) bfin_write_UART0_SCR(val) +#define UART_SCR UART0_SCR +#define bfin_read_UART_GCTL() bfin_read_UART0_GCTL() +#define bfin_write_UART_GCTL(val) bfin_write_UART0_GCTL(val) +#define UART_GCTL UART0_GCTL + +/* DPMC*/ +#define bfin_read_STOPCK_OFF() bfin_read_STOPCK() +#define bfin_write_STOPCK_OFF(val) bfin_write_STOPCK(val) +#define STOPCK_OFF STOPCK + +/* FIO USE PORT F*/ +#ifdef CONFIG_BF537_PORT_F +#define bfin_read_PORT_FER() bfin_read_PORTF_FER() +#define bfin_write_PORT_FER(val) bfin_write_PORTF_FER(val) +#define bfin_read_FIO_FLAG_D() bfin_read_PORTFIO() +#define bfin_write_FIO_FLAG_D(val) bfin_write_PORTFIO(val) +#define bfin_read_FIO_FLAG_C() bfin_read_PORTFIO_CLEAR() +#define bfin_write_FIO_FLAG_C(val) bfin_write_PORTFIO_CLEAR(val) +#define bfin_read_FIO_FLAG_S() bfin_read_PORTFIO_SET() +#define bfin_write_FIO_FLAG_S(val) bfin_write_PORTFIO_SET(val) +#define bfin_read_FIO_FLAG_T() bfin_read_PORTFIO_TOGGLE() +#define bfin_write_FIO_FLAG_T(val) bfin_write_PORTFIO_TOGGLE(val) +#define bfin_read_FIO_MASKA_D() bfin_read_PORTFIO_MASKA() +#define bfin_write_FIO_MASKA_D(val) bfin_write_PORTFIO_MASKA(val) +#define bfin_read_FIO_MASKA_C() bfin_read_PORTFIO_MASKA_CLEAR() +#define 
bfin_write_FIO_MASKA_C(val) bfin_write_PORTFIO_MASKA_CLEAR(val) +#define bfin_read_FIO_MASKA_S() bfin_read_PORTFIO_MASKA_SET() +#define bfin_write_FIO_MASKA_S(val) bfin_write_PORTFIO_MASKA_SET(val) +#define bfin_read_FIO_MASKA_T() bfin_read_PORTFIO_MASKA_TOGGLE() +#define bfin_write_FIO_MASKA_T(val) bfin_write_PORTFIO_MASKA_TOGGLE(val) +#define bfin_read_FIO_MASKB_D() bfin_read_PORTFIO_MASKB() +#define bfin_write_FIO_MASKB_D(val) bfin_write_PORTFIO_MASKB(val) +#define bfin_read_FIO_MASKB_C() bfin_read_PORTFIO_MASKB_CLEAR() +#define bfin_write_FIO_MASKB_C(val) bfin_write_PORTFIO_MASKB_CLEAR(val) +#define bfin_read_FIO_MASKB_S() bfin_read_PORTFIO_MASKB_SET() +#define bfin_write_FIO_MASKB_S(val) bfin_write_PORTFIO_MASKB_SET(val) +#define bfin_read_FIO_MASKB_T() bfin_read_PORTFIO_MASKB_TOGGLE() +#define bfin_write_FIO_MASKB_T(val) bfin_write_PORTFIO_MASKB_TOGGLE(val) +#define bfin_read_FIO_DIR() bfin_read_PORTFIO_DIR() +#define bfin_write_FIO_DIR(val) bfin_write_PORTFIO_DIR(val) +#define bfin_read_FIO_POLAR() bfin_read_PORTFIO_POLAR() +#define bfin_write_FIO_POLAR(val) bfin_write_PORTFIO_POLAR(val) +#define bfin_read_FIO_EDGE() bfin_read_PORTFIO_EDGE() +#define bfin_write_FIO_EDGE(val) bfin_write_PORTFIO_EDGE(val) +#define bfin_read_FIO_BOTH() bfin_read_PORTFIO_BOTH() +#define bfin_write_FIO_BOTH(val) bfin_write_PORTFIO_BOTH(val) +#define bfin_read_FIO_INEN() bfin_read_PORTFIO_INEN() +#define bfin_write_FIO_INEN(val) bfin_write_PORTFIO_INEN(val) + +#define bfin_read_FIO_FLAG_D() bfin_read_PORTFIO() +#define bfin_write_FIO_FLAG_D(val) bfin_write_PORTFIO(val) +#define FIO_FLAG_D PORTFIO +#define bfin_read_FIO_FLAG_C() bfin_read_PORTFIO_CLEAR() +#define bfin_write_FIO_FLAG_C(val) bfin_write_PORTFIO_CLEAR(val) +#define FIO_FLAG_C PORTFIO_CLEAR +#define bfin_read_FIO_FLAG_S() bfin_read_PORTFIO_SET() +#define bfin_write_FIO_FLAG_S(val) bfin_write_PORTFIO_SET(val) +#define FIO_FLAG_S PORTFIO_SET +#define bfin_read_FIO_FLAG_T() bfin_read_PORTFIO_TOGGLE() +#define bfin_write_FIO_FLAG_T(val) bfin_write_PORTFIO_TOGGLE(val) +#define FIO_FLAG_T PORTFIO_TOGGLE +#define bfin_read_FIO_MASKA_D() bfin_read_PORTFIO_MASKA() +#define bfin_write_FIO_MASKA_D(val) bfin_write_PORTFIO_MASKA(val) +#define FIO_MASKA_D PORTFIO_MASKA +#define bfin_read_FIO_MASKA_C() bfin_read_PORTFIO_MASKA_CLEAR() +#define bfin_write_FIO_MASKA_C(val) bfin_write_PORTFIO_MASKA_CLEAR(val) +#define FIO_MASKA_C PORTFIO_MASKA_CLEAR +#define bfin_read_FIO_MASKA_S() bfin_read_PORTFIO_MASKA_SET() +#define bfin_write_FIO_MASKA_S(val) bfin_write_PORTFIO_MASKA_SET(val) +#define FIO_MASKA_S PORTFIO_MASKA_SET +#define bfin_read_FIO_MASKA_T() bfin_read_PORTFIO_MASKA_TOGGLE() +#define bfin_write_FIO_MASKA_T(val) bfin_write_PORTFIO_MASKA_TOGGLE(val) +#define FIO_MASKA_T PORTFIO_MASKA_TOGGLE +#define bfin_read_FIO_MASKB_D() bfin_read_PORTFIO_MASKB() +#define bfin_write_FIO_MASKB_D(val) bfin_write_PORTFIO_MASKB(val) +#define FIO_MASKB_D PORTFIO_MASKB +#define bfin_read_FIO_MASKB_C() bfin_read_PORTFIO_MASKB_CLEAR() +#define bfin_write_FIO_MASKB_C(val) bfin_write_PORTFIO_MASKB_CLEAR(val) +#define FIO_MASKB_C PORTFIO_MASKB_CLEAR +#define bfin_read_FIO_MASKB_S() bfin_read_PORTFIO_MASKB_SET() +#define bfin_write_FIO_MASKB_S(val) bfin_write_PORTFIO_MASKB_SET(val) +#define FIO_MASKB_S PORTFIO_MASKB_SET +#define bfin_read_FIO_MASKB_T() bfin_read_PORTFIO_MASKB_TOGGLE() +#define bfin_write_FIO_MASKB_T(val) bfin_write_PORTFIO_MASKB_TOGGLE(val) +#define FIO_MASKB_T PORTFIO_MASKB_TOGGLE +#define bfin_read_FIO_DIR() bfin_read_PORTFIO_DIR() +#define bfin_write_FIO_DIR(val) 
bfin_write_PORTFIO_DIR(val) +#define FIO_DIR PORTFIO_DIR +#define bfin_read_FIO_POLAR() bfin_read_PORTFIO_POLAR() +#define bfin_write_FIO_POLAR(val) bfin_write_PORTFIO_POLAR(val) +#define FIO_POLAR PORTFIO_POLAR +#define bfin_read_FIO_EDGE() bfin_read_PORTFIO_EDGE() +#define bfin_write_FIO_EDGE(val) bfin_write_PORTFIO_EDGE(val) +#define FIO_EDGE PORTFIO_EDGE +#define bfin_read_FIO_BOTH() bfin_read_PORTFIO_BOTH() +#define bfin_write_FIO_BOTH(val) bfin_write_PORTFIO_BOTH(val) +#define FIO_BOTH PORTFIO_BOTH +#define bfin_read_FIO_INEN() bfin_read_PORTFIO_INEN() +#define bfin_write_FIO_INEN(val) bfin_write_PORTFIO_INEN(val) +#define FIO_INEN PORTFIO_INEN +#endif + +/* FIO USE PORT G*/ +#ifdef CONFIG_BF537_PORT_G +#define bfin_read_PORT_FER() bfin_read_PORTG_FER() +#define bfin_write_PORT_FER(val) bfin_write_PORTG_FER(val) +#define bfin_read_FIO_FLAG_D() bfin_read_PORTGIO() +#define bfin_write_FIO_FLAG_D(val) bfin_write_PORTGIO(val) +#define bfin_read_FIO_FLAG_C() bfin_read_PORTGIO_CLEAR() +#define bfin_write_FIO_FLAG_C(val) bfin_write_PORTGIO_CLEAR(val) +#define bfin_read_FIO_FLAG_S() bfin_read_PORTGIO_SET() +#define bfin_write_FIO_FLAG_S(val) bfin_write_PORTGIO_SET(val) +#define bfin_read_FIO_FLAG_T() bfin_read_PORTGIO_TOGGLE() +#define bfin_write_FIO_FLAG_T(val) bfin_write_PORTGIO_TOGGLE(val) +#define bfin_read_FIO_MASKA_D() bfin_read_PORTGIO_MASKA() +#define bfin_write_FIO_MASKA_D(val) bfin_write_PORTGIO_MASKA(val) +#define bfin_read_FIO_MASKA_C() bfin_read_PORTGIO_MASKA_CLEAR() +#define bfin_write_FIO_MASKA_C(val) bfin_write_PORTGIO_MASKA_CLEAR(val) +#define bfin_read_FIO_MASKA_S() bfin_read_PORTGIO_MASKA_SET() +#define bfin_write_FIO_MASKA_S(val) bfin_write_PORTGIO_MASKA_SET(val) +#define bfin_read_FIO_MASKA_T() bfin_read_PORTGIO_MASKA_TOGGLE() +#define bfin_write_FIO_MASKA_T(val) bfin_write_PORTGIO_MASKA_TOGGLE(val) +#define bfin_read_FIO_MASKB_D() bfin_read_PORTGIO_MASKB() +#define bfin_write_FIO_MASKB_D(val) bfin_write_PORTGIO_MASKB(val) +#define bfin_read_FIO_MASKB_C() bfin_read_PORTGIO_MASKB_CLEAR() +#define bfin_write_FIO_MASKB_C(val) bfin_write_PORTGIO_MASKB_CLEAR(val) +#define bfin_read_FIO_MASKB_S() bfin_read_PORTGIO_MASKB_SET() +#define bfin_write_FIO_MASKB_S(val) bfin_write_PORTGIO_MASKB_SET(val) +#define bfin_read_FIO_MASKB_T() bfin_read_PORTGIO_MASKB_TOGGLE() +#define bfin_write_FIO_MASKB_T(val) bfin_write_PORTGIO_MASKB_TOGGLE(val) +#define bfin_read_FIO_DIR() bfin_read_PORTGIO_DIR() +#define bfin_write_FIO_DIR(val) bfin_write_PORTGIO_DIR(val) +#define bfin_read_FIO_POLAR() bfin_read_PORTGIO_POLAR() +#define bfin_write_FIO_POLAR(val) bfin_write_PORTGIO_POLAR(val) +#define bfin_read_FIO_EDGE() bfin_read_PORTGIO_EDGE() +#define bfin_write_FIO_EDGE(val) bfin_write_PORTGIO_EDGE(val) +#define bfin_read_FIO_BOTH() bfin_read_PORTGIO_BOTH() +#define bfin_write_FIO_BOTH(val) bfin_write_PORTGIO_BOTH(val) +#define bfin_read_FIO_INEN() bfin_read_PORTGIO_INEN() +#define bfin_write_FIO_INEN(val) bfin_write_PORTGIO_INEN(val) + +#define bfin_read_FIO_FLAG_D() bfin_read_PORTGIO() +#define bfin_write_FIO_FLAG_D(val) bfin_write_PORTGIO(val) +#define FIO_FLAG_D PORTGIO +#define bfin_read_FIO_FLAG_C() bfin_read_PORTGIO_CLEAR() +#define bfin_write_FIO_FLAG_C(val) bfin_write_PORTGIO_CLEAR(val) +#define FIO_FLAG_C PORTGIO_CLEAR +#define bfin_read_FIO_FLAG_S() bfin_read_PORTGIO_SET() +#define bfin_write_FIO_FLAG_S(val) bfin_write_PORTGIO_SET(val) +#define FIO_FLAG_S PORTGIO_SET +#define bfin_read_FIO_FLAG_T() bfin_read_PORTGIO_TOGGLE() +#define bfin_write_FIO_FLAG_T(val) 
bfin_write_PORTGIO_TOGGLE(val) +#define FIO_FLAG_T PORTGIO_TOGGLE +#define bfin_read_FIO_MASKA_D() bfin_read_PORTGIO_MASKA() +#define bfin_write_FIO_MASKA_D(val) bfin_write_PORTGIO_MASKA(val) +#define FIO_MASKA_D PORTGIO_MASKA +#define bfin_read_FIO_MASKA_C() bfin_read_PORTGIO_MASKA_CLEAR() +#define bfin_write_FIO_MASKA_C(val) bfin_write_PORTGIO_MASKA_CLEAR(val) +#define FIO_MASKA_C PORTGIO_MASKA_CLEAR +#define bfin_read_FIO_MASKA_S() bfin_read_PORTGIO_MASKA_SET() +#define bfin_write_FIO_MASKA_S(val) bfin_write_PORTGIO_MASKA_SET(val) +#define FIO_MASKA_S PORTGIO_MASKA_SET +#define bfin_read_FIO_MASKA_T() bfin_read_PORTGIO_MASKA_TOGGLE() +#define bfin_write_FIO_MASKA_T(val) bfin_write_PORTGIO_MASKA_TOGGLE(val) +#define FIO_MASKA_T PORTGIO_MASKA_TOGGLE +#define bfin_read_FIO_MASKB_D() bfin_read_PORTGIO_MASKB() +#define bfin_write_FIO_MASKB_D(val) bfin_write_PORTGIO_MASKB(val) +#define FIO_MASKB_D PORTGIO_MASKB +#define bfin_read_FIO_MASKB_C() bfin_read_PORTGIO_MASKB_CLEAR() +#define bfin_write_FIO_MASKB_C(val) bfin_write_PORTGIO_MASKB_CLEAR(val) +#define FIO_MASKB_C PORTGIO_MASKB_CLEAR +#define bfin_read_FIO_MASKB_S() bfin_read_PORTGIO_MASKB_SET() +#define bfin_write_FIO_MASKB_S(val) bfin_write_PORTGIO_MASKB_SET(val) +#define FIO_MASKB_S PORTGIO_MASKB_SET +#define bfin_read_FIO_MASKB_T() bfin_read_PORTGIO_MASKB_TOGGLE() +#define bfin_write_FIO_MASKB_T(val) bfin_write_PORTGIO_MASKB_TOGGLE(val) +#define FIO_MASKB_T PORTGIO_MASKB_TOGGLE +#define bfin_read_FIO_DIR() bfin_read_PORTGIO_DIR() +#define bfin_write_FIO_DIR(val) bfin_write_PORTGIO_DIR(val) +#define FIO_DIR PORTGIO_DIR +#define bfin_read_FIO_POLAR() bfin_read_PORTGIO_POLAR() +#define bfin_write_FIO_POLAR(val) bfin_write_PORTGIO_POLAR(val) +#define FIO_POLAR PORTGIO_POLAR +#define bfin_read_FIO_EDGE() bfin_read_PORTGIO_EDGE() +#define bfin_write_FIO_EDGE(val) bfin_write_PORTGIO_EDGE(val) +#define FIO_EDGE PORTGIO_EDGE +#define bfin_read_FIO_BOTH() bfin_read_PORTGIO_BOTH() +#define bfin_write_FIO_BOTH(val) bfin_write_PORTGIO_BOTH(val) +#define FIO_BOTH PORTGIO_BOTH +#define bfin_read_FIO_INEN() bfin_read_PORTGIO_INEN() +#define bfin_write_FIO_INEN(val) bfin_write_PORTGIO_INEN(val) +#define FIO_INEN PORTGIO_INEN + +#endif + +/* FIO USE PORT H*/ +#ifdef CONFIG_BF537_PORT_H +#define bfin_read_PORT_FER() bfin_read_PORTH_FER() +#define bfin_write_PORT_FER(val) bfin_write_PORTH_FER(val) +#define bfin_read_FIO_FLAG_D() bfin_read_PORTHIO() +#define bfin_write_FIO_FLAG_D(val) bfin_write_PORTHIO(val) +#define bfin_read_FIO_FLAG_C() bfin_read_PORTHIO_CLEAR() +#define bfin_write_FIO_FLAG_C(val) bfin_write_PORTHIO_CLEAR(val) +#define bfin_read_FIO_FLAG_S() bfin_read_PORTHIO_SET() +#define bfin_write_FIO_FLAG_S(val) bfin_write_PORTHIO_SET(val) +#define bfin_read_FIO_FLAG_T() bfin_read_PORTHIO_TOGGLE() +#define bfin_write_FIO_FLAG_T(val) bfin_write_PORTHIO_TOGGLE(val) +#define bfin_read_FIO_MASKA_D() bfin_read_PORTHIO_MASKA() +#define bfin_write_FIO_MASKA_D(val) bfin_write_PORTHIO_MASKA(val) +#define bfin_read_FIO_MASKA_C() bfin_read_PORTHIO_MASKA_CLEAR() +#define bfin_write_FIO_MASKA_C(val) bfin_write_PORTHIO_MASKA_CLEAR(val) +#define bfin_read_FIO_MASKA_S() bfin_read_PORTHIO_MASKA_SET() +#define bfin_write_FIO_MASKA_S(val) bfin_write_PORTHIO_MASKA_SET(val) +#define bfin_read_FIO_MASKA_T() bfin_read_PORTHIO_MASKA_TOGGLE() +#define bfin_write_FIO_MASKA_T(val) bfin_write_PORTHIO_MASKA_TOGGLE(val) +#define bfin_read_FIO_MASKB_D() bfin_read_PORTHIO_MASKB() +#define bfin_write_FIO_MASKB_D(val) bfin_write_PORTHIO_MASKB(val) +#define bfin_read_FIO_MASKB_C() 
bfin_read_PORTHIO_MASKB_CLEAR() +#define bfin_write_FIO_MASKB_C(val) bfin_write_PORTHIO_MASKB_CLEAR(val) +#define bfin_read_FIO_MASKB_S() bfin_read_PORTHIO_MASKB_SET() +#define bfin_write_FIO_MASKB_S(val) bfin_write_PORTHIO_MASKB_SET(val) +#define bfin_read_FIO_MASKB_T() bfin_read_PORTHIO_MASKB_TOGGLE() +#define bfin_write_FIO_MASKB_T(val) bfin_write_PORTHIO_MASKB_TOGGLE(val) +#define bfin_read_FIO_DIR() bfin_read_PORTHIO_DIR() +#define bfin_write_FIO_DIR(val) bfin_write_PORTHIO_DIR(val) +#define bfin_read_FIO_POLAR() bfin_read_PORTHIO_POLAR() +#define bfin_write_FIO_POLAR(val) bfin_write_PORTHIO_POLAR(val) +#define bfin_read_FIO_EDGE() bfin_read_PORTHIO_EDGE() +#define bfin_write_FIO_EDGE(val) bfin_write_PORTHIO_EDGE(val) +#define bfin_read_FIO_BOTH() bfin_read_PORTHIO_BOTH() +#define bfin_write_FIO_BOTH(val) bfin_write_PORTHIO_BOTH(val) +#define bfin_read_FIO_INEN() bfin_read_PORTHIO_INEN() +#define bfin_write_FIO_INEN(val) bfin_write_PORTHIO_INEN(val) + +#define bfin_read_FIO_FLAG_D() bfin_read_PORTHIO() +#define bfin_write_FIO_FLAG_D(val) bfin_write_PORTHIO(val) +#define FIO_FLAG_D PORTHIO +#define bfin_read_FIO_FLAG_C() bfin_read_PORTHIO_CLEAR() +#define bfin_write_FIO_FLAG_C(val) bfin_write_PORTHIO_CLEAR(val) +#define FIO_FLAG_C PORTHIO_CLEAR +#define bfin_read_FIO_FLAG_S() bfin_read_PORTHIO_SET() +#define bfin_write_FIO_FLAG_S(val) bfin_write_PORTHIO_SET(val) +#define FIO_FLAG_S PORTHIO_SET +#define bfin_read_FIO_FLAG_T() bfin_read_PORTHIO_TOGGLE() +#define bfin_write_FIO_FLAG_T(val) bfin_write_PORTHIO_TOGGLE(val) +#define FIO_FLAG_T PORTHIO_TOGGLE +#define bfin_read_FIO_MASKA_D() bfin_read_PORTHIO_MASKA() +#define bfin_write_FIO_MASKA_D(val) bfin_write_PORTHIO_MASKA(val) +#define FIO_MASKA_D PORTHIO_MASKA +#define bfin_read_FIO_MASKA_C() bfin_read_PORTHIO_MASKA_CLEAR() +#define bfin_write_FIO_MASKA_C(val) bfin_write_PORTHIO_MASKA_CLEAR(val) +#define FIO_MASKA_C PORTHIO_MASKA_CLEAR +#define bfin_read_FIO_MASKA_S() bfin_read_PORTHIO_MASKA_SET() +#define bfin_write_FIO_MASKA_S(val) bfin_write_PORTHIO_MASKA_SET(val) +#define FIO_MASKA_S PORTHIO_MASKA_SET +#define bfin_read_FIO_MASKA_T() bfin_read_PORTHIO_MASKA_TOGGLE() +#define bfin_write_FIO_MASKA_T(val) bfin_write_PORTHIO_MASKA_TOGGLE(val) +#define FIO_MASKA_T PORTHIO_MASKA_TOGGLE +#define bfin_read_FIO_MASKB_D() bfin_read_PORTHIO_MASKB() +#define bfin_write_FIO_MASKB_D(val) bfin_write_PORTHIO_MASKB(val) +#define FIO_MASKB_D PORTHIO_MASKB +#define bfin_read_FIO_MASKB_C() bfin_read_PORTHIO_MASKB_CLEAR() +#define bfin_write_FIO_MASKB_C(val) bfin_write_PORTHIO_MASKB_CLEAR(val) +#define FIO_MASKB_C PORTHIO_MASKB_CLEAR +#define bfin_read_FIO_MASKB_S() bfin_read_PORTHIO_MASKB_SET() +#define bfin_write_FIO_MASKB_S(val) bfin_write_PORTHIO_MASKB_SET(val) +#define FIO_MASKB_S PORTHIO_MASKB_SET +#define bfin_read_FIO_MASKB_T() bfin_read_PORTHIO_MASKB_TOGGLE() +#define bfin_write_FIO_MASKB_T(val) bfin_write_PORTHIO_MASKB_TOGGLE(val) +#define FIO_MASKB_T PORTHIO_MASKB_TOGGLE +#define bfin_read_FIO_DIR() bfin_read_PORTHIO_DIR() +#define bfin_write_FIO_DIR(val) bfin_write_PORTHIO_DIR(val) +#define FIO_DIR PORTHIO_DIR +#define bfin_read_FIO_POLAR() bfin_read_PORTHIO_POLAR() +#define bfin_write_FIO_POLAR(val) bfin_write_PORTHIO_POLAR(val) +#define FIO_POLAR PORTHIO_POLAR +#define bfin_read_FIO_EDGE() bfin_read_PORTHIO_EDGE() +#define bfin_write_FIO_EDGE(val) bfin_write_PORTHIO_EDGE(val) +#define FIO_EDGE PORTHIO_EDGE +#define bfin_read_FIO_BOTH() bfin_read_PORTHIO_BOTH() +#define bfin_write_FIO_BOTH(val) bfin_write_PORTHIO_BOTH(val) +#define FIO_BOTH 
PORTHIO_BOTH +#define bfin_read_FIO_INEN() bfin_read_PORTHIO_INEN() +#define bfin_write_FIO_INEN(val) bfin_write_PORTHIO_INEN(val) +#define FIO_INEN PORTHIO_INEN + +#endif + +/* PLL_DIV Masks */ +#define CCLK_DIV1 CSEL_DIV1 /* CCLK = VCO / 1 */ +#define CCLK_DIV2 CSEL_DIV2 /* CCLK = VCO / 2 */ +#define CCLK_DIV4 CSEL_DIV4 /* CCLK = VCO / 4 */ +#define CCLK_DIV8 CSEL_DIV8 /* CCLK = VCO / 8 */ + +#endif diff --git a/include/asm-blackfin/mach-bf537/cdefBF534.h b/include/asm-blackfin/mach-bf537/cdefBF534.h new file mode 100644 index 000000000000..7b658c175f85 --- /dev/null +++ b/include/asm-blackfin/mach-bf537/cdefBF534.h @@ -0,0 +1,1823 @@ +/* + * File: include/asm-blackfin/mach-bf537/cdefbf534.h + * Based on: + * Author: + * + * Created: + * Description: system mmr register map + * + * Rev: + * + * Modified: + * + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _CDEF_BF534_H +#define _CDEF_BF534_H + +/* Include all Core registers and bit definitions */ +#include "defBF534.h" + +/* Include core specific register pointer definitions */ +#include + +#include + +/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */ +#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL) +#define bfin_write_PLL_CTL(val) bfin_write16(PLL_CTL,val) +#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV) +#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV,val) +#define bfin_read_VR_CTL() bfin_read16(VR_CTL) +/* Writing to VR_CTL initiates a PLL relock sequence. 
*/ +static __inline__ void bfin_write_VR_CTL(unsigned int val) +{ + unsigned long flags, iwr; + + bfin_write16(VR_CTL, val); + __builtin_bfin_ssync(); + /* Enable the PLL Wakeup bit in SIC IWR */ + iwr = bfin_read32(SIC_IWR); + /* Only allow PPL Wakeup) */ + bfin_write32(SIC_IWR, IWR_ENABLE(0)); + local_irq_save(flags); + asm("IDLE;"); + local_irq_restore(flags); + bfin_write32(SIC_IWR, iwr); +} +#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT) +#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT,val) +#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT) +#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT,val) +#define bfin_read_CHIPID() bfin_read32(CHIPID) + +/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */ +#define bfin_read_SWRST() bfin_read16(SWRST) +#define bfin_write_SWRST(val) bfin_write16(SWRST,val) +#define bfin_read_SYSCR() bfin_read16(SYSCR) +#define bfin_write_SYSCR(val) bfin_write16(SYSCR,val) +#define pSIC_RVECT ((void * volatile *)SIC_RVECT) +#define bfin_read_SIC_RVECT() bfin_read32(SIC_RVECT) +#define bfin_write_SIC_RVECT(val) bfin_write32(SIC_RVECT,val) +#define bfin_read_SIC_IMASK() bfin_read32(SIC_IMASK) +#define bfin_write_SIC_IMASK(val) bfin_write32(SIC_IMASK,val) +#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0) +#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0,val) +#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1) +#define bfin_write_SIC_IAR1(val) bfin_write32(SIC_IAR1,val) +#define bfin_read_SIC_IAR2() bfin_read32(SIC_IAR2) +#define bfin_write_SIC_IAR2(val) bfin_write32(SIC_IAR2,val) +#define bfin_read_SIC_IAR3() bfin_read32(SIC_IAR3) +#define bfin_write_SIC_IAR3(val) bfin_write32(SIC_IAR3,val) +#define bfin_read_SIC_ISR() bfin_read32(SIC_ISR) +#define bfin_write_SIC_ISR(val) bfin_write32(SIC_ISR,val) +#define bfin_read_SIC_IWR() bfin_read32(SIC_IWR) +#define bfin_write_SIC_IWR(val) bfin_write32(SIC_IWR,val) + +/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */ +#define bfin_read_WDOG_CTL() bfin_read16(WDOG_CTL) +#define bfin_write_WDOG_CTL(val) bfin_write16(WDOG_CTL,val) +#define bfin_read_WDOG_CNT() bfin_read32(WDOG_CNT) +#define bfin_write_WDOG_CNT(val) bfin_write32(WDOG_CNT,val) +#define bfin_read_WDOG_STAT() bfin_read32(WDOG_STAT) +#define bfin_write_WDOG_STAT(val) bfin_write32(WDOG_STAT,val) + +/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */ +#define bfin_read_RTC_STAT() bfin_read32(RTC_STAT) +#define bfin_write_RTC_STAT(val) bfin_write32(RTC_STAT,val) +#define bfin_read_RTC_ICTL() bfin_read16(RTC_ICTL) +#define bfin_write_RTC_ICTL(val) bfin_write16(RTC_ICTL,val) +#define bfin_read_RTC_ISTAT() bfin_read16(RTC_ISTAT) +#define bfin_write_RTC_ISTAT(val) bfin_write16(RTC_ISTAT,val) +#define bfin_read_RTC_SWCNT() bfin_read16(RTC_SWCNT) +#define bfin_write_RTC_SWCNT(val) bfin_write16(RTC_SWCNT,val) +#define bfin_read_RTC_ALARM() bfin_read32(RTC_ALARM) +#define bfin_write_RTC_ALARM(val) bfin_write32(RTC_ALARM,val) +#define bfin_read_RTC_FAST() bfin_read16(RTC_FAST) +#define bfin_write_RTC_FAST(val) bfin_write16(RTC_FAST,val) +#define bfin_read_RTC_PREN() bfin_read16(RTC_PREN) +#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN,val) + +/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */ +#define bfin_read_UART0_THR() bfin_read16(UART0_THR) +#define bfin_write_UART0_THR(val) bfin_write16(UART0_THR,val) +#define bfin_read_UART0_RBR() bfin_read16(UART0_RBR) +#define bfin_write_UART0_RBR(val) bfin_write16(UART0_RBR,val) +#define bfin_read_UART0_DLL() bfin_read16(UART0_DLL) +#define bfin_write_UART0_DLL(val) bfin_write16(UART0_DLL,val) 
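+/*
+ * Illustrative sketch (not from the original patch): the divisor latch
+ * pair holds SCLK / (16 * baud).  Assuming the usual DLAB bit from
+ * defBF534.h and a get_sclk() helper, 57600 baud would be programmed
+ * roughly as:
+ *
+ *	unsigned int quot = get_sclk() / (16 * 57600);
+ *	bfin_write_UART0_LCR(bfin_read_UART0_LCR() | DLAB);
+ *	bfin_write_UART0_DLL(quot & 0xff);
+ *	bfin_write_UART0_DLH(quot >> 8);
+ *	bfin_write_UART0_LCR(bfin_read_UART0_LCR() & ~DLAB);
+ */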
+#define bfin_read_UART0_IER() bfin_read16(UART0_IER) +#define bfin_write_UART0_IER(val) bfin_write16(UART0_IER,val) +#define bfin_read_UART0_DLH() bfin_read16(UART0_DLH) +#define bfin_write_UART0_DLH(val) bfin_write16(UART0_DLH,val) +#define bfin_read_UART0_IIR() bfin_read16(UART0_IIR) +#define bfin_write_UART0_IIR(val) bfin_write16(UART0_IIR,val) +#define bfin_read_UART0_LCR() bfin_read16(UART0_LCR) +#define bfin_write_UART0_LCR(val) bfin_write16(UART0_LCR,val) +#define bfin_read_UART0_MCR() bfin_read16(UART0_MCR) +#define bfin_write_UART0_MCR(val) bfin_write16(UART0_MCR,val) +#define bfin_read_UART0_LSR() bfin_read16(UART0_LSR) +#define bfin_write_UART0_LSR(val) bfin_write16(UART0_LSR,val) +#define bfin_read_UART0_MSR() bfin_read16(UART0_MSR) +#define bfin_write_UART0_MSR(val) bfin_write16(UART0_MSR,val) +#define bfin_read_UART0_SCR() bfin_read16(UART0_SCR) +#define bfin_write_UART0_SCR(val) bfin_write16(UART0_SCR,val) +#define bfin_read_UART0_GCTL() bfin_read16(UART0_GCTL) +#define bfin_write_UART0_GCTL(val) bfin_write16(UART0_GCTL,val) + +/* SPI Controller (0xFFC00500 - 0xFFC005FF) */ +#define bfin_read_SPI_CTL() bfin_read16(SPI_CTL) +#define bfin_write_SPI_CTL(val) bfin_write16(SPI_CTL,val) +#define bfin_read_SPI_FLG() bfin_read16(SPI_FLG) +#define bfin_write_SPI_FLG(val) bfin_write16(SPI_FLG,val) +#define bfin_read_SPI_STAT() bfin_read16(SPI_STAT) +#define bfin_write_SPI_STAT(val) bfin_write16(SPI_STAT,val) +#define bfin_read_SPI_TDBR() bfin_read16(SPI_TDBR) +#define bfin_write_SPI_TDBR(val) bfin_write16(SPI_TDBR,val) +#define bfin_read_SPI_RDBR() bfin_read16(SPI_RDBR) +#define bfin_write_SPI_RDBR(val) bfin_write16(SPI_RDBR,val) +#define bfin_read_SPI_BAUD() bfin_read16(SPI_BAUD) +#define bfin_write_SPI_BAUD(val) bfin_write16(SPI_BAUD,val) +#define bfin_read_SPI_SHADOW() bfin_read16(SPI_SHADOW) +#define bfin_write_SPI_SHADOW(val) bfin_write16(SPI_SHADOW,val) + +/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */ +#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG) +#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG,val) +#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER) +#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER,val) +#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD) +#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD,val) +#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH) +#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH,val) + +#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG) +#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG,val) +#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER) +#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER,val) +#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD) +#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD,val) +#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH) +#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH,val) + +#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG) +#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG,val) +#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER) +#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER,val) +#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD) +#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD,val) +#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH) +#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH,val) + 
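+/*
+ * Illustrative sketch (not from the original patch): the general-purpose
+ * timers count SCLK cycles, so a 1 kHz, 25% duty PWM output on timer 2
+ * would be configured along these lines (PWM_OUT, PERIOD_CNT and TIMEN2
+ * are assumed to come from defBF534.h, get_sclk() from the core code):
+ *
+ *	bfin_write_TIMER2_CONFIG(PWM_OUT | PERIOD_CNT);
+ *	bfin_write_TIMER2_PERIOD(get_sclk() / 1000);
+ *	bfin_write_TIMER2_WIDTH(get_sclk() / 4000);
+ *	bfin_write_TIMER_ENABLE(TIMEN2);
+ *
+ * TIMER_ENABLE and TIMER_DISABLE act on one bit per timer, so several
+ * timers can be started or stopped with a single MMR write.
+ */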
+#define bfin_read_TIMER3_CONFIG() bfin_read16(TIMER3_CONFIG) +#define bfin_write_TIMER3_CONFIG(val) bfin_write16(TIMER3_CONFIG,val) +#define bfin_read_TIMER3_COUNTER() bfin_read32(TIMER3_COUNTER) +#define bfin_write_TIMER3_COUNTER(val) bfin_write32(TIMER3_COUNTER,val) +#define bfin_read_TIMER3_PERIOD() bfin_read32(TIMER3_PERIOD) +#define bfin_write_TIMER3_PERIOD(val) bfin_write32(TIMER3_PERIOD,val) +#define bfin_read_TIMER3_WIDTH() bfin_read32(TIMER3_WIDTH) +#define bfin_write_TIMER3_WIDTH(val) bfin_write32(TIMER3_WIDTH,val) + +#define bfin_read_TIMER4_CONFIG() bfin_read16(TIMER4_CONFIG) +#define bfin_write_TIMER4_CONFIG(val) bfin_write16(TIMER4_CONFIG,val) +#define bfin_read_TIMER4_COUNTER() bfin_read32(TIMER4_COUNTER) +#define bfin_write_TIMER4_COUNTER(val) bfin_write32(TIMER4_COUNTER,val) +#define bfin_read_TIMER4_PERIOD() bfin_read32(TIMER4_PERIOD) +#define bfin_write_TIMER4_PERIOD(val) bfin_write32(TIMER4_PERIOD,val) +#define bfin_read_TIMER4_WIDTH() bfin_read32(TIMER4_WIDTH) +#define bfin_write_TIMER4_WIDTH(val) bfin_write32(TIMER4_WIDTH,val) + +#define bfin_read_TIMER5_CONFIG() bfin_read16(TIMER5_CONFIG) +#define bfin_write_TIMER5_CONFIG(val) bfin_write16(TIMER5_CONFIG,val) +#define bfin_read_TIMER5_COUNTER() bfin_read32(TIMER5_COUNTER) +#define bfin_write_TIMER5_COUNTER(val) bfin_write32(TIMER5_COUNTER,val) +#define bfin_read_TIMER5_PERIOD() bfin_read32(TIMER5_PERIOD) +#define bfin_write_TIMER5_PERIOD(val) bfin_write32(TIMER5_PERIOD,val) +#define bfin_read_TIMER5_WIDTH() bfin_read32(TIMER5_WIDTH) +#define bfin_write_TIMER5_WIDTH(val) bfin_write32(TIMER5_WIDTH,val) + +#define bfin_read_TIMER6_CONFIG() bfin_read16(TIMER6_CONFIG) +#define bfin_write_TIMER6_CONFIG(val) bfin_write16(TIMER6_CONFIG,val) +#define bfin_read_TIMER6_COUNTER() bfin_read32(TIMER6_COUNTER) +#define bfin_write_TIMER6_COUNTER(val) bfin_write32(TIMER6_COUNTER,val) +#define bfin_read_TIMER6_PERIOD() bfin_read32(TIMER6_PERIOD) +#define bfin_write_TIMER6_PERIOD(val) bfin_write32(TIMER6_PERIOD,val) +#define bfin_read_TIMER6_WIDTH() bfin_read32(TIMER6_WIDTH) +#define bfin_write_TIMER6_WIDTH(val) bfin_write32(TIMER6_WIDTH,val) + +#define bfin_read_TIMER7_CONFIG() bfin_read16(TIMER7_CONFIG) +#define bfin_write_TIMER7_CONFIG(val) bfin_write16(TIMER7_CONFIG,val) +#define bfin_read_TIMER7_COUNTER() bfin_read32(TIMER7_COUNTER) +#define bfin_write_TIMER7_COUNTER(val) bfin_write32(TIMER7_COUNTER,val) +#define bfin_read_TIMER7_PERIOD() bfin_read32(TIMER7_PERIOD) +#define bfin_write_TIMER7_PERIOD(val) bfin_write32(TIMER7_PERIOD,val) +#define bfin_read_TIMER7_WIDTH() bfin_read32(TIMER7_WIDTH) +#define bfin_write_TIMER7_WIDTH(val) bfin_write32(TIMER7_WIDTH,val) + +#define bfin_read_TIMER_ENABLE() bfin_read16(TIMER_ENABLE) +#define bfin_write_TIMER_ENABLE(val) bfin_write16(TIMER_ENABLE,val) +#define bfin_read_TIMER_DISABLE() bfin_read16(TIMER_DISABLE) +#define bfin_write_TIMER_DISABLE(val) bfin_write16(TIMER_DISABLE,val) +#define bfin_read_TIMER_STATUS() bfin_read32(TIMER_STATUS) +#define bfin_write_TIMER_STATUS(val) bfin_write32(TIMER_STATUS,val) + +/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */ +#define bfin_read_PORTFIO() bfin_read16(PORTFIO) +#define bfin_write_PORTFIO(val) bfin_write16(PORTFIO,val) +#define bfin_read_PORTFIO_CLEAR() bfin_read16(PORTFIO_CLEAR) +#define bfin_write_PORTFIO_CLEAR(val) bfin_write16(PORTFIO_CLEAR,val) +#define bfin_read_PORTFIO_SET() bfin_read16(PORTFIO_SET) +#define bfin_write_PORTFIO_SET(val) bfin_write16(PORTFIO_SET,val) +#define bfin_read_PORTFIO_TOGGLE() bfin_read16(PORTFIO_TOGGLE) 
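+/*
+ * Illustrative note (not from the original patch): PORTFIO_SET,
+ * PORTFIO_CLEAR and PORTFIO_TOGGLE are write-one-to-act registers, so a
+ * single pin can be changed without a read-modify-write of PORTFIO,
+ * e.g. with PF5 already configured as a GPIO output:
+ *
+ *	bfin_write_PORTFIO_SET(1 << 5);		PF5 high
+ *	bfin_write_PORTFIO_CLEAR(1 << 5);	PF5 low
+ */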
+#define bfin_write_PORTFIO_TOGGLE(val) bfin_write16(PORTFIO_TOGGLE,val) +#define bfin_read_PORTFIO_MASKA() bfin_read16(PORTFIO_MASKA) +#define bfin_write_PORTFIO_MASKA(val) bfin_write16(PORTFIO_MASKA,val) +#define bfin_read_PORTFIO_MASKA_CLEAR() bfin_read16(PORTFIO_MASKA_CLEAR) +#define bfin_write_PORTFIO_MASKA_CLEAR(val) bfin_write16(PORTFIO_MASKA_CLEAR,val) +#define bfin_read_PORTFIO_MASKA_SET() bfin_read16(PORTFIO_MASKA_SET) +#define bfin_write_PORTFIO_MASKA_SET(val) bfin_write16(PORTFIO_MASKA_SET,val) +#define bfin_read_PORTFIO_MASKA_TOGGLE() bfin_read16(PORTFIO_MASKA_TOGGLE) +#define bfin_write_PORTFIO_MASKA_TOGGLE(val) bfin_write16(PORTFIO_MASKA_TOGGLE,val) +#define bfin_read_PORTFIO_MASKB() bfin_read16(PORTFIO_MASKB) +#define bfin_write_PORTFIO_MASKB(val) bfin_write16(PORTFIO_MASKB,val) +#define bfin_read_PORTFIO_MASKB_CLEAR() bfin_read16(PORTFIO_MASKB_CLEAR) +#define bfin_write_PORTFIO_MASKB_CLEAR(val) bfin_write16(PORTFIO_MASKB_CLEAR,val) +#define bfin_read_PORTFIO_MASKB_SET() bfin_read16(PORTFIO_MASKB_SET) +#define bfin_write_PORTFIO_MASKB_SET(val) bfin_write16(PORTFIO_MASKB_SET,val) +#define bfin_read_PORTFIO_MASKB_TOGGLE() bfin_read16(PORTFIO_MASKB_TOGGLE) +#define bfin_write_PORTFIO_MASKB_TOGGLE(val) bfin_write16(PORTFIO_MASKB_TOGGLE,val) +#define bfin_read_PORTFIO_DIR() bfin_read16(PORTFIO_DIR) +#define bfin_write_PORTFIO_DIR(val) bfin_write16(PORTFIO_DIR,val) +#define bfin_read_PORTFIO_POLAR() bfin_read16(PORTFIO_POLAR) +#define bfin_write_PORTFIO_POLAR(val) bfin_write16(PORTFIO_POLAR,val) +#define bfin_read_PORTFIO_EDGE() bfin_read16(PORTFIO_EDGE) +#define bfin_write_PORTFIO_EDGE(val) bfin_write16(PORTFIO_EDGE,val) +#define bfin_read_PORTFIO_BOTH() bfin_read16(PORTFIO_BOTH) +#define bfin_write_PORTFIO_BOTH(val) bfin_write16(PORTFIO_BOTH,val) +#define bfin_read_PORTFIO_INEN() bfin_read16(PORTFIO_INEN) +#define bfin_write_PORTFIO_INEN(val) bfin_write16(PORTFIO_INEN,val) + +/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */ +#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1) +#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1,val) +#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2) +#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2,val) +#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV) +#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV,val) +#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV) +#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV,val) +#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX) +#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX,val) +#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX) +#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX,val) +#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX) +#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX,val) +#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX) +#define bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX,val) +#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX) +#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX,val) +#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX) +#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX,val) +#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1) +#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1,val) +#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2) +#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2,val) +#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV) +#define 
bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV,val) +#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV) +#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV,val) +#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT) +#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT,val) +#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL) +#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL,val) +#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1) +#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1,val) +#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2) +#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2,val) +#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0) +#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0,val) +#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1) +#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1,val) +#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2) +#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2,val) +#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3) +#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3,val) +#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0) +#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0,val) +#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1) +#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1,val) +#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2) +#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2,val) +#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3) +#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3,val) + +/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */ +#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1) +#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1,val) +#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2) +#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2,val) +#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV) +#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV,val) +#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV) +#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV,val) +#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX) +#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX,val) +#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX) +#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX,val) +#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX) +#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX,val) +#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX) +#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX,val) +#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX) +#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX,val) +#define bfin_read_SPORT1_RX16() bfin_read16(SPORT1_RX) +#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX,val) +#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1) +#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1,val) +#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2) +#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2,val) +#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV) +#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV,val) +#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV) +#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV,val) 
+#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT) +#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT,val) +#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL) +#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL,val) +#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1) +#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1,val) +#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2) +#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2,val) +#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0) +#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0,val) +#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1) +#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1,val) +#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2) +#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2,val) +#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3) +#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3,val) +#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0) +#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0,val) +#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1) +#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1,val) +#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2) +#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2,val) +#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3) +#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3,val) + +/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */ +#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL) +#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL,val) +#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0) +#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0,val) +#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1) +#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1,val) +#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL) +#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL,val) +#define bfin_read_EBIU_SDBCTL() bfin_read16(EBIU_SDBCTL) +#define bfin_write_EBIU_SDBCTL(val) bfin_write16(EBIU_SDBCTL,val) +#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC) +#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC,val) +#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT) +#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT,val) + +/* DMA Traffic Control Registers */ +#define pDMA_TCPER ((volatile unsigned short *)DMA_TCPER) +#define bfin_read_DMA_TCPER() bfin_read16(DMA_TCPER) +#define bfin_write_DMA_TCPER(val) bfin_write16(DMA_TCPER,val) +#define pDMA_TCCNT ((volatile unsigned short *)DMA_TCCNT) +#define bfin_read_DMA_TCCNT() bfin_read16(DMA_TCCNT) +#define bfin_write_DMA_TCCNT(val) bfin_write16(DMA_TCCNT,val) + +/* DMA Controller */ +#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG) +#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG,val) +#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_read32(DMA0_NEXT_DESC_PTR) +#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_write32(DMA0_NEXT_DESC_PTR,val) +#define bfin_read_DMA0_START_ADDR() bfin_read32(DMA0_START_ADDR) +#define bfin_write_DMA0_START_ADDR(val) bfin_write32(DMA0_START_ADDR,val) +#define bfin_read_DMA0_X_COUNT() bfin_read16(DMA0_X_COUNT) +#define bfin_write_DMA0_X_COUNT(val) bfin_write16(DMA0_X_COUNT,val) +#define bfin_read_DMA0_Y_COUNT() bfin_read16(DMA0_Y_COUNT) +#define bfin_write_DMA0_Y_COUNT(val) 
bfin_write16(DMA0_Y_COUNT,val) +#define bfin_read_DMA0_X_MODIFY() bfin_read16(DMA0_X_MODIFY) +#define bfin_write_DMA0_X_MODIFY(val) bfin_write16(DMA0_X_MODIFY,val) +#define bfin_read_DMA0_Y_MODIFY() bfin_read16(DMA0_Y_MODIFY) +#define bfin_write_DMA0_Y_MODIFY(val) bfin_write16(DMA0_Y_MODIFY,val) +#define bfin_read_DMA0_CURR_DESC_PTR() bfin_read32(DMA0_CURR_DESC_PTR) +#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_write32(DMA0_CURR_DESC_PTR,val) +#define bfin_read_DMA0_CURR_ADDR() bfin_read32(DMA0_CURR_ADDR) +#define bfin_write_DMA0_CURR_ADDR(val) bfin_write32(DMA0_CURR_ADDR,val) +#define bfin_read_DMA0_CURR_X_COUNT() bfin_read16(DMA0_CURR_X_COUNT) +#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT,val) +#define bfin_read_DMA0_CURR_Y_COUNT() bfin_read16(DMA0_CURR_Y_COUNT) +#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT,val) +#define bfin_read_DMA0_IRQ_STATUS() bfin_read16(DMA0_IRQ_STATUS) +#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS,val) +#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP) +#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP,val) + +#define bfin_read_DMA1_CONFIG() bfin_read16(DMA1_CONFIG) +#define bfin_write_DMA1_CONFIG(val) bfin_write16(DMA1_CONFIG,val) +#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_read32(DMA1_NEXT_DESC_PTR) +#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_write32(DMA1_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_START_ADDR() bfin_read32(DMA1_START_ADDR) +#define bfin_write_DMA1_START_ADDR(val) bfin_write32(DMA1_START_ADDR,val) +#define bfin_read_DMA1_X_COUNT() bfin_read16(DMA1_X_COUNT) +#define bfin_write_DMA1_X_COUNT(val) bfin_write16(DMA1_X_COUNT,val) +#define bfin_read_DMA1_Y_COUNT() bfin_read16(DMA1_Y_COUNT) +#define bfin_write_DMA1_Y_COUNT(val) bfin_write16(DMA1_Y_COUNT,val) +#define bfin_read_DMA1_X_MODIFY() bfin_read16(DMA1_X_MODIFY) +#define bfin_write_DMA1_X_MODIFY(val) bfin_write16(DMA1_X_MODIFY,val) +#define bfin_read_DMA1_Y_MODIFY() bfin_read16(DMA1_Y_MODIFY) +#define bfin_write_DMA1_Y_MODIFY(val) bfin_write16(DMA1_Y_MODIFY,val) +#define bfin_read_DMA1_CURR_DESC_PTR() bfin_read32(DMA1_CURR_DESC_PTR) +#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_write32(DMA1_CURR_DESC_PTR,val) +#define bfin_read_DMA1_CURR_ADDR() bfin_read32(DMA1_CURR_ADDR) +#define bfin_write_DMA1_CURR_ADDR(val) bfin_write32(DMA1_CURR_ADDR,val) +#define bfin_read_DMA1_CURR_X_COUNT() bfin_read16(DMA1_CURR_X_COUNT) +#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT,val) +#define bfin_read_DMA1_CURR_Y_COUNT() bfin_read16(DMA1_CURR_Y_COUNT) +#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT,val) +#define bfin_read_DMA1_IRQ_STATUS() bfin_read16(DMA1_IRQ_STATUS) +#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS,val) +#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP) +#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP,val) + +#define bfin_read_DMA2_CONFIG() bfin_read16(DMA2_CONFIG) +#define bfin_write_DMA2_CONFIG(val) bfin_write16(DMA2_CONFIG,val) +#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_read32(DMA2_NEXT_DESC_PTR) +#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_write32(DMA2_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_START_ADDR() bfin_read32(DMA2_START_ADDR) +#define bfin_write_DMA2_START_ADDR(val) bfin_write32(DMA2_START_ADDR,val) +#define bfin_read_DMA2_X_COUNT() bfin_read16(DMA2_X_COUNT) +#define bfin_write_DMA2_X_COUNT(val) 
bfin_write16(DMA2_X_COUNT,val) +#define bfin_read_DMA2_Y_COUNT() bfin_read16(DMA2_Y_COUNT) +#define bfin_write_DMA2_Y_COUNT(val) bfin_write16(DMA2_Y_COUNT,val) +#define bfin_read_DMA2_X_MODIFY() bfin_read16(DMA2_X_MODIFY) +#define bfin_write_DMA2_X_MODIFY(val) bfin_write16(DMA2_X_MODIFY,val) +#define bfin_read_DMA2_Y_MODIFY() bfin_read16(DMA2_Y_MODIFY) +#define bfin_write_DMA2_Y_MODIFY(val) bfin_write16(DMA2_Y_MODIFY,val) +#define bfin_read_DMA2_CURR_DESC_PTR() bfin_read32(DMA2_CURR_DESC_PTR) +#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_write32(DMA2_CURR_DESC_PTR,val) +#define bfin_read_DMA2_CURR_ADDR() bfin_read32(DMA2_CURR_ADDR) +#define bfin_write_DMA2_CURR_ADDR(val) bfin_write32(DMA2_CURR_ADDR,val) +#define bfin_read_DMA2_CURR_X_COUNT() bfin_read16(DMA2_CURR_X_COUNT) +#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT,val) +#define bfin_read_DMA2_CURR_Y_COUNT() bfin_read16(DMA2_CURR_Y_COUNT) +#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT,val) +#define bfin_read_DMA2_IRQ_STATUS() bfin_read16(DMA2_IRQ_STATUS) +#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS,val) +#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP) +#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP,val) + +#define bfin_read_DMA3_CONFIG() bfin_read16(DMA3_CONFIG) +#define bfin_write_DMA3_CONFIG(val) bfin_write16(DMA3_CONFIG,val) +#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_read32(DMA3_NEXT_DESC_PTR) +#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_write32(DMA3_NEXT_DESC_PTR,val) +#define bfin_read_DMA3_START_ADDR() bfin_read32(DMA3_START_ADDR) +#define bfin_write_DMA3_START_ADDR(val) bfin_write32(DMA3_START_ADDR,val) +#define bfin_read_DMA3_X_COUNT() bfin_read16(DMA3_X_COUNT) +#define bfin_write_DMA3_X_COUNT(val) bfin_write16(DMA3_X_COUNT,val) +#define bfin_read_DMA3_Y_COUNT() bfin_read16(DMA3_Y_COUNT) +#define bfin_write_DMA3_Y_COUNT(val) bfin_write16(DMA3_Y_COUNT,val) +#define bfin_read_DMA3_X_MODIFY() bfin_read16(DMA3_X_MODIFY) +#define bfin_write_DMA3_X_MODIFY(val) bfin_write16(DMA3_X_MODIFY,val) +#define bfin_read_DMA3_Y_MODIFY() bfin_read16(DMA3_Y_MODIFY) +#define bfin_write_DMA3_Y_MODIFY(val) bfin_write16(DMA3_Y_MODIFY,val) +#define bfin_read_DMA3_CURR_DESC_PTR() bfin_read32(DMA3_CURR_DESC_PTR) +#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_write32(DMA3_CURR_DESC_PTR,val) +#define bfin_read_DMA3_CURR_ADDR() bfin_read32(DMA3_CURR_ADDR) +#define bfin_write_DMA3_CURR_ADDR(val) bfin_write32(DMA3_CURR_ADDR,val) +#define bfin_read_DMA3_CURR_X_COUNT() bfin_read16(DMA3_CURR_X_COUNT) +#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT,val) +#define bfin_read_DMA3_CURR_Y_COUNT() bfin_read16(DMA3_CURR_Y_COUNT) +#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT,val) +#define bfin_read_DMA3_IRQ_STATUS() bfin_read16(DMA3_IRQ_STATUS) +#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS,val) +#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP) +#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP,val) + +#define bfin_read_DMA4_CONFIG() bfin_read16(DMA4_CONFIG) +#define bfin_write_DMA4_CONFIG(val) bfin_write16(DMA4_CONFIG,val) +#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_read32(DMA4_NEXT_DESC_PTR) +#define bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_write32(DMA4_NEXT_DESC_PTR,val) +#define bfin_read_DMA4_START_ADDR() bfin_read32(DMA4_START_ADDR) +#define bfin_write_DMA4_START_ADDR(val) 
bfin_write32(DMA4_START_ADDR,val) +#define bfin_read_DMA4_X_COUNT() bfin_read16(DMA4_X_COUNT) +#define bfin_write_DMA4_X_COUNT(val) bfin_write16(DMA4_X_COUNT,val) +#define bfin_read_DMA4_Y_COUNT() bfin_read16(DMA4_Y_COUNT) +#define bfin_write_DMA4_Y_COUNT(val) bfin_write16(DMA4_Y_COUNT,val) +#define bfin_read_DMA4_X_MODIFY() bfin_read16(DMA4_X_MODIFY) +#define bfin_write_DMA4_X_MODIFY(val) bfin_write16(DMA4_X_MODIFY,val) +#define bfin_read_DMA4_Y_MODIFY() bfin_read16(DMA4_Y_MODIFY) +#define bfin_write_DMA4_Y_MODIFY(val) bfin_write16(DMA4_Y_MODIFY,val) +#define bfin_read_DMA4_CURR_DESC_PTR() bfin_read32(DMA4_CURR_DESC_PTR) +#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_write32(DMA4_CURR_DESC_PTR,val) +#define bfin_read_DMA4_CURR_ADDR() bfin_read32(DMA4_CURR_ADDR) +#define bfin_write_DMA4_CURR_ADDR(val) bfin_write32(DMA4_CURR_ADDR,val) +#define bfin_read_DMA4_CURR_X_COUNT() bfin_read16(DMA4_CURR_X_COUNT) +#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT,val) +#define bfin_read_DMA4_CURR_Y_COUNT() bfin_read16(DMA4_CURR_Y_COUNT) +#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT,val) +#define bfin_read_DMA4_IRQ_STATUS() bfin_read16(DMA4_IRQ_STATUS) +#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS,val) +#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP) +#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP,val) + +#define bfin_read_DMA5_CONFIG() bfin_read16(DMA5_CONFIG) +#define bfin_write_DMA5_CONFIG(val) bfin_write16(DMA5_CONFIG,val) +#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_read32(DMA5_NEXT_DESC_PTR) +#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_write32(DMA5_NEXT_DESC_PTR,val) +#define bfin_read_DMA5_START_ADDR() bfin_read32(DMA5_START_ADDR) +#define bfin_write_DMA5_START_ADDR(val) bfin_write32(DMA5_START_ADDR,val) +#define bfin_read_DMA5_X_COUNT() bfin_read16(DMA5_X_COUNT) +#define bfin_write_DMA5_X_COUNT(val) bfin_write16(DMA5_X_COUNT,val) +#define bfin_read_DMA5_Y_COUNT() bfin_read16(DMA5_Y_COUNT) +#define bfin_write_DMA5_Y_COUNT(val) bfin_write16(DMA5_Y_COUNT,val) +#define bfin_read_DMA5_X_MODIFY() bfin_read16(DMA5_X_MODIFY) +#define bfin_write_DMA5_X_MODIFY(val) bfin_write16(DMA5_X_MODIFY,val) +#define bfin_read_DMA5_Y_MODIFY() bfin_read16(DMA5_Y_MODIFY) +#define bfin_write_DMA5_Y_MODIFY(val) bfin_write16(DMA5_Y_MODIFY,val) +#define bfin_read_DMA5_CURR_DESC_PTR() bfin_read32(DMA5_CURR_DESC_PTR) +#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_write32(DMA5_CURR_DESC_PTR,val) +#define bfin_read_DMA5_CURR_ADDR() bfin_read32(DMA5_CURR_ADDR) +#define bfin_write_DMA5_CURR_ADDR(val) bfin_write32(DMA5_CURR_ADDR,val) +#define bfin_read_DMA5_CURR_X_COUNT() bfin_read16(DMA5_CURR_X_COUNT) +#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT,val) +#define bfin_read_DMA5_CURR_Y_COUNT() bfin_read16(DMA5_CURR_Y_COUNT) +#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT,val) +#define bfin_read_DMA5_IRQ_STATUS() bfin_read16(DMA5_IRQ_STATUS) +#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS,val) +#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP) +#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP,val) + +#define bfin_read_DMA6_CONFIG() bfin_read16(DMA6_CONFIG) +#define bfin_write_DMA6_CONFIG(val) bfin_write16(DMA6_CONFIG,val) +#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_read32(DMA6_NEXT_DESC_PTR) +#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_write32(DMA6_NEXT_DESC_PTR,val) 
+#define bfin_read_DMA6_START_ADDR() bfin_read32(DMA6_START_ADDR) +#define bfin_write_DMA6_START_ADDR(val) bfin_write32(DMA6_START_ADDR,val) +#define bfin_read_DMA6_X_COUNT() bfin_read16(DMA6_X_COUNT) +#define bfin_write_DMA6_X_COUNT(val) bfin_write16(DMA6_X_COUNT,val) +#define bfin_read_DMA6_Y_COUNT() bfin_read16(DMA6_Y_COUNT) +#define bfin_write_DMA6_Y_COUNT(val) bfin_write16(DMA6_Y_COUNT,val) +#define bfin_read_DMA6_X_MODIFY() bfin_read16(DMA6_X_MODIFY) +#define bfin_write_DMA6_X_MODIFY(val) bfin_write16(DMA6_X_MODIFY,val) +#define bfin_read_DMA6_Y_MODIFY() bfin_read16(DMA6_Y_MODIFY) +#define bfin_write_DMA6_Y_MODIFY(val) bfin_write16(DMA6_Y_MODIFY,val) +#define bfin_read_DMA6_CURR_DESC_PTR() bfin_read32(DMA6_CURR_DESC_PTR) +#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_write32(DMA6_CURR_DESC_PTR,val) +#define bfin_read_DMA6_CURR_ADDR() bfin_read32(DMA6_CURR_ADDR) +#define bfin_write_DMA6_CURR_ADDR(val) bfin_write32(DMA6_CURR_ADDR,val) +#define bfin_read_DMA6_CURR_X_COUNT() bfin_read16(DMA6_CURR_X_COUNT) +#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT,val) +#define bfin_read_DMA6_CURR_Y_COUNT() bfin_read16(DMA6_CURR_Y_COUNT) +#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT,val) +#define bfin_read_DMA6_IRQ_STATUS() bfin_read16(DMA6_IRQ_STATUS) +#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS,val) +#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP) +#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP,val) + +#define bfin_read_DMA7_CONFIG() bfin_read16(DMA7_CONFIG) +#define bfin_write_DMA7_CONFIG(val) bfin_write16(DMA7_CONFIG,val) +#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_read32(DMA7_NEXT_DESC_PTR) +#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_write32(DMA7_NEXT_DESC_PTR,val) +#define bfin_read_DMA7_START_ADDR() bfin_read32(DMA7_START_ADDR) +#define bfin_write_DMA7_START_ADDR(val) bfin_write32(DMA7_START_ADDR,val) +#define bfin_read_DMA7_X_COUNT() bfin_read16(DMA7_X_COUNT) +#define bfin_write_DMA7_X_COUNT(val) bfin_write16(DMA7_X_COUNT,val) +#define bfin_read_DMA7_Y_COUNT() bfin_read16(DMA7_Y_COUNT) +#define bfin_write_DMA7_Y_COUNT(val) bfin_write16(DMA7_Y_COUNT,val) +#define bfin_read_DMA7_X_MODIFY() bfin_read16(DMA7_X_MODIFY) +#define bfin_write_DMA7_X_MODIFY(val) bfin_write16(DMA7_X_MODIFY,val) +#define bfin_read_DMA7_Y_MODIFY() bfin_read16(DMA7_Y_MODIFY) +#define bfin_write_DMA7_Y_MODIFY(val) bfin_write16(DMA7_Y_MODIFY,val) +#define bfin_read_DMA7_CURR_DESC_PTR() bfin_read32(DMA7_CURR_DESC_PTR) +#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_write32(DMA7_CURR_DESC_PTR,val) +#define bfin_read_DMA7_CURR_ADDR() bfin_read32(DMA7_CURR_ADDR) +#define bfin_write_DMA7_CURR_ADDR(val) bfin_write32(DMA7_CURR_ADDR,val) +#define bfin_read_DMA7_CURR_X_COUNT() bfin_read16(DMA7_CURR_X_COUNT) +#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT,val) +#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT) +#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT,val) +#define bfin_read_DMA7_IRQ_STATUS() bfin_read16(DMA7_IRQ_STATUS) +#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS,val) +#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP) +#define bfin_write_DMA7_PERIPHERAL_MAP(val) bfin_write16(DMA7_PERIPHERAL_MAP,val) + +#define bfin_read_DMA8_CONFIG() bfin_read16(DMA8_CONFIG) +#define bfin_write_DMA8_CONFIG(val) bfin_write16(DMA8_CONFIG,val) +#define bfin_read_DMA8_NEXT_DESC_PTR() 
bfin_read32(DMA8_NEXT_DESC_PTR) +#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_write32(DMA8_NEXT_DESC_PTR,val) +#define bfin_read_DMA8_START_ADDR() bfin_read32(DMA8_START_ADDR) +#define bfin_write_DMA8_START_ADDR(val) bfin_write32(DMA8_START_ADDR,val) +#define bfin_read_DMA8_X_COUNT() bfin_read16(DMA8_X_COUNT) +#define bfin_write_DMA8_X_COUNT(val) bfin_write16(DMA8_X_COUNT,val) +#define bfin_read_DMA8_Y_COUNT() bfin_read16(DMA8_Y_COUNT) +#define bfin_write_DMA8_Y_COUNT(val) bfin_write16(DMA8_Y_COUNT,val) +#define bfin_read_DMA8_X_MODIFY() bfin_read16(DMA8_X_MODIFY) +#define bfin_write_DMA8_X_MODIFY(val) bfin_write16(DMA8_X_MODIFY,val) +#define bfin_read_DMA8_Y_MODIFY() bfin_read16(DMA8_Y_MODIFY) +#define bfin_write_DMA8_Y_MODIFY(val) bfin_write16(DMA8_Y_MODIFY,val) +#define bfin_read_DMA8_CURR_DESC_PTR() bfin_read32(DMA8_CURR_DESC_PTR) +#define bfin_write_DMA8_CURR_DESC_PTR(val) bfin_write32(DMA8_CURR_DESC_PTR,val) +#define bfin_read_DMA8_CURR_ADDR() bfin_read32(DMA8_CURR_ADDR) +#define bfin_write_DMA8_CURR_ADDR(val) bfin_write32(DMA8_CURR_ADDR,val) +#define bfin_read_DMA8_CURR_X_COUNT() bfin_read16(DMA8_CURR_X_COUNT) +#define bfin_write_DMA8_CURR_X_COUNT(val) bfin_write16(DMA8_CURR_X_COUNT,val) +#define bfin_read_DMA8_CURR_Y_COUNT() bfin_read16(DMA8_CURR_Y_COUNT) +#define bfin_write_DMA8_CURR_Y_COUNT(val) bfin_write16(DMA8_CURR_Y_COUNT,val) +#define bfin_read_DMA8_IRQ_STATUS() bfin_read16(DMA8_IRQ_STATUS) +#define bfin_write_DMA8_IRQ_STATUS(val) bfin_write16(DMA8_IRQ_STATUS,val) +#define bfin_read_DMA8_PERIPHERAL_MAP() bfin_read16(DMA8_PERIPHERAL_MAP) +#define bfin_write_DMA8_PERIPHERAL_MAP(val) bfin_write16(DMA8_PERIPHERAL_MAP,val) + +#define bfin_read_DMA9_CONFIG() bfin_read16(DMA9_CONFIG) +#define bfin_write_DMA9_CONFIG(val) bfin_write16(DMA9_CONFIG,val) +#define bfin_read_DMA9_NEXT_DESC_PTR() bfin_read32(DMA9_NEXT_DESC_PTR) +#define bfin_write_DMA9_NEXT_DESC_PTR(val) bfin_write32(DMA9_NEXT_DESC_PTR,val) +#define bfin_read_DMA9_START_ADDR() bfin_read32(DMA9_START_ADDR) +#define bfin_write_DMA9_START_ADDR(val) bfin_write32(DMA9_START_ADDR,val) +#define bfin_read_DMA9_X_COUNT() bfin_read16(DMA9_X_COUNT) +#define bfin_write_DMA9_X_COUNT(val) bfin_write16(DMA9_X_COUNT,val) +#define bfin_read_DMA9_Y_COUNT() bfin_read16(DMA9_Y_COUNT) +#define bfin_write_DMA9_Y_COUNT(val) bfin_write16(DMA9_Y_COUNT,val) +#define bfin_read_DMA9_X_MODIFY() bfin_read16(DMA9_X_MODIFY) +#define bfin_write_DMA9_X_MODIFY(val) bfin_write16(DMA9_X_MODIFY,val) +#define bfin_read_DMA9_Y_MODIFY() bfin_read16(DMA9_Y_MODIFY) +#define bfin_write_DMA9_Y_MODIFY(val) bfin_write16(DMA9_Y_MODIFY,val) +#define bfin_read_DMA9_CURR_DESC_PTR() bfin_read32(DMA9_CURR_DESC_PTR) +#define bfin_write_DMA9_CURR_DESC_PTR(val) bfin_write32(DMA9_CURR_DESC_PTR,val) +#define bfin_read_DMA9_CURR_ADDR() bfin_read32(DMA9_CURR_ADDR) +#define bfin_write_DMA9_CURR_ADDR(val) bfin_write32(DMA9_CURR_ADDR,val) +#define bfin_read_DMA9_CURR_X_COUNT() bfin_read16(DMA9_CURR_X_COUNT) +#define bfin_write_DMA9_CURR_X_COUNT(val) bfin_write16(DMA9_CURR_X_COUNT,val) +#define bfin_read_DMA9_CURR_Y_COUNT() bfin_read16(DMA9_CURR_Y_COUNT) +#define bfin_write_DMA9_CURR_Y_COUNT(val) bfin_write16(DMA9_CURR_Y_COUNT,val) +#define bfin_read_DMA9_IRQ_STATUS() bfin_read16(DMA9_IRQ_STATUS) +#define bfin_write_DMA9_IRQ_STATUS(val) bfin_write16(DMA9_IRQ_STATUS,val) +#define bfin_read_DMA9_PERIPHERAL_MAP() bfin_read16(DMA9_PERIPHERAL_MAP) +#define bfin_write_DMA9_PERIPHERAL_MAP(val) bfin_write16(DMA9_PERIPHERAL_MAP,val) + +#define bfin_read_DMA10_CONFIG() 
bfin_read16(DMA10_CONFIG) +#define bfin_write_DMA10_CONFIG(val) bfin_write16(DMA10_CONFIG,val) +#define bfin_read_DMA10_NEXT_DESC_PTR() bfin_read32(DMA10_NEXT_DESC_PTR) +#define bfin_write_DMA10_NEXT_DESC_PTR(val) bfin_write32(DMA10_NEXT_DESC_PTR,val) +#define bfin_read_DMA10_START_ADDR() bfin_read32(DMA10_START_ADDR) +#define bfin_write_DMA10_START_ADDR(val) bfin_write32(DMA10_START_ADDR,val) +#define bfin_read_DMA10_X_COUNT() bfin_read16(DMA10_X_COUNT) +#define bfin_write_DMA10_X_COUNT(val) bfin_write16(DMA10_X_COUNT,val) +#define bfin_read_DMA10_Y_COUNT() bfin_read16(DMA10_Y_COUNT) +#define bfin_write_DMA10_Y_COUNT(val) bfin_write16(DMA10_Y_COUNT,val) +#define bfin_read_DMA10_X_MODIFY() bfin_read16(DMA10_X_MODIFY) +#define bfin_write_DMA10_X_MODIFY(val) bfin_write16(DMA10_X_MODIFY,val) +#define bfin_read_DMA10_Y_MODIFY() bfin_read16(DMA10_Y_MODIFY) +#define bfin_write_DMA10_Y_MODIFY(val) bfin_write16(DMA10_Y_MODIFY,val) +#define bfin_read_DMA10_CURR_DESC_PTR() bfin_read32(DMA10_CURR_DESC_PTR) +#define bfin_write_DMA10_CURR_DESC_PTR(val) bfin_write32(DMA10_CURR_DESC_PTR,val) +#define bfin_read_DMA10_CURR_ADDR() bfin_read32(DMA10_CURR_ADDR) +#define bfin_write_DMA10_CURR_ADDR(val) bfin_write32(DMA10_CURR_ADDR,val) +#define bfin_read_DMA10_CURR_X_COUNT() bfin_read16(DMA10_CURR_X_COUNT) +#define bfin_write_DMA10_CURR_X_COUNT(val) bfin_write16(DMA10_CURR_X_COUNT,val) +#define bfin_read_DMA10_CURR_Y_COUNT() bfin_read16(DMA10_CURR_Y_COUNT) +#define bfin_write_DMA10_CURR_Y_COUNT(val) bfin_write16(DMA10_CURR_Y_COUNT,val) +#define bfin_read_DMA10_IRQ_STATUS() bfin_read16(DMA10_IRQ_STATUS) +#define bfin_write_DMA10_IRQ_STATUS(val) bfin_write16(DMA10_IRQ_STATUS,val) +#define bfin_read_DMA10_PERIPHERAL_MAP() bfin_read16(DMA10_PERIPHERAL_MAP) +#define bfin_write_DMA10_PERIPHERAL_MAP(val) bfin_write16(DMA10_PERIPHERAL_MAP,val) + +#define bfin_read_DMA11_CONFIG() bfin_read16(DMA11_CONFIG) +#define bfin_write_DMA11_CONFIG(val) bfin_write16(DMA11_CONFIG,val) +#define bfin_read_DMA11_NEXT_DESC_PTR() bfin_read32(DMA11_NEXT_DESC_PTR) +#define bfin_write_DMA11_NEXT_DESC_PTR(val) bfin_write32(DMA11_NEXT_DESC_PTR,val) +#define bfin_read_DMA11_START_ADDR() bfin_read32(DMA11_START_ADDR) +#define bfin_write_DMA11_START_ADDR(val) bfin_write32(DMA11_START_ADDR,val) +#define bfin_read_DMA11_X_COUNT() bfin_read16(DMA11_X_COUNT) +#define bfin_write_DMA11_X_COUNT(val) bfin_write16(DMA11_X_COUNT,val) +#define bfin_read_DMA11_Y_COUNT() bfin_read16(DMA11_Y_COUNT) +#define bfin_write_DMA11_Y_COUNT(val) bfin_write16(DMA11_Y_COUNT,val) +#define bfin_read_DMA11_X_MODIFY() bfin_read16(DMA11_X_MODIFY) +#define bfin_write_DMA11_X_MODIFY(val) bfin_write16(DMA11_X_MODIFY,val) +#define bfin_read_DMA11_Y_MODIFY() bfin_read16(DMA11_Y_MODIFY) +#define bfin_write_DMA11_Y_MODIFY(val) bfin_write16(DMA11_Y_MODIFY,val) +#define bfin_read_DMA11_CURR_DESC_PTR() bfin_read32(DMA11_CURR_DESC_PTR) +#define bfin_write_DMA11_CURR_DESC_PTR(val) bfin_write32(DMA11_CURR_DESC_PTR,val) +#define bfin_read_DMA11_CURR_ADDR() bfin_read32(DMA11_CURR_ADDR) +#define bfin_write_DMA11_CURR_ADDR(val) bfin_write32(DMA11_CURR_ADDR,val) +#define bfin_read_DMA11_CURR_X_COUNT() bfin_read16(DMA11_CURR_X_COUNT) +#define bfin_write_DMA11_CURR_X_COUNT(val) bfin_write16(DMA11_CURR_X_COUNT,val) +#define bfin_read_DMA11_CURR_Y_COUNT() bfin_read16(DMA11_CURR_Y_COUNT) +#define bfin_write_DMA11_CURR_Y_COUNT(val) bfin_write16(DMA11_CURR_Y_COUNT,val) +#define bfin_read_DMA11_IRQ_STATUS() bfin_read16(DMA11_IRQ_STATUS) +#define bfin_write_DMA11_IRQ_STATUS(val) 
bfin_write16(DMA11_IRQ_STATUS,val) +#define bfin_read_DMA11_PERIPHERAL_MAP() bfin_read16(DMA11_PERIPHERAL_MAP) +#define bfin_write_DMA11_PERIPHERAL_MAP(val) bfin_write16(DMA11_PERIPHERAL_MAP,val) + +#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG) +#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG,val) +#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR) +#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR,val) +#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR) +#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR,val) +#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT) +#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT,val) +#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT) +#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT,val) +#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY) +#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY,val) +#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY) +#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY,val) +#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR) +#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR,val) +#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR) +#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR,val) +#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT) +#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT,val) +#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT) +#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT,val) +#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS) +#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS,val) +#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP) +#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP,val) + +#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG) +#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG,val) +#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR) +#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR,val) +#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR) +#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR,val) +#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT) +#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT,val) +#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT) +#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT,val) +#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY) +#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY,val) +#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY) +#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY,val) +#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR) +#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR,val) +#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR) +#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR,val) +#define 
bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT) +#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT,val) +#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT) +#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT,val) +#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS) +#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS,val) +#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP) +#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP,val) + +#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG) +#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG,val) +#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR) +#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR,val) +#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR) +#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR,val) +#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT) +#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT,val) +#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT) +#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT,val) +#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY) +#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY,val) +#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY) +#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY,val) +#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR) +#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR,val) +#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR) +#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR,val) +#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT) +#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT,val) +#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT) +#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT,val) +#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS) +#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS,val) +#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP) +#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP,val) + +#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG) +#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG,val) +#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR) +#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR,val) +#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR) +#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR,val) +#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT) +#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT,val) +#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT) +#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT,val) +#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY) +#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY,val) +#define bfin_read_MDMA_S1_Y_MODIFY() 
bfin_read16(MDMA_S1_Y_MODIFY) +#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY,val) +#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR) +#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR,val) +#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR) +#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR,val) +#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT) +#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT,val) +#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT) +#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT,val) +#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS) +#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS,val) +#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP) +#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP,val) + +/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */ +#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL) +#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL,val) +#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS) +#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS,val) +#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF) +#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY) +#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY,val) +#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT) +#define bfin_write_PPI_COUNT(val) bfin_write16(PPI_COUNT,val) +#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME) +#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME,val) + +/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */ +#define bfin_read_TWI_CLKDIV() bfin_read16(TWI_CLKDIV) +#define bfin_write_TWI_CLKDIV(val) bfin_write16(TWI_CLKDIV,val) +#define bfin_read_TWI_CONTROL() bfin_read16(TWI_CONTROL) +#define bfin_write_TWI_CONTROL(val) bfin_write16(TWI_CONTROL,val) +#define bfin_read_TWI_SLAVE_CTL() bfin_read16(TWI_SLAVE_CTL) +#define bfin_write_TWI_SLAVE_CTL(val) bfin_write16(TWI_SLAVE_CTL,val) +#define bfin_read_TWI_SLAVE_STAT() bfin_read16(TWI_SLAVE_STAT) +#define bfin_write_TWI_SLAVE_STAT(val) bfin_write16(TWI_SLAVE_STAT,val) +#define bfin_read_TWI_SLAVE_ADDR() bfin_read16(TWI_SLAVE_ADDR) +#define bfin_write_TWI_SLAVE_ADDR(val) bfin_write16(TWI_SLAVE_ADDR,val) +#define bfin_read_TWI_MASTER_CTL() bfin_read16(TWI_MASTER_CTL) +#define bfin_write_TWI_MASTER_CTL(val) bfin_write16(TWI_MASTER_CTL,val) +#define bfin_read_TWI_MASTER_STAT() bfin_read16(TWI_MASTER_STAT) +#define bfin_write_TWI_MASTER_STAT(val) bfin_write16(TWI_MASTER_STAT,val) +#define bfin_read_TWI_MASTER_ADDR() bfin_read16(TWI_MASTER_ADDR) +#define bfin_write_TWI_MASTER_ADDR(val) bfin_write16(TWI_MASTER_ADDR,val) +#define bfin_read_TWI_INT_STAT() bfin_read16(TWI_INT_STAT) +#define bfin_write_TWI_INT_STAT(val) bfin_write16(TWI_INT_STAT,val) +#define bfin_read_TWI_INT_MASK() bfin_read16(TWI_INT_MASK) +#define bfin_write_TWI_INT_MASK(val) bfin_write16(TWI_INT_MASK,val) +#define bfin_read_TWI_FIFO_CTL() bfin_read16(TWI_FIFO_CTL) +#define bfin_write_TWI_FIFO_CTL(val) bfin_write16(TWI_FIFO_CTL,val) +#define bfin_read_TWI_FIFO_STAT() bfin_read16(TWI_FIFO_STAT) +#define bfin_write_TWI_FIFO_STAT(val) bfin_write16(TWI_FIFO_STAT,val) +#define bfin_read_TWI_XMT_DATA8() bfin_read16(TWI_XMT_DATA8) +#define bfin_write_TWI_XMT_DATA8(val) 
bfin_write16(TWI_XMT_DATA8,val) +#define bfin_read_TWI_XMT_DATA16() bfin_read16(TWI_XMT_DATA16) +#define bfin_write_TWI_XMT_DATA16(val) bfin_write16(TWI_XMT_DATA16,val) +#define bfin_read_TWI_RCV_DATA8() bfin_read16(TWI_RCV_DATA8) +#define bfin_write_TWI_RCV_DATA8(val) bfin_write16(TWI_RCV_DATA8,val) +#define bfin_read_TWI_RCV_DATA16() bfin_read16(TWI_RCV_DATA16) +#define bfin_write_TWI_RCV_DATA16(val) bfin_write16(TWI_RCV_DATA16,val) + +/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */ +#define bfin_read_PORTGIO() bfin_read16(PORTGIO) +#define bfin_write_PORTGIO(val) bfin_write16(PORTGIO,val) +#define bfin_read_PORTGIO_CLEAR() bfin_read16(PORTGIO_CLEAR) +#define bfin_write_PORTGIO_CLEAR(val) bfin_write16(PORTGIO_CLEAR,val) +#define bfin_read_PORTGIO_SET() bfin_read16(PORTGIO_SET) +#define bfin_write_PORTGIO_SET(val) bfin_write16(PORTGIO_SET,val) +#define bfin_read_PORTGIO_TOGGLE() bfin_read16(PORTGIO_TOGGLE) +#define bfin_write_PORTGIO_TOGGLE(val) bfin_write16(PORTGIO_TOGGLE,val) +#define bfin_read_PORTGIO_MASKA() bfin_read16(PORTGIO_MASKA) +#define bfin_write_PORTGIO_MASKA(val) bfin_write16(PORTGIO_MASKA,val) +#define bfin_read_PORTGIO_MASKA_CLEAR() bfin_read16(PORTGIO_MASKA_CLEAR) +#define bfin_write_PORTGIO_MASKA_CLEAR(val) bfin_write16(PORTGIO_MASKA_CLEAR,val) +#define bfin_read_PORTGIO_MASKA_SET() bfin_read16(PORTGIO_MASKA_SET) +#define bfin_write_PORTGIO_MASKA_SET(val) bfin_write16(PORTGIO_MASKA_SET,val) +#define bfin_read_PORTGIO_MASKA_TOGGLE() bfin_read16(PORTGIO_MASKA_TOGGLE) +#define bfin_write_PORTGIO_MASKA_TOGGLE(val) bfin_write16(PORTGIO_MASKA_TOGGLE,val) +#define bfin_read_PORTGIO_MASKB() bfin_read16(PORTGIO_MASKB) +#define bfin_write_PORTGIO_MASKB(val) bfin_write16(PORTGIO_MASKB,val) +#define bfin_read_PORTGIO_MASKB_CLEAR() bfin_read16(PORTGIO_MASKB_CLEAR) +#define bfin_write_PORTGIO_MASKB_CLEAR(val) bfin_write16(PORTGIO_MASKB_CLEAR,val) +#define bfin_read_PORTGIO_MASKB_SET() bfin_read16(PORTGIO_MASKB_SET) +#define bfin_write_PORTGIO_MASKB_SET(val) bfin_write16(PORTGIO_MASKB_SET,val) +#define bfin_read_PORTGIO_MASKB_TOGGLE() bfin_read16(PORTGIO_MASKB_TOGGLE) +#define bfin_write_PORTGIO_MASKB_TOGGLE(val) bfin_write16(PORTGIO_MASKB_TOGGLE,val) +#define bfin_read_PORTGIO_DIR() bfin_read16(PORTGIO_DIR) +#define bfin_write_PORTGIO_DIR(val) bfin_write16(PORTGIO_DIR,val) +#define bfin_read_PORTGIO_POLAR() bfin_read16(PORTGIO_POLAR) +#define bfin_write_PORTGIO_POLAR(val) bfin_write16(PORTGIO_POLAR,val) +#define bfin_read_PORTGIO_EDGE() bfin_read16(PORTGIO_EDGE) +#define bfin_write_PORTGIO_EDGE(val) bfin_write16(PORTGIO_EDGE,val) +#define bfin_read_PORTGIO_BOTH() bfin_read16(PORTGIO_BOTH) +#define bfin_write_PORTGIO_BOTH(val) bfin_write16(PORTGIO_BOTH,val) +#define bfin_read_PORTGIO_INEN() bfin_read16(PORTGIO_INEN) +#define bfin_write_PORTGIO_INEN(val) bfin_write16(PORTGIO_INEN,val) + +/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */ +#define bfin_read_PORTHIO() bfin_read16(PORTHIO) +#define bfin_write_PORTHIO(val) bfin_write16(PORTHIO,val) +#define bfin_read_PORTHIO_CLEAR() bfin_read16(PORTHIO_CLEAR) +#define bfin_write_PORTHIO_CLEAR(val) bfin_write16(PORTHIO_CLEAR,val) +#define bfin_read_PORTHIO_SET() bfin_read16(PORTHIO_SET) +#define bfin_write_PORTHIO_SET(val) bfin_write16(PORTHIO_SET,val) +#define bfin_read_PORTHIO_TOGGLE() bfin_read16(PORTHIO_TOGGLE) +#define bfin_write_PORTHIO_TOGGLE(val) bfin_write16(PORTHIO_TOGGLE,val) +#define bfin_read_PORTHIO_MASKA() bfin_read16(PORTHIO_MASKA) +#define bfin_write_PORTHIO_MASKA(val) bfin_write16(PORTHIO_MASKA,val) 
+#define bfin_read_PORTHIO_MASKA_CLEAR() bfin_read16(PORTHIO_MASKA_CLEAR) +#define bfin_write_PORTHIO_MASKA_CLEAR(val) bfin_write16(PORTHIO_MASKA_CLEAR,val) +#define bfin_read_PORTHIO_MASKA_SET() bfin_read16(PORTHIO_MASKA_SET) +#define bfin_write_PORTHIO_MASKA_SET(val) bfin_write16(PORTHIO_MASKA_SET,val) +#define bfin_read_PORTHIO_MASKA_TOGGLE() bfin_read16(PORTHIO_MASKA_TOGGLE) +#define bfin_write_PORTHIO_MASKA_TOGGLE(val) bfin_write16(PORTHIO_MASKA_TOGGLE,val) +#define bfin_read_PORTHIO_MASKB() bfin_read16(PORTHIO_MASKB) +#define bfin_write_PORTHIO_MASKB(val) bfin_write16(PORTHIO_MASKB,val) +#define bfin_read_PORTHIO_MASKB_CLEAR() bfin_read16(PORTHIO_MASKB_CLEAR) +#define bfin_write_PORTHIO_MASKB_CLEAR(val) bfin_write16(PORTHIO_MASKB_CLEAR,val) +#define bfin_read_PORTHIO_MASKB_SET() bfin_read16(PORTHIO_MASKB_SET) +#define bfin_write_PORTHIO_MASKB_SET(val) bfin_write16(PORTHIO_MASKB_SET,val) +#define bfin_read_PORTHIO_MASKB_TOGGLE() bfin_read16(PORTHIO_MASKB_TOGGLE) +#define bfin_write_PORTHIO_MASKB_TOGGLE(val) bfin_write16(PORTHIO_MASKB_TOGGLE,val) +#define bfin_read_PORTHIO_DIR() bfin_read16(PORTHIO_DIR) +#define bfin_write_PORTHIO_DIR(val) bfin_write16(PORTHIO_DIR,val) +#define bfin_read_PORTHIO_POLAR() bfin_read16(PORTHIO_POLAR) +#define bfin_write_PORTHIO_POLAR(val) bfin_write16(PORTHIO_POLAR,val) +#define bfin_read_PORTHIO_EDGE() bfin_read16(PORTHIO_EDGE) +#define bfin_write_PORTHIO_EDGE(val) bfin_write16(PORTHIO_EDGE,val) +#define bfin_read_PORTHIO_BOTH() bfin_read16(PORTHIO_BOTH) +#define bfin_write_PORTHIO_BOTH(val) bfin_write16(PORTHIO_BOTH,val) +#define bfin_read_PORTHIO_INEN() bfin_read16(PORTHIO_INEN) +#define bfin_write_PORTHIO_INEN(val) bfin_write16(PORTHIO_INEN,val) + +/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */ +#define bfin_read_UART1_THR() bfin_read16(UART1_THR) +#define bfin_write_UART1_THR(val) bfin_write16(UART1_THR,val) +#define bfin_read_UART1_RBR() bfin_read16(UART1_RBR) +#define bfin_write_UART1_RBR(val) bfin_write16(UART1_RBR,val) +#define bfin_read_UART1_DLL() bfin_read16(UART1_DLL) +#define bfin_write_UART1_DLL(val) bfin_write16(UART1_DLL,val) +#define bfin_read_UART1_IER() bfin_read16(UART1_IER) +#define bfin_write_UART1_IER(val) bfin_write16(UART1_IER,val) +#define bfin_read_UART1_DLH() bfin_read16(UART1_DLH) +#define bfin_write_UART1_DLH(val) bfin_write16(UART1_DLH,val) +#define bfin_read_UART1_IIR() bfin_read16(UART1_IIR) +#define bfin_write_UART1_IIR(val) bfin_write16(UART1_IIR,val) +#define bfin_read_UART1_LCR() bfin_read16(UART1_LCR) +#define bfin_write_UART1_LCR(val) bfin_write16(UART1_LCR,val) +#define bfin_read_UART1_MCR() bfin_read16(UART1_MCR) +#define bfin_write_UART1_MCR(val) bfin_write16(UART1_MCR,val) +#define bfin_read_UART1_LSR() bfin_read16(UART1_LSR) +#define bfin_write_UART1_LSR(val) bfin_write16(UART1_LSR,val) +#define bfin_read_UART1_MSR() bfin_read16(UART1_MSR) +#define bfin_write_UART1_MSR(val) bfin_write16(UART1_MSR,val) +#define bfin_read_UART1_SCR() bfin_read16(UART1_SCR) +#define bfin_write_UART1_SCR(val) bfin_write16(UART1_SCR,val) +#define bfin_read_UART1_GCTL() bfin_read16(UART1_GCTL) +#define bfin_write_UART1_GCTL(val) bfin_write16(UART1_GCTL,val) + +/* CAN Controller (0xFFC02A00 - 0xFFC02FFF) */ +/* For Mailboxes 0-15 */ +#define bfin_read_CAN_MC1() bfin_read16(CAN_MC1) +#define bfin_write_CAN_MC1(val) bfin_write16(CAN_MC1,val) +#define bfin_read_CAN_MD1() bfin_read16(CAN_MD1) +#define bfin_write_CAN_MD1(val) bfin_write16(CAN_MD1,val) +#define bfin_read_CAN_TRS1() bfin_read16(CAN_TRS1) +#define bfin_write_CAN_TRS1(val) 
bfin_write16(CAN_TRS1,val) +#define bfin_read_CAN_TRR1() bfin_read16(CAN_TRR1) +#define bfin_write_CAN_TRR1(val) bfin_write16(CAN_TRR1,val) +#define bfin_read_CAN_TA1() bfin_read16(CAN_TA1) +#define bfin_write_CAN_TA1(val) bfin_write16(CAN_TA1,val) +#define bfin_read_CAN_AA1() bfin_read16(CAN_AA1) +#define bfin_write_CAN_AA1(val) bfin_write16(CAN_AA1,val) +#define bfin_read_CAN_RMP1() bfin_read16(CAN_RMP1) +#define bfin_write_CAN_RMP1(val) bfin_write16(CAN_RMP1,val) +#define bfin_read_CAN_RML1() bfin_read16(CAN_RML1) +#define bfin_write_CAN_RML1(val) bfin_write16(CAN_RML1,val) +#define bfin_read_CAN_MBTIF1() bfin_read16(CAN_MBTIF1) +#define bfin_write_CAN_MBTIF1(val) bfin_write16(CAN_MBTIF1,val) +#define bfin_read_CAN_MBRIF1() bfin_read16(CAN_MBRIF1) +#define bfin_write_CAN_MBRIF1(val) bfin_write16(CAN_MBRIF1,val) +#define bfin_read_CAN_MBIM1() bfin_read16(CAN_MBIM1) +#define bfin_write_CAN_MBIM1(val) bfin_write16(CAN_MBIM1,val) +#define bfin_read_CAN_RFH1() bfin_read16(CAN_RFH1) +#define bfin_write_CAN_RFH1(val) bfin_write16(CAN_RFH1,val) +#define bfin_read_CAN_OPSS1() bfin_read16(CAN_OPSS1) +#define bfin_write_CAN_OPSS1(val) bfin_write16(CAN_OPSS1,val) + +/* For Mailboxes 16-31 */ +#define bfin_read_CAN_MC2() bfin_read16(CAN_MC2) +#define bfin_write_CAN_MC2(val) bfin_write16(CAN_MC2,val) +#define bfin_read_CAN_MD2() bfin_read16(CAN_MD2) +#define bfin_write_CAN_MD2(val) bfin_write16(CAN_MD2,val) +#define bfin_read_CAN_TRS2() bfin_read16(CAN_TRS2) +#define bfin_write_CAN_TRS2(val) bfin_write16(CAN_TRS2,val) +#define bfin_read_CAN_TRR2() bfin_read16(CAN_TRR2) +#define bfin_write_CAN_TRR2(val) bfin_write16(CAN_TRR2,val) +#define bfin_read_CAN_TA2() bfin_read16(CAN_TA2) +#define bfin_write_CAN_TA2(val) bfin_write16(CAN_TA2,val) +#define bfin_read_CAN_AA2() bfin_read16(CAN_AA2) +#define bfin_write_CAN_AA2(val) bfin_write16(CAN_AA2,val) +#define bfin_read_CAN_RMP2() bfin_read16(CAN_RMP2) +#define bfin_write_CAN_RMP2(val) bfin_write16(CAN_RMP2,val) +#define bfin_read_CAN_RML2() bfin_read16(CAN_RML2) +#define bfin_write_CAN_RML2(val) bfin_write16(CAN_RML2,val) +#define bfin_read_CAN_MBTIF2() bfin_read16(CAN_MBTIF2) +#define bfin_write_CAN_MBTIF2(val) bfin_write16(CAN_MBTIF2,val) +#define bfin_read_CAN_MBRIF2() bfin_read16(CAN_MBRIF2) +#define bfin_write_CAN_MBRIF2(val) bfin_write16(CAN_MBRIF2,val) +#define bfin_read_CAN_MBIM2() bfin_read16(CAN_MBIM2) +#define bfin_write_CAN_MBIM2(val) bfin_write16(CAN_MBIM2,val) +#define bfin_read_CAN_RFH2() bfin_read16(CAN_RFH2) +#define bfin_write_CAN_RFH2(val) bfin_write16(CAN_RFH2,val) +#define bfin_read_CAN_OPSS2() bfin_read16(CAN_OPSS2) +#define bfin_write_CAN_OPSS2(val) bfin_write16(CAN_OPSS2,val) + +#define bfin_read_CAN_CLOCK() bfin_read16(CAN_CLOCK) +#define bfin_write_CAN_CLOCK(val) bfin_write16(CAN_CLOCK,val) +#define bfin_read_CAN_TIMING() bfin_read16(CAN_TIMING) +#define bfin_write_CAN_TIMING(val) bfin_write16(CAN_TIMING,val) +#define bfin_read_CAN_DEBUG() bfin_read16(CAN_DEBUG) +#define bfin_write_CAN_DEBUG(val) bfin_write16(CAN_DEBUG,val) +#define bfin_read_CAN_STATUS() bfin_read16(CAN_STATUS) +#define bfin_write_CAN_STATUS(val) bfin_write16(CAN_STATUS,val) +#define bfin_read_CAN_CEC() bfin_read16(CAN_CEC) +#define bfin_write_CAN_CEC(val) bfin_write16(CAN_CEC,val) +#define bfin_read_CAN_GIS() bfin_read16(CAN_GIS) +#define bfin_write_CAN_GIS(val) bfin_write16(CAN_GIS,val) +#define bfin_read_CAN_GIM() bfin_read16(CAN_GIM) +#define bfin_write_CAN_GIM(val) bfin_write16(CAN_GIM,val) +#define bfin_read_CAN_GIF() bfin_read16(CAN_GIF) +#define 
bfin_write_CAN_GIF(val) bfin_write16(CAN_GIF,val) +#define bfin_read_CAN_CONTROL() bfin_read16(CAN_CONTROL) +#define bfin_write_CAN_CONTROL(val) bfin_write16(CAN_CONTROL,val) +#define bfin_read_CAN_INTR() bfin_read16(CAN_INTR) +#define bfin_write_CAN_INTR(val) bfin_write16(CAN_INTR,val) +#define bfin_read_CAN_SFCMVER() bfin_read16(CAN_SFCMVER) +#define bfin_write_CAN_SFCMVER(val) bfin_write16(CAN_SFCMVER,val) +#define bfin_read_CAN_MBTD() bfin_read16(CAN_MBTD) +#define bfin_write_CAN_MBTD(val) bfin_write16(CAN_MBTD,val) +#define bfin_read_CAN_EWR() bfin_read16(CAN_EWR) +#define bfin_write_CAN_EWR(val) bfin_write16(CAN_EWR,val) +#define bfin_read_CAN_ESR() bfin_read16(CAN_ESR) +#define bfin_write_CAN_ESR(val) bfin_write16(CAN_ESR,val) +#define bfin_read_CAN_UCREG() bfin_read16(CAN_UCREG) +#define bfin_write_CAN_UCREG(val) bfin_write16(CAN_UCREG,val) +#define bfin_read_CAN_UCCNT() bfin_read16(CAN_UCCNT) +#define bfin_write_CAN_UCCNT(val) bfin_write16(CAN_UCCNT,val) +#define bfin_read_CAN_UCRC() bfin_read16(CAN_UCRC) +#define bfin_write_CAN_UCRC(val) bfin_write16(CAN_UCRC,val) +#define bfin_read_CAN_UCCNF() bfin_read16(CAN_UCCNF) +#define bfin_write_CAN_UCCNF(val) bfin_write16(CAN_UCCNF,val) +#define bfin_read_CAN_SFCMVER2() bfin_read16(CAN_SFCMVER2) +#define bfin_write_CAN_SFCMVER2(val) bfin_write16(CAN_SFCMVER2,val) + +/* Mailbox Acceptance Masks */ +#define bfin_read_CAN_AM00L() bfin_read16(CAN_AM00L) +#define bfin_write_CAN_AM00L(val) bfin_write16(CAN_AM00L,val) +#define bfin_read_CAN_AM00H() bfin_read16(CAN_AM00H) +#define bfin_write_CAN_AM00H(val) bfin_write16(CAN_AM00H,val) +#define bfin_read_CAN_AM01L() bfin_read16(CAN_AM01L) +#define bfin_write_CAN_AM01L(val) bfin_write16(CAN_AM01L,val) +#define bfin_read_CAN_AM01H() bfin_read16(CAN_AM01H) +#define bfin_write_CAN_AM01H(val) bfin_write16(CAN_AM01H,val) +#define bfin_read_CAN_AM02L() bfin_read16(CAN_AM02L) +#define bfin_write_CAN_AM02L(val) bfin_write16(CAN_AM02L,val) +#define bfin_read_CAN_AM02H() bfin_read16(CAN_AM02H) +#define bfin_write_CAN_AM02H(val) bfin_write16(CAN_AM02H,val) +#define bfin_read_CAN_AM03L() bfin_read16(CAN_AM03L) +#define bfin_write_CAN_AM03L(val) bfin_write16(CAN_AM03L,val) +#define bfin_read_CAN_AM03H() bfin_read16(CAN_AM03H) +#define bfin_write_CAN_AM03H(val) bfin_write16(CAN_AM03H,val) +#define bfin_read_CAN_AM04L() bfin_read16(CAN_AM04L) +#define bfin_write_CAN_AM04L(val) bfin_write16(CAN_AM04L,val) +#define bfin_read_CAN_AM04H() bfin_read16(CAN_AM04H) +#define bfin_write_CAN_AM04H(val) bfin_write16(CAN_AM04H,val) +#define bfin_read_CAN_AM05L() bfin_read16(CAN_AM05L) +#define bfin_write_CAN_AM05L(val) bfin_write16(CAN_AM05L,val) +#define bfin_read_CAN_AM05H() bfin_read16(CAN_AM05H) +#define bfin_write_CAN_AM05H(val) bfin_write16(CAN_AM05H,val) +#define bfin_read_CAN_AM06L() bfin_read16(CAN_AM06L) +#define bfin_write_CAN_AM06L(val) bfin_write16(CAN_AM06L,val) +#define bfin_read_CAN_AM06H() bfin_read16(CAN_AM06H) +#define bfin_write_CAN_AM06H(val) bfin_write16(CAN_AM06H,val) +#define bfin_read_CAN_AM07L() bfin_read16(CAN_AM07L) +#define bfin_write_CAN_AM07L(val) bfin_write16(CAN_AM07L,val) +#define bfin_read_CAN_AM07H() bfin_read16(CAN_AM07H) +#define bfin_write_CAN_AM07H(val) bfin_write16(CAN_AM07H,val) +#define bfin_read_CAN_AM08L() bfin_read16(CAN_AM08L) +#define bfin_write_CAN_AM08L(val) bfin_write16(CAN_AM08L,val) +#define bfin_read_CAN_AM08H() bfin_read16(CAN_AM08H) +#define bfin_write_CAN_AM08H(val) bfin_write16(CAN_AM08H,val) +#define bfin_read_CAN_AM09L() bfin_read16(CAN_AM09L) +#define 
bfin_write_CAN_AM09L(val) bfin_write16(CAN_AM09L,val) +#define bfin_read_CAN_AM09H() bfin_read16(CAN_AM09H) +#define bfin_write_CAN_AM09H(val) bfin_write16(CAN_AM09H,val) +#define bfin_read_CAN_AM10L() bfin_read16(CAN_AM10L) +#define bfin_write_CAN_AM10L(val) bfin_write16(CAN_AM10L,val) +#define bfin_read_CAN_AM10H() bfin_read16(CAN_AM10H) +#define bfin_write_CAN_AM10H(val) bfin_write16(CAN_AM10H,val) +#define bfin_read_CAN_AM11L() bfin_read16(CAN_AM11L) +#define bfin_write_CAN_AM11L(val) bfin_write16(CAN_AM11L,val) +#define bfin_read_CAN_AM11H() bfin_read16(CAN_AM11H) +#define bfin_write_CAN_AM11H(val) bfin_write16(CAN_AM11H,val) +#define bfin_read_CAN_AM12L() bfin_read16(CAN_AM12L) +#define bfin_write_CAN_AM12L(val) bfin_write16(CAN_AM12L,val) +#define bfin_read_CAN_AM12H() bfin_read16(CAN_AM12H) +#define bfin_write_CAN_AM12H(val) bfin_write16(CAN_AM12H,val) +#define bfin_read_CAN_AM13L() bfin_read16(CAN_AM13L) +#define bfin_write_CAN_AM13L(val) bfin_write16(CAN_AM13L,val) +#define bfin_read_CAN_AM13H() bfin_read16(CAN_AM13H) +#define bfin_write_CAN_AM13H(val) bfin_write16(CAN_AM13H,val) +#define bfin_read_CAN_AM14L() bfin_read16(CAN_AM14L) +#define bfin_write_CAN_AM14L(val) bfin_write16(CAN_AM14L,val) +#define bfin_read_CAN_AM14H() bfin_read16(CAN_AM14H) +#define bfin_write_CAN_AM14H(val) bfin_write16(CAN_AM14H,val) +#define bfin_read_CAN_AM15L() bfin_read16(CAN_AM15L) +#define bfin_write_CAN_AM15L(val) bfin_write16(CAN_AM15L,val) +#define bfin_read_CAN_AM15H() bfin_read16(CAN_AM15H) +#define bfin_write_CAN_AM15H(val) bfin_write16(CAN_AM15H,val) + +#define bfin_read_CAN_AM16L() bfin_read16(CAN_AM16L) +#define bfin_write_CAN_AM16L(val) bfin_write16(CAN_AM16L,val) +#define bfin_read_CAN_AM16H() bfin_read16(CAN_AM16H) +#define bfin_write_CAN_AM16H(val) bfin_write16(CAN_AM16H,val) +#define bfin_read_CAN_AM17L() bfin_read16(CAN_AM17L) +#define bfin_write_CAN_AM17L(val) bfin_write16(CAN_AM17L,val) +#define bfin_read_CAN_AM17H() bfin_read16(CAN_AM17H) +#define bfin_write_CAN_AM17H(val) bfin_write16(CAN_AM17H,val) +#define bfin_read_CAN_AM18L() bfin_read16(CAN_AM18L) +#define bfin_write_CAN_AM18L(val) bfin_write16(CAN_AM18L,val) +#define bfin_read_CAN_AM18H() bfin_read16(CAN_AM18H) +#define bfin_write_CAN_AM18H(val) bfin_write16(CAN_AM18H,val) +#define bfin_read_CAN_AM19L() bfin_read16(CAN_AM19L) +#define bfin_write_CAN_AM19L(val) bfin_write16(CAN_AM19L,val) +#define bfin_read_CAN_AM19H() bfin_read16(CAN_AM19H) +#define bfin_write_CAN_AM19H(val) bfin_write16(CAN_AM19H,val) +#define bfin_read_CAN_AM20L() bfin_read16(CAN_AM20L) +#define bfin_write_CAN_AM20L(val) bfin_write16(CAN_AM20L,val) +#define bfin_read_CAN_AM20H() bfin_read16(CAN_AM20H) +#define bfin_write_CAN_AM20H(val) bfin_write16(CAN_AM20H,val) +#define bfin_read_CAN_AM21L() bfin_read16(CAN_AM21L) +#define bfin_write_CAN_AM21L(val) bfin_write16(CAN_AM21L,val) +#define bfin_read_CAN_AM21H() bfin_read16(CAN_AM21H) +#define bfin_write_CAN_AM21H(val) bfin_write16(CAN_AM21H,val) +#define bfin_read_CAN_AM22L() bfin_read16(CAN_AM22L) +#define bfin_write_CAN_AM22L(val) bfin_write16(CAN_AM22L,val) +#define bfin_read_CAN_AM22H() bfin_read16(CAN_AM22H) +#define bfin_write_CAN_AM22H(val) bfin_write16(CAN_AM22H,val) +#define bfin_read_CAN_AM23L() bfin_read16(CAN_AM23L) +#define bfin_write_CAN_AM23L(val) bfin_write16(CAN_AM23L,val) +#define bfin_read_CAN_AM23H() bfin_read16(CAN_AM23H) +#define bfin_write_CAN_AM23H(val) bfin_write16(CAN_AM23H,val) +#define bfin_read_CAN_AM24L() bfin_read16(CAN_AM24L) +#define bfin_write_CAN_AM24L(val) 
bfin_write16(CAN_AM24L,val) +#define bfin_read_CAN_AM24H() bfin_read16(CAN_AM24H) +#define bfin_write_CAN_AM24H(val) bfin_write16(CAN_AM24H,val) +#define bfin_read_CAN_AM25L() bfin_read16(CAN_AM25L) +#define bfin_write_CAN_AM25L(val) bfin_write16(CAN_AM25L,val) +#define bfin_read_CAN_AM25H() bfin_read16(CAN_AM25H) +#define bfin_write_CAN_AM25H(val) bfin_write16(CAN_AM25H,val) +#define bfin_read_CAN_AM26L() bfin_read16(CAN_AM26L) +#define bfin_write_CAN_AM26L(val) bfin_write16(CAN_AM26L,val) +#define bfin_read_CAN_AM26H() bfin_read16(CAN_AM26H) +#define bfin_write_CAN_AM26H(val) bfin_write16(CAN_AM26H,val) +#define bfin_read_CAN_AM27L() bfin_read16(CAN_AM27L) +#define bfin_write_CAN_AM27L(val) bfin_write16(CAN_AM27L,val) +#define bfin_read_CAN_AM27H() bfin_read16(CAN_AM27H) +#define bfin_write_CAN_AM27H(val) bfin_write16(CAN_AM27H,val) +#define bfin_read_CAN_AM28L() bfin_read16(CAN_AM28L) +#define bfin_write_CAN_AM28L(val) bfin_write16(CAN_AM28L,val) +#define bfin_read_CAN_AM28H() bfin_read16(CAN_AM28H) +#define bfin_write_CAN_AM28H(val) bfin_write16(CAN_AM28H,val) +#define bfin_read_CAN_AM29L() bfin_read16(CAN_AM29L) +#define bfin_write_CAN_AM29L(val) bfin_write16(CAN_AM29L,val) +#define bfin_read_CAN_AM29H() bfin_read16(CAN_AM29H) +#define bfin_write_CAN_AM29H(val) bfin_write16(CAN_AM29H,val) +#define bfin_read_CAN_AM30L() bfin_read16(CAN_AM30L) +#define bfin_write_CAN_AM30L(val) bfin_write16(CAN_AM30L,val) +#define bfin_read_CAN_AM30H() bfin_read16(CAN_AM30H) +#define bfin_write_CAN_AM30H(val) bfin_write16(CAN_AM30H,val) +#define bfin_read_CAN_AM31L() bfin_read16(CAN_AM31L) +#define bfin_write_CAN_AM31L(val) bfin_write16(CAN_AM31L,val) +#define bfin_read_CAN_AM31H() bfin_read16(CAN_AM31H) +#define bfin_write_CAN_AM31H(val) bfin_write16(CAN_AM31H,val) + +/* CAN Acceptance Mask Area Macros */ +#define bfin_read_CAN_AM_L(x)() bfin_read16(CAN_AM_L(x)) +#define bfin_write_CAN_AM_L(x)(val) bfin_write16(CAN_AM_L(x),val) +#define bfin_read_CAN_AM_H(x)() bfin_read16(CAN_AM_H(x)) +#define bfin_write_CAN_AM_H(x)(val) bfin_write16(CAN_AM_H(x),val) + +/* Mailbox Registers */ +#define bfin_read_CAN_MB00_ID1() bfin_read16(CAN_MB00_ID1) +#define bfin_write_CAN_MB00_ID1(val) bfin_write16(CAN_MB00_ID1,val) +#define bfin_read_CAN_MB00_ID0() bfin_read16(CAN_MB00_ID0) +#define bfin_write_CAN_MB00_ID0(val) bfin_write16(CAN_MB00_ID0,val) +#define bfin_read_CAN_MB00_TIMESTAMP() bfin_read16(CAN_MB00_TIMESTAMP) +#define bfin_write_CAN_MB00_TIMESTAMP(val) bfin_write16(CAN_MB00_TIMESTAMP,val) +#define bfin_read_CAN_MB00_LENGTH() bfin_read16(CAN_MB00_LENGTH) +#define bfin_write_CAN_MB00_LENGTH(val) bfin_write16(CAN_MB00_LENGTH,val) +#define bfin_read_CAN_MB00_DATA3() bfin_read16(CAN_MB00_DATA3) +#define bfin_write_CAN_MB00_DATA3(val) bfin_write16(CAN_MB00_DATA3,val) +#define bfin_read_CAN_MB00_DATA2() bfin_read16(CAN_MB00_DATA2) +#define bfin_write_CAN_MB00_DATA2(val) bfin_write16(CAN_MB00_DATA2,val) +#define bfin_read_CAN_MB00_DATA1() bfin_read16(CAN_MB00_DATA1) +#define bfin_write_CAN_MB00_DATA1(val) bfin_write16(CAN_MB00_DATA1,val) +#define bfin_read_CAN_MB00_DATA0() bfin_read16(CAN_MB00_DATA0) +#define bfin_write_CAN_MB00_DATA0(val) bfin_write16(CAN_MB00_DATA0,val) + +#define bfin_read_CAN_MB01_ID1() bfin_read16(CAN_MB01_ID1) +#define bfin_write_CAN_MB01_ID1(val) bfin_write16(CAN_MB01_ID1,val) +#define bfin_read_CAN_MB01_ID0() bfin_read16(CAN_MB01_ID0) +#define bfin_write_CAN_MB01_ID0(val) bfin_write16(CAN_MB01_ID0,val) +#define bfin_read_CAN_MB01_TIMESTAMP() bfin_read16(CAN_MB01_TIMESTAMP) +#define 
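/*
 * Editorial sketch, not part of the patch: one way a driver might program a
 * single acceptance-mask pair through the per-mailbox accessors defined
 * above.  Splitting a 32-bit mask value into the AMxxH/AMxxL halves is
 * assumed here purely for illustration; the exact bit layout is specified by
 * the BF534/BF536/BF537 hardware reference manual.
 */
static inline void can_am00_set_example(unsigned int mask)
{
        bfin_write_CAN_AM00H((mask >> 16) & 0xFFFF);    /* upper half of the mask */
        bfin_write_CAN_AM00L(mask & 0xFFFF);            /* lower half of the mask */
}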
bfin_write_CAN_MB01_TIMESTAMP(val) bfin_write16(CAN_MB01_TIMESTAMP,val) +#define bfin_read_CAN_MB01_LENGTH() bfin_read16(CAN_MB01_LENGTH) +#define bfin_write_CAN_MB01_LENGTH(val) bfin_write16(CAN_MB01_LENGTH,val) +#define bfin_read_CAN_MB01_DATA3() bfin_read16(CAN_MB01_DATA3) +#define bfin_write_CAN_MB01_DATA3(val) bfin_write16(CAN_MB01_DATA3,val) +#define bfin_read_CAN_MB01_DATA2() bfin_read16(CAN_MB01_DATA2) +#define bfin_write_CAN_MB01_DATA2(val) bfin_write16(CAN_MB01_DATA2,val) +#define bfin_read_CAN_MB01_DATA1() bfin_read16(CAN_MB01_DATA1) +#define bfin_write_CAN_MB01_DATA1(val) bfin_write16(CAN_MB01_DATA1,val) +#define bfin_read_CAN_MB01_DATA0() bfin_read16(CAN_MB01_DATA0) +#define bfin_write_CAN_MB01_DATA0(val) bfin_write16(CAN_MB01_DATA0,val) + +#define bfin_read_CAN_MB02_ID1() bfin_read16(CAN_MB02_ID1) +#define bfin_write_CAN_MB02_ID1(val) bfin_write16(CAN_MB02_ID1,val) +#define bfin_read_CAN_MB02_ID0() bfin_read16(CAN_MB02_ID0) +#define bfin_write_CAN_MB02_ID0(val) bfin_write16(CAN_MB02_ID0,val) +#define bfin_read_CAN_MB02_TIMESTAMP() bfin_read16(CAN_MB02_TIMESTAMP) +#define bfin_write_CAN_MB02_TIMESTAMP(val) bfin_write16(CAN_MB02_TIMESTAMP,val) +#define bfin_read_CAN_MB02_LENGTH() bfin_read16(CAN_MB02_LENGTH) +#define bfin_write_CAN_MB02_LENGTH(val) bfin_write16(CAN_MB02_LENGTH,val) +#define bfin_read_CAN_MB02_DATA3() bfin_read16(CAN_MB02_DATA3) +#define bfin_write_CAN_MB02_DATA3(val) bfin_write16(CAN_MB02_DATA3,val) +#define bfin_read_CAN_MB02_DATA2() bfin_read16(CAN_MB02_DATA2) +#define bfin_write_CAN_MB02_DATA2(val) bfin_write16(CAN_MB02_DATA2,val) +#define bfin_read_CAN_MB02_DATA1() bfin_read16(CAN_MB02_DATA1) +#define bfin_write_CAN_MB02_DATA1(val) bfin_write16(CAN_MB02_DATA1,val) +#define bfin_read_CAN_MB02_DATA0() bfin_read16(CAN_MB02_DATA0) +#define bfin_write_CAN_MB02_DATA0(val) bfin_write16(CAN_MB02_DATA0,val) + +#define bfin_read_CAN_MB03_ID1() bfin_read16(CAN_MB03_ID1) +#define bfin_write_CAN_MB03_ID1(val) bfin_write16(CAN_MB03_ID1,val) +#define bfin_read_CAN_MB03_ID0() bfin_read16(CAN_MB03_ID0) +#define bfin_write_CAN_MB03_ID0(val) bfin_write16(CAN_MB03_ID0,val) +#define bfin_read_CAN_MB03_TIMESTAMP() bfin_read16(CAN_MB03_TIMESTAMP) +#define bfin_write_CAN_MB03_TIMESTAMP(val) bfin_write16(CAN_MB03_TIMESTAMP,val) +#define bfin_read_CAN_MB03_LENGTH() bfin_read16(CAN_MB03_LENGTH) +#define bfin_write_CAN_MB03_LENGTH(val) bfin_write16(CAN_MB03_LENGTH,val) +#define bfin_read_CAN_MB03_DATA3() bfin_read16(CAN_MB03_DATA3) +#define bfin_write_CAN_MB03_DATA3(val) bfin_write16(CAN_MB03_DATA3,val) +#define bfin_read_CAN_MB03_DATA2() bfin_read16(CAN_MB03_DATA2) +#define bfin_write_CAN_MB03_DATA2(val) bfin_write16(CAN_MB03_DATA2,val) +#define bfin_read_CAN_MB03_DATA1() bfin_read16(CAN_MB03_DATA1) +#define bfin_write_CAN_MB03_DATA1(val) bfin_write16(CAN_MB03_DATA1,val) +#define bfin_read_CAN_MB03_DATA0() bfin_read16(CAN_MB03_DATA0) +#define bfin_write_CAN_MB03_DATA0(val) bfin_write16(CAN_MB03_DATA0,val) + +#define bfin_read_CAN_MB04_ID1() bfin_read16(CAN_MB04_ID1) +#define bfin_write_CAN_MB04_ID1(val) bfin_write16(CAN_MB04_ID1,val) +#define bfin_read_CAN_MB04_ID0() bfin_read16(CAN_MB04_ID0) +#define bfin_write_CAN_MB04_ID0(val) bfin_write16(CAN_MB04_ID0,val) +#define bfin_read_CAN_MB04_TIMESTAMP() bfin_read16(CAN_MB04_TIMESTAMP) +#define bfin_write_CAN_MB04_TIMESTAMP(val) bfin_write16(CAN_MB04_TIMESTAMP,val) +#define bfin_read_CAN_MB04_LENGTH() bfin_read16(CAN_MB04_LENGTH) +#define bfin_write_CAN_MB04_LENGTH(val) bfin_write16(CAN_MB04_LENGTH,val) +#define bfin_read_CAN_MB04_DATA3() 
bfin_read16(CAN_MB04_DATA3) +#define bfin_write_CAN_MB04_DATA3(val) bfin_write16(CAN_MB04_DATA3,val) +#define bfin_read_CAN_MB04_DATA2() bfin_read16(CAN_MB04_DATA2) +#define bfin_write_CAN_MB04_DATA2(val) bfin_write16(CAN_MB04_DATA2,val) +#define bfin_read_CAN_MB04_DATA1() bfin_read16(CAN_MB04_DATA1) +#define bfin_write_CAN_MB04_DATA1(val) bfin_write16(CAN_MB04_DATA1,val) +#define bfin_read_CAN_MB04_DATA0() bfin_read16(CAN_MB04_DATA0) +#define bfin_write_CAN_MB04_DATA0(val) bfin_write16(CAN_MB04_DATA0,val) + +#define bfin_read_CAN_MB05_ID1() bfin_read16(CAN_MB05_ID1) +#define bfin_write_CAN_MB05_ID1(val) bfin_write16(CAN_MB05_ID1,val) +#define bfin_read_CAN_MB05_ID0() bfin_read16(CAN_MB05_ID0) +#define bfin_write_CAN_MB05_ID0(val) bfin_write16(CAN_MB05_ID0,val) +#define bfin_read_CAN_MB05_TIMESTAMP() bfin_read16(CAN_MB05_TIMESTAMP) +#define bfin_write_CAN_MB05_TIMESTAMP(val) bfin_write16(CAN_MB05_TIMESTAMP,val) +#define bfin_read_CAN_MB05_LENGTH() bfin_read16(CAN_MB05_LENGTH) +#define bfin_write_CAN_MB05_LENGTH(val) bfin_write16(CAN_MB05_LENGTH,val) +#define bfin_read_CAN_MB05_DATA3() bfin_read16(CAN_MB05_DATA3) +#define bfin_write_CAN_MB05_DATA3(val) bfin_write16(CAN_MB05_DATA3,val) +#define bfin_read_CAN_MB05_DATA2() bfin_read16(CAN_MB05_DATA2) +#define bfin_write_CAN_MB05_DATA2(val) bfin_write16(CAN_MB05_DATA2,val) +#define bfin_read_CAN_MB05_DATA1() bfin_read16(CAN_MB05_DATA1) +#define bfin_write_CAN_MB05_DATA1(val) bfin_write16(CAN_MB05_DATA1,val) +#define bfin_read_CAN_MB05_DATA0() bfin_read16(CAN_MB05_DATA0) +#define bfin_write_CAN_MB05_DATA0(val) bfin_write16(CAN_MB05_DATA0,val) + +#define bfin_read_CAN_MB06_ID1() bfin_read16(CAN_MB06_ID1) +#define bfin_write_CAN_MB06_ID1(val) bfin_write16(CAN_MB06_ID1,val) +#define bfin_read_CAN_MB06_ID0() bfin_read16(CAN_MB06_ID0) +#define bfin_write_CAN_MB06_ID0(val) bfin_write16(CAN_MB06_ID0,val) +#define bfin_read_CAN_MB06_TIMESTAMP() bfin_read16(CAN_MB06_TIMESTAMP) +#define bfin_write_CAN_MB06_TIMESTAMP(val) bfin_write16(CAN_MB06_TIMESTAMP,val) +#define bfin_read_CAN_MB06_LENGTH() bfin_read16(CAN_MB06_LENGTH) +#define bfin_write_CAN_MB06_LENGTH(val) bfin_write16(CAN_MB06_LENGTH,val) +#define bfin_read_CAN_MB06_DATA3() bfin_read16(CAN_MB06_DATA3) +#define bfin_write_CAN_MB06_DATA3(val) bfin_write16(CAN_MB06_DATA3,val) +#define bfin_read_CAN_MB06_DATA2() bfin_read16(CAN_MB06_DATA2) +#define bfin_write_CAN_MB06_DATA2(val) bfin_write16(CAN_MB06_DATA2,val) +#define bfin_read_CAN_MB06_DATA1() bfin_read16(CAN_MB06_DATA1) +#define bfin_write_CAN_MB06_DATA1(val) bfin_write16(CAN_MB06_DATA1,val) +#define bfin_read_CAN_MB06_DATA0() bfin_read16(CAN_MB06_DATA0) +#define bfin_write_CAN_MB06_DATA0(val) bfin_write16(CAN_MB06_DATA0,val) + +#define bfin_read_CAN_MB07_ID1() bfin_read16(CAN_MB07_ID1) +#define bfin_write_CAN_MB07_ID1(val) bfin_write16(CAN_MB07_ID1,val) +#define bfin_read_CAN_MB07_ID0() bfin_read16(CAN_MB07_ID0) +#define bfin_write_CAN_MB07_ID0(val) bfin_write16(CAN_MB07_ID0,val) +#define bfin_read_CAN_MB07_TIMESTAMP() bfin_read16(CAN_MB07_TIMESTAMP) +#define bfin_write_CAN_MB07_TIMESTAMP(val) bfin_write16(CAN_MB07_TIMESTAMP,val) +#define bfin_read_CAN_MB07_LENGTH() bfin_read16(CAN_MB07_LENGTH) +#define bfin_write_CAN_MB07_LENGTH(val) bfin_write16(CAN_MB07_LENGTH,val) +#define bfin_read_CAN_MB07_DATA3() bfin_read16(CAN_MB07_DATA3) +#define bfin_write_CAN_MB07_DATA3(val) bfin_write16(CAN_MB07_DATA3,val) +#define bfin_read_CAN_MB07_DATA2() bfin_read16(CAN_MB07_DATA2) +#define bfin_write_CAN_MB07_DATA2(val) bfin_write16(CAN_MB07_DATA2,val) +#define 
bfin_read_CAN_MB07_DATA1() bfin_read16(CAN_MB07_DATA1) +#define bfin_write_CAN_MB07_DATA1(val) bfin_write16(CAN_MB07_DATA1,val) +#define bfin_read_CAN_MB07_DATA0() bfin_read16(CAN_MB07_DATA0) +#define bfin_write_CAN_MB07_DATA0(val) bfin_write16(CAN_MB07_DATA0,val) + +#define bfin_read_CAN_MB08_ID1() bfin_read16(CAN_MB08_ID1) +#define bfin_write_CAN_MB08_ID1(val) bfin_write16(CAN_MB08_ID1,val) +#define bfin_read_CAN_MB08_ID0() bfin_read16(CAN_MB08_ID0) +#define bfin_write_CAN_MB08_ID0(val) bfin_write16(CAN_MB08_ID0,val) +#define bfin_read_CAN_MB08_TIMESTAMP() bfin_read16(CAN_MB08_TIMESTAMP) +#define bfin_write_CAN_MB08_TIMESTAMP(val) bfin_write16(CAN_MB08_TIMESTAMP,val) +#define bfin_read_CAN_MB08_LENGTH() bfin_read16(CAN_MB08_LENGTH) +#define bfin_write_CAN_MB08_LENGTH(val) bfin_write16(CAN_MB08_LENGTH,val) +#define bfin_read_CAN_MB08_DATA3() bfin_read16(CAN_MB08_DATA3) +#define bfin_write_CAN_MB08_DATA3(val) bfin_write16(CAN_MB08_DATA3,val) +#define bfin_read_CAN_MB08_DATA2() bfin_read16(CAN_MB08_DATA2) +#define bfin_write_CAN_MB08_DATA2(val) bfin_write16(CAN_MB08_DATA2,val) +#define bfin_read_CAN_MB08_DATA1() bfin_read16(CAN_MB08_DATA1) +#define bfin_write_CAN_MB08_DATA1(val) bfin_write16(CAN_MB08_DATA1,val) +#define bfin_read_CAN_MB08_DATA0() bfin_read16(CAN_MB08_DATA0) +#define bfin_write_CAN_MB08_DATA0(val) bfin_write16(CAN_MB08_DATA0,val) + +#define bfin_read_CAN_MB09_ID1() bfin_read16(CAN_MB09_ID1) +#define bfin_write_CAN_MB09_ID1(val) bfin_write16(CAN_MB09_ID1,val) +#define bfin_read_CAN_MB09_ID0() bfin_read16(CAN_MB09_ID0) +#define bfin_write_CAN_MB09_ID0(val) bfin_write16(CAN_MB09_ID0,val) +#define bfin_read_CAN_MB09_TIMESTAMP() bfin_read16(CAN_MB09_TIMESTAMP) +#define bfin_write_CAN_MB09_TIMESTAMP(val) bfin_write16(CAN_MB09_TIMESTAMP,val) +#define bfin_read_CAN_MB09_LENGTH() bfin_read16(CAN_MB09_LENGTH) +#define bfin_write_CAN_MB09_LENGTH(val) bfin_write16(CAN_MB09_LENGTH,val) +#define bfin_read_CAN_MB09_DATA3() bfin_read16(CAN_MB09_DATA3) +#define bfin_write_CAN_MB09_DATA3(val) bfin_write16(CAN_MB09_DATA3,val) +#define bfin_read_CAN_MB09_DATA2() bfin_read16(CAN_MB09_DATA2) +#define bfin_write_CAN_MB09_DATA2(val) bfin_write16(CAN_MB09_DATA2,val) +#define bfin_read_CAN_MB09_DATA1() bfin_read16(CAN_MB09_DATA1) +#define bfin_write_CAN_MB09_DATA1(val) bfin_write16(CAN_MB09_DATA1,val) +#define bfin_read_CAN_MB09_DATA0() bfin_read16(CAN_MB09_DATA0) +#define bfin_write_CAN_MB09_DATA0(val) bfin_write16(CAN_MB09_DATA0,val) + +#define bfin_read_CAN_MB10_ID1() bfin_read16(CAN_MB10_ID1) +#define bfin_write_CAN_MB10_ID1(val) bfin_write16(CAN_MB10_ID1,val) +#define bfin_read_CAN_MB10_ID0() bfin_read16(CAN_MB10_ID0) +#define bfin_write_CAN_MB10_ID0(val) bfin_write16(CAN_MB10_ID0,val) +#define bfin_read_CAN_MB10_TIMESTAMP() bfin_read16(CAN_MB10_TIMESTAMP) +#define bfin_write_CAN_MB10_TIMESTAMP(val) bfin_write16(CAN_MB10_TIMESTAMP,val) +#define bfin_read_CAN_MB10_LENGTH() bfin_read16(CAN_MB10_LENGTH) +#define bfin_write_CAN_MB10_LENGTH(val) bfin_write16(CAN_MB10_LENGTH,val) +#define bfin_read_CAN_MB10_DATA3() bfin_read16(CAN_MB10_DATA3) +#define bfin_write_CAN_MB10_DATA3(val) bfin_write16(CAN_MB10_DATA3,val) +#define bfin_read_CAN_MB10_DATA2() bfin_read16(CAN_MB10_DATA2) +#define bfin_write_CAN_MB10_DATA2(val) bfin_write16(CAN_MB10_DATA2,val) +#define bfin_read_CAN_MB10_DATA1() bfin_read16(CAN_MB10_DATA1) +#define bfin_write_CAN_MB10_DATA1(val) bfin_write16(CAN_MB10_DATA1,val) +#define bfin_read_CAN_MB10_DATA0() bfin_read16(CAN_MB10_DATA0) +#define bfin_write_CAN_MB10_DATA0(val) 
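/*
 * Editorial sketch, not part of the patch: loading a data frame into one
 * mailbox with the accessors above.  The identifier encoding for the ID1/ID0
 * registers and the transmit-request step (the CAN_TRS registers, defined
 * earlier in this header) are intentionally omitted; "id1_bits" is a
 * placeholder for whatever encoding the hardware reference manual specifies.
 */
static inline void can_mb08_load_example(unsigned short id1_bits,
                                         const unsigned short data[4],
                                         unsigned short dlc)
{
        bfin_write_CAN_MB08_DATA0(data[0]);
        bfin_write_CAN_MB08_DATA1(data[1]);
        bfin_write_CAN_MB08_DATA2(data[2]);
        bfin_write_CAN_MB08_DATA3(data[3]);
        bfin_write_CAN_MB08_LENGTH(dlc);        /* number of valid data bytes */
        bfin_write_CAN_MB08_ID1(id1_bits);      /* placeholder identifier encoding */
}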
bfin_write16(CAN_MB10_DATA0,val) + +#define bfin_read_CAN_MB11_ID1() bfin_read16(CAN_MB11_ID1) +#define bfin_write_CAN_MB11_ID1(val) bfin_write16(CAN_MB11_ID1,val) +#define bfin_read_CAN_MB11_ID0() bfin_read16(CAN_MB11_ID0) +#define bfin_write_CAN_MB11_ID0(val) bfin_write16(CAN_MB11_ID0,val) +#define bfin_read_CAN_MB11_TIMESTAMP() bfin_read16(CAN_MB11_TIMESTAMP) +#define bfin_write_CAN_MB11_TIMESTAMP(val) bfin_write16(CAN_MB11_TIMESTAMP,val) +#define bfin_read_CAN_MB11_LENGTH() bfin_read16(CAN_MB11_LENGTH) +#define bfin_write_CAN_MB11_LENGTH(val) bfin_write16(CAN_MB11_LENGTH,val) +#define bfin_read_CAN_MB11_DATA3() bfin_read16(CAN_MB11_DATA3) +#define bfin_write_CAN_MB11_DATA3(val) bfin_write16(CAN_MB11_DATA3,val) +#define bfin_read_CAN_MB11_DATA2() bfin_read16(CAN_MB11_DATA2) +#define bfin_write_CAN_MB11_DATA2(val) bfin_write16(CAN_MB11_DATA2,val) +#define bfin_read_CAN_MB11_DATA1() bfin_read16(CAN_MB11_DATA1) +#define bfin_write_CAN_MB11_DATA1(val) bfin_write16(CAN_MB11_DATA1,val) +#define bfin_read_CAN_MB11_DATA0() bfin_read16(CAN_MB11_DATA0) +#define bfin_write_CAN_MB11_DATA0(val) bfin_write16(CAN_MB11_DATA0,val) + +#define bfin_read_CAN_MB12_ID1() bfin_read16(CAN_MB12_ID1) +#define bfin_write_CAN_MB12_ID1(val) bfin_write16(CAN_MB12_ID1,val) +#define bfin_read_CAN_MB12_ID0() bfin_read16(CAN_MB12_ID0) +#define bfin_write_CAN_MB12_ID0(val) bfin_write16(CAN_MB12_ID0,val) +#define bfin_read_CAN_MB12_TIMESTAMP() bfin_read16(CAN_MB12_TIMESTAMP) +#define bfin_write_CAN_MB12_TIMESTAMP(val) bfin_write16(CAN_MB12_TIMESTAMP,val) +#define bfin_read_CAN_MB12_LENGTH() bfin_read16(CAN_MB12_LENGTH) +#define bfin_write_CAN_MB12_LENGTH(val) bfin_write16(CAN_MB12_LENGTH,val) +#define bfin_read_CAN_MB12_DATA3() bfin_read16(CAN_MB12_DATA3) +#define bfin_write_CAN_MB12_DATA3(val) bfin_write16(CAN_MB12_DATA3,val) +#define bfin_read_CAN_MB12_DATA2() bfin_read16(CAN_MB12_DATA2) +#define bfin_write_CAN_MB12_DATA2(val) bfin_write16(CAN_MB12_DATA2,val) +#define bfin_read_CAN_MB12_DATA1() bfin_read16(CAN_MB12_DATA1) +#define bfin_write_CAN_MB12_DATA1(val) bfin_write16(CAN_MB12_DATA1,val) +#define bfin_read_CAN_MB12_DATA0() bfin_read16(CAN_MB12_DATA0) +#define bfin_write_CAN_MB12_DATA0(val) bfin_write16(CAN_MB12_DATA0,val) + +#define bfin_read_CAN_MB13_ID1() bfin_read16(CAN_MB13_ID1) +#define bfin_write_CAN_MB13_ID1(val) bfin_write16(CAN_MB13_ID1,val) +#define bfin_read_CAN_MB13_ID0() bfin_read16(CAN_MB13_ID0) +#define bfin_write_CAN_MB13_ID0(val) bfin_write16(CAN_MB13_ID0,val) +#define bfin_read_CAN_MB13_TIMESTAMP() bfin_read16(CAN_MB13_TIMESTAMP) +#define bfin_write_CAN_MB13_TIMESTAMP(val) bfin_write16(CAN_MB13_TIMESTAMP,val) +#define bfin_read_CAN_MB13_LENGTH() bfin_read16(CAN_MB13_LENGTH) +#define bfin_write_CAN_MB13_LENGTH(val) bfin_write16(CAN_MB13_LENGTH,val) +#define bfin_read_CAN_MB13_DATA3() bfin_read16(CAN_MB13_DATA3) +#define bfin_write_CAN_MB13_DATA3(val) bfin_write16(CAN_MB13_DATA3,val) +#define bfin_read_CAN_MB13_DATA2() bfin_read16(CAN_MB13_DATA2) +#define bfin_write_CAN_MB13_DATA2(val) bfin_write16(CAN_MB13_DATA2,val) +#define bfin_read_CAN_MB13_DATA1() bfin_read16(CAN_MB13_DATA1) +#define bfin_write_CAN_MB13_DATA1(val) bfin_write16(CAN_MB13_DATA1,val) +#define bfin_read_CAN_MB13_DATA0() bfin_read16(CAN_MB13_DATA0) +#define bfin_write_CAN_MB13_DATA0(val) bfin_write16(CAN_MB13_DATA0,val) + +#define bfin_read_CAN_MB14_ID1() bfin_read16(CAN_MB14_ID1) +#define bfin_write_CAN_MB14_ID1(val) bfin_write16(CAN_MB14_ID1,val) +#define bfin_read_CAN_MB14_ID0() bfin_read16(CAN_MB14_ID0) +#define 
bfin_write_CAN_MB14_ID0(val) bfin_write16(CAN_MB14_ID0,val) +#define bfin_read_CAN_MB14_TIMESTAMP() bfin_read16(CAN_MB14_TIMESTAMP) +#define bfin_write_CAN_MB14_TIMESTAMP(val) bfin_write16(CAN_MB14_TIMESTAMP,val) +#define bfin_read_CAN_MB14_LENGTH() bfin_read16(CAN_MB14_LENGTH) +#define bfin_write_CAN_MB14_LENGTH(val) bfin_write16(CAN_MB14_LENGTH,val) +#define bfin_read_CAN_MB14_DATA3() bfin_read16(CAN_MB14_DATA3) +#define bfin_write_CAN_MB14_DATA3(val) bfin_write16(CAN_MB14_DATA3,val) +#define bfin_read_CAN_MB14_DATA2() bfin_read16(CAN_MB14_DATA2) +#define bfin_write_CAN_MB14_DATA2(val) bfin_write16(CAN_MB14_DATA2,val) +#define bfin_read_CAN_MB14_DATA1() bfin_read16(CAN_MB14_DATA1) +#define bfin_write_CAN_MB14_DATA1(val) bfin_write16(CAN_MB14_DATA1,val) +#define bfin_read_CAN_MB14_DATA0() bfin_read16(CAN_MB14_DATA0) +#define bfin_write_CAN_MB14_DATA0(val) bfin_write16(CAN_MB14_DATA0,val) + +#define bfin_read_CAN_MB15_ID1() bfin_read16(CAN_MB15_ID1) +#define bfin_write_CAN_MB15_ID1(val) bfin_write16(CAN_MB15_ID1,val) +#define bfin_read_CAN_MB15_ID0() bfin_read16(CAN_MB15_ID0) +#define bfin_write_CAN_MB15_ID0(val) bfin_write16(CAN_MB15_ID0,val) +#define bfin_read_CAN_MB15_TIMESTAMP() bfin_read16(CAN_MB15_TIMESTAMP) +#define bfin_write_CAN_MB15_TIMESTAMP(val) bfin_write16(CAN_MB15_TIMESTAMP,val) +#define bfin_read_CAN_MB15_LENGTH() bfin_read16(CAN_MB15_LENGTH) +#define bfin_write_CAN_MB15_LENGTH(val) bfin_write16(CAN_MB15_LENGTH,val) +#define bfin_read_CAN_MB15_DATA3() bfin_read16(CAN_MB15_DATA3) +#define bfin_write_CAN_MB15_DATA3(val) bfin_write16(CAN_MB15_DATA3,val) +#define bfin_read_CAN_MB15_DATA2() bfin_read16(CAN_MB15_DATA2) +#define bfin_write_CAN_MB15_DATA2(val) bfin_write16(CAN_MB15_DATA2,val) +#define bfin_read_CAN_MB15_DATA1() bfin_read16(CAN_MB15_DATA1) +#define bfin_write_CAN_MB15_DATA1(val) bfin_write16(CAN_MB15_DATA1,val) +#define bfin_read_CAN_MB15_DATA0() bfin_read16(CAN_MB15_DATA0) +#define bfin_write_CAN_MB15_DATA0(val) bfin_write16(CAN_MB15_DATA0,val) + +#define bfin_read_CAN_MB16_ID1() bfin_read16(CAN_MB16_ID1) +#define bfin_write_CAN_MB16_ID1(val) bfin_write16(CAN_MB16_ID1,val) +#define bfin_read_CAN_MB16_ID0() bfin_read16(CAN_MB16_ID0) +#define bfin_write_CAN_MB16_ID0(val) bfin_write16(CAN_MB16_ID0,val) +#define bfin_read_CAN_MB16_TIMESTAMP() bfin_read16(CAN_MB16_TIMESTAMP) +#define bfin_write_CAN_MB16_TIMESTAMP(val) bfin_write16(CAN_MB16_TIMESTAMP,val) +#define bfin_read_CAN_MB16_LENGTH() bfin_read16(CAN_MB16_LENGTH) +#define bfin_write_CAN_MB16_LENGTH(val) bfin_write16(CAN_MB16_LENGTH,val) +#define bfin_read_CAN_MB16_DATA3() bfin_read16(CAN_MB16_DATA3) +#define bfin_write_CAN_MB16_DATA3(val) bfin_write16(CAN_MB16_DATA3,val) +#define bfin_read_CAN_MB16_DATA2() bfin_read16(CAN_MB16_DATA2) +#define bfin_write_CAN_MB16_DATA2(val) bfin_write16(CAN_MB16_DATA2,val) +#define bfin_read_CAN_MB16_DATA1() bfin_read16(CAN_MB16_DATA1) +#define bfin_write_CAN_MB16_DATA1(val) bfin_write16(CAN_MB16_DATA1,val) +#define bfin_read_CAN_MB16_DATA0() bfin_read16(CAN_MB16_DATA0) +#define bfin_write_CAN_MB16_DATA0(val) bfin_write16(CAN_MB16_DATA0,val) + +#define bfin_read_CAN_MB17_ID1() bfin_read16(CAN_MB17_ID1) +#define bfin_write_CAN_MB17_ID1(val) bfin_write16(CAN_MB17_ID1,val) +#define bfin_read_CAN_MB17_ID0() bfin_read16(CAN_MB17_ID0) +#define bfin_write_CAN_MB17_ID0(val) bfin_write16(CAN_MB17_ID0,val) +#define bfin_read_CAN_MB17_TIMESTAMP() bfin_read16(CAN_MB17_TIMESTAMP) +#define bfin_write_CAN_MB17_TIMESTAMP(val) bfin_write16(CAN_MB17_TIMESTAMP,val) +#define 
bfin_read_CAN_MB17_LENGTH() bfin_read16(CAN_MB17_LENGTH) +#define bfin_write_CAN_MB17_LENGTH(val) bfin_write16(CAN_MB17_LENGTH,val) +#define bfin_read_CAN_MB17_DATA3() bfin_read16(CAN_MB17_DATA3) +#define bfin_write_CAN_MB17_DATA3(val) bfin_write16(CAN_MB17_DATA3,val) +#define bfin_read_CAN_MB17_DATA2() bfin_read16(CAN_MB17_DATA2) +#define bfin_write_CAN_MB17_DATA2(val) bfin_write16(CAN_MB17_DATA2,val) +#define bfin_read_CAN_MB17_DATA1() bfin_read16(CAN_MB17_DATA1) +#define bfin_write_CAN_MB17_DATA1(val) bfin_write16(CAN_MB17_DATA1,val) +#define bfin_read_CAN_MB17_DATA0() bfin_read16(CAN_MB17_DATA0) +#define bfin_write_CAN_MB17_DATA0(val) bfin_write16(CAN_MB17_DATA0,val) + +#define bfin_read_CAN_MB18_ID1() bfin_read16(CAN_MB18_ID1) +#define bfin_write_CAN_MB18_ID1(val) bfin_write16(CAN_MB18_ID1,val) +#define bfin_read_CAN_MB18_ID0() bfin_read16(CAN_MB18_ID0) +#define bfin_write_CAN_MB18_ID0(val) bfin_write16(CAN_MB18_ID0,val) +#define bfin_read_CAN_MB18_TIMESTAMP() bfin_read16(CAN_MB18_TIMESTAMP) +#define bfin_write_CAN_MB18_TIMESTAMP(val) bfin_write16(CAN_MB18_TIMESTAMP,val) +#define bfin_read_CAN_MB18_LENGTH() bfin_read16(CAN_MB18_LENGTH) +#define bfin_write_CAN_MB18_LENGTH(val) bfin_write16(CAN_MB18_LENGTH,val) +#define bfin_read_CAN_MB18_DATA3() bfin_read16(CAN_MB18_DATA3) +#define bfin_write_CAN_MB18_DATA3(val) bfin_write16(CAN_MB18_DATA3,val) +#define bfin_read_CAN_MB18_DATA2() bfin_read16(CAN_MB18_DATA2) +#define bfin_write_CAN_MB18_DATA2(val) bfin_write16(CAN_MB18_DATA2,val) +#define bfin_read_CAN_MB18_DATA1() bfin_read16(CAN_MB18_DATA1) +#define bfin_write_CAN_MB18_DATA1(val) bfin_write16(CAN_MB18_DATA1,val) +#define bfin_read_CAN_MB18_DATA0() bfin_read16(CAN_MB18_DATA0) +#define bfin_write_CAN_MB18_DATA0(val) bfin_write16(CAN_MB18_DATA0,val) + +#define bfin_read_CAN_MB19_ID1() bfin_read16(CAN_MB19_ID1) +#define bfin_write_CAN_MB19_ID1(val) bfin_write16(CAN_MB19_ID1,val) +#define bfin_read_CAN_MB19_ID0() bfin_read16(CAN_MB19_ID0) +#define bfin_write_CAN_MB19_ID0(val) bfin_write16(CAN_MB19_ID0,val) +#define bfin_read_CAN_MB19_TIMESTAMP() bfin_read16(CAN_MB19_TIMESTAMP) +#define bfin_write_CAN_MB19_TIMESTAMP(val) bfin_write16(CAN_MB19_TIMESTAMP,val) +#define bfin_read_CAN_MB19_LENGTH() bfin_read16(CAN_MB19_LENGTH) +#define bfin_write_CAN_MB19_LENGTH(val) bfin_write16(CAN_MB19_LENGTH,val) +#define bfin_read_CAN_MB19_DATA3() bfin_read16(CAN_MB19_DATA3) +#define bfin_write_CAN_MB19_DATA3(val) bfin_write16(CAN_MB19_DATA3,val) +#define bfin_read_CAN_MB19_DATA2() bfin_read16(CAN_MB19_DATA2) +#define bfin_write_CAN_MB19_DATA2(val) bfin_write16(CAN_MB19_DATA2,val) +#define bfin_read_CAN_MB19_DATA1() bfin_read16(CAN_MB19_DATA1) +#define bfin_write_CAN_MB19_DATA1(val) bfin_write16(CAN_MB19_DATA1,val) +#define bfin_read_CAN_MB19_DATA0() bfin_read16(CAN_MB19_DATA0) +#define bfin_write_CAN_MB19_DATA0(val) bfin_write16(CAN_MB19_DATA0,val) + +#define bfin_read_CAN_MB20_ID1() bfin_read16(CAN_MB20_ID1) +#define bfin_write_CAN_MB20_ID1(val) bfin_write16(CAN_MB20_ID1,val) +#define bfin_read_CAN_MB20_ID0() bfin_read16(CAN_MB20_ID0) +#define bfin_write_CAN_MB20_ID0(val) bfin_write16(CAN_MB20_ID0,val) +#define bfin_read_CAN_MB20_TIMESTAMP() bfin_read16(CAN_MB20_TIMESTAMP) +#define bfin_write_CAN_MB20_TIMESTAMP(val) bfin_write16(CAN_MB20_TIMESTAMP,val) +#define bfin_read_CAN_MB20_LENGTH() bfin_read16(CAN_MB20_LENGTH) +#define bfin_write_CAN_MB20_LENGTH(val) bfin_write16(CAN_MB20_LENGTH,val) +#define bfin_read_CAN_MB20_DATA3() bfin_read16(CAN_MB20_DATA3) +#define bfin_write_CAN_MB20_DATA3(val) 
bfin_write16(CAN_MB20_DATA3,val) +#define bfin_read_CAN_MB20_DATA2() bfin_read16(CAN_MB20_DATA2) +#define bfin_write_CAN_MB20_DATA2(val) bfin_write16(CAN_MB20_DATA2,val) +#define bfin_read_CAN_MB20_DATA1() bfin_read16(CAN_MB20_DATA1) +#define bfin_write_CAN_MB20_DATA1(val) bfin_write16(CAN_MB20_DATA1,val) +#define bfin_read_CAN_MB20_DATA0() bfin_read16(CAN_MB20_DATA0) +#define bfin_write_CAN_MB20_DATA0(val) bfin_write16(CAN_MB20_DATA0,val) + +#define bfin_read_CAN_MB21_ID1() bfin_read16(CAN_MB21_ID1) +#define bfin_write_CAN_MB21_ID1(val) bfin_write16(CAN_MB21_ID1,val) +#define bfin_read_CAN_MB21_ID0() bfin_read16(CAN_MB21_ID0) +#define bfin_write_CAN_MB21_ID0(val) bfin_write16(CAN_MB21_ID0,val) +#define bfin_read_CAN_MB21_TIMESTAMP() bfin_read16(CAN_MB21_TIMESTAMP) +#define bfin_write_CAN_MB21_TIMESTAMP(val) bfin_write16(CAN_MB21_TIMESTAMP,val) +#define bfin_read_CAN_MB21_LENGTH() bfin_read16(CAN_MB21_LENGTH) +#define bfin_write_CAN_MB21_LENGTH(val) bfin_write16(CAN_MB21_LENGTH,val) +#define bfin_read_CAN_MB21_DATA3() bfin_read16(CAN_MB21_DATA3) +#define bfin_write_CAN_MB21_DATA3(val) bfin_write16(CAN_MB21_DATA3,val) +#define bfin_read_CAN_MB21_DATA2() bfin_read16(CAN_MB21_DATA2) +#define bfin_write_CAN_MB21_DATA2(val) bfin_write16(CAN_MB21_DATA2,val) +#define bfin_read_CAN_MB21_DATA1() bfin_read16(CAN_MB21_DATA1) +#define bfin_write_CAN_MB21_DATA1(val) bfin_write16(CAN_MB21_DATA1,val) +#define bfin_read_CAN_MB21_DATA0() bfin_read16(CAN_MB21_DATA0) +#define bfin_write_CAN_MB21_DATA0(val) bfin_write16(CAN_MB21_DATA0,val) + +#define bfin_read_CAN_MB22_ID1() bfin_read16(CAN_MB22_ID1) +#define bfin_write_CAN_MB22_ID1(val) bfin_write16(CAN_MB22_ID1,val) +#define bfin_read_CAN_MB22_ID0() bfin_read16(CAN_MB22_ID0) +#define bfin_write_CAN_MB22_ID0(val) bfin_write16(CAN_MB22_ID0,val) +#define bfin_read_CAN_MB22_TIMESTAMP() bfin_read16(CAN_MB22_TIMESTAMP) +#define bfin_write_CAN_MB22_TIMESTAMP(val) bfin_write16(CAN_MB22_TIMESTAMP,val) +#define bfin_read_CAN_MB22_LENGTH() bfin_read16(CAN_MB22_LENGTH) +#define bfin_write_CAN_MB22_LENGTH(val) bfin_write16(CAN_MB22_LENGTH,val) +#define bfin_read_CAN_MB22_DATA3() bfin_read16(CAN_MB22_DATA3) +#define bfin_write_CAN_MB22_DATA3(val) bfin_write16(CAN_MB22_DATA3,val) +#define bfin_read_CAN_MB22_DATA2() bfin_read16(CAN_MB22_DATA2) +#define bfin_write_CAN_MB22_DATA2(val) bfin_write16(CAN_MB22_DATA2,val) +#define bfin_read_CAN_MB22_DATA1() bfin_read16(CAN_MB22_DATA1) +#define bfin_write_CAN_MB22_DATA1(val) bfin_write16(CAN_MB22_DATA1,val) +#define bfin_read_CAN_MB22_DATA0() bfin_read16(CAN_MB22_DATA0) +#define bfin_write_CAN_MB22_DATA0(val) bfin_write16(CAN_MB22_DATA0,val) + +#define bfin_read_CAN_MB23_ID1() bfin_read16(CAN_MB23_ID1) +#define bfin_write_CAN_MB23_ID1(val) bfin_write16(CAN_MB23_ID1,val) +#define bfin_read_CAN_MB23_ID0() bfin_read16(CAN_MB23_ID0) +#define bfin_write_CAN_MB23_ID0(val) bfin_write16(CAN_MB23_ID0,val) +#define bfin_read_CAN_MB23_TIMESTAMP() bfin_read16(CAN_MB23_TIMESTAMP) +#define bfin_write_CAN_MB23_TIMESTAMP(val) bfin_write16(CAN_MB23_TIMESTAMP,val) +#define bfin_read_CAN_MB23_LENGTH() bfin_read16(CAN_MB23_LENGTH) +#define bfin_write_CAN_MB23_LENGTH(val) bfin_write16(CAN_MB23_LENGTH,val) +#define bfin_read_CAN_MB23_DATA3() bfin_read16(CAN_MB23_DATA3) +#define bfin_write_CAN_MB23_DATA3(val) bfin_write16(CAN_MB23_DATA3,val) +#define bfin_read_CAN_MB23_DATA2() bfin_read16(CAN_MB23_DATA2) +#define bfin_write_CAN_MB23_DATA2(val) bfin_write16(CAN_MB23_DATA2,val) +#define bfin_read_CAN_MB23_DATA1() bfin_read16(CAN_MB23_DATA1) +#define 
bfin_write_CAN_MB23_DATA1(val) bfin_write16(CAN_MB23_DATA1,val) +#define bfin_read_CAN_MB23_DATA0() bfin_read16(CAN_MB23_DATA0) +#define bfin_write_CAN_MB23_DATA0(val) bfin_write16(CAN_MB23_DATA0,val) + +#define bfin_read_CAN_MB24_ID1() bfin_read16(CAN_MB24_ID1) +#define bfin_write_CAN_MB24_ID1(val) bfin_write16(CAN_MB24_ID1,val) +#define bfin_read_CAN_MB24_ID0() bfin_read16(CAN_MB24_ID0) +#define bfin_write_CAN_MB24_ID0(val) bfin_write16(CAN_MB24_ID0,val) +#define bfin_read_CAN_MB24_TIMESTAMP() bfin_read16(CAN_MB24_TIMESTAMP) +#define bfin_write_CAN_MB24_TIMESTAMP(val) bfin_write16(CAN_MB24_TIMESTAMP,val) +#define bfin_read_CAN_MB24_LENGTH() bfin_read16(CAN_MB24_LENGTH) +#define bfin_write_CAN_MB24_LENGTH(val) bfin_write16(CAN_MB24_LENGTH,val) +#define bfin_read_CAN_MB24_DATA3() bfin_read16(CAN_MB24_DATA3) +#define bfin_write_CAN_MB24_DATA3(val) bfin_write16(CAN_MB24_DATA3,val) +#define bfin_read_CAN_MB24_DATA2() bfin_read16(CAN_MB24_DATA2) +#define bfin_write_CAN_MB24_DATA2(val) bfin_write16(CAN_MB24_DATA2,val) +#define bfin_read_CAN_MB24_DATA1() bfin_read16(CAN_MB24_DATA1) +#define bfin_write_CAN_MB24_DATA1(val) bfin_write16(CAN_MB24_DATA1,val) +#define bfin_read_CAN_MB24_DATA0() bfin_read16(CAN_MB24_DATA0) +#define bfin_write_CAN_MB24_DATA0(val) bfin_write16(CAN_MB24_DATA0,val) + +#define bfin_read_CAN_MB25_ID1() bfin_read16(CAN_MB25_ID1) +#define bfin_write_CAN_MB25_ID1(val) bfin_write16(CAN_MB25_ID1,val) +#define bfin_read_CAN_MB25_ID0() bfin_read16(CAN_MB25_ID0) +#define bfin_write_CAN_MB25_ID0(val) bfin_write16(CAN_MB25_ID0,val) +#define bfin_read_CAN_MB25_TIMESTAMP() bfin_read16(CAN_MB25_TIMESTAMP) +#define bfin_write_CAN_MB25_TIMESTAMP(val) bfin_write16(CAN_MB25_TIMESTAMP,val) +#define bfin_read_CAN_MB25_LENGTH() bfin_read16(CAN_MB25_LENGTH) +#define bfin_write_CAN_MB25_LENGTH(val) bfin_write16(CAN_MB25_LENGTH,val) +#define bfin_read_CAN_MB25_DATA3() bfin_read16(CAN_MB25_DATA3) +#define bfin_write_CAN_MB25_DATA3(val) bfin_write16(CAN_MB25_DATA3,val) +#define bfin_read_CAN_MB25_DATA2() bfin_read16(CAN_MB25_DATA2) +#define bfin_write_CAN_MB25_DATA2(val) bfin_write16(CAN_MB25_DATA2,val) +#define bfin_read_CAN_MB25_DATA1() bfin_read16(CAN_MB25_DATA1) +#define bfin_write_CAN_MB25_DATA1(val) bfin_write16(CAN_MB25_DATA1,val) +#define bfin_read_CAN_MB25_DATA0() bfin_read16(CAN_MB25_DATA0) +#define bfin_write_CAN_MB25_DATA0(val) bfin_write16(CAN_MB25_DATA0,val) + +#define bfin_read_CAN_MB26_ID1() bfin_read16(CAN_MB26_ID1) +#define bfin_write_CAN_MB26_ID1(val) bfin_write16(CAN_MB26_ID1,val) +#define bfin_read_CAN_MB26_ID0() bfin_read16(CAN_MB26_ID0) +#define bfin_write_CAN_MB26_ID0(val) bfin_write16(CAN_MB26_ID0,val) +#define bfin_read_CAN_MB26_TIMESTAMP() bfin_read16(CAN_MB26_TIMESTAMP) +#define bfin_write_CAN_MB26_TIMESTAMP(val) bfin_write16(CAN_MB26_TIMESTAMP,val) +#define bfin_read_CAN_MB26_LENGTH() bfin_read16(CAN_MB26_LENGTH) +#define bfin_write_CAN_MB26_LENGTH(val) bfin_write16(CAN_MB26_LENGTH,val) +#define bfin_read_CAN_MB26_DATA3() bfin_read16(CAN_MB26_DATA3) +#define bfin_write_CAN_MB26_DATA3(val) bfin_write16(CAN_MB26_DATA3,val) +#define bfin_read_CAN_MB26_DATA2() bfin_read16(CAN_MB26_DATA2) +#define bfin_write_CAN_MB26_DATA2(val) bfin_write16(CAN_MB26_DATA2,val) +#define bfin_read_CAN_MB26_DATA1() bfin_read16(CAN_MB26_DATA1) +#define bfin_write_CAN_MB26_DATA1(val) bfin_write16(CAN_MB26_DATA1,val) +#define bfin_read_CAN_MB26_DATA0() bfin_read16(CAN_MB26_DATA0) +#define bfin_write_CAN_MB26_DATA0(val) bfin_write16(CAN_MB26_DATA0,val) + +#define bfin_read_CAN_MB27_ID1() 
bfin_read16(CAN_MB27_ID1) +#define bfin_write_CAN_MB27_ID1(val) bfin_write16(CAN_MB27_ID1,val) +#define bfin_read_CAN_MB27_ID0() bfin_read16(CAN_MB27_ID0) +#define bfin_write_CAN_MB27_ID0(val) bfin_write16(CAN_MB27_ID0,val) +#define bfin_read_CAN_MB27_TIMESTAMP() bfin_read16(CAN_MB27_TIMESTAMP) +#define bfin_write_CAN_MB27_TIMESTAMP(val) bfin_write16(CAN_MB27_TIMESTAMP,val) +#define bfin_read_CAN_MB27_LENGTH() bfin_read16(CAN_MB27_LENGTH) +#define bfin_write_CAN_MB27_LENGTH(val) bfin_write16(CAN_MB27_LENGTH,val) +#define bfin_read_CAN_MB27_DATA3() bfin_read16(CAN_MB27_DATA3) +#define bfin_write_CAN_MB27_DATA3(val) bfin_write16(CAN_MB27_DATA3,val) +#define bfin_read_CAN_MB27_DATA2() bfin_read16(CAN_MB27_DATA2) +#define bfin_write_CAN_MB27_DATA2(val) bfin_write16(CAN_MB27_DATA2,val) +#define bfin_read_CAN_MB27_DATA1() bfin_read16(CAN_MB27_DATA1) +#define bfin_write_CAN_MB27_DATA1(val) bfin_write16(CAN_MB27_DATA1,val) +#define bfin_read_CAN_MB27_DATA0() bfin_read16(CAN_MB27_DATA0) +#define bfin_write_CAN_MB27_DATA0(val) bfin_write16(CAN_MB27_DATA0,val) + +#define bfin_read_CAN_MB28_ID1() bfin_read16(CAN_MB28_ID1) +#define bfin_write_CAN_MB28_ID1(val) bfin_write16(CAN_MB28_ID1,val) +#define bfin_read_CAN_MB28_ID0() bfin_read16(CAN_MB28_ID0) +#define bfin_write_CAN_MB28_ID0(val) bfin_write16(CAN_MB28_ID0,val) +#define bfin_read_CAN_MB28_TIMESTAMP() bfin_read16(CAN_MB28_TIMESTAMP) +#define bfin_write_CAN_MB28_TIMESTAMP(val) bfin_write16(CAN_MB28_TIMESTAMP,val) +#define bfin_read_CAN_MB28_LENGTH() bfin_read16(CAN_MB28_LENGTH) +#define bfin_write_CAN_MB28_LENGTH(val) bfin_write16(CAN_MB28_LENGTH,val) +#define bfin_read_CAN_MB28_DATA3() bfin_read16(CAN_MB28_DATA3) +#define bfin_write_CAN_MB28_DATA3(val) bfin_write16(CAN_MB28_DATA3,val) +#define bfin_read_CAN_MB28_DATA2() bfin_read16(CAN_MB28_DATA2) +#define bfin_write_CAN_MB28_DATA2(val) bfin_write16(CAN_MB28_DATA2,val) +#define bfin_read_CAN_MB28_DATA1() bfin_read16(CAN_MB28_DATA1) +#define bfin_write_CAN_MB28_DATA1(val) bfin_write16(CAN_MB28_DATA1,val) +#define bfin_read_CAN_MB28_DATA0() bfin_read16(CAN_MB28_DATA0) +#define bfin_write_CAN_MB28_DATA0(val) bfin_write16(CAN_MB28_DATA0,val) + +#define bfin_read_CAN_MB29_ID1() bfin_read16(CAN_MB29_ID1) +#define bfin_write_CAN_MB29_ID1(val) bfin_write16(CAN_MB29_ID1,val) +#define bfin_read_CAN_MB29_ID0() bfin_read16(CAN_MB29_ID0) +#define bfin_write_CAN_MB29_ID0(val) bfin_write16(CAN_MB29_ID0,val) +#define bfin_read_CAN_MB29_TIMESTAMP() bfin_read16(CAN_MB29_TIMESTAMP) +#define bfin_write_CAN_MB29_TIMESTAMP(val) bfin_write16(CAN_MB29_TIMESTAMP,val) +#define bfin_read_CAN_MB29_LENGTH() bfin_read16(CAN_MB29_LENGTH) +#define bfin_write_CAN_MB29_LENGTH(val) bfin_write16(CAN_MB29_LENGTH,val) +#define bfin_read_CAN_MB29_DATA3() bfin_read16(CAN_MB29_DATA3) +#define bfin_write_CAN_MB29_DATA3(val) bfin_write16(CAN_MB29_DATA3,val) +#define bfin_read_CAN_MB29_DATA2() bfin_read16(CAN_MB29_DATA2) +#define bfin_write_CAN_MB29_DATA2(val) bfin_write16(CAN_MB29_DATA2,val) +#define bfin_read_CAN_MB29_DATA1() bfin_read16(CAN_MB29_DATA1) +#define bfin_write_CAN_MB29_DATA1(val) bfin_write16(CAN_MB29_DATA1,val) +#define bfin_read_CAN_MB29_DATA0() bfin_read16(CAN_MB29_DATA0) +#define bfin_write_CAN_MB29_DATA0(val) bfin_write16(CAN_MB29_DATA0,val) + +#define bfin_read_CAN_MB30_ID1() bfin_read16(CAN_MB30_ID1) +#define bfin_write_CAN_MB30_ID1(val) bfin_write16(CAN_MB30_ID1,val) +#define bfin_read_CAN_MB30_ID0() bfin_read16(CAN_MB30_ID0) +#define bfin_write_CAN_MB30_ID0(val) bfin_write16(CAN_MB30_ID0,val) +#define 
bfin_read_CAN_MB30_TIMESTAMP() bfin_read16(CAN_MB30_TIMESTAMP) +#define bfin_write_CAN_MB30_TIMESTAMP(val) bfin_write16(CAN_MB30_TIMESTAMP,val) +#define bfin_read_CAN_MB30_LENGTH() bfin_read16(CAN_MB30_LENGTH) +#define bfin_write_CAN_MB30_LENGTH(val) bfin_write16(CAN_MB30_LENGTH,val) +#define bfin_read_CAN_MB30_DATA3() bfin_read16(CAN_MB30_DATA3) +#define bfin_write_CAN_MB30_DATA3(val) bfin_write16(CAN_MB30_DATA3,val) +#define bfin_read_CAN_MB30_DATA2() bfin_read16(CAN_MB30_DATA2) +#define bfin_write_CAN_MB30_DATA2(val) bfin_write16(CAN_MB30_DATA2,val) +#define bfin_read_CAN_MB30_DATA1() bfin_read16(CAN_MB30_DATA1) +#define bfin_write_CAN_MB30_DATA1(val) bfin_write16(CAN_MB30_DATA1,val) +#define bfin_read_CAN_MB30_DATA0() bfin_read16(CAN_MB30_DATA0) +#define bfin_write_CAN_MB30_DATA0(val) bfin_write16(CAN_MB30_DATA0,val) + +#define bfin_read_CAN_MB31_ID1() bfin_read16(CAN_MB31_ID1) +#define bfin_write_CAN_MB31_ID1(val) bfin_write16(CAN_MB31_ID1,val) +#define bfin_read_CAN_MB31_ID0() bfin_read16(CAN_MB31_ID0) +#define bfin_write_CAN_MB31_ID0(val) bfin_write16(CAN_MB31_ID0,val) +#define bfin_read_CAN_MB31_TIMESTAMP() bfin_read16(CAN_MB31_TIMESTAMP) +#define bfin_write_CAN_MB31_TIMESTAMP(val) bfin_write16(CAN_MB31_TIMESTAMP,val) +#define bfin_read_CAN_MB31_LENGTH() bfin_read16(CAN_MB31_LENGTH) +#define bfin_write_CAN_MB31_LENGTH(val) bfin_write16(CAN_MB31_LENGTH,val) +#define bfin_read_CAN_MB31_DATA3() bfin_read16(CAN_MB31_DATA3) +#define bfin_write_CAN_MB31_DATA3(val) bfin_write16(CAN_MB31_DATA3,val) +#define bfin_read_CAN_MB31_DATA2() bfin_read16(CAN_MB31_DATA2) +#define bfin_write_CAN_MB31_DATA2(val) bfin_write16(CAN_MB31_DATA2,val) +#define bfin_read_CAN_MB31_DATA1() bfin_read16(CAN_MB31_DATA1) +#define bfin_write_CAN_MB31_DATA1(val) bfin_write16(CAN_MB31_DATA1,val) +#define bfin_read_CAN_MB31_DATA0() bfin_read16(CAN_MB31_DATA0) +#define bfin_write_CAN_MB31_DATA0(val) bfin_write16(CAN_MB31_DATA0,val) + +/* CAN Mailbox Area Macros */ +#define bfin_read_CAN_MB_ID1(x)() bfin_read16(CAN_MB_ID1(x)) +#define bfin_write_CAN_MB_ID1(x)(val) bfin_write16(CAN_MB_ID1(x),val) +#define bfin_read_CAN_MB_ID0(x)() bfin_read16(CAN_MB_ID0(x)) +#define bfin_write_CAN_MB_ID0(x)(val) bfin_write16(CAN_MB_ID0(x),val) +#define bfin_read_CAN_MB_TIMESTAMP(x)() bfin_read16(CAN_MB_TIMESTAMP(x)) +#define bfin_write_CAN_MB_TIMESTAMP(x)(val) bfin_write16(CAN_MB_TIMESTAMP(x),val) +#define bfin_read_CAN_MB_LENGTH(x)() bfin_read16(CAN_MB_LENGTH(x)) +#define bfin_write_CAN_MB_LENGTH(x)(val) bfin_write16(CAN_MB_LENGTH(x),val) +#define bfin_read_CAN_MB_DATA3(x)() bfin_read16(CAN_MB_DATA3(x)) +#define bfin_write_CAN_MB_DATA3(x)(val) bfin_write16(CAN_MB_DATA3(x),val) +#define bfin_read_CAN_MB_DATA2(x)() bfin_read16(CAN_MB_DATA2(x)) +#define bfin_write_CAN_MB_DATA2(x)(val) bfin_write16(CAN_MB_DATA2(x),val) +#define bfin_read_CAN_MB_DATA1(x)() bfin_read16(CAN_MB_DATA1(x)) +#define bfin_write_CAN_MB_DATA1(x)(val) bfin_write16(CAN_MB_DATA1(x),val) +#define bfin_read_CAN_MB_DATA0(x)() bfin_read16(CAN_MB_DATA0(x)) +#define bfin_write_CAN_MB_DATA0(x)(val) bfin_write16(CAN_MB_DATA0(x),val) + +/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */ +#define bfin_read_PORTF_FER() bfin_read16(PORTF_FER) +#define bfin_write_PORTF_FER(val) bfin_write16(PORTF_FER,val) +#define bfin_read_PORTG_FER() bfin_read16(PORTG_FER) +#define bfin_write_PORTG_FER(val) bfin_write16(PORTG_FER,val) +#define bfin_read_PORTH_FER() bfin_read16(PORTH_FER) +#define bfin_write_PORTH_FER(val) bfin_write16(PORTH_FER,val) +#define bfin_read_PORT_MUX() 
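/*
 * Editorial sketch, not part of the patch: pulling a received frame back out
 * of a mailbox with the accessors above.  Only the raw register reads are
 * shown; decoding the identifier from ID1/ID0 and acknowledging the receive
 * interrupt are left to the hardware reference manual.
 */
static inline void can_mb00_read_example(unsigned short data[4],
                                         unsigned short *dlc,
                                         unsigned short *stamp)
{
        data[0] = bfin_read_CAN_MB00_DATA0();
        data[1] = bfin_read_CAN_MB00_DATA1();
        data[2] = bfin_read_CAN_MB00_DATA2();
        data[3] = bfin_read_CAN_MB00_DATA3();
        *dlc    = bfin_read_CAN_MB00_LENGTH();
        *stamp  = bfin_read_CAN_MB00_TIMESTAMP();
}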
bfin_read16(BFIN_PORT_MUX) +#define bfin_write_PORT_MUX(val) bfin_write16(BFIN_PORT_MUX,val) + +/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */ +#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL) +#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL,val) +#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT) +#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT,val) +#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT) +#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT,val) +#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT) +#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT,val) +#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW) +#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW,val) +#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT) +#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT,val) +#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT) +#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT,val) + +#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL) +#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL,val) +#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT) +#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT,val) +#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT) +#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT,val) +#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT) +#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT,val) +#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW) +#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW,val) +#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT) +#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT,val) +#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT) +#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT,val) + +#endif /* _CDEF_BF534_H */ diff --git a/include/asm-blackfin/mach-bf537/cdefBF537.h b/include/asm-blackfin/mach-bf537/cdefBF537.h new file mode 100644 index 000000000000..932a1b6b5d14 --- /dev/null +++ b/include/asm-blackfin/mach-bf537/cdefBF537.h @@ -0,0 +1,209 @@ +/* + * File: include/asm-blackfin/mach-bf537/cdefBF537.h + * Based on: + * Author: + * + * Created: + * Description: + * System MMR Register Map + * Rev: + * + * Modified: + * + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _CDEF_BF537_H +#define _CDEF_BF537_H + +/* Include MMRs Common to BF534 */ +#include "cdefBF534.h" + +/* Include all Core registers and bit definitions */ +#include "defBF537.h" + +/* Include Macro "Defines" For EMAC (Unique to BF536/BF537 */ +/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */ +#define pEMAC_OPMODE ((volatile unsigned long *)EMAC_OPMODE) +#define bfin_read_EMAC_OPMODE() bfin_read32(EMAC_OPMODE) +#define bfin_write_EMAC_OPMODE(val) bfin_write32(EMAC_OPMODE,val) +#define bfin_read_EMAC_ADDRLO() bfin_read32(EMAC_ADDRLO) +#define bfin_write_EMAC_ADDRLO(val) bfin_write32(EMAC_ADDRLO,val) +#define bfin_read_EMAC_ADDRHI() bfin_read32(EMAC_ADDRHI) +#define bfin_write_EMAC_ADDRHI(val) bfin_write32(EMAC_ADDRHI,val) +#define bfin_read_EMAC_HASHLO() bfin_read32(EMAC_HASHLO) +#define bfin_write_EMAC_HASHLO(val) bfin_write32(EMAC_HASHLO,val) +#define bfin_read_EMAC_HASHHI() bfin_read32(EMAC_HASHHI) +#define bfin_write_EMAC_HASHHI(val) bfin_write32(EMAC_HASHHI,val) +#define bfin_read_EMAC_STAADD() bfin_read32(EMAC_STAADD) +#define bfin_write_EMAC_STAADD(val) bfin_write32(EMAC_STAADD,val) +#define bfin_read_EMAC_STADAT() bfin_read32(EMAC_STADAT) +#define bfin_write_EMAC_STADAT(val) bfin_write32(EMAC_STADAT,val) +#define bfin_read_EMAC_FLC() bfin_read32(EMAC_FLC) +#define bfin_write_EMAC_FLC(val) bfin_write32(EMAC_FLC,val) +#define bfin_read_EMAC_VLAN1() bfin_read32(EMAC_VLAN1) +#define bfin_write_EMAC_VLAN1(val) bfin_write32(EMAC_VLAN1,val) +#define bfin_read_EMAC_VLAN2() bfin_read32(EMAC_VLAN2) +#define bfin_write_EMAC_VLAN2(val) bfin_write32(EMAC_VLAN2,val) +#define bfin_read_EMAC_WKUP_CTL() bfin_read32(EMAC_WKUP_CTL) +#define bfin_write_EMAC_WKUP_CTL(val) bfin_write32(EMAC_WKUP_CTL,val) +#define bfin_read_EMAC_WKUP_FFMSK0() bfin_read32(EMAC_WKUP_FFMSK0) +#define bfin_write_EMAC_WKUP_FFMSK0(val) bfin_write32(EMAC_WKUP_FFMSK0,val) +#define bfin_read_EMAC_WKUP_FFMSK1() bfin_read32(EMAC_WKUP_FFMSK1) +#define bfin_write_EMAC_WKUP_FFMSK1(val) bfin_write32(EMAC_WKUP_FFMSK1,val) +#define bfin_read_EMAC_WKUP_FFMSK2() bfin_read32(EMAC_WKUP_FFMSK2) +#define bfin_write_EMAC_WKUP_FFMSK2(val) bfin_write32(EMAC_WKUP_FFMSK2,val) +#define bfin_read_EMAC_WKUP_FFMSK3() bfin_read32(EMAC_WKUP_FFMSK3) +#define bfin_write_EMAC_WKUP_FFMSK3(val) bfin_write32(EMAC_WKUP_FFMSK3,val) +#define bfin_read_EMAC_WKUP_FFCMD() bfin_read32(EMAC_WKUP_FFCMD) +#define bfin_write_EMAC_WKUP_FFCMD(val) bfin_write32(EMAC_WKUP_FFCMD,val) +#define bfin_read_EMAC_WKUP_FFOFF() bfin_read32(EMAC_WKUP_FFOFF) +#define bfin_write_EMAC_WKUP_FFOFF(val) bfin_write32(EMAC_WKUP_FFOFF,val) +#define bfin_read_EMAC_WKUP_FFCRC0() bfin_read32(EMAC_WKUP_FFCRC0) +#define bfin_write_EMAC_WKUP_FFCRC0(val) bfin_write32(EMAC_WKUP_FFCRC0,val) +#define bfin_read_EMAC_WKUP_FFCRC1() bfin_read32(EMAC_WKUP_FFCRC1) +#define bfin_write_EMAC_WKUP_FFCRC1(val) bfin_write32(EMAC_WKUP_FFCRC1,val) + +#define pEMAC_SYSCTL ((volatile unsigned long *)EMAC_SYSCTL) +#define bfin_read_EMAC_SYSCTL() bfin_read32(EMAC_SYSCTL) +#define bfin_write_EMAC_SYSCTL(val) bfin_write32(EMAC_SYSCTL,val) +#define bfin_read_EMAC_SYSTAT() bfin_read32(EMAC_SYSTAT) +#define bfin_write_EMAC_SYSTAT(val) bfin_write32(EMAC_SYSTAT,val) +#define bfin_read_EMAC_RX_STAT() bfin_read32(EMAC_RX_STAT) +#define bfin_write_EMAC_RX_STAT(val) bfin_write32(EMAC_RX_STAT,val) +#define bfin_read_EMAC_RX_STKY() bfin_read32(EMAC_RX_STKY) +#define bfin_write_EMAC_RX_STKY(val) bfin_write32(EMAC_RX_STKY,val) +#define bfin_read_EMAC_RX_IRQE() bfin_read32(EMAC_RX_IRQE) +#define 
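/*
 * Editorial sketch, not part of the patch: reading the station MAC address
 * back through the 32-bit EMAC accessors above.  The byte ordering assumed
 * here (EMAC_ADDRLO carrying the first four octets, the low half of
 * EMAC_ADDRHI the last two) is for illustration only.
 */
static inline void emac_get_mac_example(unsigned char *mac)
{
        unsigned int lo = bfin_read_EMAC_ADDRLO();
        unsigned int hi = bfin_read_EMAC_ADDRHI();

        mac[0] = lo & 0xFF;
        mac[1] = (lo >> 8) & 0xFF;
        mac[2] = (lo >> 16) & 0xFF;
        mac[3] = (lo >> 24) & 0xFF;
        mac[4] = hi & 0xFF;
        mac[5] = (hi >> 8) & 0xFF;
}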
bfin_write_EMAC_RX_IRQE(val) bfin_write32(EMAC_RX_IRQE,val) +#define bfin_read_EMAC_TX_STAT() bfin_read32(EMAC_TX_STAT) +#define bfin_write_EMAC_TX_STAT(val) bfin_write32(EMAC_TX_STAT,val) +#define bfin_read_EMAC_TX_STKY() bfin_read32(EMAC_TX_STKY) +#define bfin_write_EMAC_TX_STKY(val) bfin_write32(EMAC_TX_STKY,val) +#define bfin_read_EMAC_TX_IRQE() bfin_read32(EMAC_TX_IRQE) +#define bfin_write_EMAC_TX_IRQE(val) bfin_write32(EMAC_TX_IRQE,val) + +#define bfin_read_EMAC_MMC_CTL() bfin_read32(EMAC_MMC_CTL) +#define bfin_write_EMAC_MMC_CTL(val) bfin_write32(EMAC_MMC_CTL,val) +#define bfin_read_EMAC_MMC_RIRQS() bfin_read32(EMAC_MMC_RIRQS) +#define bfin_write_EMAC_MMC_RIRQS(val) bfin_write32(EMAC_MMC_RIRQS,val) +#define bfin_read_EMAC_MMC_RIRQE() bfin_read32(EMAC_MMC_RIRQE) +#define bfin_write_EMAC_MMC_RIRQE(val) bfin_write32(EMAC_MMC_RIRQE,val) +#define bfin_read_EMAC_MMC_TIRQS() bfin_read32(EMAC_MMC_TIRQS) +#define bfin_write_EMAC_MMC_TIRQS(val) bfin_write32(EMAC_MMC_TIRQS,val) +#define bfin_read_EMAC_MMC_TIRQE() bfin_read32(EMAC_MMC_TIRQE) +#define bfin_write_EMAC_MMC_TIRQE(val) bfin_write32(EMAC_MMC_TIRQE,val) + +#define bfin_read_EMAC_RXC_OK() bfin_read32(EMAC_RXC_OK) +#define bfin_write_EMAC_RXC_OK(val) bfin_write32(EMAC_RXC_OK,val) +#define bfin_read_EMAC_RXC_FCS() bfin_read32(EMAC_RXC_FCS) +#define bfin_write_EMAC_RXC_FCS(val) bfin_write32(EMAC_RXC_FCS,val) +#define bfin_read_EMAC_RXC_ALIGN() bfin_read32(EMAC_RXC_ALIGN) +#define bfin_write_EMAC_RXC_ALIGN(val) bfin_write32(EMAC_RXC_ALIGN,val) +#define bfin_read_EMAC_RXC_OCTET() bfin_read32(EMAC_RXC_OCTET) +#define bfin_write_EMAC_RXC_OCTET(val) bfin_write32(EMAC_RXC_OCTET,val) +#define bfin_read_EMAC_RXC_DMAOVF() bfin_read32(EMAC_RXC_DMAOVF) +#define bfin_write_EMAC_RXC_DMAOVF(val) bfin_write32(EMAC_RXC_DMAOVF,val) +#define bfin_read_EMAC_RXC_UNICST() bfin_read32(EMAC_RXC_UNICST) +#define bfin_write_EMAC_RXC_UNICST(val) bfin_write32(EMAC_RXC_UNICST,val) +#define bfin_read_EMAC_RXC_MULTI() bfin_read32(EMAC_RXC_MULTI) +#define bfin_write_EMAC_RXC_MULTI(val) bfin_write32(EMAC_RXC_MULTI,val) +#define bfin_read_EMAC_RXC_BROAD() bfin_read32(EMAC_RXC_BROAD) +#define bfin_write_EMAC_RXC_BROAD(val) bfin_write32(EMAC_RXC_BROAD,val) +#define bfin_read_EMAC_RXC_LNERRI() bfin_read32(EMAC_RXC_LNERRI) +#define bfin_write_EMAC_RXC_LNERRI(val) bfin_write32(EMAC_RXC_LNERRI,val) +#define bfin_read_EMAC_RXC_LNERRO() bfin_read32(EMAC_RXC_LNERRO) +#define bfin_write_EMAC_RXC_LNERRO(val) bfin_write32(EMAC_RXC_LNERRO,val) +#define bfin_read_EMAC_RXC_LONG() bfin_read32(EMAC_RXC_LONG) +#define bfin_write_EMAC_RXC_LONG(val) bfin_write32(EMAC_RXC_LONG,val) +#define bfin_read_EMAC_RXC_MACCTL() bfin_read32(EMAC_RXC_MACCTL) +#define bfin_write_EMAC_RXC_MACCTL(val) bfin_write32(EMAC_RXC_MACCTL,val) +#define bfin_read_EMAC_RXC_OPCODE() bfin_read32(EMAC_RXC_OPCODE) +#define bfin_write_EMAC_RXC_OPCODE(val) bfin_write32(EMAC_RXC_OPCODE,val) +#define bfin_read_EMAC_RXC_PAUSE() bfin_read32(EMAC_RXC_PAUSE) +#define bfin_write_EMAC_RXC_PAUSE(val) bfin_write32(EMAC_RXC_PAUSE,val) +#define bfin_read_EMAC_RXC_ALLFRM() bfin_read32(EMAC_RXC_ALLFRM) +#define bfin_write_EMAC_RXC_ALLFRM(val) bfin_write32(EMAC_RXC_ALLFRM,val) +#define bfin_read_EMAC_RXC_ALLOCT() bfin_read32(EMAC_RXC_ALLOCT) +#define bfin_write_EMAC_RXC_ALLOCT(val) bfin_write32(EMAC_RXC_ALLOCT,val) +#define bfin_read_EMAC_RXC_TYPED() bfin_read32(EMAC_RXC_TYPED) +#define bfin_write_EMAC_RXC_TYPED(val) bfin_write32(EMAC_RXC_TYPED,val) +#define bfin_read_EMAC_RXC_SHORT() bfin_read32(EMAC_RXC_SHORT) +#define 
bfin_write_EMAC_RXC_SHORT(val) bfin_write32(EMAC_RXC_SHORT,val) +#define bfin_read_EMAC_RXC_EQ64() bfin_read32(EMAC_RXC_EQ64) +#define bfin_write_EMAC_RXC_EQ64(val) bfin_write32(EMAC_RXC_EQ64,val) +#define pEMAC_RXC_LT128 ((volatile unsigned long *)EMAC_RXC_LT128) +#define bfin_read_EMAC_RXC_LT128() bfin_read32(EMAC_RXC_LT128) +#define bfin_write_EMAC_RXC_LT128(val) bfin_write32(EMAC_RXC_LT128,val) +#define bfin_read_EMAC_RXC_LT256() bfin_read32(EMAC_RXC_LT256) +#define bfin_write_EMAC_RXC_LT256(val) bfin_write32(EMAC_RXC_LT256,val) +#define bfin_read_EMAC_RXC_LT512() bfin_read32(EMAC_RXC_LT512) +#define bfin_write_EMAC_RXC_LT512(val) bfin_write32(EMAC_RXC_LT512,val) +#define bfin_read_EMAC_RXC_LT1024() bfin_read32(EMAC_RXC_LT1024) +#define bfin_write_EMAC_RXC_LT1024(val) bfin_write32(EMAC_RXC_LT1024,val) +#define bfin_read_EMAC_RXC_GE1024() bfin_read32(EMAC_RXC_GE1024) +#define bfin_write_EMAC_RXC_GE1024(val) bfin_write32(EMAC_RXC_GE1024,val) + +#define bfin_read_EMAC_TXC_OK() bfin_read32(EMAC_TXC_OK) +#define bfin_write_EMAC_TXC_OK(val) bfin_write32(EMAC_TXC_OK,val) +#define bfin_read_EMAC_TXC_1COL() bfin_read32(EMAC_TXC_1COL) +#define bfin_write_EMAC_TXC_1COL(val) bfin_write32(EMAC_TXC_1COL,val) +#define bfin_read_EMAC_TXC_GT1COL() bfin_read32(EMAC_TXC_GT1COL) +#define bfin_write_EMAC_TXC_GT1COL(val) bfin_write32(EMAC_TXC_GT1COL,val) +#define bfin_read_EMAC_TXC_OCTET() bfin_read32(EMAC_TXC_OCTET) +#define bfin_write_EMAC_TXC_OCTET(val) bfin_write32(EMAC_TXC_OCTET,val) +#define bfin_read_EMAC_TXC_DEFER() bfin_read32(EMAC_TXC_DEFER) +#define bfin_write_EMAC_TXC_DEFER(val) bfin_write32(EMAC_TXC_DEFER,val) +#define bfin_read_EMAC_TXC_LATECL() bfin_read32(EMAC_TXC_LATECL) +#define bfin_write_EMAC_TXC_LATECL(val) bfin_write32(EMAC_TXC_LATECL,val) +#define bfin_read_EMAC_TXC_XS_COL() bfin_read32(EMAC_TXC_XS_COL) +#define bfin_write_EMAC_TXC_XS_COL(val) bfin_write32(EMAC_TXC_XS_COL,val) +#define bfin_read_EMAC_TXC_DMAUND() bfin_read32(EMAC_TXC_DMAUND) +#define bfin_write_EMAC_TXC_DMAUND(val) bfin_write32(EMAC_TXC_DMAUND,val) +#define bfin_read_EMAC_TXC_CRSERR() bfin_read32(EMAC_TXC_CRSERR) +#define bfin_write_EMAC_TXC_CRSERR(val) bfin_write32(EMAC_TXC_CRSERR,val) +#define bfin_read_EMAC_TXC_UNICST() bfin_read32(EMAC_TXC_UNICST) +#define bfin_write_EMAC_TXC_UNICST(val) bfin_write32(EMAC_TXC_UNICST,val) +#define bfin_read_EMAC_TXC_MULTI() bfin_read32(EMAC_TXC_MULTI) +#define bfin_write_EMAC_TXC_MULTI(val) bfin_write32(EMAC_TXC_MULTI,val) +#define bfin_read_EMAC_TXC_BROAD() bfin_read32(EMAC_TXC_BROAD) +#define bfin_write_EMAC_TXC_BROAD(val) bfin_write32(EMAC_TXC_BROAD,val) +#define bfin_read_EMAC_TXC_XS_DFR() bfin_read32(EMAC_TXC_XS_DFR) +#define bfin_write_EMAC_TXC_XS_DFR(val) bfin_write32(EMAC_TXC_XS_DFR,val) +#define bfin_read_EMAC_TXC_MACCTL() bfin_read32(EMAC_TXC_MACCTL) +#define bfin_write_EMAC_TXC_MACCTL(val) bfin_write32(EMAC_TXC_MACCTL,val) +#define bfin_read_EMAC_TXC_ALLFRM() bfin_read32(EMAC_TXC_ALLFRM) +#define bfin_write_EMAC_TXC_ALLFRM(val) bfin_write32(EMAC_TXC_ALLFRM,val) +#define bfin_read_EMAC_TXC_ALLOCT() bfin_read32(EMAC_TXC_ALLOCT) +#define bfin_write_EMAC_TXC_ALLOCT(val) bfin_write32(EMAC_TXC_ALLOCT,val) +#define bfin_read_EMAC_TXC_EQ64() bfin_read32(EMAC_TXC_EQ64) +#define bfin_write_EMAC_TXC_EQ64(val) bfin_write32(EMAC_TXC_EQ64,val) +#define bfin_read_EMAC_TXC_LT128() bfin_read32(EMAC_TXC_LT128) +#define bfin_write_EMAC_TXC_LT128(val) bfin_write32(EMAC_TXC_LT128,val) +#define bfin_read_EMAC_TXC_LT256() bfin_read32(EMAC_TXC_LT256) +#define bfin_write_EMAC_TXC_LT256(val) 
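/*
 * Editorial sketch, not part of the patch: the EMAC MMRs are 32 bits wide, so
 * their accessors map onto bfin_read32()/bfin_write32().  A typical
 * read-modify-write of the operating-mode register looks like this;
 * EXAMPLE_EMAC_RE is a placeholder mask, not a define from this patch.
 */
#define EXAMPLE_EMAC_RE 0x00000001      /* placeholder receiver-enable mask */

static inline void emac_rx_enable_example(void)
{
        bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | EXAMPLE_EMAC_RE);
}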
bfin_write32(EMAC_TXC_LT256,val) +#define bfin_read_EMAC_TXC_LT512() bfin_read32(EMAC_TXC_LT512) +#define bfin_write_EMAC_TXC_LT512(val) bfin_write32(EMAC_TXC_LT512,val) +#define bfin_read_EMAC_TXC_LT1024() bfin_read32(EMAC_TXC_LT1024) +#define bfin_write_EMAC_TXC_LT1024(val) bfin_write32(EMAC_TXC_LT1024,val) +#define bfin_read_EMAC_TXC_GE1024() bfin_read32(EMAC_TXC_GE1024) +#define bfin_write_EMAC_TXC_GE1024(val) bfin_write32(EMAC_TXC_GE1024,val) +#define bfin_read_EMAC_TXC_ABORT() bfin_read32(EMAC_TXC_ABORT) +#define bfin_write_EMAC_TXC_ABORT(val) bfin_write32(EMAC_TXC_ABORT,val) + +#endif /* _CDEF_BF537_H */ diff --git a/include/asm-blackfin/mach-bf537/defBF534.h b/include/asm-blackfin/mach-bf537/defBF534.h new file mode 100644 index 000000000000..e605e9709004 --- /dev/null +++ b/include/asm-blackfin/mach-bf537/defBF534.h @@ -0,0 +1,2501 @@ +/* + * File: include/asm-blackfin/mach-bf537/cdefBF537.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _DEF_BF534_H +#define _DEF_BF534_H + +/* Include all Core registers and bit definitions */ +#include + +/************************************************************************************ +** System MMR Register Map +*************************************************************************************/ +/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */ +#define PLL_CTL 0xFFC00000 /* PLL Control Register */ +#define PLL_DIV 0xFFC00004 /* PLL Divide Register */ +#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register */ +#define PLL_STAT 0xFFC0000C /* PLL Status Register */ +#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count Register */ +#define CHIPID 0xFFC00014 /* Chip ID Register */ + +/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */ +#define SWRST 0xFFC00100 /* Software Reset Register */ +#define SYSCR 0xFFC00104 /* System Configuration Register */ +#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */ +#define SIC_IMASK 0xFFC0010C /* Interrupt Mask Register */ +#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */ +#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */ +#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */ +#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */ +#define SIC_ISR 0xFFC00120 /* Interrupt Status Register */ +#define SIC_IWR 0xFFC00124 /* Interrupt Wakeup Register */ + +/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */ +#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */ +#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */ +#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */ + +/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */ +#define RTC_STAT 0xFFC00300 /* RTC Status Register */ +#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */ +#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */ +#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */ +#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */ +#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */ +#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Alternate Macro */ + +/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */ +#define UART0_THR 0xFFC00400 /* Transmit Holding register */ +#define UART0_RBR 0xFFC00400 /* Receive Buffer register */ +#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */ +#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */ +#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */ +#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */ +#define UART0_LCR 0xFFC0040C /* Line Control Register */ +#define UART0_MCR 0xFFC00410 /* Modem Control Register */ +#define UART0_LSR 0xFFC00414 /* Line Status Register */ +#define UART0_MSR 0xFFC00418 /* Modem Status Register */ +#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */ +#define UART0_GCTL 0xFFC00424 /* Global Control Register */ + +/* SPI Controller (0xFFC00500 - 0xFFC005FF) */ +#define SPI_CTL 0xFFC00500 /* SPI Control Register */ +#define SPI_FLG 0xFFC00504 /* SPI Flag register */ +#define SPI_STAT 0xFFC00508 /* SPI Status register */ +#define SPI_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */ +#define SPI_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */ +#define SPI_BAUD 0xFFC00514 /* SPI Baud rate Register */ +#define SPI_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */ + +/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */ +#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */ +#define 
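/*
 * Editorial sketch, not part of the patch: defBF534.h only pins down MMR
 * addresses; the cdef headers above turn them into accessors.  Functionally,
 * a 16-bit accessor amounts to a volatile MMIO access of the named address,
 * roughly as below (the real bfin_read16/bfin_write16 also cover the
 * core synchronization details, so this is illustration only).
 */
static inline unsigned short mmr_read16_example(unsigned long addr)
{
        return *(volatile unsigned short *)addr;        /* e.g. addr == WDOG_CTL */
}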
TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */ +#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */ +#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */ + +#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */ +#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */ +#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */ +#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */ + +#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */ +#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */ +#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */ +#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */ + +#define TIMER3_CONFIG 0xFFC00630 /* Timer 3 Configuration Register */ +#define TIMER3_COUNTER 0xFFC00634 /* Timer 3 Counter Register */ +#define TIMER3_PERIOD 0xFFC00638 /* Timer 3 Period Register */ +#define TIMER3_WIDTH 0xFFC0063C /* Timer 3 Width Register */ + +#define TIMER4_CONFIG 0xFFC00640 /* Timer 4 Configuration Register */ +#define TIMER4_COUNTER 0xFFC00644 /* Timer 4 Counter Register */ +#define TIMER4_PERIOD 0xFFC00648 /* Timer 4 Period Register */ +#define TIMER4_WIDTH 0xFFC0064C /* Timer 4 Width Register */ + +#define TIMER5_CONFIG 0xFFC00650 /* Timer 5 Configuration Register */ +#define TIMER5_COUNTER 0xFFC00654 /* Timer 5 Counter Register */ +#define TIMER5_PERIOD 0xFFC00658 /* Timer 5 Period Register */ +#define TIMER5_WIDTH 0xFFC0065C /* Timer 5 Width Register */ + +#define TIMER6_CONFIG 0xFFC00660 /* Timer 6 Configuration Register */ +#define TIMER6_COUNTER 0xFFC00664 /* Timer 6 Counter Register */ +#define TIMER6_PERIOD 0xFFC00668 /* Timer 6 Period Register */ +#define TIMER6_WIDTH 0xFFC0066C /* Timer 6 Width Register */ + +#define TIMER7_CONFIG 0xFFC00670 /* Timer 7 Configuration Register */ +#define TIMER7_COUNTER 0xFFC00674 /* Timer 7 Counter Register */ +#define TIMER7_PERIOD 0xFFC00678 /* Timer 7 Period Register */ +#define TIMER7_WIDTH 0xFFC0067C /* Timer 7 Width Register */ + +#define TIMER_ENABLE 0xFFC00680 /* Timer Enable Register */ +#define TIMER_DISABLE 0xFFC00684 /* Timer Disable Register */ +#define TIMER_STATUS 0xFFC00688 /* Timer Status Register */ + +/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */ +#define PORTFIO 0xFFC00700 /* Port F I/O Pin State Specify Register */ +#define PORTFIO_CLEAR 0xFFC00704 /* Port F I/O Peripheral Interrupt Clear Register */ +#define PORTFIO_SET 0xFFC00708 /* Port F I/O Peripheral Interrupt Set Register */ +#define PORTFIO_TOGGLE 0xFFC0070C /* Port F I/O Pin State Toggle Register */ +#define PORTFIO_MASKA 0xFFC00710 /* Port F I/O Mask State Specify Interrupt A Register */ +#define PORTFIO_MASKA_CLEAR 0xFFC00714 /* Port F I/O Mask Disable Interrupt A Register */ +#define PORTFIO_MASKA_SET 0xFFC00718 /* Port F I/O Mask Enable Interrupt A Register */ +#define PORTFIO_MASKA_TOGGLE 0xFFC0071C /* Port F I/O Mask Toggle Enable Interrupt A Register */ +#define PORTFIO_MASKB 0xFFC00720 /* Port F I/O Mask State Specify Interrupt B Register */ +#define PORTFIO_MASKB_CLEAR 0xFFC00724 /* Port F I/O Mask Disable Interrupt B Register */ +#define PORTFIO_MASKB_SET 0xFFC00728 /* Port F I/O Mask Enable Interrupt B Register */ +#define PORTFIO_MASKB_TOGGLE 0xFFC0072C /* Port F I/O Mask Toggle Enable Interrupt B Register */ +#define PORTFIO_DIR 0xFFC00730 /* Port F I/O Direction Register */ +#define PORTFIO_POLAR 0xFFC00734 /* Port F I/O Source Polarity Register */ +#define PORTFIO_EDGE 0xFFC00738 /* Port F I/O Source Sensitivity 
Register */
+#define PORTFIO_BOTH 0xFFC0073C /* Port F I/O Set on BOTH Edges Register */
+#define PORTFIO_INEN 0xFFC00740 /* Port F I/O Input Enable Register */
+
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
+#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
+#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
+#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
+#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
+#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
+#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
+#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
+#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
+#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
+#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
+#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
+#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
+#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
+#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
+#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
+#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
+#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
+#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
+#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
+#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
+#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
+
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
+#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
+#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
+#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
+#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
+#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
+#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
+#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
+#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
+#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
+#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
+#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
+#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
+#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
+#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
+#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
+#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
+#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
+#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
+#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
+#define SPORT1_MRCS2
0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */ +#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */ + +/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */ +#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */ +#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */ +#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */ +#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */ +#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */ +#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */ +#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */ + +/* DMA Traffic Control Registers */ +#define DMA_TCPER 0xFFC00B0C /* Traffic Control Periods Register */ +#define DMA_TCCNT 0xFFC00B10 /* Traffic Control Current Counts Register */ + +/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */ +#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */ +#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */ +#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */ +#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */ +#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */ +#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */ +#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */ +#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */ +#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */ +#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */ +#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */ +#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */ +#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */ + +#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */ +#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */ +#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */ +#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */ +#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */ +#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */ +#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */ +#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */ +#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */ +#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */ +#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */ +#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */ +#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */ + +#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */ +#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */ +#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */ +#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */ +#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */ +#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */ +#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y 
Modify Register */ +#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */ +#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */ +#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */ +#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */ +#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */ +#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */ + +#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */ +#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */ +#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */ +#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */ +#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */ +#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */ +#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */ +#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */ +#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */ +#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */ +#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */ +#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */ +#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */ + +#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */ +#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */ +#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */ +#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */ +#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */ +#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */ +#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */ +#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */ +#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */ +#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */ +#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */ +#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */ +#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */ + +#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */ +#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */ +#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */ +#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */ +#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */ +#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */ +#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */ +#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */ +#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */ +#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */ +#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */ +#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count 
Register */ +#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */ + +#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */ +#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */ +#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */ +#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */ +#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */ +#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */ +#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */ +#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */ +#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */ +#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */ +#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */ +#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */ +#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */ + +#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */ +#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */ +#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */ +#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */ +#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */ +#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */ +#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */ +#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */ +#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */ +#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */ +#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */ +#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */ +#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */ + +#define DMA8_NEXT_DESC_PTR 0xFFC00E00 /* DMA Channel 8 Next Descriptor Pointer Register */ +#define DMA8_START_ADDR 0xFFC00E04 /* DMA Channel 8 Start Address Register */ +#define DMA8_CONFIG 0xFFC00E08 /* DMA Channel 8 Configuration Register */ +#define DMA8_X_COUNT 0xFFC00E10 /* DMA Channel 8 X Count Register */ +#define DMA8_X_MODIFY 0xFFC00E14 /* DMA Channel 8 X Modify Register */ +#define DMA8_Y_COUNT 0xFFC00E18 /* DMA Channel 8 Y Count Register */ +#define DMA8_Y_MODIFY 0xFFC00E1C /* DMA Channel 8 Y Modify Register */ +#define DMA8_CURR_DESC_PTR 0xFFC00E20 /* DMA Channel 8 Current Descriptor Pointer Register */ +#define DMA8_CURR_ADDR 0xFFC00E24 /* DMA Channel 8 Current Address Register */ +#define DMA8_IRQ_STATUS 0xFFC00E28 /* DMA Channel 8 Interrupt/Status Register */ +#define DMA8_PERIPHERAL_MAP 0xFFC00E2C /* DMA Channel 8 Peripheral Map Register */ +#define DMA8_CURR_X_COUNT 0xFFC00E30 /* DMA Channel 8 Current X Count Register */ +#define DMA8_CURR_Y_COUNT 0xFFC00E38 /* DMA Channel 8 Current Y Count Register */ + +#define DMA9_NEXT_DESC_PTR 0xFFC00E40 /* DMA Channel 9 Next Descriptor Pointer Register */ +#define DMA9_START_ADDR 0xFFC00E44 /* DMA Channel 9 Start Address Register */ +#define DMA9_CONFIG 0xFFC00E48 /* DMA Channel 9 Configuration Register */ +#define DMA9_X_COUNT 0xFFC00E50 /* DMA Channel 9 X Count Register */ +#define DMA9_X_MODIFY 
0xFFC00E54 /* DMA Channel 9 X Modify Register */ +#define DMA9_Y_COUNT 0xFFC00E58 /* DMA Channel 9 Y Count Register */ +#define DMA9_Y_MODIFY 0xFFC00E5C /* DMA Channel 9 Y Modify Register */ +#define DMA9_CURR_DESC_PTR 0xFFC00E60 /* DMA Channel 9 Current Descriptor Pointer Register */ +#define DMA9_CURR_ADDR 0xFFC00E64 /* DMA Channel 9 Current Address Register */ +#define DMA9_IRQ_STATUS 0xFFC00E68 /* DMA Channel 9 Interrupt/Status Register */ +#define DMA9_PERIPHERAL_MAP 0xFFC00E6C /* DMA Channel 9 Peripheral Map Register */ +#define DMA9_CURR_X_COUNT 0xFFC00E70 /* DMA Channel 9 Current X Count Register */ +#define DMA9_CURR_Y_COUNT 0xFFC00E78 /* DMA Channel 9 Current Y Count Register */ + +#define DMA10_NEXT_DESC_PTR 0xFFC00E80 /* DMA Channel 10 Next Descriptor Pointer Register */ +#define DMA10_START_ADDR 0xFFC00E84 /* DMA Channel 10 Start Address Register */ +#define DMA10_CONFIG 0xFFC00E88 /* DMA Channel 10 Configuration Register */ +#define DMA10_X_COUNT 0xFFC00E90 /* DMA Channel 10 X Count Register */ +#define DMA10_X_MODIFY 0xFFC00E94 /* DMA Channel 10 X Modify Register */ +#define DMA10_Y_COUNT 0xFFC00E98 /* DMA Channel 10 Y Count Register */ +#define DMA10_Y_MODIFY 0xFFC00E9C /* DMA Channel 10 Y Modify Register */ +#define DMA10_CURR_DESC_PTR 0xFFC00EA0 /* DMA Channel 10 Current Descriptor Pointer Register */ +#define DMA10_CURR_ADDR 0xFFC00EA4 /* DMA Channel 10 Current Address Register */ +#define DMA10_IRQ_STATUS 0xFFC00EA8 /* DMA Channel 10 Interrupt/Status Register */ +#define DMA10_PERIPHERAL_MAP 0xFFC00EAC /* DMA Channel 10 Peripheral Map Register */ +#define DMA10_CURR_X_COUNT 0xFFC00EB0 /* DMA Channel 10 Current X Count Register */ +#define DMA10_CURR_Y_COUNT 0xFFC00EB8 /* DMA Channel 10 Current Y Count Register */ + +#define DMA11_NEXT_DESC_PTR 0xFFC00EC0 /* DMA Channel 11 Next Descriptor Pointer Register */ +#define DMA11_START_ADDR 0xFFC00EC4 /* DMA Channel 11 Start Address Register */ +#define DMA11_CONFIG 0xFFC00EC8 /* DMA Channel 11 Configuration Register */ +#define DMA11_X_COUNT 0xFFC00ED0 /* DMA Channel 11 X Count Register */ +#define DMA11_X_MODIFY 0xFFC00ED4 /* DMA Channel 11 X Modify Register */ +#define DMA11_Y_COUNT 0xFFC00ED8 /* DMA Channel 11 Y Count Register */ +#define DMA11_Y_MODIFY 0xFFC00EDC /* DMA Channel 11 Y Modify Register */ +#define DMA11_CURR_DESC_PTR 0xFFC00EE0 /* DMA Channel 11 Current Descriptor Pointer Register */ +#define DMA11_CURR_ADDR 0xFFC00EE4 /* DMA Channel 11 Current Address Register */ +#define DMA11_IRQ_STATUS 0xFFC00EE8 /* DMA Channel 11 Interrupt/Status Register */ +#define DMA11_PERIPHERAL_MAP 0xFFC00EEC /* DMA Channel 11 Peripheral Map Register */ +#define DMA11_CURR_X_COUNT 0xFFC00EF0 /* DMA Channel 11 Current X Count Register */ +#define DMA11_CURR_Y_COUNT 0xFFC00EF8 /* DMA Channel 11 Current Y Count Register */ + +#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /* MemDMA Stream 0 Destination Next Descriptor Pointer Register */ +#define MDMA_D0_START_ADDR 0xFFC00F04 /* MemDMA Stream 0 Destination Start Address Register */ +#define MDMA_D0_CONFIG 0xFFC00F08 /* MemDMA Stream 0 Destination Configuration Register */ +#define MDMA_D0_X_COUNT 0xFFC00F10 /* MemDMA Stream 0 Destination X Count Register */ +#define MDMA_D0_X_MODIFY 0xFFC00F14 /* MemDMA Stream 0 Destination X Modify Register */ +#define MDMA_D0_Y_COUNT 0xFFC00F18 /* MemDMA Stream 0 Destination Y Count Register */ +#define MDMA_D0_Y_MODIFY 0xFFC00F1C /* MemDMA Stream 0 Destination Y Modify Register */ +#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /* MemDMA Stream 0 Destination 
Current Descriptor Pointer Register */ +#define MDMA_D0_CURR_ADDR 0xFFC00F24 /* MemDMA Stream 0 Destination Current Address Register */ +#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /* MemDMA Stream 0 Destination Interrupt/Status Register */ +#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /* MemDMA Stream 0 Destination Peripheral Map Register */ +#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /* MemDMA Stream 0 Destination Current X Count Register */ +#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /* MemDMA Stream 0 Destination Current Y Count Register */ + +#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /* MemDMA Stream 0 Source Next Descriptor Pointer Register */ +#define MDMA_S0_START_ADDR 0xFFC00F44 /* MemDMA Stream 0 Source Start Address Register */ +#define MDMA_S0_CONFIG 0xFFC00F48 /* MemDMA Stream 0 Source Configuration Register */ +#define MDMA_S0_X_COUNT 0xFFC00F50 /* MemDMA Stream 0 Source X Count Register */ +#define MDMA_S0_X_MODIFY 0xFFC00F54 /* MemDMA Stream 0 Source X Modify Register */ +#define MDMA_S0_Y_COUNT 0xFFC00F58 /* MemDMA Stream 0 Source Y Count Register */ +#define MDMA_S0_Y_MODIFY 0xFFC00F5C /* MemDMA Stream 0 Source Y Modify Register */ +#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /* MemDMA Stream 0 Source Current Descriptor Pointer Register */ +#define MDMA_S0_CURR_ADDR 0xFFC00F64 /* MemDMA Stream 0 Source Current Address Register */ +#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /* MemDMA Stream 0 Source Interrupt/Status Register */ +#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /* MemDMA Stream 0 Source Peripheral Map Register */ +#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /* MemDMA Stream 0 Source Current X Count Register */ +#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /* MemDMA Stream 0 Source Current Y Count Register */ + +#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /* MemDMA Stream 1 Destination Next Descriptor Pointer Register */ +#define MDMA_D1_START_ADDR 0xFFC00F84 /* MemDMA Stream 1 Destination Start Address Register */ +#define MDMA_D1_CONFIG 0xFFC00F88 /* MemDMA Stream 1 Destination Configuration Register */ +#define MDMA_D1_X_COUNT 0xFFC00F90 /* MemDMA Stream 1 Destination X Count Register */ +#define MDMA_D1_X_MODIFY 0xFFC00F94 /* MemDMA Stream 1 Destination X Modify Register */ +#define MDMA_D1_Y_COUNT 0xFFC00F98 /* MemDMA Stream 1 Destination Y Count Register */ +#define MDMA_D1_Y_MODIFY 0xFFC00F9C /* MemDMA Stream 1 Destination Y Modify Register */ +#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /* MemDMA Stream 1 Destination Current Descriptor Pointer Register */ +#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /* MemDMA Stream 1 Destination Current Address Register */ +#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /* MemDMA Stream 1 Destination Interrupt/Status Register */ +#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /* MemDMA Stream 1 Destination Peripheral Map Register */ +#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /* MemDMA Stream 1 Destination Current X Count Register */ +#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /* MemDMA Stream 1 Destination Current Y Count Register */ + +#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /* MemDMA Stream 1 Source Next Descriptor Pointer Register */ +#define MDMA_S1_START_ADDR 0xFFC00FC4 /* MemDMA Stream 1 Source Start Address Register */ +#define MDMA_S1_CONFIG 0xFFC00FC8 /* MemDMA Stream 1 Source Configuration Register */ +#define MDMA_S1_X_COUNT 0xFFC00FD0 /* MemDMA Stream 1 Source X Count Register */ +#define MDMA_S1_X_MODIFY 0xFFC00FD4 /* MemDMA Stream 1 Source X Modify Register */ +#define MDMA_S1_Y_COUNT 0xFFC00FD8 /* MemDMA Stream 1 Source Y Count Register */ +#define MDMA_S1_Y_MODIFY 
0xFFC00FDC /* MemDMA Stream 1 Source Y Modify Register */ +#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /* MemDMA Stream 1 Source Current Descriptor Pointer Register */ +#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /* MemDMA Stream 1 Source Current Address Register */ +#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /* MemDMA Stream 1 Source Interrupt/Status Register */ +#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /* MemDMA Stream 1 Source Peripheral Map Register */ +#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /* MemDMA Stream 1 Source Current X Count Register */ +#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /* MemDMA Stream 1 Source Current Y Count Register */ + +/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */ +#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */ +#define PPI_STATUS 0xFFC01004 /* PPI Status Register */ +#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */ +#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */ +#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */ + +/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */ +#define TWI_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */ +#define TWI_CONTROL 0xFFC01404 /* TWI Control Register */ +#define TWI_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */ +#define TWI_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */ +#define TWI_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */ +#define TWI_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */ +#define TWI_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */ +#define TWI_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */ +#define TWI_INT_STAT 0xFFC01420 /* TWI Interrupt Status Register */ +#define TWI_INT_MASK 0xFFC01424 /* TWI Master Interrupt Mask Register */ +#define TWI_FIFO_CTL 0xFFC01428 /* FIFO Control Register */ +#define TWI_FIFO_STAT 0xFFC0142C /* FIFO Status Register */ +#define TWI_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */ +#define TWI_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */ +#define TWI_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */ +#define TWI_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */ + +/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */ +#define PORTGIO 0xFFC01500 /* Port G I/O Pin State Specify Register */ +#define PORTGIO_CLEAR 0xFFC01504 /* Port G I/O Peripheral Interrupt Clear Register */ +#define PORTGIO_SET 0xFFC01508 /* Port G I/O Peripheral Interrupt Set Register */ +#define PORTGIO_TOGGLE 0xFFC0150C /* Port G I/O Pin State Toggle Register */ +#define PORTGIO_MASKA 0xFFC01510 /* Port G I/O Mask State Specify Interrupt A Register */ +#define PORTGIO_MASKA_CLEAR 0xFFC01514 /* Port G I/O Mask Disable Interrupt A Register */ +#define PORTGIO_MASKA_SET 0xFFC01518 /* Port G I/O Mask Enable Interrupt A Register */ +#define PORTGIO_MASKA_TOGGLE 0xFFC0151C /* Port G I/O Mask Toggle Enable Interrupt A Register */ +#define PORTGIO_MASKB 0xFFC01520 /* Port G I/O Mask State Specify Interrupt B Register */ +#define PORTGIO_MASKB_CLEAR 0xFFC01524 /* Port G I/O Mask Disable Interrupt B Register */ +#define PORTGIO_MASKB_SET 0xFFC01528 /* Port G I/O Mask Enable Interrupt B Register */ +#define PORTGIO_MASKB_TOGGLE 0xFFC0152C /* Port G I/O Mask Toggle Enable Interrupt B Register */ +#define PORTGIO_DIR 0xFFC01530 /* Port G I/O Direction Register */ +#define PORTGIO_POLAR 0xFFC01534 /* Port G I/O Source Polarity Register */ +#define PORTGIO_EDGE 0xFFC01538 /* Port G I/O Source Sensitivity Register */ +#define PORTGIO_BOTH 
0xFFC0153C /* Port G I/O Set on BOTH Edges Register */ +#define PORTGIO_INEN 0xFFC01540 /* Port G I/O Input Enable Register */ + +/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */ +#define PORTHIO 0xFFC01700 /* Port H I/O Pin State Specify Register */ +#define PORTHIO_CLEAR 0xFFC01704 /* Port H I/O Peripheral Interrupt Clear Register */ +#define PORTHIO_SET 0xFFC01708 /* Port H I/O Peripheral Interrupt Set Register */ +#define PORTHIO_TOGGLE 0xFFC0170C /* Port H I/O Pin State Toggle Register */ +#define PORTHIO_MASKA 0xFFC01710 /* Port H I/O Mask State Specify Interrupt A Register */ +#define PORTHIO_MASKA_CLEAR 0xFFC01714 /* Port H I/O Mask Disable Interrupt A Register */ +#define PORTHIO_MASKA_SET 0xFFC01718 /* Port H I/O Mask Enable Interrupt A Register */ +#define PORTHIO_MASKA_TOGGLE 0xFFC0171C /* Port H I/O Mask Toggle Enable Interrupt A Register */ +#define PORTHIO_MASKB 0xFFC01720 /* Port H I/O Mask State Specify Interrupt B Register */ +#define PORTHIO_MASKB_CLEAR 0xFFC01724 /* Port H I/O Mask Disable Interrupt B Register */ +#define PORTHIO_MASKB_SET 0xFFC01728 /* Port H I/O Mask Enable Interrupt B Register */ +#define PORTHIO_MASKB_TOGGLE 0xFFC0172C /* Port H I/O Mask Toggle Enable Interrupt B Register */ +#define PORTHIO_DIR 0xFFC01730 /* Port H I/O Direction Register */ +#define PORTHIO_POLAR 0xFFC01734 /* Port H I/O Source Polarity Register */ +#define PORTHIO_EDGE 0xFFC01738 /* Port H I/O Source Sensitivity Register */ +#define PORTHIO_BOTH 0xFFC0173C /* Port H I/O Set on BOTH Edges Register */ +#define PORTHIO_INEN 0xFFC01740 /* Port H I/O Input Enable Register */ + +/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */ +#define UART1_THR 0xFFC02000 /* Transmit Holding register */ +#define UART1_RBR 0xFFC02000 /* Receive Buffer register */ +#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */ +#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */ +#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */ +#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */ +#define UART1_LCR 0xFFC0200C /* Line Control Register */ +#define UART1_MCR 0xFFC02010 /* Modem Control Register */ +#define UART1_LSR 0xFFC02014 /* Line Status Register */ +#define UART1_MSR 0xFFC02018 /* Modem Status Register */ +#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */ +#define UART1_GCTL 0xFFC02024 /* Global Control Register */ + +/* CAN Controller (0xFFC02A00 - 0xFFC02FFF) */ +/* For Mailboxes 0-15 */ +#define CAN_MC1 0xFFC02A00 /* Mailbox config reg 1 */ +#define CAN_MD1 0xFFC02A04 /* Mailbox direction reg 1 */ +#define CAN_TRS1 0xFFC02A08 /* Transmit Request Set reg 1 */ +#define CAN_TRR1 0xFFC02A0C /* Transmit Request Reset reg 1 */ +#define CAN_TA1 0xFFC02A10 /* Transmit Acknowledge reg 1 */ +#define CAN_AA1 0xFFC02A14 /* Transmit Abort Acknowledge reg 1 */ +#define CAN_RMP1 0xFFC02A18 /* Receive Message Pending reg 1 */ +#define CAN_RML1 0xFFC02A1C /* Receive Message Lost reg 1 */ +#define CAN_MBTIF1 0xFFC02A20 /* Mailbox Transmit Interrupt Flag reg 1 */ +#define CAN_MBRIF1 0xFFC02A24 /* Mailbox Receive Interrupt Flag reg 1 */ +#define CAN_MBIM1 0xFFC02A28 /* Mailbox Interrupt Mask reg 1 */ +#define CAN_RFH1 0xFFC02A2C /* Remote Frame Handling reg 1 */ +#define CAN_OPSS1 0xFFC02A30 /* Overwrite Protection Single Shot Xmit reg 1 */ + +/* For Mailboxes 16-31 */ +#define CAN_MC2 0xFFC02A40 /* Mailbox config reg 2 */ +#define CAN_MD2 0xFFC02A44 /* Mailbox direction reg 2 */ +#define CAN_TRS2 0xFFC02A48 /* Transmit Request Set reg 2 */ +#define CAN_TRR2 0xFFC02A4C 
/* Transmit Request Reset reg 2 */ +#define CAN_TA2 0xFFC02A50 /* Transmit Acknowledge reg 2 */ +#define CAN_AA2 0xFFC02A54 /* Transmit Abort Acknowledge reg 2 */ +#define CAN_RMP2 0xFFC02A58 /* Receive Message Pending reg 2 */ +#define CAN_RML2 0xFFC02A5C /* Receive Message Lost reg 2 */ +#define CAN_MBTIF2 0xFFC02A60 /* Mailbox Transmit Interrupt Flag reg 2 */ +#define CAN_MBRIF2 0xFFC02A64 /* Mailbox Receive Interrupt Flag reg 2 */ +#define CAN_MBIM2 0xFFC02A68 /* Mailbox Interrupt Mask reg 2 */ +#define CAN_RFH2 0xFFC02A6C /* Remote Frame Handling reg 2 */ +#define CAN_OPSS2 0xFFC02A70 /* Overwrite Protection Single Shot Xmit reg 2 */ + +/* CAN Configuration, Control, and Status Registers */ +#define CAN_CLOCK 0xFFC02A80 /* Bit Timing Configuration register 0 */ +#define CAN_TIMING 0xFFC02A84 /* Bit Timing Configuration register 1 */ +#define CAN_DEBUG 0xFFC02A88 /* Debug Register */ +#define CAN_STATUS 0xFFC02A8C /* Global Status Register */ +#define CAN_CEC 0xFFC02A90 /* Error Counter Register */ +#define CAN_GIS 0xFFC02A94 /* Global Interrupt Status Register */ +#define CAN_GIM 0xFFC02A98 /* Global Interrupt Mask Register */ +#define CAN_GIF 0xFFC02A9C /* Global Interrupt Flag Register */ +#define CAN_CONTROL 0xFFC02AA0 /* Master Control Register */ +#define CAN_INTR 0xFFC02AA4 /* Interrupt Pending Register */ +#define CAN_SFCMVER 0xFFC02AA8 /* Version Code Register */ +#define CAN_MBTD 0xFFC02AAC /* Mailbox Temporary Disable Feature */ +#define CAN_EWR 0xFFC02AB0 /* Programmable Warning Level */ +#define CAN_ESR 0xFFC02AB4 /* Error Status Register */ +#define CAN_UCREG 0xFFC02AC0 /* Universal Counter Register/Capture Register */ +#define CAN_UCCNT 0xFFC02AC4 /* Universal Counter */ +#define CAN_UCRC 0xFFC02AC8 /* Universal Counter Force Reload Register */ +#define CAN_UCCNF 0xFFC02ACC /* Universal Counter Configuration Register */ + +/* Mailbox Acceptance Masks */ +#define CAN_AM00L 0xFFC02B00 /* Mailbox 0 Low Acceptance Mask */ +#define CAN_AM00H 0xFFC02B04 /* Mailbox 0 High Acceptance Mask */ +#define CAN_AM01L 0xFFC02B08 /* Mailbox 1 Low Acceptance Mask */ +#define CAN_AM01H 0xFFC02B0C /* Mailbox 1 High Acceptance Mask */ +#define CAN_AM02L 0xFFC02B10 /* Mailbox 2 Low Acceptance Mask */ +#define CAN_AM02H 0xFFC02B14 /* Mailbox 2 High Acceptance Mask */ +#define CAN_AM03L 0xFFC02B18 /* Mailbox 3 Low Acceptance Mask */ +#define CAN_AM03H 0xFFC02B1C /* Mailbox 3 High Acceptance Mask */ +#define CAN_AM04L 0xFFC02B20 /* Mailbox 4 Low Acceptance Mask */ +#define CAN_AM04H 0xFFC02B24 /* Mailbox 4 High Acceptance Mask */ +#define CAN_AM05L 0xFFC02B28 /* Mailbox 5 Low Acceptance Mask */ +#define CAN_AM05H 0xFFC02B2C /* Mailbox 5 High Acceptance Mask */ +#define CAN_AM06L 0xFFC02B30 /* Mailbox 6 Low Acceptance Mask */ +#define CAN_AM06H 0xFFC02B34 /* Mailbox 6 High Acceptance Mask */ +#define CAN_AM07L 0xFFC02B38 /* Mailbox 7 Low Acceptance Mask */ +#define CAN_AM07H 0xFFC02B3C /* Mailbox 7 High Acceptance Mask */ +#define CAN_AM08L 0xFFC02B40 /* Mailbox 8 Low Acceptance Mask */ +#define CAN_AM08H 0xFFC02B44 /* Mailbox 8 High Acceptance Mask */ +#define CAN_AM09L 0xFFC02B48 /* Mailbox 9 Low Acceptance Mask */ +#define CAN_AM09H 0xFFC02B4C /* Mailbox 9 High Acceptance Mask */ +#define CAN_AM10L 0xFFC02B50 /* Mailbox 10 Low Acceptance Mask */ +#define CAN_AM10H 0xFFC02B54 /* Mailbox 10 High Acceptance Mask */ +#define CAN_AM11L 0xFFC02B58 /* Mailbox 11 Low Acceptance Mask */ +#define CAN_AM11H 0xFFC02B5C /* Mailbox 11 High Acceptance Mask */ +#define CAN_AM12L 0xFFC02B60 /* Mailbox 12 Low 
Acceptance Mask */ +#define CAN_AM12H 0xFFC02B64 /* Mailbox 12 High Acceptance Mask */ +#define CAN_AM13L 0xFFC02B68 /* Mailbox 13 Low Acceptance Mask */ +#define CAN_AM13H 0xFFC02B6C /* Mailbox 13 High Acceptance Mask */ +#define CAN_AM14L 0xFFC02B70 /* Mailbox 14 Low Acceptance Mask */ +#define CAN_AM14H 0xFFC02B74 /* Mailbox 14 High Acceptance Mask */ +#define CAN_AM15L 0xFFC02B78 /* Mailbox 15 Low Acceptance Mask */ +#define CAN_AM15H 0xFFC02B7C /* Mailbox 15 High Acceptance Mask */ + +#define CAN_AM16L 0xFFC02B80 /* Mailbox 16 Low Acceptance Mask */ +#define CAN_AM16H 0xFFC02B84 /* Mailbox 16 High Acceptance Mask */ +#define CAN_AM17L 0xFFC02B88 /* Mailbox 17 Low Acceptance Mask */ +#define CAN_AM17H 0xFFC02B8C /* Mailbox 17 High Acceptance Mask */ +#define CAN_AM18L 0xFFC02B90 /* Mailbox 18 Low Acceptance Mask */ +#define CAN_AM18H 0xFFC02B94 /* Mailbox 18 High Acceptance Mask */ +#define CAN_AM19L 0xFFC02B98 /* Mailbox 19 Low Acceptance Mask */ +#define CAN_AM19H 0xFFC02B9C /* Mailbox 19 High Acceptance Mask */ +#define CAN_AM20L 0xFFC02BA0 /* Mailbox 20 Low Acceptance Mask */ +#define CAN_AM20H 0xFFC02BA4 /* Mailbox 20 High Acceptance Mask */ +#define CAN_AM21L 0xFFC02BA8 /* Mailbox 21 Low Acceptance Mask */ +#define CAN_AM21H 0xFFC02BAC /* Mailbox 21 High Acceptance Mask */ +#define CAN_AM22L 0xFFC02BB0 /* Mailbox 22 Low Acceptance Mask */ +#define CAN_AM22H 0xFFC02BB4 /* Mailbox 22 High Acceptance Mask */ +#define CAN_AM23L 0xFFC02BB8 /* Mailbox 23 Low Acceptance Mask */ +#define CAN_AM23H 0xFFC02BBC /* Mailbox 23 High Acceptance Mask */ +#define CAN_AM24L 0xFFC02BC0 /* Mailbox 24 Low Acceptance Mask */ +#define CAN_AM24H 0xFFC02BC4 /* Mailbox 24 High Acceptance Mask */ +#define CAN_AM25L 0xFFC02BC8 /* Mailbox 25 Low Acceptance Mask */ +#define CAN_AM25H 0xFFC02BCC /* Mailbox 25 High Acceptance Mask */ +#define CAN_AM26L 0xFFC02BD0 /* Mailbox 26 Low Acceptance Mask */ +#define CAN_AM26H 0xFFC02BD4 /* Mailbox 26 High Acceptance Mask */ +#define CAN_AM27L 0xFFC02BD8 /* Mailbox 27 Low Acceptance Mask */ +#define CAN_AM27H 0xFFC02BDC /* Mailbox 27 High Acceptance Mask */ +#define CAN_AM28L 0xFFC02BE0 /* Mailbox 28 Low Acceptance Mask */ +#define CAN_AM28H 0xFFC02BE4 /* Mailbox 28 High Acceptance Mask */ +#define CAN_AM29L 0xFFC02BE8 /* Mailbox 29 Low Acceptance Mask */ +#define CAN_AM29H 0xFFC02BEC /* Mailbox 29 High Acceptance Mask */ +#define CAN_AM30L 0xFFC02BF0 /* Mailbox 30 Low Acceptance Mask */ +#define CAN_AM30H 0xFFC02BF4 /* Mailbox 30 High Acceptance Mask */ +#define CAN_AM31L 0xFFC02BF8 /* Mailbox 31 Low Acceptance Mask */ +#define CAN_AM31H 0xFFC02BFC /* Mailbox 31 High Acceptance Mask */ + +/* CAN Acceptance Mask Macros */ +#define CAN_AM_L(x) (CAN_AM00L+((x)*0x8)) +#define CAN_AM_H(x) (CAN_AM00H+((x)*0x8)) + +/* Mailbox Registers */ +#define CAN_MB00_DATA0 0xFFC02C00 /* Mailbox 0 Data Word 0 [15:0] Register */ +#define CAN_MB00_DATA1 0xFFC02C04 /* Mailbox 0 Data Word 1 [31:16] Register */ +#define CAN_MB00_DATA2 0xFFC02C08 /* Mailbox 0 Data Word 2 [47:32] Register */ +#define CAN_MB00_DATA3 0xFFC02C0C /* Mailbox 0 Data Word 3 [63:48] Register */ +#define CAN_MB00_LENGTH 0xFFC02C10 /* Mailbox 0 Data Length Code Register */ +#define CAN_MB00_TIMESTAMP 0xFFC02C14 /* Mailbox 0 Time Stamp Value Register */ +#define CAN_MB00_ID0 0xFFC02C18 /* Mailbox 0 Identifier Low Register */ +#define CAN_MB00_ID1 0xFFC02C1C /* Mailbox 0 Identifier High Register */ + +#define CAN_MB01_DATA0 0xFFC02C20 /* Mailbox 1 Data Word 0 [15:0] Register */ +#define CAN_MB01_DATA1 0xFFC02C24 /* Mailbox 
1 Data Word 1 [31:16] Register */ +#define CAN_MB01_DATA2 0xFFC02C28 /* Mailbox 1 Data Word 2 [47:32] Register */ +#define CAN_MB01_DATA3 0xFFC02C2C /* Mailbox 1 Data Word 3 [63:48] Register */ +#define CAN_MB01_LENGTH 0xFFC02C30 /* Mailbox 1 Data Length Code Register */ +#define CAN_MB01_TIMESTAMP 0xFFC02C34 /* Mailbox 1 Time Stamp Value Register */ +#define CAN_MB01_ID0 0xFFC02C38 /* Mailbox 1 Identifier Low Register */ +#define CAN_MB01_ID1 0xFFC02C3C /* Mailbox 1 Identifier High Register */ + +#define CAN_MB02_DATA0 0xFFC02C40 /* Mailbox 2 Data Word 0 [15:0] Register */ +#define CAN_MB02_DATA1 0xFFC02C44 /* Mailbox 2 Data Word 1 [31:16] Register */ +#define CAN_MB02_DATA2 0xFFC02C48 /* Mailbox 2 Data Word 2 [47:32] Register */ +#define CAN_MB02_DATA3 0xFFC02C4C /* Mailbox 2 Data Word 3 [63:48] Register */ +#define CAN_MB02_LENGTH 0xFFC02C50 /* Mailbox 2 Data Length Code Register */ +#define CAN_MB02_TIMESTAMP 0xFFC02C54 /* Mailbox 2 Time Stamp Value Register */ +#define CAN_MB02_ID0 0xFFC02C58 /* Mailbox 2 Identifier Low Register */ +#define CAN_MB02_ID1 0xFFC02C5C /* Mailbox 2 Identifier High Register */ + +#define CAN_MB03_DATA0 0xFFC02C60 /* Mailbox 3 Data Word 0 [15:0] Register */ +#define CAN_MB03_DATA1 0xFFC02C64 /* Mailbox 3 Data Word 1 [31:16] Register */ +#define CAN_MB03_DATA2 0xFFC02C68 /* Mailbox 3 Data Word 2 [47:32] Register */ +#define CAN_MB03_DATA3 0xFFC02C6C /* Mailbox 3 Data Word 3 [63:48] Register */ +#define CAN_MB03_LENGTH 0xFFC02C70 /* Mailbox 3 Data Length Code Register */ +#define CAN_MB03_TIMESTAMP 0xFFC02C74 /* Mailbox 3 Time Stamp Value Register */ +#define CAN_MB03_ID0 0xFFC02C78 /* Mailbox 3 Identifier Low Register */ +#define CAN_MB03_ID1 0xFFC02C7C /* Mailbox 3 Identifier High Register */ + +#define CAN_MB04_DATA0 0xFFC02C80 /* Mailbox 4 Data Word 0 [15:0] Register */ +#define CAN_MB04_DATA1 0xFFC02C84 /* Mailbox 4 Data Word 1 [31:16] Register */ +#define CAN_MB04_DATA2 0xFFC02C88 /* Mailbox 4 Data Word 2 [47:32] Register */ +#define CAN_MB04_DATA3 0xFFC02C8C /* Mailbox 4 Data Word 3 [63:48] Register */ +#define CAN_MB04_LENGTH 0xFFC02C90 /* Mailbox 4 Data Length Code Register */ +#define CAN_MB04_TIMESTAMP 0xFFC02C94 /* Mailbox 4 Time Stamp Value Register */ +#define CAN_MB04_ID0 0xFFC02C98 /* Mailbox 4 Identifier Low Register */ +#define CAN_MB04_ID1 0xFFC02C9C /* Mailbox 4 Identifier High Register */ + +#define CAN_MB05_DATA0 0xFFC02CA0 /* Mailbox 5 Data Word 0 [15:0] Register */ +#define CAN_MB05_DATA1 0xFFC02CA4 /* Mailbox 5 Data Word 1 [31:16] Register */ +#define CAN_MB05_DATA2 0xFFC02CA8 /* Mailbox 5 Data Word 2 [47:32] Register */ +#define CAN_MB05_DATA3 0xFFC02CAC /* Mailbox 5 Data Word 3 [63:48] Register */ +#define CAN_MB05_LENGTH 0xFFC02CB0 /* Mailbox 5 Data Length Code Register */ +#define CAN_MB05_TIMESTAMP 0xFFC02CB4 /* Mailbox 5 Time Stamp Value Register */ +#define CAN_MB05_ID0 0xFFC02CB8 /* Mailbox 5 Identifier Low Register */ +#define CAN_MB05_ID1 0xFFC02CBC /* Mailbox 5 Identifier High Register */ + +#define CAN_MB06_DATA0 0xFFC02CC0 /* Mailbox 6 Data Word 0 [15:0] Register */ +#define CAN_MB06_DATA1 0xFFC02CC4 /* Mailbox 6 Data Word 1 [31:16] Register */ +#define CAN_MB06_DATA2 0xFFC02CC8 /* Mailbox 6 Data Word 2 [47:32] Register */ +#define CAN_MB06_DATA3 0xFFC02CCC /* Mailbox 6 Data Word 3 [63:48] Register */ +#define CAN_MB06_LENGTH 0xFFC02CD0 /* Mailbox 6 Data Length Code Register */ +#define CAN_MB06_TIMESTAMP 0xFFC02CD4 /* Mailbox 6 Time Stamp Value Register */ +#define CAN_MB06_ID0 0xFFC02CD8 /* Mailbox 6 Identifier Low 
Register */ +#define CAN_MB06_ID1 0xFFC02CDC /* Mailbox 6 Identifier High Register */ + +#define CAN_MB07_DATA0 0xFFC02CE0 /* Mailbox 7 Data Word 0 [15:0] Register */ +#define CAN_MB07_DATA1 0xFFC02CE4 /* Mailbox 7 Data Word 1 [31:16] Register */ +#define CAN_MB07_DATA2 0xFFC02CE8 /* Mailbox 7 Data Word 2 [47:32] Register */ +#define CAN_MB07_DATA3 0xFFC02CEC /* Mailbox 7 Data Word 3 [63:48] Register */ +#define CAN_MB07_LENGTH 0xFFC02CF0 /* Mailbox 7 Data Length Code Register */ +#define CAN_MB07_TIMESTAMP 0xFFC02CF4 /* Mailbox 7 Time Stamp Value Register */ +#define CAN_MB07_ID0 0xFFC02CF8 /* Mailbox 7 Identifier Low Register */ +#define CAN_MB07_ID1 0xFFC02CFC /* Mailbox 7 Identifier High Register */ + +#define CAN_MB08_DATA0 0xFFC02D00 /* Mailbox 8 Data Word 0 [15:0] Register */ +#define CAN_MB08_DATA1 0xFFC02D04 /* Mailbox 8 Data Word 1 [31:16] Register */ +#define CAN_MB08_DATA2 0xFFC02D08 /* Mailbox 8 Data Word 2 [47:32] Register */ +#define CAN_MB08_DATA3 0xFFC02D0C /* Mailbox 8 Data Word 3 [63:48] Register */ +#define CAN_MB08_LENGTH 0xFFC02D10 /* Mailbox 8 Data Length Code Register */ +#define CAN_MB08_TIMESTAMP 0xFFC02D14 /* Mailbox 8 Time Stamp Value Register */ +#define CAN_MB08_ID0 0xFFC02D18 /* Mailbox 8 Identifier Low Register */ +#define CAN_MB08_ID1 0xFFC02D1C /* Mailbox 8 Identifier High Register */ + +#define CAN_MB09_DATA0 0xFFC02D20 /* Mailbox 9 Data Word 0 [15:0] Register */ +#define CAN_MB09_DATA1 0xFFC02D24 /* Mailbox 9 Data Word 1 [31:16] Register */ +#define CAN_MB09_DATA2 0xFFC02D28 /* Mailbox 9 Data Word 2 [47:32] Register */ +#define CAN_MB09_DATA3 0xFFC02D2C /* Mailbox 9 Data Word 3 [63:48] Register */ +#define CAN_MB09_LENGTH 0xFFC02D30 /* Mailbox 9 Data Length Code Register */ +#define CAN_MB09_TIMESTAMP 0xFFC02D34 /* Mailbox 9 Time Stamp Value Register */ +#define CAN_MB09_ID0 0xFFC02D38 /* Mailbox 9 Identifier Low Register */ +#define CAN_MB09_ID1 0xFFC02D3C /* Mailbox 9 Identifier High Register */ + +#define CAN_MB10_DATA0 0xFFC02D40 /* Mailbox 10 Data Word 0 [15:0] Register */ +#define CAN_MB10_DATA1 0xFFC02D44 /* Mailbox 10 Data Word 1 [31:16] Register */ +#define CAN_MB10_DATA2 0xFFC02D48 /* Mailbox 10 Data Word 2 [47:32] Register */ +#define CAN_MB10_DATA3 0xFFC02D4C /* Mailbox 10 Data Word 3 [63:48] Register */ +#define CAN_MB10_LENGTH 0xFFC02D50 /* Mailbox 10 Data Length Code Register */ +#define CAN_MB10_TIMESTAMP 0xFFC02D54 /* Mailbox 10 Time Stamp Value Register */ +#define CAN_MB10_ID0 0xFFC02D58 /* Mailbox 10 Identifier Low Register */ +#define CAN_MB10_ID1 0xFFC02D5C /* Mailbox 10 Identifier High Register */ + +#define CAN_MB11_DATA0 0xFFC02D60 /* Mailbox 11 Data Word 0 [15:0] Register */ +#define CAN_MB11_DATA1 0xFFC02D64 /* Mailbox 11 Data Word 1 [31:16] Register */ +#define CAN_MB11_DATA2 0xFFC02D68 /* Mailbox 11 Data Word 2 [47:32] Register */ +#define CAN_MB11_DATA3 0xFFC02D6C /* Mailbox 11 Data Word 3 [63:48] Register */ +#define CAN_MB11_LENGTH 0xFFC02D70 /* Mailbox 11 Data Length Code Register */ +#define CAN_MB11_TIMESTAMP 0xFFC02D74 /* Mailbox 11 Time Stamp Value Register */ +#define CAN_MB11_ID0 0xFFC02D78 /* Mailbox 11 Identifier Low Register */ +#define CAN_MB11_ID1 0xFFC02D7C /* Mailbox 11 Identifier High Register */ + +#define CAN_MB12_DATA0 0xFFC02D80 /* Mailbox 12 Data Word 0 [15:0] Register */ +#define CAN_MB12_DATA1 0xFFC02D84 /* Mailbox 12 Data Word 1 [31:16] Register */ +#define CAN_MB12_DATA2 0xFFC02D88 /* Mailbox 12 Data Word 2 [47:32] Register */ +#define CAN_MB12_DATA3 0xFFC02D8C /* Mailbox 12 Data Word 3 [63:48] 
Register */ +#define CAN_MB12_LENGTH 0xFFC02D90 /* Mailbox 12 Data Length Code Register */ +#define CAN_MB12_TIMESTAMP 0xFFC02D94 /* Mailbox 12 Time Stamp Value Register */ +#define CAN_MB12_ID0 0xFFC02D98 /* Mailbox 12 Identifier Low Register */ +#define CAN_MB12_ID1 0xFFC02D9C /* Mailbox 12 Identifier High Register */ + +#define CAN_MB13_DATA0 0xFFC02DA0 /* Mailbox 13 Data Word 0 [15:0] Register */ +#define CAN_MB13_DATA1 0xFFC02DA4 /* Mailbox 13 Data Word 1 [31:16] Register */ +#define CAN_MB13_DATA2 0xFFC02DA8 /* Mailbox 13 Data Word 2 [47:32] Register */ +#define CAN_MB13_DATA3 0xFFC02DAC /* Mailbox 13 Data Word 3 [63:48] Register */ +#define CAN_MB13_LENGTH 0xFFC02DB0 /* Mailbox 13 Data Length Code Register */ +#define CAN_MB13_TIMESTAMP 0xFFC02DB4 /* Mailbox 13 Time Stamp Value Register */ +#define CAN_MB13_ID0 0xFFC02DB8 /* Mailbox 13 Identifier Low Register */ +#define CAN_MB13_ID1 0xFFC02DBC /* Mailbox 13 Identifier High Register */ + +#define CAN_MB14_DATA0 0xFFC02DC0 /* Mailbox 14 Data Word 0 [15:0] Register */ +#define CAN_MB14_DATA1 0xFFC02DC4 /* Mailbox 14 Data Word 1 [31:16] Register */ +#define CAN_MB14_DATA2 0xFFC02DC8 /* Mailbox 14 Data Word 2 [47:32] Register */ +#define CAN_MB14_DATA3 0xFFC02DCC /* Mailbox 14 Data Word 3 [63:48] Register */ +#define CAN_MB14_LENGTH 0xFFC02DD0 /* Mailbox 14 Data Length Code Register */ +#define CAN_MB14_TIMESTAMP 0xFFC02DD4 /* Mailbox 14 Time Stamp Value Register */ +#define CAN_MB14_ID0 0xFFC02DD8 /* Mailbox 14 Identifier Low Register */ +#define CAN_MB14_ID1 0xFFC02DDC /* Mailbox 14 Identifier High Register */ + +#define CAN_MB15_DATA0 0xFFC02DE0 /* Mailbox 15 Data Word 0 [15:0] Register */ +#define CAN_MB15_DATA1 0xFFC02DE4 /* Mailbox 15 Data Word 1 [31:16] Register */ +#define CAN_MB15_DATA2 0xFFC02DE8 /* Mailbox 15 Data Word 2 [47:32] Register */ +#define CAN_MB15_DATA3 0xFFC02DEC /* Mailbox 15 Data Word 3 [63:48] Register */ +#define CAN_MB15_LENGTH 0xFFC02DF0 /* Mailbox 15 Data Length Code Register */ +#define CAN_MB15_TIMESTAMP 0xFFC02DF4 /* Mailbox 15 Time Stamp Value Register */ +#define CAN_MB15_ID0 0xFFC02DF8 /* Mailbox 15 Identifier Low Register */ +#define CAN_MB15_ID1 0xFFC02DFC /* Mailbox 15 Identifier High Register */ + +#define CAN_MB16_DATA0 0xFFC02E00 /* Mailbox 16 Data Word 0 [15:0] Register */ +#define CAN_MB16_DATA1 0xFFC02E04 /* Mailbox 16 Data Word 1 [31:16] Register */ +#define CAN_MB16_DATA2 0xFFC02E08 /* Mailbox 16 Data Word 2 [47:32] Register */ +#define CAN_MB16_DATA3 0xFFC02E0C /* Mailbox 16 Data Word 3 [63:48] Register */ +#define CAN_MB16_LENGTH 0xFFC02E10 /* Mailbox 16 Data Length Code Register */ +#define CAN_MB16_TIMESTAMP 0xFFC02E14 /* Mailbox 16 Time Stamp Value Register */ +#define CAN_MB16_ID0 0xFFC02E18 /* Mailbox 16 Identifier Low Register */ +#define CAN_MB16_ID1 0xFFC02E1C /* Mailbox 16 Identifier High Register */ + +#define CAN_MB17_DATA0 0xFFC02E20 /* Mailbox 17 Data Word 0 [15:0] Register */ +#define CAN_MB17_DATA1 0xFFC02E24 /* Mailbox 17 Data Word 1 [31:16] Register */ +#define CAN_MB17_DATA2 0xFFC02E28 /* Mailbox 17 Data Word 2 [47:32] Register */ +#define CAN_MB17_DATA3 0xFFC02E2C /* Mailbox 17 Data Word 3 [63:48] Register */ +#define CAN_MB17_LENGTH 0xFFC02E30 /* Mailbox 17 Data Length Code Register */ +#define CAN_MB17_TIMESTAMP 0xFFC02E34 /* Mailbox 17 Time Stamp Value Register */ +#define CAN_MB17_ID0 0xFFC02E38 /* Mailbox 17 Identifier Low Register */ +#define CAN_MB17_ID1 0xFFC02E3C /* Mailbox 17 Identifier High Register */ + +#define CAN_MB18_DATA0 0xFFC02E40 /* Mailbox 18 Data 
Word 0 [15:0] Register */ +#define CAN_MB18_DATA1 0xFFC02E44 /* Mailbox 18 Data Word 1 [31:16] Register */ +#define CAN_MB18_DATA2 0xFFC02E48 /* Mailbox 18 Data Word 2 [47:32] Register */ +#define CAN_MB18_DATA3 0xFFC02E4C /* Mailbox 18 Data Word 3 [63:48] Register */ +#define CAN_MB18_LENGTH 0xFFC02E50 /* Mailbox 18 Data Length Code Register */ +#define CAN_MB18_TIMESTAMP 0xFFC02E54 /* Mailbox 18 Time Stamp Value Register */ +#define CAN_MB18_ID0 0xFFC02E58 /* Mailbox 18 Identifier Low Register */ +#define CAN_MB18_ID1 0xFFC02E5C /* Mailbox 18 Identifier High Register */ + +#define CAN_MB19_DATA0 0xFFC02E60 /* Mailbox 19 Data Word 0 [15:0] Register */ +#define CAN_MB19_DATA1 0xFFC02E64 /* Mailbox 19 Data Word 1 [31:16] Register */ +#define CAN_MB19_DATA2 0xFFC02E68 /* Mailbox 19 Data Word 2 [47:32] Register */ +#define CAN_MB19_DATA3 0xFFC02E6C /* Mailbox 19 Data Word 3 [63:48] Register */ +#define CAN_MB19_LENGTH 0xFFC02E70 /* Mailbox 19 Data Length Code Register */ +#define CAN_MB19_TIMESTAMP 0xFFC02E74 /* Mailbox 19 Time Stamp Value Register */ +#define CAN_MB19_ID0 0xFFC02E78 /* Mailbox 19 Identifier Low Register */ +#define CAN_MB19_ID1 0xFFC02E7C /* Mailbox 19 Identifier High Register */ + +#define CAN_MB20_DATA0 0xFFC02E80 /* Mailbox 20 Data Word 0 [15:0] Register */ +#define CAN_MB20_DATA1 0xFFC02E84 /* Mailbox 20 Data Word 1 [31:16] Register */ +#define CAN_MB20_DATA2 0xFFC02E88 /* Mailbox 20 Data Word 2 [47:32] Register */ +#define CAN_MB20_DATA3 0xFFC02E8C /* Mailbox 20 Data Word 3 [63:48] Register */ +#define CAN_MB20_LENGTH 0xFFC02E90 /* Mailbox 20 Data Length Code Register */ +#define CAN_MB20_TIMESTAMP 0xFFC02E94 /* Mailbox 20 Time Stamp Value Register */ +#define CAN_MB20_ID0 0xFFC02E98 /* Mailbox 20 Identifier Low Register */ +#define CAN_MB20_ID1 0xFFC02E9C /* Mailbox 20 Identifier High Register */ + +#define CAN_MB21_DATA0 0xFFC02EA0 /* Mailbox 21 Data Word 0 [15:0] Register */ +#define CAN_MB21_DATA1 0xFFC02EA4 /* Mailbox 21 Data Word 1 [31:16] Register */ +#define CAN_MB21_DATA2 0xFFC02EA8 /* Mailbox 21 Data Word 2 [47:32] Register */ +#define CAN_MB21_DATA3 0xFFC02EAC /* Mailbox 21 Data Word 3 [63:48] Register */ +#define CAN_MB21_LENGTH 0xFFC02EB0 /* Mailbox 21 Data Length Code Register */ +#define CAN_MB21_TIMESTAMP 0xFFC02EB4 /* Mailbox 21 Time Stamp Value Register */ +#define CAN_MB21_ID0 0xFFC02EB8 /* Mailbox 21 Identifier Low Register */ +#define CAN_MB21_ID1 0xFFC02EBC /* Mailbox 21 Identifier High Register */ + +#define CAN_MB22_DATA0 0xFFC02EC0 /* Mailbox 22 Data Word 0 [15:0] Register */ +#define CAN_MB22_DATA1 0xFFC02EC4 /* Mailbox 22 Data Word 1 [31:16] Register */ +#define CAN_MB22_DATA2 0xFFC02EC8 /* Mailbox 22 Data Word 2 [47:32] Register */ +#define CAN_MB22_DATA3 0xFFC02ECC /* Mailbox 22 Data Word 3 [63:48] Register */ +#define CAN_MB22_LENGTH 0xFFC02ED0 /* Mailbox 22 Data Length Code Register */ +#define CAN_MB22_TIMESTAMP 0xFFC02ED4 /* Mailbox 22 Time Stamp Value Register */ +#define CAN_MB22_ID0 0xFFC02ED8 /* Mailbox 22 Identifier Low Register */ +#define CAN_MB22_ID1 0xFFC02EDC /* Mailbox 22 Identifier High Register */ + +#define CAN_MB23_DATA0 0xFFC02EE0 /* Mailbox 23 Data Word 0 [15:0] Register */ +#define CAN_MB23_DATA1 0xFFC02EE4 /* Mailbox 23 Data Word 1 [31:16] Register */ +#define CAN_MB23_DATA2 0xFFC02EE8 /* Mailbox 23 Data Word 2 [47:32] Register */ +#define CAN_MB23_DATA3 0xFFC02EEC /* Mailbox 23 Data Word 3 [63:48] Register */ +#define CAN_MB23_LENGTH 0xFFC02EF0 /* Mailbox 23 Data Length Code Register */ +#define CAN_MB23_TIMESTAMP 
0xFFC02EF4 /* Mailbox 23 Time Stamp Value Register */ +#define CAN_MB23_ID0 0xFFC02EF8 /* Mailbox 23 Identifier Low Register */ +#define CAN_MB23_ID1 0xFFC02EFC /* Mailbox 23 Identifier High Register */ + +#define CAN_MB24_DATA0 0xFFC02F00 /* Mailbox 24 Data Word 0 [15:0] Register */ +#define CAN_MB24_DATA1 0xFFC02F04 /* Mailbox 24 Data Word 1 [31:16] Register */ +#define CAN_MB24_DATA2 0xFFC02F08 /* Mailbox 24 Data Word 2 [47:32] Register */ +#define CAN_MB24_DATA3 0xFFC02F0C /* Mailbox 24 Data Word 3 [63:48] Register */ +#define CAN_MB24_LENGTH 0xFFC02F10 /* Mailbox 24 Data Length Code Register */ +#define CAN_MB24_TIMESTAMP 0xFFC02F14 /* Mailbox 24 Time Stamp Value Register */ +#define CAN_MB24_ID0 0xFFC02F18 /* Mailbox 24 Identifier Low Register */ +#define CAN_MB24_ID1 0xFFC02F1C /* Mailbox 24 Identifier High Register */ + +#define CAN_MB25_DATA0 0xFFC02F20 /* Mailbox 25 Data Word 0 [15:0] Register */ +#define CAN_MB25_DATA1 0xFFC02F24 /* Mailbox 25 Data Word 1 [31:16] Register */ +#define CAN_MB25_DATA2 0xFFC02F28 /* Mailbox 25 Data Word 2 [47:32] Register */ +#define CAN_MB25_DATA3 0xFFC02F2C /* Mailbox 25 Data Word 3 [63:48] Register */ +#define CAN_MB25_LENGTH 0xFFC02F30 /* Mailbox 25 Data Length Code Register */ +#define CAN_MB25_TIMESTAMP 0xFFC02F34 /* Mailbox 25 Time Stamp Value Register */ +#define CAN_MB25_ID0 0xFFC02F38 /* Mailbox 25 Identifier Low Register */ +#define CAN_MB25_ID1 0xFFC02F3C /* Mailbox 25 Identifier High Register */ + +#define CAN_MB26_DATA0 0xFFC02F40 /* Mailbox 26 Data Word 0 [15:0] Register */ +#define CAN_MB26_DATA1 0xFFC02F44 /* Mailbox 26 Data Word 1 [31:16] Register */ +#define CAN_MB26_DATA2 0xFFC02F48 /* Mailbox 26 Data Word 2 [47:32] Register */ +#define CAN_MB26_DATA3 0xFFC02F4C /* Mailbox 26 Data Word 3 [63:48] Register */ +#define CAN_MB26_LENGTH 0xFFC02F50 /* Mailbox 26 Data Length Code Register */ +#define CAN_MB26_TIMESTAMP 0xFFC02F54 /* Mailbox 26 Time Stamp Value Register */ +#define CAN_MB26_ID0 0xFFC02F58 /* Mailbox 26 Identifier Low Register */ +#define CAN_MB26_ID1 0xFFC02F5C /* Mailbox 26 Identifier High Register */ + +#define CAN_MB27_DATA0 0xFFC02F60 /* Mailbox 27 Data Word 0 [15:0] Register */ +#define CAN_MB27_DATA1 0xFFC02F64 /* Mailbox 27 Data Word 1 [31:16] Register */ +#define CAN_MB27_DATA2 0xFFC02F68 /* Mailbox 27 Data Word 2 [47:32] Register */ +#define CAN_MB27_DATA3 0xFFC02F6C /* Mailbox 27 Data Word 3 [63:48] Register */ +#define CAN_MB27_LENGTH 0xFFC02F70 /* Mailbox 27 Data Length Code Register */ +#define CAN_MB27_TIMESTAMP 0xFFC02F74 /* Mailbox 27 Time Stamp Value Register */ +#define CAN_MB27_ID0 0xFFC02F78 /* Mailbox 27 Identifier Low Register */ +#define CAN_MB27_ID1 0xFFC02F7C /* Mailbox 27 Identifier High Register */ + +#define CAN_MB28_DATA0 0xFFC02F80 /* Mailbox 28 Data Word 0 [15:0] Register */ +#define CAN_MB28_DATA1 0xFFC02F84 /* Mailbox 28 Data Word 1 [31:16] Register */ +#define CAN_MB28_DATA2 0xFFC02F88 /* Mailbox 28 Data Word 2 [47:32] Register */ +#define CAN_MB28_DATA3 0xFFC02F8C /* Mailbox 28 Data Word 3 [63:48] Register */ +#define CAN_MB28_LENGTH 0xFFC02F90 /* Mailbox 28 Data Length Code Register */ +#define CAN_MB28_TIMESTAMP 0xFFC02F94 /* Mailbox 28 Time Stamp Value Register */ +#define CAN_MB28_ID0 0xFFC02F98 /* Mailbox 28 Identifier Low Register */ +#define CAN_MB28_ID1 0xFFC02F9C /* Mailbox 28 Identifier High Register */ + +#define CAN_MB29_DATA0 0xFFC02FA0 /* Mailbox 29 Data Word 0 [15:0] Register */ +#define CAN_MB29_DATA1 0xFFC02FA4 /* Mailbox 29 Data Word 1 [31:16] Register */ +#define 
CAN_MB29_DATA2 0xFFC02FA8 /* Mailbox 29 Data Word 2 [47:32] Register */
+#define CAN_MB29_DATA3 0xFFC02FAC /* Mailbox 29 Data Word 3 [63:48] Register */
+#define CAN_MB29_LENGTH 0xFFC02FB0 /* Mailbox 29 Data Length Code Register */
+#define CAN_MB29_TIMESTAMP 0xFFC02FB4 /* Mailbox 29 Time Stamp Value Register */
+#define CAN_MB29_ID0 0xFFC02FB8 /* Mailbox 29 Identifier Low Register */
+#define CAN_MB29_ID1 0xFFC02FBC /* Mailbox 29 Identifier High Register */
+
+#define CAN_MB30_DATA0 0xFFC02FC0 /* Mailbox 30 Data Word 0 [15:0] Register */
+#define CAN_MB30_DATA1 0xFFC02FC4 /* Mailbox 30 Data Word 1 [31:16] Register */
+#define CAN_MB30_DATA2 0xFFC02FC8 /* Mailbox 30 Data Word 2 [47:32] Register */
+#define CAN_MB30_DATA3 0xFFC02FCC /* Mailbox 30 Data Word 3 [63:48] Register */
+#define CAN_MB30_LENGTH 0xFFC02FD0 /* Mailbox 30 Data Length Code Register */
+#define CAN_MB30_TIMESTAMP 0xFFC02FD4 /* Mailbox 30 Time Stamp Value Register */
+#define CAN_MB30_ID0 0xFFC02FD8 /* Mailbox 30 Identifier Low Register */
+#define CAN_MB30_ID1 0xFFC02FDC /* Mailbox 30 Identifier High Register */
+
+#define CAN_MB31_DATA0 0xFFC02FE0 /* Mailbox 31 Data Word 0 [15:0] Register */
+#define CAN_MB31_DATA1 0xFFC02FE4 /* Mailbox 31 Data Word 1 [31:16] Register */
+#define CAN_MB31_DATA2 0xFFC02FE8 /* Mailbox 31 Data Word 2 [47:32] Register */
+#define CAN_MB31_DATA3 0xFFC02FEC /* Mailbox 31 Data Word 3 [63:48] Register */
+#define CAN_MB31_LENGTH 0xFFC02FF0 /* Mailbox 31 Data Length Code Register */
+#define CAN_MB31_TIMESTAMP 0xFFC02FF4 /* Mailbox 31 Time Stamp Value Register */
+#define CAN_MB31_ID0 0xFFC02FF8 /* Mailbox 31 Identifier Low Register */
+#define CAN_MB31_ID1 0xFFC02FFC /* Mailbox 31 Identifier High Register */
+
+/* CAN Mailbox Area Macros */
+#define CAN_MB_ID1(x) (CAN_MB00_ID1+((x)*0x20))
+#define CAN_MB_ID0(x) (CAN_MB00_ID0+((x)*0x20))
+#define CAN_MB_TIMESTAMP(x) (CAN_MB00_TIMESTAMP+((x)*0x20))
+#define CAN_MB_LENGTH(x) (CAN_MB00_LENGTH+((x)*0x20))
+#define CAN_MB_DATA3(x) (CAN_MB00_DATA3+((x)*0x20))
+#define CAN_MB_DATA2(x) (CAN_MB00_DATA2+((x)*0x20))
+#define CAN_MB_DATA1(x) (CAN_MB00_DATA1+((x)*0x20))
+#define CAN_MB_DATA0(x) (CAN_MB00_DATA0+((x)*0x20))
+
+/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
+#define PORTF_FER 0xFFC03200 /* Port F Function Enable Register (Alternate/Flag*) */
+#define PORTG_FER 0xFFC03204 /* Port G Function Enable Register (Alternate/Flag*) */
+#define PORTH_FER 0xFFC03208 /* Port H Function Enable Register (Alternate/Flag*) */
+#define BFIN_PORT_MUX 0xFFC0320C /* Port Multiplexer Control Register */
+
+/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
+#define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */
+#define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */
+#define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */
+#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */
+#define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */
+#define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */
+#define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */
+
+#define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */
+#define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */
+#define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */
+#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */
+#define HMDMA1_ECOVERFLOW 0xFFC03350 /*
HMDMA1 Edge Count Overflow Interrupt Register */ +#define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */ +#define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */ + +/*********************************************************************************** +** System MMR Register Bits And Macros +** +** Disclaimer: All macros are intended to make C and Assembly code more readable. +** Use these macros carefully, as any that do left shifts for field +** depositing will result in the lower order bits being destroyed. Any +** macro that shifts left to properly position the bit-field should be +** used as part of an OR to initialize a register and NOT as a dynamic +** modifier UNLESS the lower order bits are saved and ORed back in when +** the macro is used. +*************************************************************************************/ +/* +** ********************* PLL AND RESET MASKS ****************************************/ +/* PLL_CTL Masks */ +#define DF 0x0001 /* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */ +#define PLL_OFF 0x0002 /* PLL Not Powered */ +#define STOPCK 0x0008 /* Core Clock Off */ +#define PDWN 0x0020 /* Enter Deep Sleep Mode */ +#define IN_DELAY 0x0040 /* Add 200ps Delay To EBIU Input Latches */ +#define OUT_DELAY 0x0080 /* Add 200ps Delay To EBIU Output Signals */ +#define BYPASS 0x0100 /* Bypass the PLL */ +#define MSEL 0x7E00 /* Multiplier Select For CCLK/VCO Factors */ +/* PLL_CTL Macros (Only Use With Logic OR While Setting Lower Order Bits) */ +#define SET_MSEL(x) (((x)&0x3F) << 0x9) /* Set MSEL = 0-63 --> VCO = CLKIN*MSEL */ + +/* PLL_DIV Masks */ +#define SSEL 0x000F /* System Select */ +#define CSEL 0x0030 /* Core Select */ +#define CSEL_DIV1 0x0000 /* CCLK = VCO / 1 */ +#define CSEL_DIV2 0x0010 /* CCLK = VCO / 2 */ +#define CSEL_DIV4 0x0020 /* CCLK = VCO / 4 */ +#define CSEL_DIV8 0x0030 /* CCLK = VCO / 8 */ +/* PLL_DIV Macros */ +#define SET_SSEL(x) ((x)&0xF) /* Set SSEL = 0-15 --> SCLK = VCO/SSEL */ + +/* VR_CTL Masks */ +#define FREQ 0x0003 /* Switching Oscillator Frequency For Regulator */ +#define HIBERNATE 0x0000 /* Powerdown/Bypass On-Board Regulation */ +#define FREQ_333 0x0001 /* Switching Frequency Is 333 kHz */ +#define FREQ_667 0x0002 /* Switching Frequency Is 667 kHz */ +#define FREQ_1000 0x0003 /* Switching Frequency Is 1 MHz */ + +#define GAIN 0x000C /* Voltage Level Gain */ +#define GAIN_5 0x0000 /* GAIN = 5 */ +#define GAIN_10 0x0004 /* GAIN = 10 */ +#define GAIN_20 0x0008 /* GAIN = 20 */ +#define GAIN_50 0x000C /* GAIN = 50 */ + +#define VLEV 0x00F0 /* Internal Voltage Level */ +#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */ +#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */ +#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */ +#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */ +#define VLEV_105 0x00A0 /* VLEV = 1.05 V (-5% - +10% Accuracy) */ +#define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */ +#define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */ +#define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */ +#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */ +#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - +10% Accuracy) */ + +#define WAKE 0x0100 /* Enable RTC/Reset Wakeup From Hibernate */ +#define PHYWE 0x0200 /* Enable PHY Wakeup From Hibernate */ +#define CANWE 0x0400 /* Enable CAN Wakeup From Hibernate */ +#define PHYCLKOE 0x4000 /* PHY Clock Output Enable */ +#define CKELOW 0x8000 /* Enable Drive 
CKE Low During Reset */ + +/* PLL_STAT Masks */ +#define ACTIVE_PLLENABLED 0x0001 /* Processor In Active Mode With PLL Enabled */ +#define FULL_ON 0x0002 /* Processor In Full On Mode */ +#define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */ +#define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */ + +/* CHIPID Masks */ +#define CHIPID_VERSION 0xF0000000 +#define CHIPID_FAMILY 0x0FFFF000 +#define CHIPID_MANUFACTURE 0x00000FFE + +/* SWRST Masks */ +#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */ +#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */ +#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */ +#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */ +#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */ + +/* SYSCR Masks */ +#define BMODE 0x0006 /* Boot Mode - Latched During HW Reset From Mode Pins */ +#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */ + +/* ************* SYSTEM INTERRUPT CONTROLLER MASKS *************************************/ + +/* SIC_IAR0 Macros */ +#define P0_IVG(x) (((x)&0xF)-7) /* Peripheral #0 assigned IVG #x */ +#define P1_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #1 assigned IVG #x */ +#define P2_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #2 assigned IVG #x */ +#define P3_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #3 assigned IVG #x */ +#define P4_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #4 assigned IVG #x */ +#define P5_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #5 assigned IVG #x */ +#define P6_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #6 assigned IVG #x */ +#define P7_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #7 assigned IVG #x */ + +/* SIC_IAR1 Macros */ +#define P8_IVG(x) (((x)&0xF)-7) /* Peripheral #8 assigned IVG #x */ +#define P9_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #9 assigned IVG #x */ +#define P10_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #10 assigned IVG #x */ +#define P11_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #11 assigned IVG #x */ +#define P12_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #12 assigned IVG #x */ +#define P13_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #13 assigned IVG #x */ +#define P14_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #14 assigned IVG #x */ +#define P15_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #15 assigned IVG #x */ + +/* SIC_IAR2 Macros */ +#define P16_IVG(x) (((x)&0xF)-7) /* Peripheral #16 assigned IVG #x */ +#define P17_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #17 assigned IVG #x */ +#define P18_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #18 assigned IVG #x */ +#define P19_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #19 assigned IVG #x */ +#define P20_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #20 assigned IVG #x */ +#define P21_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #21 assigned IVG #x */ +#define P22_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #22 assigned IVG #x */ +#define P23_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #23 assigned IVG #x */ + +/* SIC_IAR3 Macros */ +#define P24_IVG(x) (((x)&0xF)-7) /* Peripheral #24 assigned IVG #x */ +#define P25_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #25 assigned IVG #x */ +#define P26_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #26 assigned IVG #x */ +#define P27_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #27 assigned IVG #x */ +#define P28_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #28 assigned IVG #x */ +#define P29_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #29 assigned IVG #x */ +#define P30_IVG(x) (((x)&0xF)-7) << 
0x18 /* Peripheral #30 assigned IVG #x */ +#define P31_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #31 assigned IVG #x */ + +/* SIC_IMASK Masks */ +#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */ +#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */ +#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */ +#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */ + +/* SIC_IWR Masks */ +#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */ +#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */ +#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */ +#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */ + +/* *************** WATCHDOG TIMER MASKS *******************************************/ +/* WDOG_CTL Masks */ +#define WDOG_RESET 0x0000 /* Generate Reset Event */ +#define WDOG_NMI 0x0002 /* Generate Non-Maskable Interrupt (NMI) Event */ +#define WDOG_GPI 0x0004 /* Generate General Purpose (GP) Interrupt */ +#define WDOG_NONE 0x0006 /* Disable Watchdog Timer Interrupts */ +#define TMR_EN 0x0FF0 /* Watchdog Counter Enable */ +#define TMR_DIS 0x0AD0 /* Watchdog Counter Disable */ +#define TRO 0x8000 /* Watchdog Expired */ + +/* ************** UART CONTROLLER MASKS *************************/ +/* UARTx_LCR Masks */ +#define WLS(x) ((((x)&0x3)-5) & 0x03) /* Word Length Select */ +#define STB 0x04 /* Stop Bits */ +#define PEN 0x08 /* Parity Enable */ +#define EPS 0x10 /* Even Parity Select */ +#define STP 0x20 /* Stick Parity */ +#define SB 0x40 /* Set Break */ +#define DLAB 0x80 /* Divisor Latch Access */ + +/* UARTx_MCR Mask */ +#define LOOP 0x10 /* Loopback Mode Enable */ + +/* UARTx_LSR Masks */ +#define DR 0x01 /* Data Ready */ +#define OE 0x02 /* Overrun Error */ +#define PE 0x04 /* Parity Error */ +#define FE 0x08 /* Framing Error */ +#define BI 0x10 /* Break Interrupt */ +#define THRE 0x20 /* THR Empty */ +#define TEMT 0x40 /* TSR and UART_THR Empty */ + +/* UARTx_IER Masks */ +#define ERBFI 0x01 /* Enable Receive Buffer Full Interrupt */ +#define ETBEI 0x02 /* Enable Transmit Buffer Empty Interrupt */ +#define ELSI 0x04 /* Enable RX Status Interrupt */ + +/* UARTx_IIR Masks */ +#define NINT 0x01 /* Pending Interrupt */ +#define IIR_TX_READY 0x02 /* UART_THR empty */ +#define IIR_RX_READY 0x04 /* Receive data ready */ +#define IIR_LINE_CHANGE 0x06 /* Receive line status */ +#define IIR_STATUS 0x06 + +/* UARTx_GCTL Masks */ +#define UCEN 0x01 /* Enable UARTx Clocks */ +#define IREN 0x02 /* Enable IrDA Mode */ +#define TPOLC 0x04 /* IrDA TX Polarity Change */ +#define RPOLC 0x08 /* IrDA RX Polarity Change */ +#define FPE 0x10 /* Force Parity Error On Transmit */ +#define FFE 0x20 /* Force Framing Error On Transmit */ + +/* *********** SERIAL PERIPHERAL INTERFACE (SPI) MASKS ****************************/ +/* SPI_CTL Masks */ +#define TIMOD 0x0003 /* Transfer Initiate Mode */ +#define RDBR_CORE 0x0000 /* RDBR Read Initiates, IRQ When RDBR Full */ +#define TDBR_CORE 0x0001 /* TDBR Write Initiates, IRQ When TDBR Empty */ +#define RDBR_DMA 0x0002 /* DMA Read, DMA Until FIFO Empty */ +#define TDBR_DMA 0x0003 /* DMA Write, DMA Until FIFO Full */ +#define SZ 0x0004 /* Send Zero (When TDBR Empty, Send Zero/Last*) */ +#define GM 0x0008 /* Get More (When RDBR Full, Overwrite/Discard*) */ +#define PSSE 0x0010 /* Slave-Select Input Enable */ +#define EMISO 0x0020 /* Enable MISO As Output */ +#define SPI_SIZE 0x0100 /* Size of 
Words (16/8* Bits) */ +#define LSBF 0x0200 /* LSB First */ +#define CPHA 0x0400 /* Clock Phase */ +#define CPOL 0x0800 /* Clock Polarity */ +#define MSTR 0x1000 /* Master/Slave* */ +#define WOM 0x2000 /* Write Open Drain Master */ +#define SPE 0x4000 /* SPI Enable */ + +/* SPI_FLG Masks */ +#define FLS1 0x0002 /* Enables SPI_FLOUT1 as SPI Slave-Select Output */ +#define FLS2 0x0004 /* Enables SPI_FLOUT2 as SPI Slave-Select Output */ +#define FLS3 0x0008 /* Enables SPI_FLOUT3 as SPI Slave-Select Output */ +#define FLS4 0x0010 /* Enables SPI_FLOUT4 as SPI Slave-Select Output */ +#define FLS5 0x0020 /* Enables SPI_FLOUT5 as SPI Slave-Select Output */ +#define FLS6 0x0040 /* Enables SPI_FLOUT6 as SPI Slave-Select Output */ +#define FLS7 0x0080 /* Enables SPI_FLOUT7 as SPI Slave-Select Output */ +#define FLG1 0xFDFF /* Activates SPI_FLOUT1 */ +#define FLG2 0xFBFF /* Activates SPI_FLOUT2 */ +#define FLG3 0xF7FF /* Activates SPI_FLOUT3 */ +#define FLG4 0xEFFF /* Activates SPI_FLOUT4 */ +#define FLG5 0xDFFF /* Activates SPI_FLOUT5 */ +#define FLG6 0xBFFF /* Activates SPI_FLOUT6 */ +#define FLG7 0x7FFF /* Activates SPI_FLOUT7 */ + +/* SPI_STAT Masks */ +#define SPIF 0x0001 /* SPI Finished (Single-Word Transfer Complete) */ +#define MODF 0x0002 /* Mode Fault Error (Another Device Tried To Become Master) */ +#define TXE 0x0004 /* Transmission Error (Data Sent With No New Data In TDBR) */ +#define TXS 0x0008 /* SPI_TDBR Data Buffer Status (Full/Empty*) */ +#define RBSY 0x0010 /* Receive Error (Data Received With RDBR Full) */ +#define RXS 0x0020 /* SPI_RDBR Data Buffer Status (Full/Empty*) */ +#define TXCOL 0x0040 /* Transmit Collision Error (Corrupt Data May Have Been Sent) */ + +/* **************** GENERAL PURPOSE TIMER MASKS **********************/ +/* TIMER_ENABLE Masks */ +#define TIMEN0 0x0001 /* Enable Timer 0 */ +#define TIMEN1 0x0002 /* Enable Timer 1 */ +#define TIMEN2 0x0004 /* Enable Timer 2 */ +#define TIMEN3 0x0008 /* Enable Timer 3 */ +#define TIMEN4 0x0010 /* Enable Timer 4 */ +#define TIMEN5 0x0020 /* Enable Timer 5 */ +#define TIMEN6 0x0040 /* Enable Timer 6 */ +#define TIMEN7 0x0080 /* Enable Timer 7 */ + +/* TIMER_DISABLE Masks */ +#define TIMDIS0 TIMEN0 /* Disable Timer 0 */ +#define TIMDIS1 TIMEN1 /* Disable Timer 1 */ +#define TIMDIS2 TIMEN2 /* Disable Timer 2 */ +#define TIMDIS3 TIMEN3 /* Disable Timer 3 */ +#define TIMDIS4 TIMEN4 /* Disable Timer 4 */ +#define TIMDIS5 TIMEN5 /* Disable Timer 5 */ +#define TIMDIS6 TIMEN6 /* Disable Timer 6 */ +#define TIMDIS7 TIMEN7 /* Disable Timer 7 */ + +/* TIMER_STATUS Masks */ +#define TIMIL0 0x00000001 /* Timer 0 Interrupt */ +#define TIMIL1 0x00000002 /* Timer 1 Interrupt */ +#define TIMIL2 0x00000004 /* Timer 2 Interrupt */ +#define TIMIL3 0x00000008 /* Timer 3 Interrupt */ +#define TOVL_ERR0 0x00000010 /* Timer 0 Counter Overflow */ +#define TOVL_ERR1 0x00000020 /* Timer 1 Counter Overflow */ +#define TOVL_ERR2 0x00000040 /* Timer 2 Counter Overflow */ +#define TOVL_ERR3 0x00000080 /* Timer 3 Counter Overflow */ +#define TRUN0 0x00001000 /* Timer 0 Slave Enable Status */ +#define TRUN1 0x00002000 /* Timer 1 Slave Enable Status */ +#define TRUN2 0x00004000 /* Timer 2 Slave Enable Status */ +#define TRUN3 0x00008000 /* Timer 3 Slave Enable Status */ +#define TIMIL4 0x00010000 /* Timer 4 Interrupt */ +#define TIMIL5 0x00020000 /* Timer 5 Interrupt */ +#define TIMIL6 0x00040000 /* Timer 6 Interrupt */ +#define TIMIL7 0x00080000 /* Timer 7 Interrupt */ +#define TOVL_ERR4 0x00100000 /* Timer 4 Counter Overflow */ +#define TOVL_ERR5 
0x00200000 /* Timer 5 Counter Overflow */ +#define TOVL_ERR6 0x00400000 /* Timer 6 Counter Overflow */ +#define TOVL_ERR7 0x00800000 /* Timer 7 Counter Overflow */ +#define TRUN4 0x10000000 /* Timer 4 Slave Enable Status */ +#define TRUN5 0x20000000 /* Timer 5 Slave Enable Status */ +#define TRUN6 0x40000000 /* Timer 6 Slave Enable Status */ +#define TRUN7 0x80000000 /* Timer 7 Slave Enable Status */ + +/* TIMERx_CONFIG Masks */ +#define PWM_OUT 0x0001 /* Pulse-Width Modulation Output Mode */ +#define WDTH_CAP 0x0002 /* Width Capture Input Mode */ +#define EXT_CLK 0x0003 /* External Clock Mode */ +#define PULSE_HI 0x0004 /* Action Pulse (Positive/Negative*) */ +#define PERIOD_CNT 0x0008 /* Period Count */ +#define IRQ_ENA 0x0010 /* Interrupt Request Enable */ +#define TIN_SEL 0x0020 /* Timer Input Select */ +#define OUT_DIS 0x0040 /* Output Pad Disable */ +#define CLK_SEL 0x0080 /* Timer Clock Select */ +#define TOGGLE_HI 0x0100 /* PWM_OUT PULSE_HI Toggle Mode */ +#define EMU_RUN 0x0200 /* Emulation Behavior Select */ +#define ERR_TYP 0xC000 /* Error Type */ + +/* ****************** GPIO PORTS F, G, H MASKS ***********************/ +/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */ +/* Port F Masks */ +#define PF0 0x0001 +#define PF1 0x0002 +#define PF2 0x0004 +#define PF3 0x0008 +#define PF4 0x0010 +#define PF5 0x0020 +#define PF6 0x0040 +#define PF7 0x0080 +#define PF8 0x0100 +#define PF9 0x0200 +#define PF10 0x0400 +#define PF11 0x0800 +#define PF12 0x1000 +#define PF13 0x2000 +#define PF14 0x4000 +#define PF15 0x8000 + +/* Port G Masks */ +#define PG0 0x0001 +#define PG1 0x0002 +#define PG2 0x0004 +#define PG3 0x0008 +#define PG4 0x0010 +#define PG5 0x0020 +#define PG6 0x0040 +#define PG7 0x0080 +#define PG8 0x0100 +#define PG9 0x0200 +#define PG10 0x0400 +#define PG11 0x0800 +#define PG12 0x1000 +#define PG13 0x2000 +#define PG14 0x4000 +#define PG15 0x8000 + +/* Port H Masks */ +#define PH0 0x0001 +#define PH1 0x0002 +#define PH2 0x0004 +#define PH3 0x0008 +#define PH4 0x0010 +#define PH5 0x0020 +#define PH6 0x0040 +#define PH7 0x0080 +#define PH8 0x0100 +#define PH9 0x0200 +#define PH10 0x0400 +#define PH11 0x0800 +#define PH12 0x1000 +#define PH13 0x2000 +#define PH14 0x4000 +#define PH15 0x8000 + +/* ******************* SERIAL PORT MASKS **************************************/ +/* SPORTx_TCR1 Masks */ +#define TSPEN 0x0001 /* Transmit Enable */ +#define ITCLK 0x0002 /* Internal Transmit Clock Select */ +#define DTYPE_NORM 0x0004 /* Data Format Normal */ +#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */ +#define DTYPE_ALAW 0x000C /* Compand Using A-Law */ +#define TLSBIT 0x0010 /* Transmit Bit Order */ +#define ITFS 0x0200 /* Internal Transmit Frame Sync Select */ +#define TFSR 0x0400 /* Transmit Frame Sync Required Select */ +#define DITFS 0x0800 /* Data-Independent Transmit Frame Sync Select */ +#define LTFS 0x1000 /* Low Transmit Frame Sync Select */ +#define LATFS 0x2000 /* Late Transmit Frame Sync Select */ +#define TCKFE 0x4000 /* Clock Falling Edge Select */ + +/* SPORTx_TCR2 Masks and Macro */ +#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */ +#define TXSE 0x0100 /* TX Secondary Enable */ +#define TSFSE 0x0200 /* Transmit Stereo Frame Sync Enable */ +#define TRFST 0x0400 /* Left/Right Order (1 = Right Channel 1st) */ + +/* SPORTx_RCR1 Masks */ +#define RSPEN 0x0001 /* Receive Enable */ +#define IRCLK 0x0002 /* Internal Receive Clock Select */ +#define DTYPE_NORM 0x0004 /* Data Format Normal */ +#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */ 
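The disclaimer above, together with the "Only Use With Logic OR While Setting Lower Order Bits" notes on SET_MSEL() and SET_SSEL(), describes two distinct usage patterns for the left-shifting field macros. Below is a minimal sketch of both, not part of this patch; it assumes the PLL_CTL/PLL_DIV address #defines from earlier in this header and a hypothetical pair of 16-bit MMIO helpers.

#include <stdint.h>

/* Hypothetical 16-bit MMIO helpers, used only for these sketches. */
static inline uint16_t mmr_read16(uint32_t addr)
{
        return *(volatile uint16_t *)(uintptr_t)addr;
}

static inline void mmr_write16(uint32_t addr, uint16_t val)
{
        *(volatile uint16_t *)(uintptr_t)addr = val;
}

/* Initialization: compose the whole register value with OR, as the
 * disclaimer recommends for macros that shift a field into position. */
static void pll_init(void)
{
        mmr_write16(PLL_DIV, CSEL_DIV2 | SET_SSEL(5)); /* CCLK = VCO/2, SCLK = VCO/5 */
        mmr_write16(PLL_CTL, SET_MSEL(20));            /* VCO = 20 * CLKIN */
}

/* Dynamic update: save the bits outside the field and OR them back,
 * otherwise writing SET_MSEL() alone would clear DF, BYPASS, etc. */
static void pll_change_msel(unsigned msel)
{
        uint16_t ctl = mmr_read16(PLL_CTL);

        ctl = (ctl & ~MSEL) | SET_MSEL(msel);
        mmr_write16(PLL_CTL, ctl);
}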
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */ +#define RLSBIT 0x0010 /* Receive Bit Order */ +#define IRFS 0x0200 /* Internal Receive Frame Sync Select */ +#define RFSR 0x0400 /* Receive Frame Sync Required Select */ +#define LRFS 0x1000 /* Low Receive Frame Sync Select */ +#define LARFS 0x2000 /* Late Receive Frame Sync Select */ +#define RCKFE 0x4000 /* Clock Falling Edge Select */ + +/* SPORTx_RCR2 Masks */ +#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */ +#define RXSE 0x0100 /* RX Secondary Enable */ +#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */ +#define RRFST 0x0400 /* Right-First Data Order */ + +/* SPORTx_STAT Masks */ +#define RXNE 0x0001 /* Receive FIFO Not Empty Status */ +#define RUVF 0x0002 /* Sticky Receive Underflow Status */ +#define ROVF 0x0004 /* Sticky Receive Overflow Status */ +#define TXF 0x0008 /* Transmit FIFO Full Status */ +#define TUVF 0x0010 /* Sticky Transmit Underflow Status */ +#define TOVF 0x0020 /* Sticky Transmit Overflow Status */ +#define TXHRE 0x0040 /* Transmit Hold Register Empty */ + +/* SPORTx_MCMC1 Macros */ +#define SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */ + +/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits */ +#define SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */ + +/* SPORTx_MCMC2 Masks */ +#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */ +#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */ +#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */ +#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */ +#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */ +#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */ +#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */ +#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */ +#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */ +#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */ +#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */ +#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */ +#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */ +#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */ +#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */ +#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */ +#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */ +#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */ +#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */ +#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */ +#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */ +#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */ +#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */ + +/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/ +/* EBIU_AMGCTL Masks */ +#define AMCKEN 0x0001 /* Enable CLKOUT */ +#define AMBEN_NONE 0x0000 /* All Banks Disabled */ +#define AMBEN_B0 0x0002 /* Enable Async Memory Bank 0 only */ +#define AMBEN_B0_B1 0x0004 /* Enable Async Memory Banks 0 & 1 only */ +#define AMBEN_B0_B1_B2 0x0006 /* Enable Async Memory Banks 0, 1, and 2 */ +#define AMBEN_ALL 0x0008 /* Enable Async Memory Banks (all) 0, 1, 2, and 3 */ + +/* EBIU_AMBCTL0 Masks */ +#define B0RDYEN 0x00000001 /* Bank 0 (B0) RDY Enable */ +#define B0RDYPOL 0x00000002 /* B0 RDY Active High */ +#define B0TT_1 0x00000004 /* B0 Transition Time (Read to Write) = 1 cycle */ +#define B0TT_2 0x00000008 /* B0 Transition Time (Read to Write) = 2 cycles */ 
+#define B0TT_3 0x0000000C /* B0 Transition Time (Read to Write) = 3 cycles */ +#define B0TT_4 0x00000000 /* B0 Transition Time (Read to Write) = 4 cycles */ +#define B0ST_1 0x00000010 /* B0 Setup Time (AOE to Read/Write) = 1 cycle */ +#define B0ST_2 0x00000020 /* B0 Setup Time (AOE to Read/Write) = 2 cycles */ +#define B0ST_3 0x00000030 /* B0 Setup Time (AOE to Read/Write) = 3 cycles */ +#define B0ST_4 0x00000000 /* B0 Setup Time (AOE to Read/Write) = 4 cycles */ +#define B0HT_1 0x00000040 /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle */ +#define B0HT_2 0x00000080 /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles */ +#define B0HT_3 0x000000C0 /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles */ +#define B0HT_0 0x00000000 /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles */ +#define B0RAT_1 0x00000100 /* B0 Read Access Time = 1 cycle */ +#define B0RAT_2 0x00000200 /* B0 Read Access Time = 2 cycles */ +#define B0RAT_3 0x00000300 /* B0 Read Access Time = 3 cycles */ +#define B0RAT_4 0x00000400 /* B0 Read Access Time = 4 cycles */ +#define B0RAT_5 0x00000500 /* B0 Read Access Time = 5 cycles */ +#define B0RAT_6 0x00000600 /* B0 Read Access Time = 6 cycles */ +#define B0RAT_7 0x00000700 /* B0 Read Access Time = 7 cycles */ +#define B0RAT_8 0x00000800 /* B0 Read Access Time = 8 cycles */ +#define B0RAT_9 0x00000900 /* B0 Read Access Time = 9 cycles */ +#define B0RAT_10 0x00000A00 /* B0 Read Access Time = 10 cycles */ +#define B0RAT_11 0x00000B00 /* B0 Read Access Time = 11 cycles */ +#define B0RAT_12 0x00000C00 /* B0 Read Access Time = 12 cycles */ +#define B0RAT_13 0x00000D00 /* B0 Read Access Time = 13 cycles */ +#define B0RAT_14 0x00000E00 /* B0 Read Access Time = 14 cycles */ +#define B0RAT_15 0x00000F00 /* B0 Read Access Time = 15 cycles */ +#define B0WAT_1 0x00001000 /* B0 Write Access Time = 1 cycle */ +#define B0WAT_2 0x00002000 /* B0 Write Access Time = 2 cycles */ +#define B0WAT_3 0x00003000 /* B0 Write Access Time = 3 cycles */ +#define B0WAT_4 0x00004000 /* B0 Write Access Time = 4 cycles */ +#define B0WAT_5 0x00005000 /* B0 Write Access Time = 5 cycles */ +#define B0WAT_6 0x00006000 /* B0 Write Access Time = 6 cycles */ +#define B0WAT_7 0x00007000 /* B0 Write Access Time = 7 cycles */ +#define B0WAT_8 0x00008000 /* B0 Write Access Time = 8 cycles */ +#define B0WAT_9 0x00009000 /* B0 Write Access Time = 9 cycles */ +#define B0WAT_10 0x0000A000 /* B0 Write Access Time = 10 cycles */ +#define B0WAT_11 0x0000B000 /* B0 Write Access Time = 11 cycles */ +#define B0WAT_12 0x0000C000 /* B0 Write Access Time = 12 cycles */ +#define B0WAT_13 0x0000D000 /* B0 Write Access Time = 13 cycles */ +#define B0WAT_14 0x0000E000 /* B0 Write Access Time = 14 cycles */ +#define B0WAT_15 0x0000F000 /* B0 Write Access Time = 15 cycles */ + +#define B1RDYEN 0x00010000 /* Bank 1 (B1) RDY Enable */ +#define B1RDYPOL 0x00020000 /* B1 RDY Active High */ +#define B1TT_1 0x00040000 /* B1 Transition Time (Read to Write) = 1 cycle */ +#define B1TT_2 0x00080000 /* B1 Transition Time (Read to Write) = 2 cycles */ +#define B1TT_3 0x000C0000 /* B1 Transition Time (Read to Write) = 3 cycles */ +#define B1TT_4 0x00000000 /* B1 Transition Time (Read to Write) = 4 cycles */ +#define B1ST_1 0x00100000 /* B1 Setup Time (AOE to Read/Write) = 1 cycle */ +#define B1ST_2 0x00200000 /* B1 Setup Time (AOE to Read/Write) = 2 cycles */ +#define B1ST_3 0x00300000 /* B1 Setup Time (AOE to Read/Write) = 3 cycles */ +#define B1ST_4 0x00000000 /* B1 Setup Time (AOE to Read/Write) = 4 cycles */ +#define B1HT_1 0x00400000 /* B1 Hold Time 
(~Read/Write to ~AOE) = 1 cycle */ +#define B1HT_2 0x00800000 /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles */ +#define B1HT_3 0x00C00000 /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles */ +#define B1HT_0 0x00000000 /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles */ +#define B1RAT_1 0x01000000 /* B1 Read Access Time = 1 cycle */ +#define B1RAT_2 0x02000000 /* B1 Read Access Time = 2 cycles */ +#define B1RAT_3 0x03000000 /* B1 Read Access Time = 3 cycles */ +#define B1RAT_4 0x04000000 /* B1 Read Access Time = 4 cycles */ +#define B1RAT_5 0x05000000 /* B1 Read Access Time = 5 cycles */ +#define B1RAT_6 0x06000000 /* B1 Read Access Time = 6 cycles */ +#define B1RAT_7 0x07000000 /* B1 Read Access Time = 7 cycles */ +#define B1RAT_8 0x08000000 /* B1 Read Access Time = 8 cycles */ +#define B1RAT_9 0x09000000 /* B1 Read Access Time = 9 cycles */ +#define B1RAT_10 0x0A000000 /* B1 Read Access Time = 10 cycles */ +#define B1RAT_11 0x0B000000 /* B1 Read Access Time = 11 cycles */ +#define B1RAT_12 0x0C000000 /* B1 Read Access Time = 12 cycles */ +#define B1RAT_13 0x0D000000 /* B1 Read Access Time = 13 cycles */ +#define B1RAT_14 0x0E000000 /* B1 Read Access Time = 14 cycles */ +#define B1RAT_15 0x0F000000 /* B1 Read Access Time = 15 cycles */ +#define B1WAT_1 0x10000000 /* B1 Write Access Time = 1 cycle */ +#define B1WAT_2 0x20000000 /* B1 Write Access Time = 2 cycles */ +#define B1WAT_3 0x30000000 /* B1 Write Access Time = 3 cycles */ +#define B1WAT_4 0x40000000 /* B1 Write Access Time = 4 cycles */ +#define B1WAT_5 0x50000000 /* B1 Write Access Time = 5 cycles */ +#define B1WAT_6 0x60000000 /* B1 Write Access Time = 6 cycles */ +#define B1WAT_7 0x70000000 /* B1 Write Access Time = 7 cycles */ +#define B1WAT_8 0x80000000 /* B1 Write Access Time = 8 cycles */ +#define B1WAT_9 0x90000000 /* B1 Write Access Time = 9 cycles */ +#define B1WAT_10 0xA0000000 /* B1 Write Access Time = 10 cycles */ +#define B1WAT_11 0xB0000000 /* B1 Write Access Time = 11 cycles */ +#define B1WAT_12 0xC0000000 /* B1 Write Access Time = 12 cycles */ +#define B1WAT_13 0xD0000000 /* B1 Write Access Time = 13 cycles */ +#define B1WAT_14 0xE0000000 /* B1 Write Access Time = 14 cycles */ +#define B1WAT_15 0xF0000000 /* B1 Write Access Time = 15 cycles */ + +/* EBIU_AMBCTL1 Masks */ +#define B2RDYEN 0x00000001 /* Bank 2 (B2) RDY Enable */ +#define B2RDYPOL 0x00000002 /* B2 RDY Active High */ +#define B2TT_1 0x00000004 /* B2 Transition Time (Read to Write) = 1 cycle */ +#define B2TT_2 0x00000008 /* B2 Transition Time (Read to Write) = 2 cycles */ +#define B2TT_3 0x0000000C /* B2 Transition Time (Read to Write) = 3 cycles */ +#define B2TT_4 0x00000000 /* B2 Transition Time (Read to Write) = 4 cycles */ +#define B2ST_1 0x00000010 /* B2 Setup Time (AOE to Read/Write) = 1 cycle */ +#define B2ST_2 0x00000020 /* B2 Setup Time (AOE to Read/Write) = 2 cycles */ +#define B2ST_3 0x00000030 /* B2 Setup Time (AOE to Read/Write) = 3 cycles */ +#define B2ST_4 0x00000000 /* B2 Setup Time (AOE to Read/Write) = 4 cycles */ +#define B2HT_1 0x00000040 /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle */ +#define B2HT_2 0x00000080 /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles */ +#define B2HT_3 0x000000C0 /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles */ +#define B2HT_0 0x00000000 /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles */ +#define B2RAT_1 0x00000100 /* B2 Read Access Time = 1 cycle */ +#define B2RAT_2 0x00000200 /* B2 Read Access Time = 2 cycles */ +#define B2RAT_3 0x00000300 /* B2 Read Access Time = 3 cycles */ +#define B2RAT_4 
0x00000400 /* B2 Read Access Time = 4 cycles */ +#define B2RAT_5 0x00000500 /* B2 Read Access Time = 5 cycles */ +#define B2RAT_6 0x00000600 /* B2 Read Access Time = 6 cycles */ +#define B2RAT_7 0x00000700 /* B2 Read Access Time = 7 cycles */ +#define B2RAT_8 0x00000800 /* B2 Read Access Time = 8 cycles */ +#define B2RAT_9 0x00000900 /* B2 Read Access Time = 9 cycles */ +#define B2RAT_10 0x00000A00 /* B2 Read Access Time = 10 cycles */ +#define B2RAT_11 0x00000B00 /* B2 Read Access Time = 11 cycles */ +#define B2RAT_12 0x00000C00 /* B2 Read Access Time = 12 cycles */ +#define B2RAT_13 0x00000D00 /* B2 Read Access Time = 13 cycles */ +#define B2RAT_14 0x00000E00 /* B2 Read Access Time = 14 cycles */ +#define B2RAT_15 0x00000F00 /* B2 Read Access Time = 15 cycles */ +#define B2WAT_1 0x00001000 /* B2 Write Access Time = 1 cycle */ +#define B2WAT_2 0x00002000 /* B2 Write Access Time = 2 cycles */ +#define B2WAT_3 0x00003000 /* B2 Write Access Time = 3 cycles */ +#define B2WAT_4 0x00004000 /* B2 Write Access Time = 4 cycles */ +#define B2WAT_5 0x00005000 /* B2 Write Access Time = 5 cycles */ +#define B2WAT_6 0x00006000 /* B2 Write Access Time = 6 cycles */ +#define B2WAT_7 0x00007000 /* B2 Write Access Time = 7 cycles */ +#define B2WAT_8 0x00008000 /* B2 Write Access Time = 8 cycles */ +#define B2WAT_9 0x00009000 /* B2 Write Access Time = 9 cycles */ +#define B2WAT_10 0x0000A000 /* B2 Write Access Time = 10 cycles */ +#define B2WAT_11 0x0000B000 /* B2 Write Access Time = 11 cycles */ +#define B2WAT_12 0x0000C000 /* B2 Write Access Time = 12 cycles */ +#define B2WAT_13 0x0000D000 /* B2 Write Access Time = 13 cycles */ +#define B2WAT_14 0x0000E000 /* B2 Write Access Time = 14 cycles */ +#define B2WAT_15 0x0000F000 /* B2 Write Access Time = 15 cycles */ + +#define B3RDYEN 0x00010000 /* Bank 3 (B3) RDY Enable */ +#define B3RDYPOL 0x00020000 /* B3 RDY Active High */ +#define B3TT_1 0x00040000 /* B3 Transition Time (Read to Write) = 1 cycle */ +#define B3TT_2 0x00080000 /* B3 Transition Time (Read to Write) = 2 cycles */ +#define B3TT_3 0x000C0000 /* B3 Transition Time (Read to Write) = 3 cycles */ +#define B3TT_4 0x00000000 /* B3 Transition Time (Read to Write) = 4 cycles */ +#define B3ST_1 0x00100000 /* B3 Setup Time (AOE to Read/Write) = 1 cycle */ +#define B3ST_2 0x00200000 /* B3 Setup Time (AOE to Read/Write) = 2 cycles */ +#define B3ST_3 0x00300000 /* B3 Setup Time (AOE to Read/Write) = 3 cycles */ +#define B3ST_4 0x00000000 /* B3 Setup Time (AOE to Read/Write) = 4 cycles */ +#define B3HT_1 0x00400000 /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle */ +#define B3HT_2 0x00800000 /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles */ +#define B3HT_3 0x00C00000 /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles */ +#define B3HT_0 0x00000000 /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles */ +#define B3RAT_1 0x01000000 /* B3 Read Access Time = 1 cycle */ +#define B3RAT_2 0x02000000 /* B3 Read Access Time = 2 cycles */ +#define B3RAT_3 0x03000000 /* B3 Read Access Time = 3 cycles */ +#define B3RAT_4 0x04000000 /* B3 Read Access Time = 4 cycles */ +#define B3RAT_5 0x05000000 /* B3 Read Access Time = 5 cycles */ +#define B3RAT_6 0x06000000 /* B3 Read Access Time = 6 cycles */ +#define B3RAT_7 0x07000000 /* B3 Read Access Time = 7 cycles */ +#define B3RAT_8 0x08000000 /* B3 Read Access Time = 8 cycles */ +#define B3RAT_9 0x09000000 /* B3 Read Access Time = 9 cycles */ +#define B3RAT_10 0x0A000000 /* B3 Read Access Time = 10 cycles */ +#define B3RAT_11 0x0B000000 /* B3 Read Access Time = 11 cycles */ 
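All of the bank 0/1 timing options above land in the single 32-bit EBIU_AMBCTL0 register, so a bank is normally configured by ORing one choice from each field. The sketch below is illustrative only, under the same assumptions as the earlier one (address #defines from earlier in this header, hypothetical MMIO helpers); the particular cycle counts are arbitrary examples.

/* Hypothetical 32-bit MMIO helper to match mmr_write16() above. */
static inline void mmr_write32(uint32_t addr, uint32_t val)
{
        *(volatile uint32_t *)(uintptr_t)addr = val;
}

static void async_banks_init(void)
{
        /* One option per field for banks 0 and 1; unlisted fields stay 0. */
        mmr_write32(EBIU_AMBCTL0,
                    B0RDYEN | B0ST_2 | B0HT_1 | B0RAT_8 | B0WAT_8 |
                    B1ST_2  | B1HT_1 | B1RAT_8 | B1WAT_8);

        /* EBIU_AMGCTL is 16 bits: turn on CLKOUT and enable banks 0 and 1. */
        mmr_write16(EBIU_AMGCTL, AMCKEN | AMBEN_B0_B1);
}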
+#define B3RAT_12 0x0C000000 /* B3 Read Access Time = 12 cycles */ +#define B3RAT_13 0x0D000000 /* B3 Read Access Time = 13 cycles */ +#define B3RAT_14 0x0E000000 /* B3 Read Access Time = 14 cycles */ +#define B3RAT_15 0x0F000000 /* B3 Read Access Time = 15 cycles */ +#define B3WAT_1 0x10000000 /* B3 Write Access Time = 1 cycle */ +#define B3WAT_2 0x20000000 /* B3 Write Access Time = 2 cycles */ +#define B3WAT_3 0x30000000 /* B3 Write Access Time = 3 cycles */ +#define B3WAT_4 0x40000000 /* B3 Write Access Time = 4 cycles */ +#define B3WAT_5 0x50000000 /* B3 Write Access Time = 5 cycles */ +#define B3WAT_6 0x60000000 /* B3 Write Access Time = 6 cycles */ +#define B3WAT_7 0x70000000 /* B3 Write Access Time = 7 cycles */ +#define B3WAT_8 0x80000000 /* B3 Write Access Time = 8 cycles */ +#define B3WAT_9 0x90000000 /* B3 Write Access Time = 9 cycles */ +#define B3WAT_10 0xA0000000 /* B3 Write Access Time = 10 cycles */ +#define B3WAT_11 0xB0000000 /* B3 Write Access Time = 11 cycles */ +#define B3WAT_12 0xC0000000 /* B3 Write Access Time = 12 cycles */ +#define B3WAT_13 0xD0000000 /* B3 Write Access Time = 13 cycles */ +#define B3WAT_14 0xE0000000 /* B3 Write Access Time = 14 cycles */ +#define B3WAT_15 0xF0000000 /* B3 Write Access Time = 15 cycles */ + +/* ********************** SDRAM CONTROLLER MASKS **********************************************/ +/* EBIU_SDGCTL Masks */ +#define SCTLE 0x00000001 /* Enable SDRAM Signals */ +#define CL_2 0x00000008 /* SDRAM CAS Latency = 2 cycles */ +#define CL_3 0x0000000C /* SDRAM CAS Latency = 3 cycles */ +#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */ +#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */ +#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */ +#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */ +#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */ +#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */ +#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */ +#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */ +#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */ +#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */ +#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */ +#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */ +#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */ +#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */ +#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */ +#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */ +#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */ +#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */ +#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */ +#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */ +#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */ +#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */ +#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */ +#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */ +#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */ +#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */ +#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */ +#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */ +#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */ +#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */ +#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */ +#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */ +#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */ +#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */ +#define TWR_3 0x00180000 /* SDRAM tWR = 3 
cycles */ +#define PUPSD 0x00200000 /* Power-Up Start Delay (15 SCLK Cycles Delay) */ +#define PSM 0x00400000 /* Power-Up Sequence (Mode Register Before/After* Refresh) */ +#define PSS 0x00800000 /* Enable Power-Up Sequence on Next SDRAM Access */ +#define SRFS 0x01000000 /* Enable SDRAM Self-Refresh Mode */ +#define EBUFE 0x02000000 /* Enable External Buffering Timing */ +#define FBBRW 0x04000000 /* Enable Fast Back-To-Back Read To Write */ +#define EMREN 0x10000000 /* Extended Mode Register Enable */ +#define TCSR 0x20000000 /* Temp-Compensated Self-Refresh Value (85/45* Deg C) */ +#define CDDBG 0x40000000 /* Tristate SDRAM Controls During Bus Grant */ + +/* EBIU_SDBCTL Masks */ +#define EBE 0x0001 /* Enable SDRAM External Bank */ +#define EBSZ_16 0x0000 /* SDRAM External Bank Size = 16MB */ +#define EBSZ_32 0x0002 /* SDRAM External Bank Size = 32MB */ +#define EBSZ_64 0x0004 /* SDRAM External Bank Size = 64MB */ +#define EBSZ_128 0x0006 /* SDRAM External Bank Size = 128MB */ +#define EBCAW_8 0x0000 /* SDRAM External Bank Column Address Width = 8 Bits */ +#define EBCAW_9 0x0010 /* SDRAM External Bank Column Address Width = 9 Bits */ +#define EBCAW_10 0x0020 /* SDRAM External Bank Column Address Width = 10 Bits */ +#define EBCAW_11 0x0030 /* SDRAM External Bank Column Address Width = 11 Bits */ + +/* EBIU_SDSTAT Masks */ +#define SDCI 0x0001 /* SDRAM Controller Idle */ +#define SDSRA 0x0002 /* SDRAM Self-Refresh Active */ +#define SDPUA 0x0004 /* SDRAM Power-Up Active */ +#define SDRS 0x0008 /* SDRAM Will Power-Up On Next Access */ +#define SDEASE 0x0010 /* SDRAM EAB Sticky Error Status */ +#define BGSTAT 0x0020 /* Bus Grant Status */ + +/* ************************** DMA CONTROLLER MASKS ********************************/ +/* DMAx_CONFIG, MDMA_yy_CONFIG Masks */ +#define DMAEN 0x0001 /* DMA Channel Enable */ +#define WNR 0x0002 /* Channel Direction (W/R*) */ +#define WDSIZE_8 0x0000 /* Transfer Word Size = 8 */ +#define WDSIZE_16 0x0004 /* Transfer Word Size = 16 */ +#define WDSIZE_32 0x0008 /* Transfer Word Size = 32 */ +#define DMA2D 0x0010 /* DMA Mode (2D/1D*) */ +#define RESTART 0x0020 /* DMA Buffer Clear */ +#define DI_SEL 0x0040 /* Data Interrupt Timing Select */ +#define DI_EN 0x0080 /* Data Interrupt Enable */ +#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */ +#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */ +#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */ +#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */ +#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */ +#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */ +#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */ +#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */ +#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */ +#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */ +#define NDSIZE 0x0900 /* Next Descriptor Size */ + +#define DMAFLOW 0x7000 /* Flow Control */ +#define DMAFLOW_STOP 0x0000 /* Stop Mode */ +#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */ +#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */ +#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */ +#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */ + +/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */ +#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */ +#define PMAP 0xF000 /* Peripheral Mapped To This Channel */ +#define PMAP_PPI 0x0000 /* PPI Port DMA */ +#define PMAP_EMACRX 0x1000 /* Ethernet Receive DMA */ +#define PMAP_EMACTX 0x2000 /* 
Ethernet Transmit DMA */ +#define PMAP_SPORT0RX 0x3000 /* SPORT0 Receive DMA */ +#define PMAP_SPORT0TX 0x4000 /* SPORT0 Transmit DMA */ +#define PMAP_SPORT1RX 0x5000 /* SPORT1 Receive DMA */ +#define PMAP_SPORT1TX 0x6000 /* SPORT1 Transmit DMA */ +#define PMAP_SPI 0x7000 /* SPI Port DMA */ +#define PMAP_UART0RX 0x8000 /* UART0 Port Receive DMA */ +#define PMAP_UART0TX 0x9000 /* UART0 Port Transmit DMA */ +#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */ +#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */ + +/* DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS Masks */ +#define DMA_DONE 0x0001 /* DMA Completion Interrupt Status */ +#define DMA_ERR 0x0002 /* DMA Error Interrupt Status */ +#define DFETCH 0x0004 /* DMA Descriptor Fetch Indicator */ +#define DMA_RUN 0x0008 /* DMA Channel Running Indicator */ + +/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/ +/* PPI_CONTROL Masks */ +#define PORT_EN 0x0001 /* PPI Port Enable */ +#define PORT_DIR 0x0002 /* PPI Port Direction */ +#define XFR_TYPE 0x000C /* PPI Transfer Type */ +#define PORT_CFG 0x0030 /* PPI Port Configuration */ +#define FLD_SEL 0x0040 /* PPI Active Field Select */ +#define PACK_EN 0x0080 /* PPI Packing Mode */ +#define DMA32 0x0100 /* PPI 32-bit DMA Enable */ +#define SKIP_EN 0x0200 /* PPI Skip Element Enable */ +#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */ +#define DLENGTH 0x3800 /* PPI Data Length */ +#define DLEN_8 0x0000 /* Data Length = 8 Bits */ +#define DLEN_10 0x0800 /* Data Length = 10 Bits */ +#define DLEN_11 0x1000 /* Data Length = 11 Bits */ +#define DLEN_12 0x1800 /* Data Length = 12 Bits */ +#define DLEN_13 0x2000 /* Data Length = 13 Bits */ +#define DLEN_14 0x2800 /* Data Length = 14 Bits */ +#define DLEN_15 0x3000 /* Data Length = 15 Bits */ +#define DLEN_16 0x3800 /* Data Length = 16 Bits */ +#define POLC 0x4000 /* PPI Clock Polarity */ +#define POLS 0x8000 /* PPI Frame Sync Polarity */ + +/* PPI_STATUS Masks */ +#define FLD 0x0400 /* Field Indicator */ +#define FT_ERR 0x0800 /* Frame Track Error */ +#define OVR 0x1000 /* FIFO Overflow Error */ +#define UNDR 0x2000 /* FIFO Underrun Error */ +#define ERR_DET 0x4000 /* Error Detected Indicator */ +#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */ + +/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ***********************/ +/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */ +#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */ +#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */ + +/* TWI_PRESCALE Masks */ +#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */ +#define TWI_ENA 0x0080 /* TWI Enable */ +#define SCCB 0x0200 /* SCCB Compatibility Enable */ + +/* TWI_SLAVE_CTRL Masks */ +#define SEN 0x0001 /* Slave Enable */ +#define SADD_LEN 0x0002 /* Slave Address Length */ +#define STDVAL 0x0004 /* Slave Transmit Data Valid */ +#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */ +#define GEN 0x0010 /* General Call Adrress Matching Enabled */ + +/* TWI_SLAVE_STAT Masks */ +#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */ +#define GCALL 0x0002 /* General Call Indicator */ + +/* TWI_MASTER_CTRL Masks */ +#define MEN 0x0001 /* Master Mode Enable */ +#define MADD_LEN 0x0002 /* Master Address Length */ +#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */ +#define FAST 0x0008 /* Use Fast Mode Timing Specs */ +#define STOP 0x0010 /* Issue Stop Condition */ +#define RSTART 0x0020 /* Repeat Start or Stop* At End Of 
Transfer */ +#define DCNT 0x3FC0 /* Data Bytes To Transfer */ +#define SDAOVR 0x4000 /* Serial Data Override */ +#define SCLOVR 0x8000 /* Serial Clock Override */ + +/* TWI_MASTER_STAT Masks */ +#define MPROG 0x0001 /* Master Transfer In Progress */ +#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */ +#define ANAK 0x0004 /* Address Not Acknowledged */ +#define DNAK 0x0008 /* Data Not Acknowledged */ +#define BUFRDERR 0x0010 /* Buffer Read Error */ +#define BUFWRERR 0x0020 /* Buffer Write Error */ +#define SDASEN 0x0040 /* Serial Data Sense */ +#define SCLSEN 0x0080 /* Serial Clock Sense */ +#define BUSBUSY 0x0100 /* Bus Busy Indicator */ + +/* TWI_INT_SRC and TWI_INT_ENABLE Masks */ +#define SINIT 0x0001 /* Slave Transfer Initiated */ +#define SCOMP 0x0002 /* Slave Transfer Complete */ +#define SERR 0x0004 /* Slave Transfer Error */ +#define SOVF 0x0008 /* Slave Overflow */ +#define MCOMP 0x0010 /* Master Transfer Complete */ +#define MERR 0x0020 /* Master Transfer Error */ +#define XMTSERV 0x0040 /* Transmit FIFO Service */ +#define RCVSERV 0x0080 /* Receive FIFO Service */ + +/* TWI_FIFO_CTRL Masks */ +#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */ +#define RCVFLUSH 0x0002 /* Receive Buffer Flush */ +#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */ +#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */ + +/* TWI_FIFO_STAT Masks */ +#define XMTSTAT 0x0003 /* Transmit FIFO Status */ +#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */ +#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */ +#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */ + +#define RCVSTAT 0x000C /* Receive FIFO Status */ +#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */ +#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */ +#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */ + +/* ************ CONTROLLER AREA NETWORK (CAN) MASKS ***************/ +/* CAN_CONTROL Masks */ +#define SRS 0x0001 /* Software Reset */ +#define DNM 0x0002 /* Device Net Mode */ +#define ABO 0x0004 /* Auto-Bus On Enable */ +#define TXPRIO 0x0008 /* TX Priority (Priority/Mailbox*) */ +#define WBA 0x0010 /* Wake-Up On CAN Bus Activity Enable */ +#define SMR 0x0020 /* Sleep Mode Request */ +#define CSR 0x0040 /* CAN Suspend Mode Request */ +#define CCR 0x0080 /* CAN Configuration Mode Request */ + +/* CAN_STATUS Masks */ +#define WT 0x0001 /* TX Warning Flag */ +#define WR 0x0002 /* RX Warning Flag */ +#define EP 0x0004 /* Error Passive Mode */ +#define EBO 0x0008 /* Error Bus Off Mode */ +#define SMA 0x0020 /* Sleep Mode Acknowledge */ +#define CSA 0x0040 /* Suspend Mode Acknowledge */ +#define CCA 0x0080 /* Configuration Mode Acknowledge */ +#define MBPTR 0x1F00 /* Mailbox Pointer */ +#define TRM 0x4000 /* Transmit Mode */ +#define REC 0x8000 /* Receive Mode */ + +/* CAN_CLOCK Masks */ +#define BRP 0x03FF /* Bit-Rate Pre-Scaler */ + +/* CAN_TIMING Masks */ +#define TSEG1 0x000F /* Time Segment 1 */ +#define TSEG2 0x0070 /* Time Segment 2 */ +#define SAM 0x0080 /* Sampling */ +#define SJW 0x0300 /* Synchronization Jump Width */ + +/* CAN_DEBUG Masks */ +#define DEC 0x0001 /* Disable CAN Error Counters */ +#define DRI 0x0002 /* Disable CAN RX Input */ +#define DTO 0x0004 /* Disable CAN TX Output */ +#define DIL 0x0008 /* Disable CAN Internal Loop */ +#define MAA 0x0010 /* Mode Auto-Acknowledge Enable */ +#define MRB 0x0020 /* Mode Read Back Enable */ +#define CDE 0x8000 /* CAN Debug Enable */ + +/* CAN_CEC Masks */ +#define RXECNT 0x00FF /* Receive 
Error Counter */ +#define TXECNT 0xFF00 /* Transmit Error Counter */ + +/* CAN_INTR Masks */ +#define MBRIF 0x0001 /* Mailbox Receive Interrupt */ +#define MBTIF 0x0002 /* Mailbox Transmit Interrupt */ +#define GIRQ 0x0004 /* Global Interrupt */ +#define SMACK 0x0008 /* Sleep Mode Acknowledge */ +#define CANTX 0x0040 /* CAN TX Bus Value */ +#define CANRX 0x0080 /* CAN RX Bus Value */ + +/* CAN_MBxx_ID1 and CAN_MBxx_ID0 Masks */ +#define DFC 0xFFFF /* Data Filtering Code (If Enabled) (ID0) */ +#define EXTID_LO 0xFFFF /* Lower 16 Bits of Extended Identifier (ID0) */ +#define EXTID_HI 0x0003 /* Upper 2 Bits of Extended Identifier (ID1) */ +#define BASEID 0x1FFC /* Base Identifier */ +#define IDE 0x2000 /* Identifier Extension */ +#define RTR 0x4000 /* Remote Frame Transmission Request */ +#define AME 0x8000 /* Acceptance Mask Enable */ + +/* CAN_MBxx_TIMESTAMP Masks */ +#define TSV 0xFFFF /* Timestamp */ + +/* CAN_MBxx_LENGTH Masks */ +#define DLC 0x000F /* Data Length Code */ + +/* CAN_AMxxH and CAN_AMxxL Masks */ +#define DFM 0xFFFF /* Data Field Mask (If Enabled) (CAN_AMxxL) */ +#define EXTID_LO 0xFFFF /* Lower 16 Bits of Extended Identifier (CAN_AMxxL) */ +#define EXTID_HI 0x0003 /* Upper 2 Bits of Extended Identifier (CAN_AMxxH) */ +#define BASEID 0x1FFC /* Base Identifier */ +#define AMIDE 0x2000 /* Acceptance Mask ID Extension Enable */ +#define FMD 0x4000 /* Full Mask Data Field Enable */ +#define FDF 0x8000 /* Filter On Data Field Enable */ + +/* CAN_MC1 Masks */ +#define MC0 0x0001 /* Enable Mailbox 0 */ +#define MC1 0x0002 /* Enable Mailbox 1 */ +#define MC2 0x0004 /* Enable Mailbox 2 */ +#define MC3 0x0008 /* Enable Mailbox 3 */ +#define MC4 0x0010 /* Enable Mailbox 4 */ +#define MC5 0x0020 /* Enable Mailbox 5 */ +#define MC6 0x0040 /* Enable Mailbox 6 */ +#define MC7 0x0080 /* Enable Mailbox 7 */ +#define MC8 0x0100 /* Enable Mailbox 8 */ +#define MC9 0x0200 /* Enable Mailbox 9 */ +#define MC10 0x0400 /* Enable Mailbox 10 */ +#define MC11 0x0800 /* Enable Mailbox 11 */ +#define MC12 0x1000 /* Enable Mailbox 12 */ +#define MC13 0x2000 /* Enable Mailbox 13 */ +#define MC14 0x4000 /* Enable Mailbox 14 */ +#define MC15 0x8000 /* Enable Mailbox 15 */ + +/* CAN_MC2 Masks */ +#define MC16 0x0001 /* Enable Mailbox 16 */ +#define MC17 0x0002 /* Enable Mailbox 17 */ +#define MC18 0x0004 /* Enable Mailbox 18 */ +#define MC19 0x0008 /* Enable Mailbox 19 */ +#define MC20 0x0010 /* Enable Mailbox 20 */ +#define MC21 0x0020 /* Enable Mailbox 21 */ +#define MC22 0x0040 /* Enable Mailbox 22 */ +#define MC23 0x0080 /* Enable Mailbox 23 */ +#define MC24 0x0100 /* Enable Mailbox 24 */ +#define MC25 0x0200 /* Enable Mailbox 25 */ +#define MC26 0x0400 /* Enable Mailbox 26 */ +#define MC27 0x0800 /* Enable Mailbox 27 */ +#define MC28 0x1000 /* Enable Mailbox 28 */ +#define MC29 0x2000 /* Enable Mailbox 29 */ +#define MC30 0x4000 /* Enable Mailbox 30 */ +#define MC31 0x8000 /* Enable Mailbox 31 */ + +/* CAN_MD1 Masks */ +#define MD0 0x0001 /* Enable Mailbox 0 For Receive */ +#define MD1 0x0002 /* Enable Mailbox 1 For Receive */ +#define MD2 0x0004 /* Enable Mailbox 2 For Receive */ +#define MD3 0x0008 /* Enable Mailbox 3 For Receive */ +#define MD4 0x0010 /* Enable Mailbox 4 For Receive */ +#define MD5 0x0020 /* Enable Mailbox 5 For Receive */ +#define MD6 0x0040 /* Enable Mailbox 6 For Receive */ +#define MD7 0x0080 /* Enable Mailbox 7 For Receive */ +#define MD8 0x0100 /* Enable Mailbox 8 For Receive */ +#define MD9 0x0200 /* Enable Mailbox 9 For Receive */ +#define MD10 0x0400 /* Enable Mailbox 10 
For Receive */ +#define MD11 0x0800 /* Enable Mailbox 11 For Receive */ +#define MD12 0x1000 /* Enable Mailbox 12 For Receive */ +#define MD13 0x2000 /* Enable Mailbox 13 For Receive */ +#define MD14 0x4000 /* Enable Mailbox 14 For Receive */ +#define MD15 0x8000 /* Enable Mailbox 15 For Receive */ + +/* CAN_MD2 Masks */ +#define MD16 0x0001 /* Enable Mailbox 16 For Receive */ +#define MD17 0x0002 /* Enable Mailbox 17 For Receive */ +#define MD18 0x0004 /* Enable Mailbox 18 For Receive */ +#define MD19 0x0008 /* Enable Mailbox 19 For Receive */ +#define MD20 0x0010 /* Enable Mailbox 20 For Receive */ +#define MD21 0x0020 /* Enable Mailbox 21 For Receive */ +#define MD22 0x0040 /* Enable Mailbox 22 For Receive */ +#define MD23 0x0080 /* Enable Mailbox 23 For Receive */ +#define MD24 0x0100 /* Enable Mailbox 24 For Receive */ +#define MD25 0x0200 /* Enable Mailbox 25 For Receive */ +#define MD26 0x0400 /* Enable Mailbox 26 For Receive */ +#define MD27 0x0800 /* Enable Mailbox 27 For Receive */ +#define MD28 0x1000 /* Enable Mailbox 28 For Receive */ +#define MD29 0x2000 /* Enable Mailbox 29 For Receive */ +#define MD30 0x4000 /* Enable Mailbox 30 For Receive */ +#define MD31 0x8000 /* Enable Mailbox 31 For Receive */ + +/* CAN_RMP1 Masks */ +#define RMP0 0x0001 /* RX Message Pending In Mailbox 0 */ +#define RMP1 0x0002 /* RX Message Pending In Mailbox 1 */ +#define RMP2 0x0004 /* RX Message Pending In Mailbox 2 */ +#define RMP3 0x0008 /* RX Message Pending In Mailbox 3 */ +#define RMP4 0x0010 /* RX Message Pending In Mailbox 4 */ +#define RMP5 0x0020 /* RX Message Pending In Mailbox 5 */ +#define RMP6 0x0040 /* RX Message Pending In Mailbox 6 */ +#define RMP7 0x0080 /* RX Message Pending In Mailbox 7 */ +#define RMP8 0x0100 /* RX Message Pending In Mailbox 8 */ +#define RMP9 0x0200 /* RX Message Pending In Mailbox 9 */ +#define RMP10 0x0400 /* RX Message Pending In Mailbox 10 */ +#define RMP11 0x0800 /* RX Message Pending In Mailbox 11 */ +#define RMP12 0x1000 /* RX Message Pending In Mailbox 12 */ +#define RMP13 0x2000 /* RX Message Pending In Mailbox 13 */ +#define RMP14 0x4000 /* RX Message Pending In Mailbox 14 */ +#define RMP15 0x8000 /* RX Message Pending In Mailbox 15 */ + +/* CAN_RMP2 Masks */ +#define RMP16 0x0001 /* RX Message Pending In Mailbox 16 */ +#define RMP17 0x0002 /* RX Message Pending In Mailbox 17 */ +#define RMP18 0x0004 /* RX Message Pending In Mailbox 18 */ +#define RMP19 0x0008 /* RX Message Pending In Mailbox 19 */ +#define RMP20 0x0010 /* RX Message Pending In Mailbox 20 */ +#define RMP21 0x0020 /* RX Message Pending In Mailbox 21 */ +#define RMP22 0x0040 /* RX Message Pending In Mailbox 22 */ +#define RMP23 0x0080 /* RX Message Pending In Mailbox 23 */ +#define RMP24 0x0100 /* RX Message Pending In Mailbox 24 */ +#define RMP25 0x0200 /* RX Message Pending In Mailbox 25 */ +#define RMP26 0x0400 /* RX Message Pending In Mailbox 26 */ +#define RMP27 0x0800 /* RX Message Pending In Mailbox 27 */ +#define RMP28 0x1000 /* RX Message Pending In Mailbox 28 */ +#define RMP29 0x2000 /* RX Message Pending In Mailbox 29 */ +#define RMP30 0x4000 /* RX Message Pending In Mailbox 30 */ +#define RMP31 0x8000 /* RX Message Pending In Mailbox 31 */ + +/* CAN_RML1 Masks */ +#define RML0 0x0001 /* RX Message Lost In Mailbox 0 */ +#define RML1 0x0002 /* RX Message Lost In Mailbox 1 */ +#define RML2 0x0004 /* RX Message Lost In Mailbox 2 */ +#define RML3 0x0008 /* RX Message Lost In Mailbox 3 */ +#define RML4 0x0010 /* RX Message Lost In Mailbox 4 */ +#define RML5 0x0020 /* RX Message 
Lost In Mailbox 5 */ +#define RML6 0x0040 /* RX Message Lost In Mailbox 6 */ +#define RML7 0x0080 /* RX Message Lost In Mailbox 7 */ +#define RML8 0x0100 /* RX Message Lost In Mailbox 8 */ +#define RML9 0x0200 /* RX Message Lost In Mailbox 9 */ +#define RML10 0x0400 /* RX Message Lost In Mailbox 10 */ +#define RML11 0x0800 /* RX Message Lost In Mailbox 11 */ +#define RML12 0x1000 /* RX Message Lost In Mailbox 12 */ +#define RML13 0x2000 /* RX Message Lost In Mailbox 13 */ +#define RML14 0x4000 /* RX Message Lost In Mailbox 14 */ +#define RML15 0x8000 /* RX Message Lost In Mailbox 15 */ + +/* CAN_RML2 Masks */ +#define RML16 0x0001 /* RX Message Lost In Mailbox 16 */ +#define RML17 0x0002 /* RX Message Lost In Mailbox 17 */ +#define RML18 0x0004 /* RX Message Lost In Mailbox 18 */ +#define RML19 0x0008 /* RX Message Lost In Mailbox 19 */ +#define RML20 0x0010 /* RX Message Lost In Mailbox 20 */ +#define RML21 0x0020 /* RX Message Lost In Mailbox 21 */ +#define RML22 0x0040 /* RX Message Lost In Mailbox 22 */ +#define RML23 0x0080 /* RX Message Lost In Mailbox 23 */ +#define RML24 0x0100 /* RX Message Lost In Mailbox 24 */ +#define RML25 0x0200 /* RX Message Lost In Mailbox 25 */ +#define RML26 0x0400 /* RX Message Lost In Mailbox 26 */ +#define RML27 0x0800 /* RX Message Lost In Mailbox 27 */ +#define RML28 0x1000 /* RX Message Lost In Mailbox 28 */ +#define RML29 0x2000 /* RX Message Lost In Mailbox 29 */ +#define RML30 0x4000 /* RX Message Lost In Mailbox 30 */ +#define RML31 0x8000 /* RX Message Lost In Mailbox 31 */ + +/* CAN_OPSS1 Masks */ +#define OPSS0 0x0001 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 0 */ +#define OPSS1 0x0002 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 1 */ +#define OPSS2 0x0004 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 2 */ +#define OPSS3 0x0008 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 3 */ +#define OPSS4 0x0010 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 4 */ +#define OPSS5 0x0020 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 5 */ +#define OPSS6 0x0040 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 6 */ +#define OPSS7 0x0080 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 7 */ +#define OPSS8 0x0100 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 8 */ +#define OPSS9 0x0200 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 9 */ +#define OPSS10 0x0400 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 10 */ +#define OPSS11 0x0800 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 11 */ +#define OPSS12 0x1000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 12 */ +#define OPSS13 0x2000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 13 */ +#define OPSS14 0x4000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 14 */ +#define OPSS15 0x8000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 15 */ + +/* CAN_OPSS2 Masks */ +#define OPSS16 0x0001 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 16 */ +#define OPSS17 0x0002 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 17 */ +#define OPSS18 0x0004 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 18 */ +#define OPSS19 0x0008 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 19 */ +#define OPSS20 0x0010 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 20 */ +#define OPSS21 0x0020 /* 
Enable RX Overwrite Protection or TX Single-Shot For Mailbox 21 */ +#define OPSS22 0x0040 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 22 */ +#define OPSS23 0x0080 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 23 */ +#define OPSS24 0x0100 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 24 */ +#define OPSS25 0x0200 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 25 */ +#define OPSS26 0x0400 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 26 */ +#define OPSS27 0x0800 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 27 */ +#define OPSS28 0x1000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 28 */ +#define OPSS29 0x2000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 29 */ +#define OPSS30 0x4000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 30 */ +#define OPSS31 0x8000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 31 */ + +/* CAN_TRR1 Masks */ +#define TRR0 0x0001 /* Deny But Don't Lock Access To Mailbox 0 */ +#define TRR1 0x0002 /* Deny But Don't Lock Access To Mailbox 1 */ +#define TRR2 0x0004 /* Deny But Don't Lock Access To Mailbox 2 */ +#define TRR3 0x0008 /* Deny But Don't Lock Access To Mailbox 3 */ +#define TRR4 0x0010 /* Deny But Don't Lock Access To Mailbox 4 */ +#define TRR5 0x0020 /* Deny But Don't Lock Access To Mailbox 5 */ +#define TRR6 0x0040 /* Deny But Don't Lock Access To Mailbox 6 */ +#define TRR7 0x0080 /* Deny But Don't Lock Access To Mailbox 7 */ +#define TRR8 0x0100 /* Deny But Don't Lock Access To Mailbox 8 */ +#define TRR9 0x0200 /* Deny But Don't Lock Access To Mailbox 9 */ +#define TRR10 0x0400 /* Deny But Don't Lock Access To Mailbox 10 */ +#define TRR11 0x0800 /* Deny But Don't Lock Access To Mailbox 11 */ +#define TRR12 0x1000 /* Deny But Don't Lock Access To Mailbox 12 */ +#define TRR13 0x2000 /* Deny But Don't Lock Access To Mailbox 13 */ +#define TRR14 0x4000 /* Deny But Don't Lock Access To Mailbox 14 */ +#define TRR15 0x8000 /* Deny But Don't Lock Access To Mailbox 15 */ + +/* CAN_TRR2 Masks */ +#define TRR16 0x0001 /* Deny But Don't Lock Access To Mailbox 16 */ +#define TRR17 0x0002 /* Deny But Don't Lock Access To Mailbox 17 */ +#define TRR18 0x0004 /* Deny But Don't Lock Access To Mailbox 18 */ +#define TRR19 0x0008 /* Deny But Don't Lock Access To Mailbox 19 */ +#define TRR20 0x0010 /* Deny But Don't Lock Access To Mailbox 20 */ +#define TRR21 0x0020 /* Deny But Don't Lock Access To Mailbox 21 */ +#define TRR22 0x0040 /* Deny But Don't Lock Access To Mailbox 22 */ +#define TRR23 0x0080 /* Deny But Don't Lock Access To Mailbox 23 */ +#define TRR24 0x0100 /* Deny But Don't Lock Access To Mailbox 24 */ +#define TRR25 0x0200 /* Deny But Don't Lock Access To Mailbox 25 */ +#define TRR26 0x0400 /* Deny But Don't Lock Access To Mailbox 26 */ +#define TRR27 0x0800 /* Deny But Don't Lock Access To Mailbox 27 */ +#define TRR28 0x1000 /* Deny But Don't Lock Access To Mailbox 28 */ +#define TRR29 0x2000 /* Deny But Don't Lock Access To Mailbox 29 */ +#define TRR30 0x4000 /* Deny But Don't Lock Access To Mailbox 30 */ +#define TRR31 0x8000 /* Deny But Don't Lock Access To Mailbox 31 */ + +/* CAN_TRS1 Masks */ +#define TRS0 0x0001 /* Remote Frame Request For Mailbox 0 */ +#define TRS1 0x0002 /* Remote Frame Request For Mailbox 1 */ +#define TRS2 0x0004 /* Remote Frame Request For Mailbox 2 */ +#define TRS3 0x0008 /* Remote Frame Request For Mailbox 3 */ +#define TRS4 0x0010 /* Remote Frame Request For 
Mailbox 4 */ +#define TRS5 0x0020 /* Remote Frame Request For Mailbox 5 */ +#define TRS6 0x0040 /* Remote Frame Request For Mailbox 6 */ +#define TRS7 0x0080 /* Remote Frame Request For Mailbox 7 */ +#define TRS8 0x0100 /* Remote Frame Request For Mailbox 8 */ +#define TRS9 0x0200 /* Remote Frame Request For Mailbox 9 */ +#define TRS10 0x0400 /* Remote Frame Request For Mailbox 10 */ +#define TRS11 0x0800 /* Remote Frame Request For Mailbox 11 */ +#define TRS12 0x1000 /* Remote Frame Request For Mailbox 12 */ +#define TRS13 0x2000 /* Remote Frame Request For Mailbox 13 */ +#define TRS14 0x4000 /* Remote Frame Request For Mailbox 14 */ +#define TRS15 0x8000 /* Remote Frame Request For Mailbox 15 */ + +/* CAN_TRS2 Masks */ +#define TRS16 0x0001 /* Remote Frame Request For Mailbox 16 */ +#define TRS17 0x0002 /* Remote Frame Request For Mailbox 17 */ +#define TRS18 0x0004 /* Remote Frame Request For Mailbox 18 */ +#define TRS19 0x0008 /* Remote Frame Request For Mailbox 19 */ +#define TRS20 0x0010 /* Remote Frame Request For Mailbox 20 */ +#define TRS21 0x0020 /* Remote Frame Request For Mailbox 21 */ +#define TRS22 0x0040 /* Remote Frame Request For Mailbox 22 */ +#define TRS23 0x0080 /* Remote Frame Request For Mailbox 23 */ +#define TRS24 0x0100 /* Remote Frame Request For Mailbox 24 */ +#define TRS25 0x0200 /* Remote Frame Request For Mailbox 25 */ +#define TRS26 0x0400 /* Remote Frame Request For Mailbox 26 */ +#define TRS27 0x0800 /* Remote Frame Request For Mailbox 27 */ +#define TRS28 0x1000 /* Remote Frame Request For Mailbox 28 */ +#define TRS29 0x2000 /* Remote Frame Request For Mailbox 29 */ +#define TRS30 0x4000 /* Remote Frame Request For Mailbox 30 */ +#define TRS31 0x8000 /* Remote Frame Request For Mailbox 31 */ + +/* CAN_AA1 Masks */ +#define AA0 0x0001 /* Aborted Message In Mailbox 0 */ +#define AA1 0x0002 /* Aborted Message In Mailbox 1 */ +#define AA2 0x0004 /* Aborted Message In Mailbox 2 */ +#define AA3 0x0008 /* Aborted Message In Mailbox 3 */ +#define AA4 0x0010 /* Aborted Message In Mailbox 4 */ +#define AA5 0x0020 /* Aborted Message In Mailbox 5 */ +#define AA6 0x0040 /* Aborted Message In Mailbox 6 */ +#define AA7 0x0080 /* Aborted Message In Mailbox 7 */ +#define AA8 0x0100 /* Aborted Message In Mailbox 8 */ +#define AA9 0x0200 /* Aborted Message In Mailbox 9 */ +#define AA10 0x0400 /* Aborted Message In Mailbox 10 */ +#define AA11 0x0800 /* Aborted Message In Mailbox 11 */ +#define AA12 0x1000 /* Aborted Message In Mailbox 12 */ +#define AA13 0x2000 /* Aborted Message In Mailbox 13 */ +#define AA14 0x4000 /* Aborted Message In Mailbox 14 */ +#define AA15 0x8000 /* Aborted Message In Mailbox 15 */ + +/* CAN_AA2 Masks */ +#define AA16 0x0001 /* Aborted Message In Mailbox 16 */ +#define AA17 0x0002 /* Aborted Message In Mailbox 17 */ +#define AA18 0x0004 /* Aborted Message In Mailbox 18 */ +#define AA19 0x0008 /* Aborted Message In Mailbox 19 */ +#define AA20 0x0010 /* Aborted Message In Mailbox 20 */ +#define AA21 0x0020 /* Aborted Message In Mailbox 21 */ +#define AA22 0x0040 /* Aborted Message In Mailbox 22 */ +#define AA23 0x0080 /* Aborted Message In Mailbox 23 */ +#define AA24 0x0100 /* Aborted Message In Mailbox 24 */ +#define AA25 0x0200 /* Aborted Message In Mailbox 25 */ +#define AA26 0x0400 /* Aborted Message In Mailbox 26 */ +#define AA27 0x0800 /* Aborted Message In Mailbox 27 */ +#define AA28 0x1000 /* Aborted Message In Mailbox 28 */ +#define AA29 0x2000 /* Aborted Message In Mailbox 29 */ +#define AA30 0x4000 /* Aborted Message In Mailbox 30 */ 
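The TRS, TA, and AA flag groups defined in this header work together when software transmits from a mailbox: setting the mailbox's TRS bit requests transmission, and the matching TA (success) or AA (abort) bit reports the outcome. A minimal polling sketch follows; it is not part of the original patch, and it assumes the bfin_read16()/bfin_write16() MMIO accessors, the CAN_TRS1/CAN_TA1/CAN_AA1 register address macros defined earlier in this file, and write-1 set/clear semantics for these sticky bits.

#include <asm/blackfin.h>	/* assumed location of bfin_read16()/bfin_write16() */

static inline int can_mailbox2_transmit_poll(void)
{
	/* request transmission from mailbox 2 (TRS bits assumed write-1-to-set) */
	bfin_write16(CAN_TRS1, TRS2);

	/* busy-wait for transmit acknowledge or abort; a real driver would bound this */
	while (!(bfin_read16(CAN_TA1) & TA2)) {
		if (bfin_read16(CAN_AA1) & AA2) {
			bfin_write16(CAN_AA1, AA2);	/* clear the sticky abort flag */
			return -1;			/* transmission aborted */
		}
	}
	bfin_write16(CAN_TA1, TA2);	/* clear the sticky transmit-acknowledge flag */
	return 0;
}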
+#define AA31 0x8000 /* Aborted Message In Mailbox 31 */ + +/* CAN_TA1 Masks */ +#define TA0 0x0001 /* Transmit Successful From Mailbox 0 */ +#define TA1 0x0002 /* Transmit Successful From Mailbox 1 */ +#define TA2 0x0004 /* Transmit Successful From Mailbox 2 */ +#define TA3 0x0008 /* Transmit Successful From Mailbox 3 */ +#define TA4 0x0010 /* Transmit Successful From Mailbox 4 */ +#define TA5 0x0020 /* Transmit Successful From Mailbox 5 */ +#define TA6 0x0040 /* Transmit Successful From Mailbox 6 */ +#define TA7 0x0080 /* Transmit Successful From Mailbox 7 */ +#define TA8 0x0100 /* Transmit Successful From Mailbox 8 */ +#define TA9 0x0200 /* Transmit Successful From Mailbox 9 */ +#define TA10 0x0400 /* Transmit Successful From Mailbox 10 */ +#define TA11 0x0800 /* Transmit Successful From Mailbox 11 */ +#define TA12 0x1000 /* Transmit Successful From Mailbox 12 */ +#define TA13 0x2000 /* Transmit Successful From Mailbox 13 */ +#define TA14 0x4000 /* Transmit Successful From Mailbox 14 */ +#define TA15 0x8000 /* Transmit Successful From Mailbox 15 */ + +/* CAN_TA2 Masks */ +#define TA16 0x0001 /* Transmit Successful From Mailbox 16 */ +#define TA17 0x0002 /* Transmit Successful From Mailbox 17 */ +#define TA18 0x0004 /* Transmit Successful From Mailbox 18 */ +#define TA19 0x0008 /* Transmit Successful From Mailbox 19 */ +#define TA20 0x0010 /* Transmit Successful From Mailbox 20 */ +#define TA21 0x0020 /* Transmit Successful From Mailbox 21 */ +#define TA22 0x0040 /* Transmit Successful From Mailbox 22 */ +#define TA23 0x0080 /* Transmit Successful From Mailbox 23 */ +#define TA24 0x0100 /* Transmit Successful From Mailbox 24 */ +#define TA25 0x0200 /* Transmit Successful From Mailbox 25 */ +#define TA26 0x0400 /* Transmit Successful From Mailbox 26 */ +#define TA27 0x0800 /* Transmit Successful From Mailbox 27 */ +#define TA28 0x1000 /* Transmit Successful From Mailbox 28 */ +#define TA29 0x2000 /* Transmit Successful From Mailbox 29 */ +#define TA30 0x4000 /* Transmit Successful From Mailbox 30 */ +#define TA31 0x8000 /* Transmit Successful From Mailbox 31 */ + +/* CAN_MBTD Masks */ +#define TDPTR 0x001F /* Mailbox To Temporarily Disable */ +#define TDA 0x0040 /* Temporary Disable Acknowledge */ +#define TDR 0x0080 /* Temporary Disable Request */ + +/* CAN_RFH1 Masks */ +#define RFH0 0x0001 /* Enable Automatic Remote Frame Handling For Mailbox 0 */ +#define RFH1 0x0002 /* Enable Automatic Remote Frame Handling For Mailbox 1 */ +#define RFH2 0x0004 /* Enable Automatic Remote Frame Handling For Mailbox 2 */ +#define RFH3 0x0008 /* Enable Automatic Remote Frame Handling For Mailbox 3 */ +#define RFH4 0x0010 /* Enable Automatic Remote Frame Handling For Mailbox 4 */ +#define RFH5 0x0020 /* Enable Automatic Remote Frame Handling For Mailbox 5 */ +#define RFH6 0x0040 /* Enable Automatic Remote Frame Handling For Mailbox 6 */ +#define RFH7 0x0080 /* Enable Automatic Remote Frame Handling For Mailbox 7 */ +#define RFH8 0x0100 /* Enable Automatic Remote Frame Handling For Mailbox 8 */ +#define RFH9 0x0200 /* Enable Automatic Remote Frame Handling For Mailbox 9 */ +#define RFH10 0x0400 /* Enable Automatic Remote Frame Handling For Mailbox 10 */ +#define RFH11 0x0800 /* Enable Automatic Remote Frame Handling For Mailbox 11 */ +#define RFH12 0x1000 /* Enable Automatic Remote Frame Handling For Mailbox 12 */ +#define RFH13 0x2000 /* Enable Automatic Remote Frame Handling For Mailbox 13 */ +#define RFH14 0x4000 /* Enable Automatic Remote Frame Handling For Mailbox 14 */ +#define RFH15 0x8000 /* 
Enable Automatic Remote Frame Handling For Mailbox 15 */ + +/* CAN_RFH2 Masks */ +#define RFH16 0x0001 /* Enable Automatic Remote Frame Handling For Mailbox 16 */ +#define RFH17 0x0002 /* Enable Automatic Remote Frame Handling For Mailbox 17 */ +#define RFH18 0x0004 /* Enable Automatic Remote Frame Handling For Mailbox 18 */ +#define RFH19 0x0008 /* Enable Automatic Remote Frame Handling For Mailbox 19 */ +#define RFH20 0x0010 /* Enable Automatic Remote Frame Handling For Mailbox 20 */ +#define RFH21 0x0020 /* Enable Automatic Remote Frame Handling For Mailbox 21 */ +#define RFH22 0x0040 /* Enable Automatic Remote Frame Handling For Mailbox 22 */ +#define RFH23 0x0080 /* Enable Automatic Remote Frame Handling For Mailbox 23 */ +#define RFH24 0x0100 /* Enable Automatic Remote Frame Handling For Mailbox 24 */ +#define RFH25 0x0200 /* Enable Automatic Remote Frame Handling For Mailbox 25 */ +#define RFH26 0x0400 /* Enable Automatic Remote Frame Handling For Mailbox 26 */ +#define RFH27 0x0800 /* Enable Automatic Remote Frame Handling For Mailbox 27 */ +#define RFH28 0x1000 /* Enable Automatic Remote Frame Handling For Mailbox 28 */ +#define RFH29 0x2000 /* Enable Automatic Remote Frame Handling For Mailbox 29 */ +#define RFH30 0x4000 /* Enable Automatic Remote Frame Handling For Mailbox 30 */ +#define RFH31 0x8000 /* Enable Automatic Remote Frame Handling For Mailbox 31 */ + +/* CAN_MBTIF1 Masks */ +#define MBTIF0 0x0001 /* TX Interrupt Active In Mailbox 0 */ +#define MBTIF1 0x0002 /* TX Interrupt Active In Mailbox 1 */ +#define MBTIF2 0x0004 /* TX Interrupt Active In Mailbox 2 */ +#define MBTIF3 0x0008 /* TX Interrupt Active In Mailbox 3 */ +#define MBTIF4 0x0010 /* TX Interrupt Active In Mailbox 4 */ +#define MBTIF5 0x0020 /* TX Interrupt Active In Mailbox 5 */ +#define MBTIF6 0x0040 /* TX Interrupt Active In Mailbox 6 */ +#define MBTIF7 0x0080 /* TX Interrupt Active In Mailbox 7 */ +#define MBTIF8 0x0100 /* TX Interrupt Active In Mailbox 8 */ +#define MBTIF9 0x0200 /* TX Interrupt Active In Mailbox 9 */ +#define MBTIF10 0x0400 /* TX Interrupt Active In Mailbox 10 */ +#define MBTIF11 0x0800 /* TX Interrupt Active In Mailbox 11 */ +#define MBTIF12 0x1000 /* TX Interrupt Active In Mailbox 12 */ +#define MBTIF13 0x2000 /* TX Interrupt Active In Mailbox 13 */ +#define MBTIF14 0x4000 /* TX Interrupt Active In Mailbox 14 */ +#define MBTIF15 0x8000 /* TX Interrupt Active In Mailbox 15 */ + +/* CAN_MBTIF2 Masks */ +#define MBTIF16 0x0001 /* TX Interrupt Active In Mailbox 16 */ +#define MBTIF17 0x0002 /* TX Interrupt Active In Mailbox 17 */ +#define MBTIF18 0x0004 /* TX Interrupt Active In Mailbox 18 */ +#define MBTIF19 0x0008 /* TX Interrupt Active In Mailbox 19 */ +#define MBTIF20 0x0010 /* TX Interrupt Active In Mailbox 20 */ +#define MBTIF21 0x0020 /* TX Interrupt Active In Mailbox 21 */ +#define MBTIF22 0x0040 /* TX Interrupt Active In Mailbox 22 */ +#define MBTIF23 0x0080 /* TX Interrupt Active In Mailbox 23 */ +#define MBTIF24 0x0100 /* TX Interrupt Active In Mailbox 24 */ +#define MBTIF25 0x0200 /* TX Interrupt Active In Mailbox 25 */ +#define MBTIF26 0x0400 /* TX Interrupt Active In Mailbox 26 */ +#define MBTIF27 0x0800 /* TX Interrupt Active In Mailbox 27 */ +#define MBTIF28 0x1000 /* TX Interrupt Active In Mailbox 28 */ +#define MBTIF29 0x2000 /* TX Interrupt Active In Mailbox 29 */ +#define MBTIF30 0x4000 /* TX Interrupt Active In Mailbox 30 */ +#define MBTIF31 0x8000 /* TX Interrupt Active In Mailbox 31 */ + +/* CAN_MBRIF1 Masks */ +#define MBRIF0 0x0001 /* RX Interrupt Active In 
Mailbox 0 */ +#define MBRIF1 0x0002 /* RX Interrupt Active In Mailbox 1 */ +#define MBRIF2 0x0004 /* RX Interrupt Active In Mailbox 2 */ +#define MBRIF3 0x0008 /* RX Interrupt Active In Mailbox 3 */ +#define MBRIF4 0x0010 /* RX Interrupt Active In Mailbox 4 */ +#define MBRIF5 0x0020 /* RX Interrupt Active In Mailbox 5 */ +#define MBRIF6 0x0040 /* RX Interrupt Active In Mailbox 6 */ +#define MBRIF7 0x0080 /* RX Interrupt Active In Mailbox 7 */ +#define MBRIF8 0x0100 /* RX Interrupt Active In Mailbox 8 */ +#define MBRIF9 0x0200 /* RX Interrupt Active In Mailbox 9 */ +#define MBRIF10 0x0400 /* RX Interrupt Active In Mailbox 10 */ +#define MBRIF11 0x0800 /* RX Interrupt Active In Mailbox 11 */ +#define MBRIF12 0x1000 /* RX Interrupt Active In Mailbox 12 */ +#define MBRIF13 0x2000 /* RX Interrupt Active In Mailbox 13 */ +#define MBRIF14 0x4000 /* RX Interrupt Active In Mailbox 14 */ +#define MBRIF15 0x8000 /* RX Interrupt Active In Mailbox 15 */ + +/* CAN_MBRIF2 Masks */ +#define MBRIF16 0x0001 /* RX Interrupt Active In Mailbox 16 */ +#define MBRIF17 0x0002 /* RX Interrupt Active In Mailbox 17 */ +#define MBRIF18 0x0004 /* RX Interrupt Active In Mailbox 18 */ +#define MBRIF19 0x0008 /* RX Interrupt Active In Mailbox 19 */ +#define MBRIF20 0x0010 /* RX Interrupt Active In Mailbox 20 */ +#define MBRIF21 0x0020 /* RX Interrupt Active In Mailbox 21 */ +#define MBRIF22 0x0040 /* RX Interrupt Active In Mailbox 22 */ +#define MBRIF23 0x0080 /* RX Interrupt Active In Mailbox 23 */ +#define MBRIF24 0x0100 /* RX Interrupt Active In Mailbox 24 */ +#define MBRIF25 0x0200 /* RX Interrupt Active In Mailbox 25 */ +#define MBRIF26 0x0400 /* RX Interrupt Active In Mailbox 26 */ +#define MBRIF27 0x0800 /* RX Interrupt Active In Mailbox 27 */ +#define MBRIF28 0x1000 /* RX Interrupt Active In Mailbox 28 */ +#define MBRIF29 0x2000 /* RX Interrupt Active In Mailbox 29 */ +#define MBRIF30 0x4000 /* RX Interrupt Active In Mailbox 30 */ +#define MBRIF31 0x8000 /* RX Interrupt Active In Mailbox 31 */ + +/* CAN_MBIM1 Masks */ +#define MBIM0 0x0001 /* Enable Interrupt For Mailbox 0 */ +#define MBIM1 0x0002 /* Enable Interrupt For Mailbox 1 */ +#define MBIM2 0x0004 /* Enable Interrupt For Mailbox 2 */ +#define MBIM3 0x0008 /* Enable Interrupt For Mailbox 3 */ +#define MBIM4 0x0010 /* Enable Interrupt For Mailbox 4 */ +#define MBIM5 0x0020 /* Enable Interrupt For Mailbox 5 */ +#define MBIM6 0x0040 /* Enable Interrupt For Mailbox 6 */ +#define MBIM7 0x0080 /* Enable Interrupt For Mailbox 7 */ +#define MBIM8 0x0100 /* Enable Interrupt For Mailbox 8 */ +#define MBIM9 0x0200 /* Enable Interrupt For Mailbox 9 */ +#define MBIM10 0x0400 /* Enable Interrupt For Mailbox 10 */ +#define MBIM11 0x0800 /* Enable Interrupt For Mailbox 11 */ +#define MBIM12 0x1000 /* Enable Interrupt For Mailbox 12 */ +#define MBIM13 0x2000 /* Enable Interrupt For Mailbox 13 */ +#define MBIM14 0x4000 /* Enable Interrupt For Mailbox 14 */ +#define MBIM15 0x8000 /* Enable Interrupt For Mailbox 15 */ + +/* CAN_MBIM2 Masks */ +#define MBIM16 0x0001 /* Enable Interrupt For Mailbox 16 */ +#define MBIM17 0x0002 /* Enable Interrupt For Mailbox 17 */ +#define MBIM18 0x0004 /* Enable Interrupt For Mailbox 18 */ +#define MBIM19 0x0008 /* Enable Interrupt For Mailbox 19 */ +#define MBIM20 0x0010 /* Enable Interrupt For Mailbox 20 */ +#define MBIM21 0x0020 /* Enable Interrupt For Mailbox 21 */ +#define MBIM22 0x0040 /* Enable Interrupt For Mailbox 22 */ +#define MBIM23 0x0080 /* Enable Interrupt For Mailbox 23 */ +#define MBIM24 0x0100 /* Enable Interrupt For Mailbox 
24 */ +#define MBIM25 0x0200 /* Enable Interrupt For Mailbox 25 */ +#define MBIM26 0x0400 /* Enable Interrupt For Mailbox 26 */ +#define MBIM27 0x0800 /* Enable Interrupt For Mailbox 27 */ +#define MBIM28 0x1000 /* Enable Interrupt For Mailbox 28 */ +#define MBIM29 0x2000 /* Enable Interrupt For Mailbox 29 */ +#define MBIM30 0x4000 /* Enable Interrupt For Mailbox 30 */ +#define MBIM31 0x8000 /* Enable Interrupt For Mailbox 31 */ + +/* CAN_GIM Masks */ +#define EWTIM 0x0001 /* Enable TX Error Count Interrupt */ +#define EWRIM 0x0002 /* Enable RX Error Count Interrupt */ +#define EPIM 0x0004 /* Enable Error-Passive Mode Interrupt */ +#define BOIM 0x0008 /* Enable Bus Off Interrupt */ +#define WUIM 0x0010 /* Enable Wake-Up Interrupt */ +#define UIAIM 0x0020 /* Enable Access To Unimplemented Address Interrupt */ +#define AAIM 0x0040 /* Enable Abort Acknowledge Interrupt */ +#define RMLIM 0x0080 /* Enable RX Message Lost Interrupt */ +#define UCEIM 0x0100 /* Enable Universal Counter Overflow Interrupt */ +#define EXTIM 0x0200 /* Enable External Trigger Output Interrupt */ +#define ADIM 0x0400 /* Enable Access Denied Interrupt */ + +/* CAN_GIS Masks */ +#define EWTIS 0x0001 /* TX Error Count IRQ Status */ +#define EWRIS 0x0002 /* RX Error Count IRQ Status */ +#define EPIS 0x0004 /* Error-Passive Mode IRQ Status */ +#define BOIS 0x0008 /* Bus Off IRQ Status */ +#define WUIS 0x0010 /* Wake-Up IRQ Status */ +#define UIAIS 0x0020 /* Access To Unimplemented Address IRQ Status */ +#define AAIS 0x0040 /* Abort Acknowledge IRQ Status */ +#define RMLIS 0x0080 /* RX Message Lost IRQ Status */ +#define UCEIS 0x0100 /* Universal Counter Overflow IRQ Status */ +#define EXTIS 0x0200 /* External Trigger Output IRQ Status */ +#define ADIS 0x0400 /* Access Denied IRQ Status */ + +/* CAN_GIF Masks */ +#define EWTIF 0x0001 /* TX Error Count IRQ Flag */ +#define EWRIF 0x0002 /* RX Error Count IRQ Flag */ +#define EPIF 0x0004 /* Error-Passive Mode IRQ Flag */ +#define BOIF 0x0008 /* Bus Off IRQ Flag */ +#define WUIF 0x0010 /* Wake-Up IRQ Flag */ +#define UIAIF 0x0020 /* Access To Unimplemented Address IRQ Flag */ +#define AAIF 0x0040 /* Abort Acknowledge IRQ Flag */ +#define RMLIF 0x0080 /* RX Message Lost IRQ Flag */ +#define UCEIF 0x0100 /* Universal Counter Overflow IRQ Flag */ +#define EXTIF 0x0200 /* External Trigger Output IRQ Flag */ +#define ADIF 0x0400 /* Access Denied IRQ Flag */ + +/* CAN_UCCNF Masks */ +#define UCCNF 0x000F /* Universal Counter Mode */ +#define UC_STAMP 0x0001 /* Timestamp Mode */ +#define UC_WDOG 0x0002 /* Watchdog Mode */ +#define UC_AUTOTX 0x0003 /* Auto-Transmit Mode */ +#define UC_ERROR 0x0006 /* CAN Error Frame Count */ +#define UC_OVER 0x0007 /* CAN Overload Frame Count */ +#define UC_LOST 0x0008 /* Arbitration Lost During TX Count */ +#define UC_AA 0x0009 /* TX Abort Count */ +#define UC_TA 0x000A /* TX Successful Count */ +#define UC_REJECT 0x000B /* RX Message Rejected Count */ +#define UC_RML 0x000C /* RX Message Lost Count */ +#define UC_RX 0x000D /* Total Successful RX Messages Count */ +#define UC_RMP 0x000E /* Successful RX W/Matching ID Count */ +#define UC_ALL 0x000F /* Correct Message On CAN Bus Line Count */ +#define UCRC 0x0020 /* Universal Counter Reload/Clear */ +#define UCCT 0x0040 /* Universal Counter CAN Trigger */ +#define UCE 0x0080 /* Universal Counter Enable */ + +/* CAN_ESR Masks */ +#define ACKE 0x0004 /* Acknowledge Error */ +#define SER 0x0008 /* Stuff Error */ +#define CRCE 0x0010 /* CRC Error */ +#define SA0 0x0020 /* Stuck At Dominant Error */ +#define 
BEF 0x0040 /* Bit Error Flag */ +#define FER 0x0080 /* Form Error Flag */ + +/* CAN_EWR Masks */ +#define EWLREC 0x00FF /* RX Error Count Limit (For EWRIS) */ +#define EWLTEC 0xFF00 /* TX Error Count Limit (For EWTIS) */ + +/* ******************* PIN CONTROL REGISTER MASKS ************************/ +/* PORT_MUX Masks */ +#define PJSE 0x0001 /* Port J SPI/SPORT Enable */ +#define PJSE_SPORT 0x0000 /* Enable TFS0/DT0PRI */ +#define PJSE_SPI 0x0001 /* Enable SPI_SSEL3:2 */ + +#define PJCE(x) (((x)&0x3)<<1) /* Port J CAN/SPI/SPORT Enable */ +#define PJCE_SPORT 0x0000 /* Enable DR0SEC/DT0SEC */ +#define PJCE_CAN 0x0002 /* Enable CAN RX/TX */ +#define PJCE_SPI 0x0004 /* Enable SPI_SSEL7 */ + +#define PFDE 0x0008 /* Port F DMA Request Enable */ +#define PGDE_UART 0x0000 /* Enable UART0 RX/TX */ +#define PGDE_DMA 0x0008 /* Enable DMAR1:0 */ + +#define PFTE 0x0010 /* Port F Timer Enable */ +#define PFTE_UART 0x0000 /* Enable UART1 RX/TX */ +#define PFTE_TIMER 0x0010 /* Enable TMR7:6 */ + +#define PFS6E 0x0020 /* Port F SPI SSEL 6 Enable */ +#define PFS6E_TIMER 0x0000 /* Enable TMR5 */ +#define PFS6E_SPI 0x0020 /* Enable SPI_SSEL6 */ + +#define PFS5E 0x0040 /* Port F SPI SSEL 5 Enable */ +#define PFS5E_TIMER 0x0000 /* Enable TMR4 */ +#define PFS5E_SPI 0x0040 /* Enable SPI_SSEL5 */ + +#define PFS4E 0x0080 /* Port F SPI SSEL 4 Enable */ +#define PFS4E_TIMER 0x0000 /* Enable TMR3 */ +#define PFS4E_SPI 0x0080 /* Enable SPI_SSEL4 */ + +#define PFFE 0x0100 /* Port F PPI Frame Sync Enable */ +#define PFFE_TIMER 0x0000 /* Enable TMR2 */ +#define PFFE_PPI 0x0100 /* Enable PPI FS3 */ + +#define PGSE 0x0200 /* Port G SPORT1 Secondary Enable */ +#define PGSE_PPI 0x0000 /* Enable PPI D9:8 */ +#define PGSE_SPORT 0x0200 /* Enable DR1SEC/DT1SEC */ + +#define PGRE 0x0400 /* Port G SPORT1 Receive Enable */ +#define PGRE_PPI 0x0000 /* Enable PPI D12:10 */ +#define PGRE_SPORT 0x0400 /* Enable DR1PRI/RFS1/RSCLK1 */ + +#define PGTE 0x0800 /* Port G SPORT1 Transmit Enable */ +#define PGTE_PPI 0x0000 /* Enable PPI D15:13 */ +#define PGTE_SPORT 0x0800 /* Enable DT1PRI/TFS1/TSCLK1 */ + +/* ****************** HANDSHAKE DMA (HDMA) MASKS *********************/ +/* HDMAx_CTL Masks */ +#define HMDMAEN 0x0001 /* Enable Handshake DMA 0/1 */ +#define REP 0x0002 /* HDMA Request Polarity */ +#define UTE 0x0004 /* Urgency Threshold Enable */ +#define OIE 0x0010 /* Overflow Interrupt Enable */ +#define BDIE 0x0020 /* Block Done Interrupt Enable */ +#define MBDI 0x0040 /* Mask Block Done IRQ If Pending ECNT */ +#define DRQ 0x0300 /* HDMA Request Type */ +#define DRQ_NONE 0x0000 /* No Request */ +#define DRQ_SINGLE 0x0100 /* Channels Request Single */ +#define DRQ_MULTI 0x0200 /* Channels Request Multi (Default) */ +#define DRQ_URGENT 0x0300 /* Channels Request Multi Urgent */ +#define RBC 0x1000 /* Reload BCNT With IBCNT */ +#define PS 0x2000 /* HDMA Pin Status */ +#define OI 0x4000 /* Overflow Interrupt Generated */ +#define BDI 0x8000 /* Block Done Interrupt Generated */ + +#endif /* _DEF_BF534_H */ diff --git a/include/asm-blackfin/mach-bf537/defBF537.h b/include/asm-blackfin/mach-bf537/defBF537.h new file mode 100644 index 000000000000..26f9c02eb73c --- /dev/null +++ b/include/asm-blackfin/mach-bf537/defBF537.h @@ -0,0 +1,404 @@ +/* + * file: include/asm-blackfin/mach-bf537/defbf537.h + * based on: + * author: + * + * created: + * description: + * system mmr register map + * rev: + * + * modified: + * + * + * bugs: enter bugs at http://blackfin.uclinux.org/ + * + * this program is free software; you can redistribute it and/or modify 
+ * it under the terms of the gnu general public license as published by + * the free software foundation; either version 2, or (at your option) + * any later version. + * + * this program is distributed in the hope that it will be useful, + * but without any warranty; without even the implied warranty of + * merchantability or fitness for a particular purpose. see the + * gnu general public license for more details. + * + * you should have received a copy of the gnu general public license + * along with this program; see the file copying. + * if not, write to the free software foundation, + * 59 temple place - suite 330, boston, ma 02111-1307, usa. + */ + +#ifndef _DEF_BF537_H +#define _DEF_BF537_H + +/*include all Core registers and bit definitions*/ +#include "defBF534.h" + +/*include core specific register pointer definitions*/ +#include + +/************************************************************************************ +** Define EMAC Section Unique to BF536/BF537 +*************************************************************************************/ + +/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */ +#define EMAC_OPMODE 0xFFC03000 /* Operating Mode Register */ +#define EMAC_ADDRLO 0xFFC03004 /* Address Low (32 LSBs) Register */ +#define EMAC_ADDRHI 0xFFC03008 /* Address High (16 MSBs) Register */ +#define EMAC_HASHLO 0xFFC0300C /* Multicast Hash Table Low (Bins 31-0) Register */ +#define EMAC_HASHHI 0xFFC03010 /* Multicast Hash Table High (Bins 63-32) Register */ +#define EMAC_STAADD 0xFFC03014 /* Station Management Address Register */ +#define EMAC_STADAT 0xFFC03018 /* Station Management Data Register */ +#define EMAC_FLC 0xFFC0301C /* Flow Control Register */ +#define EMAC_VLAN1 0xFFC03020 /* VLAN1 Tag Register */ +#define EMAC_VLAN2 0xFFC03024 /* VLAN2 Tag Register */ +#define EMAC_WKUP_CTL 0xFFC0302C /* Wake-Up Control/Status Register */ +#define EMAC_WKUP_FFMSK0 0xFFC03030 /* Wake-Up Frame Filter 0 Byte Mask Register */ +#define EMAC_WKUP_FFMSK1 0xFFC03034 /* Wake-Up Frame Filter 1 Byte Mask Register */ +#define EMAC_WKUP_FFMSK2 0xFFC03038 /* Wake-Up Frame Filter 2 Byte Mask Register */ +#define EMAC_WKUP_FFMSK3 0xFFC0303C /* Wake-Up Frame Filter 3 Byte Mask Register */ +#define EMAC_WKUP_FFCMD 0xFFC03040 /* Wake-Up Frame Filter Commands Register */ +#define EMAC_WKUP_FFOFF 0xFFC03044 /* Wake-Up Frame Filter Offsets Register */ +#define EMAC_WKUP_FFCRC0 0xFFC03048 /* Wake-Up Frame Filter 0,1 CRC-16 Register */ +#define EMAC_WKUP_FFCRC1 0xFFC0304C /* Wake-Up Frame Filter 2,3 CRC-16 Register */ + +#define EMAC_SYSCTL 0xFFC03060 /* EMAC System Control Register */ +#define EMAC_SYSTAT 0xFFC03064 /* EMAC System Status Register */ +#define EMAC_RX_STAT 0xFFC03068 /* RX Current Frame Status Register */ +#define EMAC_RX_STKY 0xFFC0306C /* RX Sticky Frame Status Register */ +#define EMAC_RX_IRQE 0xFFC03070 /* RX Frame Status Interrupt Enables Register */ +#define EMAC_TX_STAT 0xFFC03074 /* TX Current Frame Status Register */ +#define EMAC_TX_STKY 0xFFC03078 /* TX Sticky Frame Status Register */ +#define EMAC_TX_IRQE 0xFFC0307C /* TX Frame Status Interrupt Enables Register */ + +#define EMAC_MMC_CTL 0xFFC03080 /* MMC Counter Control Register */ +#define EMAC_MMC_RIRQS 0xFFC03084 /* MMC RX Interrupt Status Register */ +#define EMAC_MMC_RIRQE 0xFFC03088 /* MMC RX Interrupt Enables Register */ +#define EMAC_MMC_TIRQS 0xFFC0308C /* MMC TX Interrupt Status Register */ +#define EMAC_MMC_TIRQE 0xFFC03090 /* MMC TX Interrupt Enables Register */ + +#define EMAC_RXC_OK 0xFFC03100 /*
RX Frame Successful Count */ +#define EMAC_RXC_FCS 0xFFC03104 /* RX Frame FCS Failure Count */ +#define EMAC_RXC_ALIGN 0xFFC03108 /* RX Alignment Error Count */ +#define EMAC_RXC_OCTET 0xFFC0310C /* RX Octets Successfully Received Count */ +#define EMAC_RXC_DMAOVF 0xFFC03110 /* Internal MAC Sublayer Error RX Frame Count */ +#define EMAC_RXC_UNICST 0xFFC03114 /* Unicast RX Frame Count */ +#define EMAC_RXC_MULTI 0xFFC03118 /* Multicast RX Frame Count */ +#define EMAC_RXC_BROAD 0xFFC0311C /* Broadcast RX Frame Count */ +#define EMAC_RXC_LNERRI 0xFFC03120 /* RX Frame In Range Error Count */ +#define EMAC_RXC_LNERRO 0xFFC03124 /* RX Frame Out Of Range Error Count */ +#define EMAC_RXC_LONG 0xFFC03128 /* RX Frame Too Long Count */ +#define EMAC_RXC_MACCTL 0xFFC0312C /* MAC Control RX Frame Count */ +#define EMAC_RXC_OPCODE 0xFFC03130 /* Unsupported Op-Code RX Frame Count */ +#define EMAC_RXC_PAUSE 0xFFC03134 /* MAC Control Pause RX Frame Count */ +#define EMAC_RXC_ALLFRM 0xFFC03138 /* Overall RX Frame Count */ +#define EMAC_RXC_ALLOCT 0xFFC0313C /* Overall RX Octet Count */ +#define EMAC_RXC_TYPED 0xFFC03140 /* Type/Length Consistent RX Frame Count */ +#define EMAC_RXC_SHORT 0xFFC03144 /* RX Frame Fragment Count - Byte Count x < 64 */ +#define EMAC_RXC_EQ64 0xFFC03148 /* Good RX Frame Count - Byte Count x = 64 */ +#define EMAC_RXC_LT128 0xFFC0314C /* Good RX Frame Count - Byte Count 64 <= x < 128 */ +#define EMAC_RXC_LT256 0xFFC03150 /* Good RX Frame Count - Byte Count 128 <= x < 256 */ +#define EMAC_RXC_LT512 0xFFC03154 /* Good RX Frame Count - Byte Count 256 <= x < 512 */ +#define EMAC_RXC_LT1024 0xFFC03158 /* Good RX Frame Count - Byte Count 512 <= x < 1024 */ +#define EMAC_RXC_GE1024 0xFFC0315C /* Good RX Frame Count - Byte Count x >= 1024 */ + +#define EMAC_TXC_OK 0xFFC03180 /* TX Frame Successful Count */ +#define EMAC_TXC_1COL 0xFFC03184 /* TX Frames Successful After Single Collision Count */ +#define EMAC_TXC_GT1COL 0xFFC03188 /* TX Frames Successful After Multiple Collisions Count */ +#define EMAC_TXC_OCTET 0xFFC0318C /* TX Octets Successfully Received Count */ +#define EMAC_TXC_DEFER 0xFFC03190 /* TX Frame Delayed Due To Busy Count */ +#define EMAC_TXC_LATECL 0xFFC03194 /* Late TX Collisions Count */ +#define EMAC_TXC_XS_COL 0xFFC03198 /* TX Frame Failed Due To Excessive Collisions Count */ +#define EMAC_TXC_DMAUND 0xFFC0319C /* Internal MAC Sublayer Error TX Frame Count */ +#define EMAC_TXC_CRSERR 0xFFC031A0 /* Carrier Sense Deasserted During TX Frame Count */ +#define EMAC_TXC_UNICST 0xFFC031A4 /* Unicast TX Frame Count */ +#define EMAC_TXC_MULTI 0xFFC031A8 /* Multicast TX Frame Count */ +#define EMAC_TXC_BROAD 0xFFC031AC /* Broadcast TX Frame Count */ +#define EMAC_TXC_XS_DFR 0xFFC031B0 /* TX Frames With Excessive Deferral Count */ +#define EMAC_TXC_MACCTL 0xFFC031B4 /* MAC Control TX Frame Count */ +#define EMAC_TXC_ALLFRM 0xFFC031B8 /* Overall TX Frame Count */ +#define EMAC_TXC_ALLOCT 0xFFC031BC /* Overall TX Octet Count */ +#define EMAC_TXC_EQ64 0xFFC031C0 /* Good TX Frame Count - Byte Count x = 64 */ +#define EMAC_TXC_LT128 0xFFC031C4 /* Good TX Frame Count - Byte Count 64 <= x < 128 */ +#define EMAC_TXC_LT256 0xFFC031C8 /* Good TX Frame Count - Byte Count 128 <= x < 256 */ +#define EMAC_TXC_LT512 0xFFC031CC /* Good TX Frame Count - Byte Count 256 <= x < 512 */ +#define EMAC_TXC_LT1024 0xFFC031D0 /* Good TX Frame Count - Byte Count 512 <= x < 1024 */ +#define EMAC_TXC_GE1024 0xFFC031D4 /* Good TX Frame Count - Byte Count x >= 1024 */ +#define EMAC_TXC_ABORT 0xFFC031D8 /* Total 
TX Frames Aborted Count */ + +/* Listing for IEEE-Supported Count Registers */ +#define FramesReceivedOK EMAC_RXC_OK /* RX Frame Successful Count */ +#define FrameCheckSequenceErrors EMAC_RXC_FCS /* RX Frame FCS Failure Count */ +#define AlignmentErrors EMAC_RXC_ALIGN /* RX Alignment Error Count */ +#define OctetsReceivedOK EMAC_RXC_OCTET /* RX Octets Successfully Received Count */ +#define FramesLostDueToIntMACRcvError EMAC_RXC_DMAOVF /* Internal MAC Sublayer Error RX Frame Count */ +#define UnicastFramesReceivedOK EMAC_RXC_UNICST /* Unicast RX Frame Count */ +#define MulticastFramesReceivedOK EMAC_RXC_MULTI /* Multicast RX Frame Count */ +#define BroadcastFramesReceivedOK EMAC_RXC_BROAD /* Broadcast RX Frame Count */ +#define InRangeLengthErrors EMAC_RXC_LNERRI /* RX Frame In Range Error Count */ +#define OutOfRangeLengthField EMAC_RXC_LNERRO /* RX Frame Out Of Range Error Count */ +#define FrameTooLongErrors EMAC_RXC_LONG /* RX Frame Too Long Count */ +#define MACControlFramesReceived EMAC_RXC_MACCTL /* MAC Control RX Frame Count */ +#define UnsupportedOpcodesReceived EMAC_RXC_OPCODE /* Unsupported Op-Code RX Frame Count */ +#define PAUSEMACCtrlFramesReceived EMAC_RXC_PAUSE /* MAC Control Pause RX Frame Count */ +#define FramesReceivedAll EMAC_RXC_ALLFRM /* Overall RX Frame Count */ +#define OctetsReceivedAll EMAC_RXC_ALLOCT /* Overall RX Octet Count */ +#define TypedFramesReceived EMAC_RXC_TYPED /* Type/Length Consistent RX Frame Count */ +#define FramesLenLt64Received EMAC_RXC_SHORT /* RX Frame Fragment Count - Byte Count x < 64 */ +#define FramesLenEq64Received EMAC_RXC_EQ64 /* Good RX Frame Count - Byte Count x = 64 */ +#define FramesLen65_127Received EMAC_RXC_LT128 /* Good RX Frame Count - Byte Count 64 <= x < 128 */ +#define FramesLen128_255Received EMAC_RXC_LT256 /* Good RX Frame Count - Byte Count 128 <= x < 256 */ +#define FramesLen256_511Received EMAC_RXC_LT512 /* Good RX Frame Count - Byte Count 256 <= x < 512 */ +#define FramesLen512_1023Received EMAC_RXC_LT1024 /* Good RX Frame Count - Byte Count 512 <= x < 1024 */ +#define FramesLen1024_MaxReceived EMAC_RXC_GE1024 /* Good RX Frame Count - Byte Count x >= 1024 */ + +#define FramesTransmittedOK EMAC_TXC_OK /* TX Frame Successful Count */ +#define SingleCollisionFrames EMAC_TXC_1COL /* TX Frames Successful After Single Collision Count */ +#define MultipleCollisionFrames EMAC_TXC_GT1COL /* TX Frames Successful After Multiple Collisions Count */ +#define OctetsTransmittedOK EMAC_TXC_OCTET /* TX Octets Successfully Received Count */ +#define FramesWithDeferredXmissions EMAC_TXC_DEFER /* TX Frame Delayed Due To Busy Count */ +#define LateCollisions EMAC_TXC_LATECL /* Late TX Collisions Count */ +#define FramesAbortedDueToXSColls EMAC_TXC_XS_COL /* TX Frame Failed Due To Excessive Collisions Count */ +#define FramesLostDueToIntMacXmitError EMAC_TXC_DMAUND /* Internal MAC Sublayer Error TX Frame Count */ +#define CarrierSenseErrors EMAC_TXC_CRSERR /* Carrier Sense Deasserted During TX Frame Count */ +#define UnicastFramesXmittedOK EMAC_TXC_UNICST /* Unicast TX Frame Count */ +#define MulticastFramesXmittedOK EMAC_TXC_MULTI /* Multicast TX Frame Count */ +#define BroadcastFramesXmittedOK EMAC_TXC_BROAD /* Broadcast TX Frame Count */ +#define FramesWithExcessiveDeferral EMAC_TXC_XS_DFR /* TX Frames With Excessive Deferral Count */ +#define MACControlFramesTransmitted EMAC_TXC_MACCTL /* MAC Control TX Frame Count */ +#define FramesTransmittedAll EMAC_TXC_ALLFRM /* Overall TX Frame Count */ +#define OctetsTransmittedAll EMAC_TXC_ALLOCT 
/* Overall TX Octet Count */ +#define FramesLenEq64Transmitted EMAC_TXC_EQ64 /* Good TX Frame Count - Byte Count x = 64 */ +#define FramesLen65_127Transmitted EMAC_TXC_LT128 /* Good TX Frame Count - Byte Count 64 <= x < 128 */ +#define FramesLen128_255Transmitted EMAC_TXC_LT256 /* Good TX Frame Count - Byte Count 128 <= x < 256 */ +#define FramesLen256_511Transmitted EMAC_TXC_LT512 /* Good TX Frame Count - Byte Count 256 <= x < 512 */ +#define FramesLen512_1023Transmitted EMAC_TXC_LT1024 /* Good TX Frame Count - Byte Count 512 <= x < 1024 */ +#define FramesLen1024_MaxTransmitted EMAC_TXC_GE1024 /* Good TX Frame Count - Byte Count x >= 1024 */ +#define TxAbortedFrames EMAC_TXC_ABORT /* Total TX Frames Aborted Count */ + +/*********************************************************************************** +** System MMR Register Bits And Macros +** +** Disclaimer: All macros are intended to make C and Assembly code more readable. +** Use these macros carefully, as any that do left shifts for field +** depositing will result in the lower order bits being destroyed. Any +** macro that shifts left to properly position the bit-field should be +** used as part of an OR to initialize a register and NOT as a dynamic +** modifier UNLESS the lower order bits are saved and ORed back in when +** the macro is used. +*************************************************************************************/ +/************************ ETHERNET 10/100 CONTROLLER MASKS ************************/ +/* EMAC_OPMODE Masks */ +#define RE 0x00000001 /* Receiver Enable */ +#define ASTP 0x00000002 /* Enable Automatic Pad Stripping On RX Frames */ +#define HU 0x00000010 /* Hash Filter Unicast Address */ +#define HM 0x00000020 /* Hash Filter Multicast Address */ +#define PAM 0x00000040 /* Pass-All-Multicast Mode Enable */ +#define PR 0x00000080 /* Promiscuous Mode Enable */ +#define IFE 0x00000100 /* Inverse Filtering Enable */ +#define DBF 0x00000200 /* Disable Broadcast Frame Reception */ +#define PBF 0x00000400 /* Pass Bad Frames Enable */ +#define PSF 0x00000800 /* Pass Short Frames Enable */ +#define RAF 0x00001000 /* Receive-All Mode */ +#define TE 0x00010000 /* Transmitter Enable */ +#define DTXPAD 0x00020000 /* Disable Automatic TX Padding */ +#define DTXCRC 0x00040000 /* Disable Automatic TX CRC Generation */ +#define DC 0x00080000 /* Deferral Check */ +#define BOLMT 0x00300000 /* Back-Off Limit */ +#define BOLMT_10 0x00000000 /* 10-bit range */ +#define BOLMT_8 0x00100000 /* 8-bit range */ +#define BOLMT_4 0x00200000 /* 4-bit range */ +#define BOLMT_1 0x00300000 /* 1-bit range */ +#define DRTY 0x00400000 /* Disable TX Retry On Collision */ +#define LCTRE 0x00800000 /* Enable TX Retry On Late Collision */ +#define RMII 0x01000000 /* RMII/MII* Mode */ +#define RMII_10 0x02000000 /* Speed Select for RMII Port (10MBit/100MBit*) */ +#define FDMODE 0x04000000 /* Duplex Mode Enable (Full/Half*) */ +#define LB 0x08000000 /* Internal Loopback Enable */ +#define DRO 0x10000000 /* Disable Receive Own Frames (Half-Duplex Mode) */ + +/* EMAC_STAADD Masks */ +#define STABUSY 0x00000001 /* Initiate Station Mgt Reg Access / STA Busy Stat */ +#define STAOP 0x00000002 /* Station Management Operation Code (Write/Read*) */ +#define STADISPRE 0x00000004 /* Disable Preamble Generation */ +#define STAIE 0x00000008 /* Station Mgt. 
Transfer Done Interrupt Enable */ +#define REGAD 0x000007C0 /* STA Register Address */ +#define PHYAD 0x0000F800 /* PHY Device Address */ + +#define SET_REGAD(x) (((x)&0x1F)<< 6 ) /* Set STA Register Address */ +#define SET_PHYAD(x) (((x)&0x1F)<< 11 ) /* Set PHY Device Address */ + +/* EMAC_STADAT Mask */ +#define STADATA 0x0000FFFF /* Station Management Data */ + +/* EMAC_FLC Masks */ +#define FLCBUSY 0x00000001 /* Send Flow Ctrl Frame / Flow Ctrl Busy Status */ +#define FLCE 0x00000002 /* Flow Control Enable */ +#define PCF 0x00000004 /* Pass Control Frames */ +#define BKPRSEN 0x00000008 /* Enable Backpressure */ +#define FLCPAUSE 0xFFFF0000 /* Pause Time */ + +#define SET_FLCPAUSE(x) (((x)&0xFFFF)<< 16) /* Set Pause Time */ + +/* EMAC_WKUP_CTL Masks */ +#define CAPWKFRM 0x00000001 /* Capture Wake-Up Frames */ +#define MPKE 0x00000002 /* Magic Packet Enable */ +#define RWKE 0x00000004 /* Remote Wake-Up Frame Enable */ +#define GUWKE 0x00000008 /* Global Unicast Wake Enable */ +#define MPKS 0x00000020 /* Magic Packet Received Status */ +#define RWKS 0x00000F00 /* Wake-Up Frame Received Status, Filters 3:0 */ + +/* EMAC_WKUP_FFCMD Masks */ +#define WF0_E 0x00000001 /* Enable Wake-Up Filter 0 */ +#define WF0_T 0x00000008 /* Wake-Up Filter 0 Addr Type (Multicast/Unicast*) */ +#define WF1_E 0x00000100 /* Enable Wake-Up Filter 1 */ +#define WF1_T 0x00000800 /* Wake-Up Filter 1 Addr Type (Multicast/Unicast*) */ +#define WF2_E 0x00010000 /* Enable Wake-Up Filter 2 */ +#define WF2_T 0x00080000 /* Wake-Up Filter 2 Addr Type (Multicast/Unicast*) */ +#define WF3_E 0x01000000 /* Enable Wake-Up Filter 3 */ +#define WF3_T 0x08000000 /* Wake-Up Filter 3 Addr Type (Multicast/Unicast*) */ + +/* EMAC_WKUP_FFOFF Masks */ +#define WF0_OFF 0x000000FF /* Wake-Up Filter 0 Pattern Offset */ +#define WF1_OFF 0x0000FF00 /* Wake-Up Filter 1 Pattern Offset */ +#define WF2_OFF 0x00FF0000 /* Wake-Up Filter 2 Pattern Offset */ +#define WF3_OFF 0xFF000000 /* Wake-Up Filter 3 Pattern Offset */ + +#define SET_WF0_OFF(x) (((x)&0xFF)<< 0 ) /* Set Wake-Up Filter 0 Byte Offset */ +#define SET_WF1_OFF(x) (((x)&0xFF)<< 8 ) /* Set Wake-Up Filter 1 Byte Offset */ +#define SET_WF2_OFF(x) (((x)&0xFF)<< 16 ) /* Set Wake-Up Filter 2 Byte Offset */ +#define SET_WF3_OFF(x) (((x)&0xFF)<< 24 ) /* Set Wake-Up Filter 3 Byte Offset */ +/* Set ALL Offsets */ +#define SET_WF_OFFS(x0,x1,x2,x3) (SET_WF0_OFF((x0))|SET_WF1_OFF((x1))|SET_WF2_OFF((x2))|SET_WF3_OFF((x3))) + +/* EMAC_WKUP_FFCRC0 Masks */ +#define WF0_CRC 0x0000FFFF /* Wake-Up Filter 0 Pattern CRC */ +#define WF1_CRC 0xFFFF0000 /* Wake-Up Filter 1 Pattern CRC */ + +#define SET_WF0_CRC(x) (((x)&0xFFFF)<< 0 ) /* Set Wake-Up Filter 0 Target CRC */ +#define SET_WF1_CRC(x) (((x)&0xFFFF)<< 16 ) /* Set Wake-Up Filter 1 Target CRC */ + +/* EMAC_WKUP_FFCRC1 Masks */ +#define WF2_CRC 0x0000FFFF /* Wake-Up Filter 2 Pattern CRC */ +#define WF3_CRC 0xFFFF0000 /* Wake-Up Filter 3 Pattern CRC */ + +#define SET_WF2_CRC(x) (((x)&0xFFFF)<< 0 ) /* Set Wake-Up Filter 2 Target CRC */ +#define SET_WF3_CRC(x) (((x)&0xFFFF)<< 16 ) /* Set Wake-Up Filter 3 Target CRC */ + +/* EMAC_SYSCTL Masks */ +#define PHYIE 0x00000001 /* PHY_INT Interrupt Enable */ +#define RXDWA 0x00000002 /* Receive Frame DMA Word Alignment (Odd/Even*) */ +#define RXCKS 0x00000004 /* Enable RX Frame TCP/UDP Checksum Computation */ +#define MDCDIV 0x00003F00 /* SCLK:MDC Clock Divisor [MDC=SCLK/(2*(N+1))] */ + +#define SET_MDCDIV(x) (((x)&0x3F)<< 8) /* Set MDC Clock Divisor */ + +/* EMAC_SYSTAT Masks */ +#define PHYINT 0x00000001 /* 
PHY_INT Interrupt Status */ +#define MMCINT 0x00000002 /* MMC Counter Interrupt Status */ +#define RXFSINT 0x00000004 /* RX Frame-Status Interrupt Status */ +#define TXFSINT 0x00000008 /* TX Frame-Status Interrupt Status */ +#define WAKEDET 0x00000010 /* Wake-Up Detected Status */ +#define RXDMAERR 0x00000020 /* RX DMA Direction Error Status */ +#define TXDMAERR 0x00000040 /* TX DMA Direction Error Status */ +#define STMDONE 0x00000080 /* Station Mgt. Transfer Done Interrupt Status */ + +/* EMAC_RX_STAT, EMAC_RX_STKY, and EMAC_RX_IRQE Masks */ +#define RX_FRLEN 0x000007FF /* Frame Length In Bytes */ +#define RX_COMP 0x00001000 /* RX Frame Complete */ +#define RX_OK 0x00002000 /* RX Frame Received With No Errors */ +#define RX_LONG 0x00004000 /* RX Frame Too Long Error */ +#define RX_ALIGN 0x00008000 /* RX Frame Alignment Error */ +#define RX_CRC 0x00010000 /* RX Frame CRC Error */ +#define RX_LEN 0x00020000 /* RX Frame Length Error */ +#define RX_FRAG 0x00040000 /* RX Frame Fragment Error */ +#define RX_ADDR 0x00080000 /* RX Frame Address Filter Failed Error */ +#define RX_DMAO 0x00100000 /* RX Frame DMA Overrun Error */ +#define RX_PHY 0x00200000 /* RX Frame PHY Error */ +#define RX_LATE 0x00400000 /* RX Frame Late Collision Error */ +#define RX_RANGE 0x00800000 /* RX Frame Length Field Out of Range Error */ +#define RX_MULTI 0x01000000 /* RX Multicast Frame Indicator */ +#define RX_BROAD 0x02000000 /* RX Broadcast Frame Indicator */ +#define RX_CTL 0x04000000 /* RX Control Frame Indicator */ +#define RX_UCTL 0x08000000 /* Unsupported RX Control Frame Indicator */ +#define RX_TYPE 0x10000000 /* RX Typed Frame Indicator */ +#define RX_VLAN1 0x20000000 /* RX VLAN1 Frame Indicator */ +#define RX_VLAN2 0x40000000 /* RX VLAN2 Frame Indicator */ +#define RX_ACCEPT 0x80000000 /* RX Frame Accepted Indicator */ + +/* EMAC_TX_STAT, EMAC_TX_STKY, and EMAC_TX_IRQE Masks */ +#define TX_COMP 0x00000001 /* TX Frame Complete */ +#define TX_OK 0x00000002 /* TX Frame Sent With No Errors */ +#define TX_ECOLL 0x00000004 /* TX Frame Excessive Collision Error */ +#define TX_LATE 0x00000008 /* TX Frame Late Collision Error */ +#define TX_DMAU 0x00000010 /* TX Frame DMA Underrun Error (STAT) */ +#define TX_MACE 0x00000010 /* Internal MAC Error Detected (STKY and IRQE) */ +#define TX_EDEFER 0x00000020 /* TX Frame Excessive Deferral Error */ +#define TX_BROAD 0x00000040 /* TX Broadcast Frame Indicator */ +#define TX_MULTI 0x00000080 /* TX Multicast Frame Indicator */ +#define TX_CCNT 0x00000F00 /* TX Frame Collision Count */ +#define TX_DEFER 0x00001000 /* TX Frame Deferred Indicator */ +#define TX_CRS 0x00002000 /* TX Frame Carrier Sense Not Asserted Error */ +#define TX_LOSS 0x00004000 /* TX Frame Carrier Lost During TX Error */ +#define TX_RETRY 0x00008000 /* TX Frame Successful After Retry */ +#define TX_FRLEN 0x07FF0000 /* TX Frame Length (Bytes) */ + +/* EMAC_MMC_CTL Masks */ +#define RSTC 0x00000001 /* Reset All Counters */ +#define CROLL 0x00000002 /* Counter Roll-Over Enable */ +#define CCOR 0x00000004 /* Counter Clear-On-Read Mode Enable */ +#define MMCE 0x00000008 /* Enable MMC Counter Operation */ + +/* EMAC_MMC_RIRQS and EMAC_MMC_RIRQE Masks */ +#define RX_OK_CNT 0x00000001 /* RX Frames Received With No Errors */ +#define RX_FCS_CNT 0x00000002 /* RX Frames W/Frame Check Sequence Errors */ +#define RX_ALIGN_CNT 0x00000004 /* RX Frames With Alignment Errors */ +#define RX_OCTET_CNT 0x00000008 /* RX Octets Received OK */ +#define RX_LOST_CNT 0x00000010 /* RX Frames Lost Due To Internal MAC RX Error */ 
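The EMAC_MMC_CTL bits above gate the hardware statistics block, and the EMAC_MMC_RIRQE masks in this group select which counters raise a roll-over interrupt. A minimal bring-up sketch follows; it is not part of the original patch, and it assumes the bfin_write32() MMIO accessor and the EMAC_MMC_CTL/EMAC_MMC_RIRQE register address macros defined earlier in this file.

#include <asm/blackfin.h>	/* assumed location of bfin_write32() */

static inline void emac_mmc_counters_init(void)
{
	/* zero every counter before enabling the block */
	bfin_write32(EMAC_MMC_CTL, RSTC);

	/* interrupt when the "RX frames received OK" counter rolls over */
	bfin_write32(EMAC_MMC_RIRQE, RX_OK_CNT);

	/* enable counting; let counters roll over rather than saturate */
	bfin_write32(EMAC_MMC_CTL, CROLL | MMCE);
}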
+#define RX_UNI_CNT 0x00000020 /* Unicast RX Frames Received OK */ +#define RX_MULTI_CNT 0x00000040 /* Multicast RX Frames Received OK */ +#define RX_BROAD_CNT 0x00000080 /* Broadcast RX Frames Received OK */ +#define RX_IRL_CNT 0x00000100 /* RX Frames With In-Range Length Errors */ +#define RX_ORL_CNT 0x00000200 /* RX Frames With Out-Of-Range Length Errors */ +#define RX_LONG_CNT 0x00000400 /* RX Frames With Frame Too Long Errors */ +#define RX_MACCTL_CNT 0x00000800 /* MAC Control RX Frames Received */ +#define RX_OPCODE_CTL 0x00001000 /* Unsupported Op-Code RX Frames Received */ +#define RX_PAUSE_CNT 0x00002000 /* PAUSEMAC Control RX Frames Received */ +#define RX_ALLF_CNT 0x00004000 /* All RX Frames Received */ +#define RX_ALLO_CNT 0x00008000 /* All RX Octets Received */ +#define RX_TYPED_CNT 0x00010000 /* Typed RX Frames Received */ +#define RX_SHORT_CNT 0x00020000 /* RX Frame Fragments (< 64 Bytes) Received */ +#define RX_EQ64_CNT 0x00040000 /* 64-Byte RX Frames Received */ +#define RX_LT128_CNT 0x00080000 /* 65-127-Byte RX Frames Received */ +#define RX_LT256_CNT 0x00100000 /* 128-255-Byte RX Frames Received */ +#define RX_LT512_CNT 0x00200000 /* 256-511-Byte RX Frames Received */ +#define RX_LT1024_CNT 0x00400000 /* 512-1023-Byte RX Frames Received */ +#define RX_GE1024_CNT 0x00800000 /* 1024-Max-Byte RX Frames Received */ + +/* EMAC_MMC_TIRQS and EMAC_MMC_TIRQE Masks */ +#define TX_OK_CNT 0x00000001 /* TX Frames Sent OK */ +#define TX_SCOLL_CNT 0x00000002 /* TX Frames With Single Collisions */ +#define TX_MCOLL_CNT 0x00000004 /* TX Frames With Multiple Collisions */ +#define TX_OCTET_CNT 0x00000008 /* TX Octets Sent OK */ +#define TX_DEFER_CNT 0x00000010 /* TX Frames With Deferred Transmission */ +#define TX_LATE_CNT 0x00000020 /* TX Frames With Late Collisions */ +#define TX_ABORTC_CNT 0x00000040 /* TX Frames Aborted Due To Excess Collisions */ +#define TX_LOST_CNT 0x00000080 /* TX Frames Lost Due To Internal MAC TX Error */ +#define TX_CRS_CNT 0x00000100 /* TX Frames With Carrier Sense Errors */ +#define TX_UNI_CNT 0x00000200 /* Unicast TX Frames Sent */ +#define TX_MULTI_CNT 0x00000400 /* Multicast TX Frames Sent */ +#define TX_BROAD_CNT 0x00000800 /* Broadcast TX Frames Sent */ +#define TX_EXDEF_CTL 0x00001000 /* TX Frames With Excessive Deferral */ +#define TX_MACCTL_CNT 0x00002000 /* MAC Control TX Frames Sent */ +#define TX_ALLF_CNT 0x00004000 /* All TX Frames Sent */ +#define TX_ALLO_CNT 0x00008000 /* All TX Octets Sent */ +#define TX_EQ64_CNT 0x00010000 /* 64-Byte TX Frames Sent */ +#define TX_LT128_CNT 0x00020000 /* 65-127-Byte TX Frames Sent */ +#define TX_LT256_CNT 0x00040000 /* 128-255-Byte TX Frames Sent */ +#define TX_LT512_CNT 0x00080000 /* 256-511-Byte TX Frames Sent */ +#define TX_LT1024_CNT 0x00100000 /* 512-1023-Byte TX Frames Sent */ +#define TX_GE1024_CNT 0x00200000 /* 1024-Max-Byte TX Frames Sent */ +#define TX_ABORT_CNT 0x00400000 /* TX Frames Aborted */ + +#endif /* _DEF_BF537_H */ diff --git a/include/asm-blackfin/mach-bf537/dma.h b/include/asm-blackfin/mach-bf537/dma.h new file mode 100644 index 000000000000..7a964040870a --- /dev/null +++ b/include/asm-blackfin/mach-bf537/dma.h @@ -0,0 +1,55 @@ +/* + * file: include/asm-blackfin/mach-bf537/dma.h + * based on: + * author: + * + * created: + * description: + * system mmr register map + * rev: + * + * modified: + * + * + * bugs: enter bugs at http://blackfin.uclinux.org/ + * + * this program is free software; you can redistribute it and/or modify + * it under the terms of the gnu general public license as 
published by + * the free software foundation; either version 2, or (at your option) + * any later version. + * + * this program is distributed in the hope that it will be useful, + * but without any warranty; without even the implied warranty of + * merchantability or fitness for a particular purpose. see the + * gnu general public license for more details. + * + * you should have received a copy of the gnu general public license + * along with this program; see the file copying. + * if not, write to the free software foundation, + * 59 temple place - suite 330, boston, ma 02111-1307, usa. + */ + +#ifndef _MACH_DMA_H_ +#define _MACH_DMA_H_ + +#define MAX_BLACKFIN_DMA_CHANNEL 16 + +#define CH_PPI 0 +#define CH_EMAC_RX 1 +#define CH_EMAC_TX 2 +#define CH_SPORT0_RX 3 +#define CH_SPORT0_TX 4 +#define CH_SPORT1_RX 5 +#define CH_SPORT1_TX 6 +#define CH_SPI 7 +#define CH_UART0_RX 8 +#define CH_UART0_TX 9 +#define CH_UART1_RX 10 +#define CH_UART1_TX 11 + +#define CH_MEM_STREAM0_DEST 12 /* TX */ +#define CH_MEM_STREAM0_SRC 13 /* RX */ +#define CH_MEM_STREAM1_DEST 14 /* TX */ +#define CH_MEM_STREAM1_SRC 15 /* RX */ + +#endif diff --git a/include/asm-blackfin/mach-bf537/irq.h b/include/asm-blackfin/mach-bf537/irq.h new file mode 100644 index 000000000000..8af2a832ef6b --- /dev/null +++ b/include/asm-blackfin/mach-bf537/irq.h @@ -0,0 +1,219 @@ +/* + * file: include/asm-blackfin/mach-bf537/irq.h + * based on: + * author: + * + * created: + * description: + * system mmr register map + * rev: + * + * modified: + * + * + * bugs: enter bugs at http://blackfin.uclinux.org/ + * + * this program is free software; you can redistribute it and/or modify + * it under the terms of the gnu general public license as published by + * the free software foundation; either version 2, or (at your option) + * any later version. + * + * this program is distributed in the hope that it will be useful, + * but without any warranty; without even the implied warranty of + * merchantability or fitness for a particular purpose. see the + * gnu general public license for more details. + * + * you should have received a copy of the gnu general public license + * along with this program; see the file copying. + * if not, write to the free software foundation, + * 59 temple place - suite 330, boston, ma 02111-1307, usa. + */ + +#ifndef _BF537_IRQ_H_ +#define _BF537_IRQ_H_ + +/* + * Interrupt source definitions + Event Source Core Event Name +Core Emulation ** + Events (highest priority) EMU 0 + Reset RST 1 + NMI NMI 2 + Exception EVX 3 + Reserved -- 4 + Hardware Error IVHW 5 + Core Timer IVTMR 6 * + +..... 
+ + Software Interrupt 1 IVG14 31 + Software Interrupt 2 -- + (lowest priority) IVG15 32 * + */ + +#define SYS_IRQS 41 +#define NR_PERI_INTS 32 + +/* The ABSTRACT IRQ definitions */ +/** the first seven of the following are fixed, the rest you change if you need to **/ +#define IRQ_EMU 0 /*Emulation */ +#define IRQ_RST 1 /*reset */ +#define IRQ_NMI 2 /*Non Maskable */ +#define IRQ_EVX 3 /*Exception */ +#define IRQ_UNUSED 4 /*- unused interrupt*/ +#define IRQ_HWERR 5 /*Hardware Error */ +#define IRQ_CORETMR 6 /*Core timer */ + +#define IRQ_PLL_WAKEUP 7 /*PLL Wakeup Interrupt */ +#define IRQ_DMA_ERROR 8 /*DMA Error (general) */ +#define IRQ_GENERIC_ERROR 9 /*GENERIC Error Interrupt */ +#define IRQ_RTC 10 /*RTC Interrupt */ +#define IRQ_PPI 11 /*DMA0 Interrupt (PPI) */ +#define IRQ_SPORT0_RX 12 /*DMA3 Interrupt (SPORT0 RX) */ +#define IRQ_SPORT0_TX 13 /*DMA4 Interrupt (SPORT0 TX) */ +#define IRQ_SPORT1_RX 14 /*DMA5 Interrupt (SPORT1 RX) */ +#define IRQ_SPORT1_TX 15 /*DMA6 Interrupt (SPORT1 TX) */ +#define IRQ_TWI 16 /*TWI Interrupt */ +#define IRQ_SPI 17 /*DMA7 Interrupt (SPI) */ +#define IRQ_UART0_RX 18 /*DMA8 Interrupt (UART0 RX) */ +#define IRQ_UART0_TX 19 /*DMA9 Interrupt (UART0 TX) */ +#define IRQ_UART1_RX 20 /*DMA10 Interrupt (UART1 RX) */ +#define IRQ_UART1_TX 21 /*DMA11 Interrupt (UART1 TX) */ +#define IRQ_CAN_RX 22 /*CAN Receive Interrupt */ +#define IRQ_CAN_TX 23 /*CAN Transmit Interrupt */ +#define IRQ_MAC_RX 24 /*DMA1 (Ethernet RX) Interrupt */ +#define IRQ_MAC_TX 25 /*DMA2 (Ethernet TX) Interrupt */ +#define IRQ_TMR0 26 /*Timer 0 */ +#define IRQ_TMR1 27 /*Timer 1 */ +#define IRQ_TMR2 28 /*Timer 2 */ +#define IRQ_TMR3 29 /*Timer 3 */ +#define IRQ_TMR4 30 /*Timer 4 */ +#define IRQ_TMR5 31 /*Timer 5 */ +#define IRQ_TMR6 32 /*Timer 6 */ +#define IRQ_TMR7 33 /*Timer 7 */ +#define IRQ_PROG_INTA 34 /* PF Ports F&G (PF15:0) Interrupt A */ +#define IRQ_PORTG_INTB 35 /* PF Port G (PF15:0) Interrupt B */ +#define IRQ_MEM_DMA0 36 /*(Memory DMA Stream 0) */ +#define IRQ_MEM_DMA1 37 /*(Memory DMA Stream 1) */ +#define IRQ_PROG_INTB 38 /* PF Ports F (PF15:0) Interrupt B */ +#define IRQ_WATCH 38 /*Watch Dog Timer */ +#define IRQ_SW_INT1 40 /*Software Int 1 */ +#define IRQ_SW_INT2 41 /*Software Int 2 (reserved for SYSCALL) */ + +#define IRQ_PPI_ERROR 42 /*PPI Error Interrupt */ +#define IRQ_CAN_ERROR 43 /*CAN Error Interrupt */ +#define IRQ_MAC_ERROR 44 /*PPI Error Interrupt */ +#define IRQ_SPORT0_ERROR 45 /*SPORT0 Error Interrupt */ +#define IRQ_SPORT1_ERROR 46 /*SPORT1 Error Interrupt */ +#define IRQ_SPI_ERROR 47 /*SPI Error Interrupt */ +#define IRQ_UART0_ERROR 48 /*UART Error Interrupt */ +#define IRQ_UART1_ERROR 49 /*UART Error Interrupt */ + +#define IRQ_PF0 50 +#define IRQ_PF1 51 +#define IRQ_PF2 52 +#define IRQ_PF3 53 +#define IRQ_PF4 54 +#define IRQ_PF5 55 +#define IRQ_PF6 56 +#define IRQ_PF7 57 +#define IRQ_PF8 58 +#define IRQ_PF9 59 +#define IRQ_PF10 60 +#define IRQ_PF11 61 +#define IRQ_PF12 62 +#define IRQ_PF13 63 +#define IRQ_PF14 64 +#define IRQ_PF15 65 + +#define IRQ_PG0 66 +#define IRQ_PG1 67 +#define IRQ_PG2 68 +#define IRQ_PG3 69 +#define IRQ_PG4 70 +#define IRQ_PG5 71 +#define IRQ_PG6 72 +#define IRQ_PG7 73 +#define IRQ_PG8 74 +#define IRQ_PG9 75 +#define IRQ_PG10 76 +#define IRQ_PG11 77 +#define IRQ_PG12 78 +#define IRQ_PG13 79 +#define IRQ_PG14 80 +#define IRQ_PG15 81 + +#define IRQ_PH0 82 +#define IRQ_PH1 83 +#define IRQ_PH2 84 +#define IRQ_PH3 85 +#define IRQ_PH4 86 +#define IRQ_PH5 87 +#define IRQ_PH6 88 +#define IRQ_PH7 89 +#define IRQ_PH8 90 +#define IRQ_PH9 91 +#define 
IRQ_PH10 92 +#define IRQ_PH11 93 +#define IRQ_PH12 94 +#define IRQ_PH13 95 +#define IRQ_PH14 96 +#define IRQ_PH15 97 + +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO +#define NR_IRQS (IRQ_PH15+1) +#else +#define NR_IRQS (IRQ_UART1_ERROR+1) +#endif + +#define IVG7 7 +#define IVG8 8 +#define IVG9 9 +#define IVG10 10 +#define IVG11 11 +#define IVG12 12 +#define IVG13 13 +#define IVG14 14 +#define IVG15 15 + +/* IAR0 BIT FIELDS*/ +#define IRQ_PLL_WAKEUP_POS 0 +#define IRQ_DMA_ERROR_POS 4 +#define IRQ_ERROR_POS 8 +#define IRQ_RTC_POS 12 +#define IRQ_PPI_POS 16 +#define IRQ_SPORT0_RX_POS 20 +#define IRQ_SPORT0_TX_POS 24 +#define IRQ_SPORT1_RX_POS 28 + +/* IAR1 BIT FIELDS*/ +#define IRQ_SPORT1_TX_POS 0 +#define IRQ_TWI_POS 4 +#define IRQ_SPI_POS 8 +#define IRQ_UART0_RX_POS 12 +#define IRQ_UART0_TX_POS 16 +#define IRQ_UART1_RX_POS 20 +#define IRQ_UART1_TX_POS 24 +#define IRQ_CAN_RX_POS 28 + +/* IAR2 BIT FIELDS*/ +#define IRQ_CAN_TX_POS 0 +#define IRQ_MAC_RX_POS 4 +#define IRQ_MAC_TX_POS 8 +#define IRQ_TMR0_POS 12 +#define IRQ_TMR1_POS 16 +#define IRQ_TMR2_POS 20 +#define IRQ_TMR3_POS 24 +#define IRQ_TMR4_POS 28 + +/* IAR3 BIT FIELDS*/ +#define IRQ_TMR5_POS 0 +#define IRQ_TMR6_POS 4 +#define IRQ_TMR7_POS 8 +#define IRQ_PROG_INTA_POS 12 +#define IRQ_PORTG_INTB_POS 16 +#define IRQ_MEM_DMA0_POS 20 +#define IRQ_MEM_DMA1_POS 24 +#define IRQ_WATCH_POS 28 + +#endif /* _BF537_IRQ_H_ */ diff --git a/include/asm-blackfin/mach-bf537/mem_init.h b/include/asm-blackfin/mach-bf537/mem_init.h new file mode 100644 index 000000000000..9ad979d416c6 --- /dev/null +++ b/include/asm-blackfin/mach-bf537/mem_init.h @@ -0,0 +1,330 @@ +/* + * File: include/asm-blackfin/mach-bf537/mem_init.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#if (CONFIG_MEM_MT48LC16M16A2TG_75 || CONFIG_MEM_MT48LC64M4A2FB_7E || CONFIG_MEM_MT48LC16M8A2TG_75 || CONFIG_MEM_GENERIC_BOARD || CONFIG_MEM_MT48LC32M8A2_75) +#if (CONFIG_SCLK_HZ > 119402985) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_7 +#define SDRAM_tRAS_num 7 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_6 +#define SDRAM_tRAS_num 6 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_5 +#define SDRAM_tRAS_num 5 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_4 +#define SDRAM_tRAS_num 4 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_3 +#define SDRAM_tRAS_num 3 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_4 +#define SDRAM_tRAS_num 3 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_3 +#define SDRAM_tRAS_num 3 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_2 +#define SDRAM_tRAS_num 2 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ <= 29850746) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_1 +#define SDRAM_tRAS_num 1 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#endif + +#if (CONFIG_MEM_MT48LC16M16A2TG_75) + /*SDRAM INFORMATION: */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_MT48LC16M8A2TG_75) + /*SDRAM INFORMATION: */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 4096 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_MT48LC32M8A2_75) + /*SDRAM INFORMATION: */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_MT48LC64M4A2FB_7E) + /*SDRAM INFORMATION: */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_GENERIC_BOARD) + /*SDRAM INFORMATION: Modify this for your board */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_SIZE == 128) +#define SDRAM_SIZE EBSZ_128 +#endif +#if (CONFIG_MEM_SIZE == 64) +#define SDRAM_SIZE EBSZ_64 +#endif +#if (CONFIG_MEM_SIZE == 32) +#define SDRAM_SIZE EBSZ_32 +#endif +#if (CONFIG_MEM_SIZE == 16) +#define SDRAM_SIZE EBSZ_16 +#endif +#if 
(CONFIG_MEM_ADD_WIDTH == 11) +#define SDRAM_WIDTH EBCAW_11 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 10) +#define SDRAM_WIDTH EBCAW_10 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 9) +#define SDRAM_WIDTH EBCAW_9 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 8) +#define SDRAM_WIDTH EBCAW_8 +#endif + +#define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EBE) + +/* Equation from section 17 (p17-46) of BF533 HRM */ +#define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) + +/* Enable SCLK Out */ +#define mem_SDGCTL (SCTLE | SDRAM_CL | SDRAM_tRAS | SDRAM_tRP | SDRAM_tRCD | SDRAM_tWR | PSS) + +#if defined CONFIG_CLKIN_HALF +#define CLKIN_HALF 1 +#else +#define CLKIN_HALF 0 +#endif + +#if defined CONFIG_PLL_BYPASS +#define PLL_BYPASS 1 +#else +#define PLL_BYPASS 0 +#endif + +/***************************************Currently Not Being Used *********************************/ +#define flash_EBIU_AMBCTL_WAT ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 +#define flash_EBIU_AMBCTL_RAT ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 +#define flash_EBIU_AMBCTL_HT ((CONFIG_FLASH_SPEED_BHT * 4) / (4000000000 / CONFIG_SCLK_HZ)) +#define flash_EBIU_AMBCTL_ST ((CONFIG_FLASH_SPEED_BST * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 +#define flash_EBIU_AMBCTL_TT ((CONFIG_FLASH_SPEED_BTT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 + +#if (flash_EBIU_AMBCTL_TT > 3) +#define flash_EBIU_AMBCTL0_TT B0TT_4 +#endif +#if (flash_EBIU_AMBCTL_TT == 3) +#define flash_EBIU_AMBCTL0_TT B0TT_3 +#endif +#if (flash_EBIU_AMBCTL_TT == 2) +#define flash_EBIU_AMBCTL0_TT B0TT_2 +#endif +#if (flash_EBIU_AMBCTL_TT < 2) +#define flash_EBIU_AMBCTL0_TT B0TT_1 +#endif + +#if (flash_EBIU_AMBCTL_ST > 3) +#define flash_EBIU_AMBCTL0_ST B0ST_4 +#endif +#if (flash_EBIU_AMBCTL_ST == 3) +#define flash_EBIU_AMBCTL0_ST B0ST_3 +#endif +#if (flash_EBIU_AMBCTL_ST == 2) +#define flash_EBIU_AMBCTL0_ST B0ST_2 +#endif +#if (flash_EBIU_AMBCTL_ST < 2) +#define flash_EBIU_AMBCTL0_ST B0ST_1 +#endif + +#if (flash_EBIU_AMBCTL_HT > 2) +#define flash_EBIU_AMBCTL0_HT B0HT_3 +#endif +#if (flash_EBIU_AMBCTL_HT == 2) +#define flash_EBIU_AMBCTL0_HT B0HT_2 +#endif +#if (flash_EBIU_AMBCTL_HT == 1) +#define flash_EBIU_AMBCTL0_HT B0HT_1 +#endif +#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0) +#define flash_EBIU_AMBCTL0_HT B0HT_0 +#endif +#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0) +#define flash_EBIU_AMBCTL0_HT B0HT_1 +#endif + +#if (flash_EBIU_AMBCTL_WAT > 14) +#define flash_EBIU_AMBCTL0_WAT B0WAT_15 +#endif +#if (flash_EBIU_AMBCTL_WAT == 14) +#define flash_EBIU_AMBCTL0_WAT B0WAT_14 +#endif +#if (flash_EBIU_AMBCTL_WAT == 13) +#define flash_EBIU_AMBCTL0_WAT B0WAT_13 +#endif +#if (flash_EBIU_AMBCTL_WAT == 12) +#define flash_EBIU_AMBCTL0_WAT B0WAT_12 +#endif +#if (flash_EBIU_AMBCTL_WAT == 11) +#define flash_EBIU_AMBCTL0_WAT B0WAT_11 +#endif +#if (flash_EBIU_AMBCTL_WAT == 10) +#define flash_EBIU_AMBCTL0_WAT B0WAT_10 +#endif +#if (flash_EBIU_AMBCTL_WAT == 9) +#define flash_EBIU_AMBCTL0_WAT B0WAT_9 +#endif +#if (flash_EBIU_AMBCTL_WAT == 8) +#define flash_EBIU_AMBCTL0_WAT B0WAT_8 +#endif +#if (flash_EBIU_AMBCTL_WAT == 7) +#define flash_EBIU_AMBCTL0_WAT B0WAT_7 +#endif +#if (flash_EBIU_AMBCTL_WAT == 6) +#define flash_EBIU_AMBCTL0_WAT B0WAT_6 +#endif +#if (flash_EBIU_AMBCTL_WAT == 5) +#define flash_EBIU_AMBCTL0_WAT B0WAT_5 +#endif +#if (flash_EBIU_AMBCTL_WAT == 4) +#define flash_EBIU_AMBCTL0_WAT B0WAT_4 +#endif +#if (flash_EBIU_AMBCTL_WAT == 3) +#define flash_EBIU_AMBCTL0_WAT B0WAT_3 
+#endif +#if (flash_EBIU_AMBCTL_WAT == 2) +#define flash_EBIU_AMBCTL0_WAT B0WAT_2 +#endif +#if (flash_EBIU_AMBCTL_WAT == 1) +#define flash_EBIU_AMBCTL0_WAT B0WAT_1 +#endif + +#if (flash_EBIU_AMBCTL_RAT > 14) +#define flash_EBIU_AMBCTL0_RAT B0RAT_15 +#endif +#if (flash_EBIU_AMBCTL_RAT == 14) +#define flash_EBIU_AMBCTL0_RAT B0RAT_14 +#endif +#if (flash_EBIU_AMBCTL_RAT == 13) +#define flash_EBIU_AMBCTL0_RAT B0RAT_13 +#endif +#if (flash_EBIU_AMBCTL_RAT == 12) +#define flash_EBIU_AMBCTL0_RAT B0RAT_12 +#endif +#if (flash_EBIU_AMBCTL_RAT == 11) +#define flash_EBIU_AMBCTL0_RAT B0RAT_11 +#endif +#if (flash_EBIU_AMBCTL_RAT == 10) +#define flash_EBIU_AMBCTL0_RAT B0RAT_10 +#endif +#if (flash_EBIU_AMBCTL_RAT == 9) +#define flash_EBIU_AMBCTL0_RAT B0RAT_9 +#endif +#if (flash_EBIU_AMBCTL_RAT == 8) +#define flash_EBIU_AMBCTL0_RAT B0RAT_8 +#endif +#if (flash_EBIU_AMBCTL_RAT == 7) +#define flash_EBIU_AMBCTL0_RAT B0RAT_7 +#endif +#if (flash_EBIU_AMBCTL_RAT == 6) +#define flash_EBIU_AMBCTL0_RAT B0RAT_6 +#endif +#if (flash_EBIU_AMBCTL_RAT == 5) +#define flash_EBIU_AMBCTL0_RAT B0RAT_5 +#endif +#if (flash_EBIU_AMBCTL_RAT == 4) +#define flash_EBIU_AMBCTL0_RAT B0RAT_4 +#endif +#if (flash_EBIU_AMBCTL_RAT == 3) +#define flash_EBIU_AMBCTL0_RAT B0RAT_3 +#endif +#if (flash_EBIU_AMBCTL_RAT == 2) +#define flash_EBIU_AMBCTL0_RAT B0RAT_2 +#endif +#if (flash_EBIU_AMBCTL_RAT == 1) +#define flash_EBIU_AMBCTL0_RAT B0RAT_1 +#endif + +#define flash_EBIU_AMBCTL0 \ + (flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \ + flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN) diff --git a/include/asm-blackfin/mach-bf537/mem_map.h b/include/asm-blackfin/mach-bf537/mem_map.h new file mode 100644 index 000000000000..2a808c1202bf --- /dev/null +++ b/include/asm-blackfin/mach-bf537/mem_map.h @@ -0,0 +1,175 @@ +/* + * file: include/asm-blackfin/mach-bf537/mem_map.h + * based on: + * author: + * + * created: + * description: + * Memory MAP Common header file for blackfin BF537/6/4 of processors. + * rev: + * + * modified: + * + * bugs: enter bugs at http://blackfin.uclinux.org/ + * + * this program is free software; you can redistribute it and/or modify + * it under the terms of the gnu general public license as published by + * the free software foundation; either version 2, or (at your option) + * any later version. + * + * this program is distributed in the hope that it will be useful, + * but without any warranty; without even the implied warranty of + * merchantability or fitness for a particular purpose. see the + * gnu general public license for more details. + * + * you should have received a copy of the gnu general public license + * along with this program; see the file copying. + * if not, write to the free software foundation, + * 59 temple place - suite 330, boston, ma 02111-1307, usa. 
+ */ + +#ifndef _MEM_MAP_537_H_ +#define _MEM_MAP_537_H_ + +#define COREMMR_BASE 0xFFE00000 /* Core MMRs */ +#define SYSMMR_BASE 0xFFC00000 /* System MMRs */ + +/* Async Memory Banks */ +#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */ +#define ASYNC_BANK3_SIZE 0x00100000 /* 1M */ +#define ASYNC_BANK2_BASE 0x20200000 /* Async Bank 2 */ +#define ASYNC_BANK2_SIZE 0x00100000 /* 1M */ +#define ASYNC_BANK1_BASE 0x20100000 /* Async Bank 1 */ +#define ASYNC_BANK1_SIZE 0x00100000 /* 1M */ +#define ASYNC_BANK0_BASE 0x20000000 /* Async Bank 0 */ +#define ASYNC_BANK0_SIZE 0x00100000 /* 1M */ + +/* Boot ROM Memory */ + +#define BOOT_ROM_START 0xEF000000 + +/* Level 1 Memory */ + +/* Memory Map for ADSP-BF537 processors */ + +#ifdef CONFIG_BLKFIN_CACHE +#define BLKFIN_ICACHESIZE (16*1024) +#else +#define BLKFIN_ICACHESIZE (0*1024) +#endif + + +#ifdef CONFIG_BF537 +#define L1_CODE_START 0xFFA00000 +#define L1_DATA_A_START 0xFF800000 +#define L1_DATA_B_START 0xFF900000 + +#define L1_CODE_LENGTH 0xC000 + +#ifdef CONFIG_BLKFIN_DCACHE + +#ifdef CONFIG_BLKFIN_DCACHE_BANKA +#define DMEM_CNTR (ACACHE_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x8000 - 0x4000) +#define L1_DATA_B_LENGTH 0x8000 +#define BLKFIN_DCACHESIZE (16*1024) +#define BLKFIN_DSUPBANKS 1 +#else +#define DMEM_CNTR (ACACHE_BCACHE | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x8000 - 0x4000) +#define L1_DATA_B_LENGTH (0x8000 - 0x4000) +#define BLKFIN_DCACHESIZE (32*1024) +#define BLKFIN_DSUPBANKS 2 +#endif + +#else +#define DMEM_CNTR (ASRAM_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH 0x8000 +#define L1_DATA_B_LENGTH 0x8000 +#define BLKFIN_DCACHESIZE (0*1024) +#define BLKFIN_DSUPBANKS 0 +#endif /*CONFIG_BLKFIN_DCACHE*/ + +#endif /*CONFIG_BF537*/ + +/* Memory Map for ADSP-BF536 processors */ + +#ifdef CONFIG_BF536 +#define L1_CODE_START 0xFFA00000 +#define L1_DATA_A_START 0xFF804000 +#define L1_DATA_B_START 0xFF904000 + +#define L1_CODE_LENGTH 0xC000 + + +#ifdef CONFIG_BLKFIN_DCACHE + +#ifdef CONFIG_BLKFIN_DCACHE_BANKA +#define DMEM_CNTR (ACACHE_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x4000 - 0x4000) +#define L1_DATA_B_LENGTH 0x4000 +#define BLKFIN_DCACHESIZE (16*1024) +#define BLKFIN_DSUPBANKS 1 + +#else +#define DMEM_CNTR (ACACHE_BCACHE | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x4000 - 0x4000) +#define L1_DATA_B_LENGTH (0x4000 - 0x4000) +#define BLKFIN_DCACHESIZE (32*1024) +#define BLKFIN_DSUPBANKS 2 +#endif + +#else +#define DMEM_CNTR (ASRAM_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH 0x4000 +#define L1_DATA_B_LENGTH 0x4000 +#define BLKFIN_DCACHESIZE (0*1024) +#define BLKFIN_DSUPBANKS 0 +#endif /*CONFIG_BLKFIN_DCACHE*/ + +#endif + +/* Memory Map for ADSP-BF534 processors */ + +#ifdef CONFIG_BF534 +#define L1_CODE_START 0xFFA00000 +#define L1_DATA_A_START 0xFF800000 +#define L1_DATA_B_START 0xFF900000 + +#define L1_CODE_LENGTH 0xC000 + +#ifdef CONFIG_BLKFIN_DCACHE + +#ifdef CONFIG_BLKFIN_DCACHE_BANKA +#define DMEM_CNTR (ACACHE_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x8000 - 0x4000) +#define L1_DATA_B_LENGTH 0x8000 +#define BLKFIN_DCACHESIZE (16*1024) +#define BLKFIN_DSUPBANKS 1 + +#else +#define DMEM_CNTR (ACACHE_BCACHE | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x8000 - 0x4000) +#define L1_DATA_B_LENGTH (0x8000 - 0x4000) +#define BLKFIN_DCACHESIZE (32*1024) +#define BLKFIN_DSUPBANKS 2 +#endif + +#else +#define DMEM_CNTR (ASRAM_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH 0x8000 +#define L1_DATA_B_LENGTH 0x8000 +#define 
BLKFIN_DCACHESIZE (0*1024) +#define BLKFIN_DSUPBANKS 0 +#endif /*CONFIG_BLKFIN_DCACHE*/ + +#endif + +/* Scratch Pad Memory */ + +#if defined(CONFIG_BF537) || defined(CONFIG_BF536) || defined(CONFIG_BF534) +#define L1_SCRATCH_START 0xFFB00000 +#define L1_SCRATCH_LENGTH 0x1000 +#endif + +#endif /* _MEM_MAP_537_H_ */ diff --git a/include/asm-blackfin/mach-bf561/anomaly.h b/include/asm-blackfin/mach-bf561/anomaly.h new file mode 100644 index 000000000000..f5b32d66517d --- /dev/null +++ b/include/asm-blackfin/mach-bf561/anomaly.h @@ -0,0 +1,184 @@ + +/* + * File: include/asm-blackfin/mach-bf561/anomaly.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* This file should be up to date with: + * - Revision L, 10Aug2006; ADSP-BF561 Silicon Anomaly List + */ + +#ifndef _MACH_ANOMALY_H_ +#define _MACH_ANOMALY_H_ + +/* We do not support 0.1, 0.2, or 0.4 silicon - sorry */ +#if (defined(CONFIG_BF_REV_0_1) || defined(CONFIG_BF_REV_0_2) || defined(CONFIG_BF_REV_0_4)) +#error Kernel will not work on BF561 Version 0.1, 0.2, or 0.4 +#endif + +/* Issues that are common to 0.5 and 0.3 silicon */ +#if (defined(CONFIG_BF_REV_0_5) || defined(CONFIG_BF_REV_0_3)) +#define ANOMALY_05000074 /* A multi issue instruction with dsp32shiftimm in + slot1 and store of a P register in slot 2 is not + supported */ +#define ANOMALY_05000099 /* UART Line Status Register (UART_LSR) bits are not + updated at the same time.
*/ +#define ANOMALY_05000120 /* Testset instructions restricted to 32-bit aligned + memory locations */ +#define ANOMALY_05000122 /* Rx.H cannot be used to access 16-bit System MMR + registers */ +#define ANOMALY_05000127 /* Signbits instruction not functional under certain + conditions */ +#define ANOMALY_05000149 /* IMDMA S1/D1 channel may stall */ +#define ANOMALY_05000166 /* PPI Data Lengths Between 8 and 16 do not zero out + upper bits */ +#define ANOMALY_05000167 /* Turning Serial Ports on With External Frame Syncs */ +#define ANOMALY_05000180 /* PPI_DELAY not functional in PPI modes with 0 frame + syncs */ +#define ANOMALY_05000182 /* IMDMA does not operate to full speed for 600MHz + and higher devices */ +#define ANOMALY_05000187 /* IMDMA Corrupted Data after a Halt */ +#define ANOMALY_05000190 /* PPI not functional at core voltage < 1Volt */ +#define ANOMALY_05000208 /* VSTAT status bit in PLL_STAT register is not + functional */ +#define ANOMALY_05000245 /* Spurious Hardware Error from an access in the + shadow of a conditional branch */ +#define ANOMALY_05000257 /* Interrupt/Exception during short hardware loop + may cause bad instruction fetches */ +#define ANOMALY_05000265 /* Sensitivity to noise with slow input edge rates on + external SPORT TX and RX clocks */ +#define ANOMALY_05000267 /* IMDMA may corrupt data under certain conditions */ +#define ANOMALY_05000269 /* High I/O activity causes output voltage of internal + voltage regulator (VDDint) to increase */ +#define ANOMALY_05000270 /* High I/O activity causes output voltage of internal + voltage regulator (VDDint) to decrease */ +#define ANOMALY_05000272 /* Certain data cache write through modes fail for + VDDint <=0.9V */ +#define ANOMALY_05000274 /* Data cache write back to external synchronous memory + may be lost */ +#define ANOMALY_05000275 /* PPI Timing and sampling informaton updates */ +#define ANOMALY_05000312 /* Errors when SSYNC, CSYNC, or loads to LT, LB and LC + registers are interrupted */ + +#endif /* (defined(CONFIG_BF_REV_0_5) || defined(CONFIG_BF_REV_0_3)) */ + +#if (defined(CONFIG_BF_REV_0_5)) +#define ANOMALY_05000254 /* Incorrect Timer Pulse Width in Single-Shot PWM_OUT + mode with external clock */ +#define ANOMALY_05000266 /* IMDMA destination IRQ status must be read prior to + using IMDMA */ +#endif + +#if (defined(CONFIG_BF_REV_0_3)) +#define ANOMALY_05000156 /* Timers in PWM-Out Mode with PPI GP Receive (Input) + Mode with 0 Frame Syncs */ +#define ANOMALY_05000168 /* SDRAM auto-refresh and subsequent Power Ups */ +#define ANOMALY_05000169 /* DATA CPLB page miss can result in lost write-through + cache data writes */ +#define ANOMALY_05000171 /* Boot-ROM code modifies SICA_IWRx wakeup registers */ +#define ANOMALY_05000174 /* Cache Fill Buffer Data lost */ +#define ANOMALY_05000175 /* Overlapping Sequencer and Memory Stalls */ +#define ANOMALY_05000176 /* Multiplication of (-1) by (-1) followed by an + accumulator saturation */ +#define ANOMALY_05000179 /* PPI_COUNT cannot be programmed to 0 in General + Purpose TX or RX modes */ +#define ANOMALY_05000181 /* Disabling the PPI resets the PPI configuration + registers */ +#define ANOMALY_05000184 /* Timer Pin limitations for PPI TX Modes with + External Frame Syncs */ +#define ANOMALY_05000185 /* PPI TX Mode with 2 External Frame Syncs */ +#define ANOMALY_05000186 /* PPI packing with Data Length greater than 8 bits + (not a meaningful mode) */ +#define ANOMALY_05000188 /* IMDMA Restrictions on Descriptor and Buffer + Placement in Memory */ 
+#define ANOMALY_05000189 /* False Protection Exception */ +#define ANOMALY_05000193 /* False Flag Pin Interrupts on Edge Sensitive Inputs + when polarity setting is changed */ +#define ANOMALY_05000194 /* Restarting SPORT in specific modes may cause data + corruption */ +#define ANOMALY_05000198 /* Failing MMR accesses when stalled by preceding + memory read */ +#define ANOMALY_05000199 /* DMA current address shows wrong value during carry + fix */ +#define ANOMALY_05000200 /* SPORT TFS and DT are incorrectly driven during + inactive channels in certain conditions */ +#define ANOMALY_05000202 /* Possible infinite stall with specific dual-DAG + situation */ +#define ANOMALY_05000204 /* Incorrect data read with write-through cache and + allocate cache lines on reads only mode */ +#define ANOMALY_05000205 /* Specific sequence that can cause DMA error or DMA + stopping */ +#define ANOMALY_05000207 /* Recovery from "brown-out" condition */ +#define ANOMALY_05000209 /* Speed-Path in computational unit affects certain + instructions */ +#define ANOMALY_05000215 /* UART TX Interrupt masked erroneously */ +#define ANOMALY_05000219 /* NMI event at boot time results in unpredictable + state */ +#define ANOMALY_05000220 /* Data Corruption with Cached External Memory and + Non-Cached On-Chip L2 Memory */ +#define ANOMALY_05000225 /* Incorrect pulse-width of UART start-bit */ +#define ANOMALY_05000227 /* Scratchpad memory bank reads may return incorrect + data */ +#define ANOMALY_05000230 /* UART Receiver is less robust against Baudrate + Differences in certain Conditions */ +#define ANOMALY_05000231 /* UART STB bit incorrectly affects receiver setting */ +#define ANOMALY_05000232 /* SPORT data transmit lines are incorrectly driven in + multichannel mode */ +#define ANOMALY_05000242 /* DF bit in PLL_CTL register does not respond to + hardware reset */ +#define ANOMALY_05000244 /* If i-cache is on, CSYNC/SSYNC/IDLE around Change of + Control causes failures */ +#define ANOMALY_05000248 /* TESTSET operation forces stall on the other core */ +#define ANOMALY_05000250 /* Incorrect Bit-Shift of Data Word in Multichannel + (TDM) mode in certain conditions */ +#define ANOMALY_05000251 /* Exception not generated for MMR accesses in + reserved region */ +#define ANOMALY_05000253 /* Maximum external clock speed for Timers */ +#define ANOMALY_05000258 /* Instruction Cache is corrupted when bits 9 and 12 + of the ICPLB Data registers differ */ +#define ANOMALY_05000260 /* ICPLB_STATUS MMR register may be corrupted */ +#define ANOMALY_05000261 /* DCPLB_FAULT_ADDR MMR register may be corrupted */ +#define ANOMALY_05000262 /* Stores to data cache may be lost */ +#define ANOMALY_05000263 /* Hardware loop corrupted when taking an ICPLB + exception */ +#define ANOMALY_05000264 /* CSYNC/SSYNC/IDLE causes infinite stall in second + to last instruction in hardware loop */ +#define ANOMALY_05000276 /* Timing requirements change for External Frame + Sync PPI Modes with non-zero PPI_DELAY */ +#define ANOMALY_05000278 /* Disabling Peripherals with DMA running may cause + DMA system instability */ +#define ANOMALY_05000281 /* False Hardware Error Exception when ISR context is + not restored */ +#define ANOMALY_05000283 /* An MMR write is stalled indefinitely when killed + in a particular stage */ +#define ANOMALY_05000287 /* A read will receive incorrect data under certain + conditions */ +#define ANOMALY_05000288 /* SPORTs may receive bad data if FIFOs fill up */ +#endif + +#endif /* _MACH_ANOMALY_H_ */ diff --git 
a/include/asm-blackfin/mach-bf561/bf561.h b/include/asm-blackfin/mach-bf561/bf561.h new file mode 100644 index 000000000000..96a5d3a47e45 --- /dev/null +++ b/include/asm-blackfin/mach-bf561/bf561.h @@ -0,0 +1,408 @@ +/* + * File: include/asm-blackfin/mach-bf561/bf561.h + * Based on: + * Author: + * + * Created: + * Description: SYSTEM MMR REGISTER AND MEMORY MAP FOR ADSP-BF561 + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __MACH_BF561_H__ +#define __MACH_BF561_H__ + +#define SUPPORTED_REVID 0x3 + +#define OFFSET_(x) ((x) & 0x0000FFFF) +#define L1_ISRAM 0xFFA00000 +#define L1_ISRAM_END 0xFFA04000 +#define DATA_BANKA_SRAM 0xFF800000 +#define DATA_BANKA_SRAM_END 0xFF804000 +#define DATA_BANKB_SRAM 0xFF900000 +#define DATA_BANKB_SRAM_END 0xFF904000 +#define L1_DSRAMA 0xFF800000 +#define L1_DSRAMA_END 0xFF804000 +#define L1_DSRAMB 0xFF900000 +#define L1_DSRAMB_END 0xFF904000 +#define L2_SRAM 0xFEB00000 +#define L2_SRAM_END 0xFEB20000 +#define AMB_FLASH 0x20000000 +#define AMB_FLASH_END 0x21000000 +#define AMB_FLASH_LENGTH 0x01000000 +#define L1_ISRAM_LENGTH 0x4000 +#define L1_DSRAMA_LENGTH 0x4000 +#define L1_DSRAMB_LENGTH 0x4000 +#define L2_SRAM_LENGTH 0x20000 + +/*some misc defines*/ +#define IMASK_IVG15 0x8000 +#define IMASK_IVG14 0x4000 +#define IMASK_IVG13 0x2000 +#define IMASK_IVG12 0x1000 + +#define IMASK_IVG11 0x0800 +#define IMASK_IVG10 0x0400 +#define IMASK_IVG9 0x0200 +#define IMASK_IVG8 0x0100 + +#define IMASK_IVG7 0x0080 +#define IMASK_IVGTMR 0x0040 +#define IMASK_IVGHW 0x0020 + +/*************************** + * Blackfin Cache setup + */ + + +#define BLKFIN_ISUBBANKS 4 +#define BLKFIN_IWAYS 4 +#define BLKFIN_ILINES 32 + +#define BLKFIN_DSUBBANKS 4 +#define BLKFIN_DWAYS 2 +#define BLKFIN_DLINES 64 + +#define WAY0_L 0x1 +#define WAY1_L 0x2 +#define WAY01_L 0x3 +#define WAY2_L 0x4 +#define WAY02_L 0x5 +#define WAY12_L 0x6 +#define WAY012_L 0x7 + +#define WAY3_L 0x8 +#define WAY03_L 0x9 +#define WAY13_L 0xA +#define WAY013_L 0xB + +#define WAY32_L 0xC +#define WAY320_L 0xD +#define WAY321_L 0xE +#define WAYALL_L 0xF + +#define DMC_ENABLE (2<<2) /*yes, 2, not 1 */ + +/* IAR0 BIT FIELDS */ +#define PLL_WAKEUP_BIT 0xFFFFFFFF +#define DMA1_ERROR_BIT 0xFFFFFF0F +#define DMA2_ERROR_BIT 0xFFFFF0FF +#define IMDMA_ERROR_BIT 0xFFFF0FFF +#define PPI1_ERROR_BIT 0xFFF0FFFF +#define PPI2_ERROR_BIT 0xFF0FFFFF +#define SPORT0_ERROR_BIT 0xF0FFFFFF +#define SPORT1_ERROR_BIT 0x0FFFFFFF +/* IAR1 BIT FIELDS */ +#define SPI_ERROR_BIT 0xFFFFFFFF +#define UART_ERROR_BIT 0xFFFFFF0F +#define RESERVED_ERROR_BIT 0xFFFFF0FF +#define DMA1_0_BIT 0xFFFF0FFF +#define DMA1_1_BIT 0xFFF0FFFF +#define DMA1_2_BIT 0xFF0FFFFF +#define DMA1_3_BIT 0xF0FFFFFF +#define DMA1_4_BIT 0x0FFFFFFF +/* IAR2 
BIT FIELDS */ +#define DMA1_5_BIT 0xFFFFFFFF +#define DMA1_6_BIT 0xFFFFFF0F +#define DMA1_7_BIT 0xFFFFF0FF +#define DMA1_8_BIT 0xFFFF0FFF +#define DMA1_9_BIT 0xFFF0FFFF +#define DMA1_10_BIT 0xFF0FFFFF +#define DMA1_11_BIT 0xF0FFFFFF +#define DMA2_0_BIT 0x0FFFFFFF +/* IAR3 BIT FIELDS */ +#define DMA2_1_BIT 0xFFFFFFFF +#define DMA2_2_BIT 0xFFFFFF0F +#define DMA2_3_BIT 0xFFFFF0FF +#define DMA2_4_BIT 0xFFFF0FFF +#define DMA2_5_BIT 0xFFF0FFFF +#define DMA2_6_BIT 0xFF0FFFFF +#define DMA2_7_BIT 0xF0FFFFFF +#define DMA2_8_BIT 0x0FFFFFFF +/* IAR4 BIT FIELDS */ +#define DMA2_9_BIT 0xFFFFFFFF +#define DMA2_10_BIT 0xFFFFFF0F +#define DMA2_11_BIT 0xFFFFF0FF +#define TIMER0_BIT 0xFFFF0FFF +#define TIMER1_BIT 0xFFF0FFFF +#define TIMER2_BIT 0xFF0FFFFF +#define TIMER3_BIT 0xF0FFFFFF +#define TIMER4_BIT 0x0FFFFFFF +/* IAR5 BIT FIELDS */ +#define TIMER5_BIT 0xFFFFFFFF +#define TIMER6_BIT 0xFFFFFF0F +#define TIMER7_BIT 0xFFFFF0FF +#define TIMER8_BIT 0xFFFF0FFF +#define TIMER9_BIT 0xFFF0FFFF +#define TIMER10_BIT 0xFF0FFFFF +#define TIMER11_BIT 0xF0FFFFFF +#define PROG0_INTA_BIT 0x0FFFFFFF +/* IAR6 BIT FIELDS */ +#define PROG0_INTB_BIT 0xFFFFFFFF +#define PROG1_INTA_BIT 0xFFFFFF0F +#define PROG1_INTB_BIT 0xFFFFF0FF +#define PROG2_INTA_BIT 0xFFFF0FFF +#define PROG2_INTB_BIT 0xFFF0FFFF +#define DMA1_WRRD0_BIT 0xFF0FFFFF +#define DMA1_WRRD1_BIT 0xF0FFFFFF +#define DMA2_WRRD0_BIT 0x0FFFFFFF +/* IAR7 BIT FIELDS */ +#define DMA2_WRRD1_BIT 0xFFFFFFFF +#define IMDMA_WRRD0_BIT 0xFFFFFF0F +#define IMDMA_WRRD1_BIT 0xFFFFF0FF +#define WATCH_BIT 0xFFFF0FFF +#define RESERVED_1_BIT 0xFFF0FFFF +#define RESERVED_2_BIT 0xFF0FFFFF +#define SUPPLE_0_BIT 0xF0FFFFFF +#define SUPPLE_1_BIT 0x0FFFFFFF + +/* Miscellaneous Values */ + +/****************************** EBIU Settings ********************************/ +#define AMBCTL0VAL ((CONFIG_BANK_1 << 16) | CONFIG_BANK_0) +#define AMBCTL1VAL ((CONFIG_BANK_3 << 16) | CONFIG_BANK_2) + +#if defined(CONFIG_C_AMBEN_ALL) +#define V_AMBEN AMBEN_ALL +#elif defined(CONFIG_C_AMBEN) +#define V_AMBEN 0x0 +#elif defined(CONFIG_C_AMBEN_B0) +#define V_AMBEN AMBEN_B0 +#elif defined(CONFIG_C_AMBEN_B0_B1) +#define V_AMBEN AMBEN_B0_B1 +#elif defined(CONFIG_C_AMBEN_B0_B1_B2) +#define V_AMBEN AMBEN_B0_B1_B2 +#endif + +#ifdef CONFIG_C_AMCKEN +#define V_AMCKEN AMCKEN +#else +#define V_AMCKEN 0x0 +#endif + +#ifdef CONFIG_C_B0PEN +#define V_B0PEN 0x10 +#else +#define V_B0PEN 0x00 +#endif + +#ifdef CONFIG_C_B1PEN +#define V_B1PEN 0x20 +#else +#define V_B1PEN 0x00 +#endif + +#ifdef CONFIG_C_B2PEN +#define V_B2PEN 0x40 +#else +#define V_B2PEN 0x00 +#endif + +#ifdef CONFIG_C_B3PEN +#define V_B3PEN 0x80 +#else +#define V_B3PEN 0x00 +#endif + +#ifdef CONFIG_C_CDPRIO +#define V_CDPRIO 0x100 +#else +#define V_CDPRIO 0x0 +#endif + +#define AMGCTLVAL (V_AMBEN | V_AMCKEN | V_CDPRIO | V_B0PEN | V_B1PEN | V_B2PEN | V_B3PEN | 0x0002) + +#define MAX_VC 600000000 +#define MIN_VC 50000000 + +/******************************* PLL Settings ********************************/ +#ifdef CONFIG_BFIN_KERNEL_CLOCK +#if (CONFIG_VCO_MULT < 0) +#error "VCO Multiplier is less than 0. Please select a different value" +#endif + +#if (CONFIG_VCO_MULT == 0) +#error "VCO Multiplier should be greater than 0. 
Please select a different value" +#endif + +#ifndef CONFIG_CLKIN_HALF +#define CONFIG_VCO_HZ (CONFIG_CLKIN_HZ * CONFIG_VCO_MULT) +#else +#define CONFIG_VCO_HZ ((CONFIG_CLKIN_HZ * CONFIG_VCO_MULT)/2) +#endif + +#ifndef CONFIG_PLL_BYPASS +#define CONFIG_CCLK_HZ (CONFIG_VCO_HZ/CONFIG_CCLK_DIV) +#define CONFIG_SCLK_HZ (CONFIG_VCO_HZ/CONFIG_SCLK_DIV) +#else +#define CONFIG_CCLK_HZ CONFIG_CLKIN_HZ +#define CONFIG_SCLK_HZ CONFIG_CLKIN_HZ +#endif + +#if (CONFIG_SCLK_DIV < 1) +#error "SCLK DIV cannot be less than 1 or more than 15. Please select a proper value" +#endif + +#if (CONFIG_SCLK_DIV > 15) +#error "SCLK DIV cannot be less than 1 or more than 15. Please select a proper value" +#endif + +#if (CONFIG_CCLK_DIV != 1) +#if (CONFIG_CCLK_DIV != 2) +#if (CONFIG_CCLK_DIV != 4) +#if (CONFIG_CCLK_DIV != 8) +#error "CCLK DIV can be 1,2,4 or 8 only. Please select a proper value" +#endif +#endif +#endif +#endif + +#if (CONFIG_VCO_HZ > MAX_VC) +#error "VCO selected is more than maximum value. Please change the VCO multiplier" +#endif + +#if (CONFIG_SCLK_HZ > 133000000) +#error "Sclk value selected is more than maximum. Please select a proper value for SCLK multiplier" +#endif + +#if (CONFIG_SCLK_HZ < 27000000) +#error "Sclk value selected is less than minimum. Please select a proper value for SCLK multiplier" +#endif + +#if (CONFIG_SCLK_HZ >= CONFIG_CCLK_HZ) +#if (CONFIG_SCLK_HZ != CONFIG_CLKIN_HZ) +#if (CONFIG_CCLK_HZ != CONFIG_CLKIN_HZ) +#error "Please select sclk less than cclk" +#endif +#endif +#endif + +#if (CONFIG_CCLK_DIV == 1) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV1 +#endif +#if (CONFIG_CCLK_DIV == 2) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV2 +#endif +#if (CONFIG_CCLK_DIV == 4) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV4 +#endif +#if (CONFIG_CCLK_DIV == 8) +#define CONFIG_CCLK_ACT_DIV CCLK_DIV8 +#endif +#ifndef CONFIG_CCLK_ACT_DIV +#define CONFIG_CCLK_ACT_DIV CONFIG_CCLK_DIV_not_defined_properly +#endif + +#if defined(ANOMALY_05000273) && (CONFIG_CCLK_DIV == 1) +#error ANOMALY 05000273, please make sure CCLK is at least 2x SCLK +#endif + +#endif /* CONFIG_BFIN_KERNEL_CLOCK */ + +#ifdef CONFIG_BF561 +#define CPU "BF561" +#define CPUID 0x027bb000 +#endif +#ifndef CPU +#define CPU "UNKNOWN" +#define CPUID 0x0 +#endif + +#if (CONFIG_MEM_SIZE % 4) +#error "SDRAM memory size must be a multiple of 4MB!"
+#endif +#define SDRAM_IGENERIC (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | CPLB_PORTPRIO) +#define SDRAM_IKERNEL (SDRAM_IGENERIC | CPLB_LOCK) +#define L1_IMEMORY ( CPLB_USER_RD | CPLB_VALID | CPLB_LOCK) +#define SDRAM_INON_CHBL ( CPLB_USER_RD | CPLB_VALID) + +/*Use the menuconfig cache policy here - CONFIG_BLKFIN_WT/CONFIG_BLKFIN_WB*/ + +#define ANOMALY_05000158_WORKAROUND 0x200 +#ifdef CONFIG_BLKFIN_WB /*Write Back Policy */ +#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_DIRTY \ + | CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND) +#else /*Write Through */ +#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_DIRTY \ + | CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND) +#endif + + +#define L1_DMEMORY (CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY) +#define SDRAM_DNON_CHBL (CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_DIRTY) +#define SDRAM_EBIU (CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_DIRTY) +#define SDRAM_OOPS (CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY) + +#define L2_MEMORY (CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_DIRTY) + +#define SIZE_1K 0x00000400 /* 1K */ +#define SIZE_4K 0x00001000 /* 4K */ +#define SIZE_1M 0x00100000 /* 1M */ +#define SIZE_4M 0x00400000 /* 4M */ + +#define MAX_CPLBS (16 * 2) + +/* +* Number of required data CPLB switchtable entries +* MEMSIZE / 4 (we mostly install 4M page size CPLBs +* approx 16 for smaller 1MB page size CPLBs for allignment purposes +* 1 for L1 Data Memory +* 1 for L2 Data Memory +* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO +* 64 for ASYNC Memory +*/ + + +#define MAX_SWITCH_D_CPLBS (((CONFIG_MEM_SIZE / 4) + 16 + 1 + 1 + 1 + 64) * 2) + +/* +* Number of required instruction CPLB switchtable entries +* MEMSIZE / 4 (we mostly install 4M page size CPLBs +* approx 12 for smaller 1MB page size CPLBs for allignment purposes +* 1 for L1 Instruction Memory +* 1 for L2 Instruction Memory +* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO +*/ + +#define MAX_SWITCH_I_CPLBS (((CONFIG_MEM_SIZE / 4) + 12 + 1 + 1 + 1) * 2) + +#if 0 /* comment by mhfan */ +/* Event Vector Table Address */ +#define EVT_EMULATION_ADDR 0xffe02000 +#define EVT_RESET_ADDR 0xffe02004 +#define EVT_NMI_ADDR 0xffe02008 +#define EVT_EXCEPTION_ADDR 0xffe0200c +#define EVT_GLOBAL_INT_ENB_ADDR 0xffe02010 +#define EVT_HARDWARE_ERROR_ADDR 0xffe02014 +#define EVT_TIMER_ADDR 0xffe02018 +#define EVT_IVG7_ADDR 0xffe0201c +#define EVT_IVG8_ADDR 0xffe02020 +#define EVT_IVG9_ADDR 0xffe02024 +#define EVT_IVG10_ADDR 0xffe02028 +#define EVT_IVG11_ADDR 0xffe0202c +#define EVT_IVG12_ADDR 0xffe02030 +#define EVT_IVG13_ADDR 0xffe02034 +#define EVT_IVG14_ADDR 0xffe02038 +#define EVT_IVG15_ADDR 0xffe0203c +#define EVT_OVERRIDE_ADDR 0xffe02100 +#endif /* comment by mhfan */ + +#endif /* __MACH_BF561_H__ */ diff --git a/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h new file mode 100644 index 000000000000..23bf76aa3451 --- /dev/null +++ b/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h @@ -0,0 +1,108 @@ +#include +#include + +#define NR_PORTS 1 + +#define OFFSET_THR 0x00 /* Transmit Holding register */ +#define OFFSET_RBR 0x00 /* Receive Buffer register */ +#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */ +#define OFFSET_IER 0x04 /* Interrupt Enable 
Register */ +#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */ +#define OFFSET_IIR 0x08 /* Interrupt Identification Register */ +#define OFFSET_LCR 0x0C /* Line Control Register */ +#define OFFSET_MCR 0x10 /* Modem Control Register */ +#define OFFSET_LSR 0x14 /* Line Status Register */ +#define OFFSET_MSR 0x18 /* Modem Status Register */ +#define OFFSET_SCR 0x1C /* SCR Scratch Register */ +#define OFFSET_GCTL 0x24 /* Global Control Register */ + +#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR)) +#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL)) +#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER)) +#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH)) +#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR)) +#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) +#define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR)) +#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) + +#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) +#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v) +#define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v) +#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v) +#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v) +#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v) + +#ifdef CONFIG_BFIN_UART0_CTSRTS +# define CONFIG_SERIAL_BFIN_CTSRTS +# ifndef CONFIG_UART0_CTS_PIN +# define CONFIG_UART0_CTS_PIN -1 +# endif +# ifndef CONFIG_UART0_RTS_PIN +# define CONFIG_UART0_RTS_PIN -1 +# endif +#endif + +struct bfin_serial_port { + struct uart_port port; + unsigned int old_status; +#ifdef CONFIG_SERIAL_BFIN_DMA + int tx_done; + int tx_count; + struct circ_buf rx_dma_buf; + struct timer_list rx_dma_timer; + int rx_dma_nrows; + unsigned int tx_dma_channel; + unsigned int rx_dma_channel; + struct work_struct tx_dma_workqueue; +#else + struct work_struct cts_workqueue; +#endif +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + int cts_pin; + int rts_pin; +#endif +}; + +struct bfin_serial_port bfin_serial_ports[NR_PORTS]; +struct bfin_serial_res { + unsigned long uart_base_addr; + int uart_irq; +#ifdef CONFIG_SERIAL_BFIN_DMA + unsigned int uart_tx_dma_channel; + unsigned int uart_rx_dma_channel; +#endif +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + int uart_cts_pin; + int uart_rts_pin; +#endif +}; + +struct bfin_serial_res bfin_serial_resource[] = { + 0xFFC00400, + IRQ_UART_RX, +#ifdef CONFIG_SERIAL_BFIN_DMA + CH_UART_TX, + CH_UART_RX, +#endif +#ifdef CONFIG_BFIN_UART0_CTSRTS + CONFIG_UART0_CTS_PIN, + CONFIG_UART0_RTS_PIN, +#endif +}; + + +int nr_ports = NR_PORTS; +static void bfin_serial_hw_init(struct bfin_serial_port *uart) +{ + +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + if (uart->cts_pin >= 0) { + gpio_request(uart->cts_pin, NULL); + gpio_direction_input(uart->cts_pin); + } + if (uart->rts_pin >= 0) { + gpio_request(uart->rts_pin, NULL); + gpio_direction_input(uart->rts_pin); + } +#endif +} diff --git a/include/asm-blackfin/mach-bf561/blackfin.h b/include/asm-blackfin/mach-bf561/blackfin.h new file mode 100644 index 000000000000..2537c845e8b0 --- /dev/null +++ b/include/asm-blackfin/mach-bf561/blackfin.h @@ -0,0 +1,52 @@ +/* + * File: include/asm-blackfin/mach-bf561/blackfin.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + 
* Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _MACH_BLACKFIN_H_ +#define _MACH_BLACKFIN_H_ + +#define BF561_FAMILY + +#include "bf561.h" +#include "mem_map.h" +#include "defBF561.h" +#include "anomaly.h" + +#if !(defined(__ASSEMBLY__) || defined(ASSEMBLY)) +#include "cdefBF561.h" +#endif + +#define bfin_read_FIO_FLAG_D() bfin_read_FIO0_FLAG_D() +#define bfin_write_FIO_FLAG_D(val) bfin_write_FIO0_FLAG_D(val) +#define bfin_read_FIO_DIR() bfin_read_FIO0_DIR() +#define bfin_write_FIO_DIR(val) bfin_write_FIO0_DIR(val) +#define bfin_read_FIO_INEN() bfin_read_FIO0_INEN() +#define bfin_write_FIO_INEN(val) bfin_write_FIO0_INEN(val) + +#endif /* _MACH_BLACKFIN_H_ */ diff --git a/include/asm-blackfin/mach-bf561/cdefBF561.h b/include/asm-blackfin/mach-bf561/cdefBF561.h new file mode 100644 index 000000000000..5dc0ed835447 --- /dev/null +++ b/include/asm-blackfin/mach-bf561/cdefBF561.h @@ -0,0 +1,1543 @@ +/* + * File: include/asm-blackfin/mach-bf561/cdefBF561.h + * Based on: + * Author: + * + * Created: + * Description: C POINTERS TO SYSTEM MMR REGISTER AND MEMORY MAP FOR ADSP-BF561 + * + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _CDEF_BF561_H +#define _CDEF_BF561_H + +/* +#if !defined(__ADSPBF561__) +#warning cdefBF561.h should only be included for BF561 chip. 
+#endif +*/ +/* include all Core registers and bit definitions */ +#include "defBF561.h" + +/*include core specific register pointer definitions*/ +#include + +#include + +/*********************************************************************************** */ +/* System MMR Register Map */ +/*********************************************************************************** */ + +/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */ +#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL) +#define bfin_write_PLL_CTL(val) bfin_write16(PLL_CTL,val) +#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV) +#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV,val) +#define bfin_read_VR_CTL() bfin_read16(VR_CTL) +/* Writing to VR_CTL initiates a PLL relock sequence. */ +static __inline__ void bfin_write_VR_CTL(unsigned int val) +{ + unsigned long flags, iwr; + + bfin_write16(VR_CTL, val); + __builtin_bfin_ssync(); + /* Enable the PLL Wakeup bit in SIC IWR */ + iwr = bfin_read32(SICA_IWR0); + /* Only allow PPL Wakeup) */ + bfin_write32(SICA_IWR0, IWR_ENABLE(0)); + local_irq_save(flags); + asm("IDLE;"); + local_irq_restore(flags); + bfin_write32(SICA_IWR0, iwr); +} +#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT) +#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT,val) +#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT) +#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT,val) +#define bfin_read_CHIPID() bfin_read32(CHIPID) + +/* System Reset and Interrupt Controller registers for core A (0xFFC0 0100-0xFFC0 01FF) */ +#define bfin_read_SICA_SWRST() bfin_read16(SICA_SWRST) +#define bfin_write_SICA_SWRST(val) bfin_write16(SICA_SWRST,val) +#define bfin_read_SICA_SYSCR() bfin_read16(SICA_SYSCR) +#define bfin_write_SICA_SYSCR(val) bfin_write16(SICA_SYSCR,val) +#define bfin_read_SICA_RVECT() bfin_read16(SICA_RVECT) +#define bfin_write_SICA_RVECT(val) bfin_write16(SICA_RVECT,val) +#define bfin_read_SICA_IMASK() bfin_read32(SICA_IMASK) +#define bfin_write_SICA_IMASK(val) bfin_write32(SICA_IMASK,val) +#define bfin_read_SICA_IMASK0() bfin_read32(SICA_IMASK0) +#define bfin_write_SICA_IMASK0(val) bfin_write32(SICA_IMASK0,val) +#define bfin_read_SICA_IMASK1() bfin_read32(SICA_IMASK1) +#define bfin_write_SICA_IMASK1(val) bfin_write32(SICA_IMASK1,val) +#define bfin_read_SICA_IAR0() bfin_read32(SICA_IAR0) +#define bfin_write_SICA_IAR0(val) bfin_write32(SICA_IAR0,val) +#define bfin_read_SICA_IAR1() bfin_read32(SICA_IAR1) +#define bfin_write_SICA_IAR1(val) bfin_write32(SICA_IAR1,val) +#define bfin_read_SICA_IAR2() bfin_read32(SICA_IAR2) +#define bfin_write_SICA_IAR2(val) bfin_write32(SICA_IAR2,val) +#define bfin_read_SICA_IAR3() bfin_read32(SICA_IAR3) +#define bfin_write_SICA_IAR3(val) bfin_write32(SICA_IAR3,val) +#define bfin_read_SICA_IAR4() bfin_read32(SICA_IAR4) +#define bfin_write_SICA_IAR4(val) bfin_write32(SICA_IAR4,val) +#define bfin_read_SICA_IAR5() bfin_read32(SICA_IAR5) +#define bfin_write_SICA_IAR5(val) bfin_write32(SICA_IAR5,val) +#define bfin_read_SICA_IAR6() bfin_read32(SICA_IAR6) +#define bfin_write_SICA_IAR6(val) bfin_write32(SICA_IAR6,val) +#define bfin_read_SICA_IAR7() bfin_read32(SICA_IAR7) +#define bfin_write_SICA_IAR7(val) bfin_write32(SICA_IAR7,val) +#define bfin_read_SICA_ISR0() bfin_read32(SICA_ISR0) +#define bfin_write_SICA_ISR0(val) bfin_write32(SICA_ISR0,val) +#define bfin_read_SICA_ISR1() bfin_read32(SICA_ISR1) +#define bfin_write_SICA_ISR1(val) bfin_write32(SICA_ISR1,val) +#define bfin_read_SICA_IWR0() bfin_read32(SICA_IWR0) +#define bfin_write_SICA_IWR0(val) 
bfin_write32(SICA_IWR0,val) +#define bfin_read_SICA_IWR1() bfin_read32(SICA_IWR1) +#define bfin_write_SICA_IWR1(val) bfin_write32(SICA_IWR1,val) + +/* System Reset and Interrupt Controller registers for Core B (0xFFC0 1100-0xFFC0 11FF) */ +#define bfin_read_SICB_SWRST() bfin_read16(SICB_SWRST) +#define bfin_write_SICB_SWRST(val) bfin_write16(SICB_SWRST,val) +#define bfin_read_SICB_SYSCR() bfin_read16(SICB_SYSCR) +#define bfin_write_SICB_SYSCR(val) bfin_write16(SICB_SYSCR,val) +#define bfin_read_SICB_RVECT() bfin_read16(SICB_RVECT) +#define bfin_write_SICB_RVECT(val) bfin_write16(SICB_RVECT,val) +#define bfin_read_SICB_IMASK0() bfin_read32(SICB_IMASK0) +#define bfin_write_SICB_IMASK0(val) bfin_write32(SICB_IMASK0,val) +#define bfin_read_SICB_IMASK1() bfin_read32(SICB_IMASK1) +#define bfin_write_SICB_IMASK1(val) bfin_write32(SICB_IMASK1,val) +#define bfin_read_SICB_IAR0() bfin_read32(SICB_IAR0) +#define bfin_write_SICB_IAR0(val) bfin_write32(SICB_IAR0,val) +#define bfin_read_SICB_IAR1() bfin_read32(SICB_IAR1) +#define bfin_write_SICB_IAR1(val) bfin_write32(SICB_IAR1,val) +#define bfin_read_SICB_IAR2() bfin_read32(SICB_IAR2) +#define bfin_write_SICB_IAR2(val) bfin_write32(SICB_IAR2,val) +#define bfin_read_SICB_IAR3() bfin_read32(SICB_IAR3) +#define bfin_write_SICB_IAR3(val) bfin_write32(SICB_IAR3,val) +#define bfin_read_SICB_IAR4() bfin_read32(SICB_IAR4) +#define bfin_write_SICB_IAR4(val) bfin_write32(SICB_IAR4,val) +#define bfin_read_SICB_IAR5() bfin_read32(SICB_IAR5) +#define bfin_write_SICB_IAR5(val) bfin_write32(SICB_IAR5,val) +#define bfin_read_SICB_IAR6() bfin_read32(SICB_IAR6) +#define bfin_write_SICB_IAR6(val) bfin_write32(SICB_IAR6,val) +#define bfin_read_SICB_IAR7() bfin_read32(SICB_IAR7) +#define bfin_write_SICB_IAR7(val) bfin_write32(SICB_IAR7,val) +#define bfin_read_SICB_ISR0() bfin_read32(SICB_ISR0) +#define bfin_write_SICB_ISR0(val) bfin_write32(SICB_ISR0,val) +#define bfin_read_SICB_ISR1() bfin_read32(SICB_ISR1) +#define bfin_write_SICB_ISR1(val) bfin_write32(SICB_ISR1,val) +#define bfin_read_SICB_IWR0() bfin_read32(SICB_IWR0) +#define bfin_write_SICB_IWR0(val) bfin_write32(SICB_IWR0,val) +#define bfin_read_SICB_IWR1() bfin_read32(SICB_IWR1) +#define bfin_write_SICB_IWR1(val) bfin_write32(SICB_IWR1,val) +/* Watchdog Timer registers for Core A (0xFFC0 0200-0xFFC0 02FF) */ +#define bfin_read_WDOGA_CTL() bfin_read16(WDOGA_CTL) +#define bfin_write_WDOGA_CTL(val) bfin_write16(WDOGA_CTL,val) +#define bfin_read_WDOGA_CNT() bfin_read32(WDOGA_CNT) +#define bfin_write_WDOGA_CNT(val) bfin_write32(WDOGA_CNT,val) +#define bfin_read_WDOGA_STAT() bfin_read32(WDOGA_STAT) +#define bfin_write_WDOGA_STAT(val) bfin_write32(WDOGA_STAT,val) + +/* Watchdog Timer registers for Core B (0xFFC0 1200-0xFFC0 12FF) */ +#define bfin_read_WDOGB_CTL() bfin_read16(WDOGB_CTL) +#define bfin_write_WDOGB_CTL(val) bfin_write16(WDOGB_CTL,val) +#define bfin_read_WDOGB_CNT() bfin_read32(WDOGB_CNT) +#define bfin_write_WDOGB_CNT(val) bfin_write32(WDOGB_CNT,val) +#define bfin_read_WDOGB_STAT() bfin_read32(WDOGB_STAT) +#define bfin_write_WDOGB_STAT(val) bfin_write32(WDOGB_STAT,val) + +/* UART Controller (0xFFC00400 - 0xFFC004FF) */ +#define bfin_read_UART_THR() bfin_read16(UART_THR) +#define bfin_write_UART_THR(val) bfin_write16(UART_THR,val) +#define bfin_read_UART_RBR() bfin_read16(UART_RBR) +#define bfin_write_UART_RBR(val) bfin_write16(UART_RBR,val) +#define bfin_read_UART_DLL() bfin_read16(UART_DLL) +#define bfin_write_UART_DLL(val) bfin_write16(UART_DLL,val) +#define bfin_read_UART_IER() bfin_read16(UART_IER) 
+#define bfin_write_UART_IER(val) bfin_write16(UART_IER,val) +#define bfin_read_UART_DLH() bfin_read16(UART_DLH) +#define bfin_write_UART_DLH(val) bfin_write16(UART_DLH,val) +#define bfin_read_UART_IIR() bfin_read16(UART_IIR) +#define bfin_write_UART_IIR(val) bfin_write16(UART_IIR,val) +#define bfin_read_UART_LCR() bfin_read16(UART_LCR) +#define bfin_write_UART_LCR(val) bfin_write16(UART_LCR,val) +#define bfin_read_UART_MCR() bfin_read16(UART_MCR) +#define bfin_write_UART_MCR(val) bfin_write16(UART_MCR,val) +#define bfin_read_UART_LSR() bfin_read16(UART_LSR) +#define bfin_write_UART_LSR(val) bfin_write16(UART_LSR,val) +#define bfin_read_UART_MSR() bfin_read16(UART_MSR) +#define bfin_write_UART_MSR(val) bfin_write16(UART_MSR,val) +#define bfin_read_UART_SCR() bfin_read16(UART_SCR) +#define bfin_write_UART_SCR(val) bfin_write16(UART_SCR,val) +#define bfin_read_UART_GCTL() bfin_read16(UART_GCTL) +#define bfin_write_UART_GCTL(val) bfin_write16(UART_GCTL,val) + +/* SPI Controller (0xFFC00500 - 0xFFC005FF) */ +#define bfin_read_SPI_CTL() bfin_read16(SPI_CTL) +#define bfin_write_SPI_CTL(val) bfin_write16(SPI_CTL,val) +#define bfin_read_SPI_FLG() bfin_read16(SPI_FLG) +#define bfin_write_SPI_FLG(val) bfin_write16(SPI_FLG,val) +#define bfin_read_SPI_STAT() bfin_read16(SPI_STAT) +#define bfin_write_SPI_STAT(val) bfin_write16(SPI_STAT,val) +#define bfin_read_SPI_TDBR() bfin_read16(SPI_TDBR) +#define bfin_write_SPI_TDBR(val) bfin_write16(SPI_TDBR,val) +#define bfin_read_SPI_RDBR() bfin_read16(SPI_RDBR) +#define bfin_write_SPI_RDBR(val) bfin_write16(SPI_RDBR,val) +#define bfin_read_SPI_BAUD() bfin_read16(SPI_BAUD) +#define bfin_write_SPI_BAUD(val) bfin_write16(SPI_BAUD,val) +#define bfin_read_SPI_SHADOW() bfin_read16(SPI_SHADOW) +#define bfin_write_SPI_SHADOW(val) bfin_write16(SPI_SHADOW,val) + +/* Timer 0-7 registers (0xFFC0 0600-0xFFC0 06FF) */ +#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG) +#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG,val) +#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER) +#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER,val) +#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD) +#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD,val) +#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH) +#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH,val) +#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG) +#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG,val) +#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER) +#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER,val) +#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD) +#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD,val) +#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH) +#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH,val) +#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG) +#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG,val) +#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER) +#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER,val) +#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD) +#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD,val) +#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH) +#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH,val) +#define bfin_read_TIMER3_CONFIG() bfin_read16(TIMER3_CONFIG) +#define 
bfin_write_TIMER3_CONFIG(val) bfin_write16(TIMER3_CONFIG,val) +#define bfin_read_TIMER3_COUNTER() bfin_read32(TIMER3_COUNTER) +#define bfin_write_TIMER3_COUNTER(val) bfin_write32(TIMER3_COUNTER,val) +#define bfin_read_TIMER3_PERIOD() bfin_read32(TIMER3_PERIOD) +#define bfin_write_TIMER3_PERIOD(val) bfin_write32(TIMER3_PERIOD,val) +#define bfin_read_TIMER3_WIDTH() bfin_read32(TIMER3_WIDTH) +#define bfin_write_TIMER3_WIDTH(val) bfin_write32(TIMER3_WIDTH,val) +#define bfin_read_TIMER4_CONFIG() bfin_read16(TIMER4_CONFIG) +#define bfin_write_TIMER4_CONFIG(val) bfin_write16(TIMER4_CONFIG,val) +#define bfin_read_TIMER4_COUNTER() bfin_read32(TIMER4_COUNTER) +#define bfin_write_TIMER4_COUNTER(val) bfin_write32(TIMER4_COUNTER,val) +#define bfin_read_TIMER4_PERIOD() bfin_read32(TIMER4_PERIOD) +#define bfin_write_TIMER4_PERIOD(val) bfin_write32(TIMER4_PERIOD,val) +#define bfin_read_TIMER4_WIDTH() bfin_read32(TIMER4_WIDTH) +#define bfin_write_TIMER4_WIDTH(val) bfin_write32(TIMER4_WIDTH,val) +#define bfin_read_TIMER5_CONFIG() bfin_read16(TIMER5_CONFIG) +#define bfin_write_TIMER5_CONFIG(val) bfin_write16(TIMER5_CONFIG,val) +#define bfin_read_TIMER5_COUNTER() bfin_read32(TIMER5_COUNTER) +#define bfin_write_TIMER5_COUNTER(val) bfin_write32(TIMER5_COUNTER,val) +#define bfin_read_TIMER5_PERIOD() bfin_read32(TIMER5_PERIOD) +#define bfin_write_TIMER5_PERIOD(val) bfin_write32(TIMER5_PERIOD,val) +#define bfin_read_TIMER5_WIDTH() bfin_read32(TIMER5_WIDTH) +#define bfin_write_TIMER5_WIDTH(val) bfin_write32(TIMER5_WIDTH,val) +#define bfin_read_TIMER6_CONFIG() bfin_read16(TIMER6_CONFIG) +#define bfin_write_TIMER6_CONFIG(val) bfin_write16(TIMER6_CONFIG,val) +#define bfin_read_TIMER6_COUNTER() bfin_read32(TIMER6_COUNTER) +#define bfin_write_TIMER6_COUNTER(val) bfin_write32(TIMER6_COUNTER,val) +#define bfin_read_TIMER6_PERIOD() bfin_read32(TIMER6_PERIOD) +#define bfin_write_TIMER6_PERIOD(val) bfin_write32(TIMER6_PERIOD,val) +#define bfin_read_TIMER6_WIDTH() bfin_read32(TIMER6_WIDTH) +#define bfin_write_TIMER6_WIDTH(val) bfin_write32(TIMER6_WIDTH,val) +#define bfin_read_TIMER7_CONFIG() bfin_read16(TIMER7_CONFIG) +#define bfin_write_TIMER7_CONFIG(val) bfin_write16(TIMER7_CONFIG,val) +#define bfin_read_TIMER7_COUNTER() bfin_read32(TIMER7_COUNTER) +#define bfin_write_TIMER7_COUNTER(val) bfin_write32(TIMER7_COUNTER,val) +#define bfin_read_TIMER7_PERIOD() bfin_read32(TIMER7_PERIOD) +#define bfin_write_TIMER7_PERIOD(val) bfin_write32(TIMER7_PERIOD,val) +#define bfin_read_TIMER7_WIDTH() bfin_read32(TIMER7_WIDTH) +#define bfin_write_TIMER7_WIDTH(val) bfin_write32(TIMER7_WIDTH,val) + +/* Timer registers 8-11 (0xFFC0 1600-0xFFC0 16FF) */ +#define bfin_read_TMRS8_ENABLE() bfin_read16(TMRS8_ENABLE) +#define bfin_write_TMRS8_ENABLE(val) bfin_write16(TMRS8_ENABLE,val) +#define bfin_read_TMRS8_DISABLE() bfin_read16(TMRS8_DISABLE) +#define bfin_write_TMRS8_DISABLE(val) bfin_write16(TMRS8_DISABLE,val) +#define bfin_read_TMRS8_STATUS() bfin_read32(TMRS8_STATUS) +#define bfin_write_TMRS8_STATUS(val) bfin_write32(TMRS8_STATUS,val) +#define bfin_read_TIMER8_CONFIG() bfin_read16(TIMER8_CONFIG) +#define bfin_write_TIMER8_CONFIG(val) bfin_write16(TIMER8_CONFIG,val) +#define bfin_read_TIMER8_COUNTER() bfin_read32(TIMER8_COUNTER) +#define bfin_write_TIMER8_COUNTER(val) bfin_write32(TIMER8_COUNTER,val) +#define bfin_read_TIMER8_PERIOD() bfin_read32(TIMER8_PERIOD) +#define bfin_write_TIMER8_PERIOD(val) bfin_write32(TIMER8_PERIOD,val) +#define bfin_read_TIMER8_WIDTH() bfin_read32(TIMER8_WIDTH) +#define bfin_write_TIMER8_WIDTH(val) 
bfin_write32(TIMER8_WIDTH,val)
+#define bfin_read_TIMER9_CONFIG() bfin_read16(TIMER9_CONFIG)
+#define bfin_write_TIMER9_CONFIG(val) bfin_write16(TIMER9_CONFIG,val)
+#define bfin_read_TIMER9_COUNTER() bfin_read32(TIMER9_COUNTER)
+#define bfin_write_TIMER9_COUNTER(val) bfin_write32(TIMER9_COUNTER,val)
+#define bfin_read_TIMER9_PERIOD() bfin_read32(TIMER9_PERIOD)
+#define bfin_write_TIMER9_PERIOD(val) bfin_write32(TIMER9_PERIOD,val)
+#define bfin_read_TIMER9_WIDTH() bfin_read32(TIMER9_WIDTH)
+#define bfin_write_TIMER9_WIDTH(val) bfin_write32(TIMER9_WIDTH,val)
+#define bfin_read_TIMER10_CONFIG() bfin_read16(TIMER10_CONFIG)
+#define bfin_write_TIMER10_CONFIG(val) bfin_write16(TIMER10_CONFIG,val)
+#define bfin_read_TIMER10_COUNTER() bfin_read32(TIMER10_COUNTER)
+#define bfin_write_TIMER10_COUNTER(val) bfin_write32(TIMER10_COUNTER,val)
+#define bfin_read_TIMER10_PERIOD() bfin_read32(TIMER10_PERIOD)
+#define bfin_write_TIMER10_PERIOD(val) bfin_write32(TIMER10_PERIOD,val)
+#define bfin_read_TIMER10_WIDTH() bfin_read32(TIMER10_WIDTH)
+#define bfin_write_TIMER10_WIDTH(val) bfin_write32(TIMER10_WIDTH,val)
+#define bfin_read_TIMER11_CONFIG() bfin_read16(TIMER11_CONFIG)
+#define bfin_write_TIMER11_CONFIG(val) bfin_write16(TIMER11_CONFIG,val)
+#define bfin_read_TIMER11_COUNTER() bfin_read32(TIMER11_COUNTER)
+#define bfin_write_TIMER11_COUNTER(val) bfin_write32(TIMER11_COUNTER,val)
+#define bfin_read_TIMER11_PERIOD() bfin_read32(TIMER11_PERIOD)
+#define bfin_write_TIMER11_PERIOD(val) bfin_write32(TIMER11_PERIOD,val)
+#define bfin_read_TIMER11_WIDTH() bfin_read32(TIMER11_WIDTH)
+#define bfin_write_TIMER11_WIDTH(val) bfin_write32(TIMER11_WIDTH,val)
+#define bfin_read_TMRS4_ENABLE() bfin_read16(TMRS4_ENABLE)
+#define bfin_write_TMRS4_ENABLE(val) bfin_write16(TMRS4_ENABLE,val)
+#define bfin_read_TMRS4_DISABLE() bfin_read16(TMRS4_DISABLE)
+#define bfin_write_TMRS4_DISABLE(val) bfin_write16(TMRS4_DISABLE,val)
+#define bfin_read_TMRS4_STATUS() bfin_read32(TMRS4_STATUS)
+#define bfin_write_TMRS4_STATUS(val) bfin_write32(TMRS4_STATUS,val)
+
+/* Programmable Flag 0 registers (0xFFC0 0700-0xFFC0 07FF) */
+#define bfin_read_FIO0_FLAG_D() bfin_read16(FIO0_FLAG_D)
+#define bfin_write_FIO0_FLAG_D(val) bfin_write16(FIO0_FLAG_D,val)
+#define bfin_read_FIO0_FLAG_C() bfin_read16(FIO0_FLAG_C)
+#define bfin_write_FIO0_FLAG_C(val) bfin_write16(FIO0_FLAG_C,val)
+#define bfin_read_FIO0_FLAG_S() bfin_read16(FIO0_FLAG_S)
+#define bfin_write_FIO0_FLAG_S(val) bfin_write16(FIO0_FLAG_S,val)
+#define bfin_read_FIO0_FLAG_T() bfin_read16(FIO0_FLAG_T)
+#define bfin_write_FIO0_FLAG_T(val) bfin_write16(FIO0_FLAG_T,val)
+#define bfin_read_FIO0_MASKA_D() bfin_read16(FIO0_MASKA_D)
+#define bfin_write_FIO0_MASKA_D(val) bfin_write16(FIO0_MASKA_D,val)
+#define bfin_read_FIO0_MASKA_C() bfin_read16(FIO0_MASKA_C)
+#define bfin_write_FIO0_MASKA_C(val) bfin_write16(FIO0_MASKA_C,val)
+#define bfin_read_FIO0_MASKA_S() bfin_read16(FIO0_MASKA_S)
+#define bfin_write_FIO0_MASKA_S(val) bfin_write16(FIO0_MASKA_S,val)
+#define bfin_read_FIO0_MASKA_T() bfin_read16(FIO0_MASKA_T)
+#define bfin_write_FIO0_MASKA_T(val) bfin_write16(FIO0_MASKA_T,val)
+#define bfin_read_FIO0_MASKB_D() bfin_read16(FIO0_MASKB_D)
+#define bfin_write_FIO0_MASKB_D(val) bfin_write16(FIO0_MASKB_D,val)
+#define bfin_read_FIO0_MASKB_C() bfin_read16(FIO0_MASKB_C)
+#define bfin_write_FIO0_MASKB_C(val) bfin_write16(FIO0_MASKB_C,val)
+#define bfin_read_FIO0_MASKB_S() bfin_read16(FIO0_MASKB_S)
+#define bfin_write_FIO0_MASKB_S(val) bfin_write16(FIO0_MASKB_S,val)
+#define bfin_read_FIO0_MASKB_T() bfin_read16(FIO0_MASKB_T)
+#define bfin_write_FIO0_MASKB_T(val) bfin_write16(FIO0_MASKB_T,val)
+#define bfin_read_FIO0_DIR() bfin_read16(FIO0_DIR)
+#define bfin_write_FIO0_DIR(val) bfin_write16(FIO0_DIR,val)
+#define bfin_read_FIO0_POLAR() bfin_read16(FIO0_POLAR)
+#define bfin_write_FIO0_POLAR(val) bfin_write16(FIO0_POLAR,val)
+#define bfin_read_FIO0_EDGE() bfin_read16(FIO0_EDGE)
+#define bfin_write_FIO0_EDGE(val) bfin_write16(FIO0_EDGE,val)
+#define bfin_read_FIO0_BOTH() bfin_read16(FIO0_BOTH)
+#define bfin_write_FIO0_BOTH(val) bfin_write16(FIO0_BOTH,val)
+#define bfin_read_FIO0_INEN() bfin_read16(FIO0_INEN)
+#define bfin_write_FIO0_INEN(val) bfin_write16(FIO0_INEN,val)
+/* Programmable Flag 1 registers (0xFFC0 1500-0xFFC0 15FF) */
+#define bfin_read_FIO1_FLAG_D() bfin_read16(FIO1_FLAG_D)
+#define bfin_write_FIO1_FLAG_D(val) bfin_write16(FIO1_FLAG_D,val)
+#define bfin_read_FIO1_FLAG_C() bfin_read16(FIO1_FLAG_C)
+#define bfin_write_FIO1_FLAG_C(val) bfin_write16(FIO1_FLAG_C,val)
+#define bfin_read_FIO1_FLAG_S() bfin_read16(FIO1_FLAG_S)
+#define bfin_write_FIO1_FLAG_S(val) bfin_write16(FIO1_FLAG_S,val)
+#define bfin_read_FIO1_FLAG_T() bfin_read16(FIO1_FLAG_T)
+#define bfin_write_FIO1_FLAG_T(val) bfin_write16(FIO1_FLAG_T,val)
+#define bfin_read_FIO1_MASKA_D() bfin_read16(FIO1_MASKA_D)
+#define bfin_write_FIO1_MASKA_D(val) bfin_write16(FIO1_MASKA_D,val)
+#define bfin_read_FIO1_MASKA_C() bfin_read16(FIO1_MASKA_C)
+#define bfin_write_FIO1_MASKA_C(val) bfin_write16(FIO1_MASKA_C,val)
+#define bfin_read_FIO1_MASKA_S() bfin_read16(FIO1_MASKA_S)
+#define bfin_write_FIO1_MASKA_S(val) bfin_write16(FIO1_MASKA_S,val)
+#define bfin_read_FIO1_MASKA_T() bfin_read16(FIO1_MASKA_T)
+#define bfin_write_FIO1_MASKA_T(val) bfin_write16(FIO1_MASKA_T,val)
+#define bfin_read_FIO1_MASKB_D() bfin_read16(FIO1_MASKB_D)
+#define bfin_write_FIO1_MASKB_D(val) bfin_write16(FIO1_MASKB_D,val)
+#define bfin_read_FIO1_MASKB_C() bfin_read16(FIO1_MASKB_C)
+#define bfin_write_FIO1_MASKB_C(val) bfin_write16(FIO1_MASKB_C,val)
+#define bfin_read_FIO1_MASKB_S() bfin_read16(FIO1_MASKB_S)
+#define bfin_write_FIO1_MASKB_S(val) bfin_write16(FIO1_MASKB_S,val)
+#define bfin_read_FIO1_MASKB_T() bfin_read16(FIO1_MASKB_T)
+#define bfin_write_FIO1_MASKB_T(val) bfin_write16(FIO1_MASKB_T,val)
+#define bfin_read_FIO1_DIR() bfin_read16(FIO1_DIR)
+#define bfin_write_FIO1_DIR(val) bfin_write16(FIO1_DIR,val)
+#define bfin_read_FIO1_POLAR() bfin_read16(FIO1_POLAR)
+#define bfin_write_FIO1_POLAR(val) bfin_write16(FIO1_POLAR,val)
+#define bfin_read_FIO1_EDGE() bfin_read16(FIO1_EDGE)
+#define bfin_write_FIO1_EDGE(val) bfin_write16(FIO1_EDGE,val)
+#define bfin_read_FIO1_BOTH() bfin_read16(FIO1_BOTH)
+#define bfin_write_FIO1_BOTH(val) bfin_write16(FIO1_BOTH,val)
+#define bfin_read_FIO1_INEN() bfin_read16(FIO1_INEN)
+#define bfin_write_FIO1_INEN(val) bfin_write16(FIO1_INEN,val)
+/* Programmable Flag registers (0xFFC0 1700-0xFFC0 17FF) */
+#define bfin_read_FIO2_FLAG_D() bfin_read16(FIO2_FLAG_D)
+#define bfin_write_FIO2_FLAG_D(val) bfin_write16(FIO2_FLAG_D,val)
+#define bfin_read_FIO2_FLAG_C() bfin_read16(FIO2_FLAG_C)
+#define bfin_write_FIO2_FLAG_C(val) bfin_write16(FIO2_FLAG_C,val)
+#define bfin_read_FIO2_FLAG_S() bfin_read16(FIO2_FLAG_S)
+#define bfin_write_FIO2_FLAG_S(val) bfin_write16(FIO2_FLAG_S,val)
+#define bfin_read_FIO2_FLAG_T() bfin_read16(FIO2_FLAG_T)
+#define bfin_write_FIO2_FLAG_T(val) bfin_write16(FIO2_FLAG_T,val)
+#define bfin_read_FIO2_MASKA_D() bfin_read16(FIO2_MASKA_D)
+#define bfin_write_FIO2_MASKA_D(val) bfin_write16(FIO2_MASKA_D,val)
+#define bfin_read_FIO2_MASKA_C() bfin_read16(FIO2_MASKA_C)
+#define bfin_write_FIO2_MASKA_C(val) bfin_write16(FIO2_MASKA_C,val)
+#define bfin_read_FIO2_MASKA_S() bfin_read16(FIO2_MASKA_S)
+#define bfin_write_FIO2_MASKA_S(val) bfin_write16(FIO2_MASKA_S,val)
+#define bfin_read_FIO2_MASKA_T() bfin_read16(FIO2_MASKA_T)
+#define bfin_write_FIO2_MASKA_T(val) bfin_write16(FIO2_MASKA_T,val)
+#define bfin_read_FIO2_MASKB_D() bfin_read16(FIO2_MASKB_D)
+#define bfin_write_FIO2_MASKB_D(val) bfin_write16(FIO2_MASKB_D,val)
+#define bfin_read_FIO2_MASKB_C() bfin_read16(FIO2_MASKB_C)
+#define bfin_write_FIO2_MASKB_C(val) bfin_write16(FIO2_MASKB_C,val)
+#define bfin_read_FIO2_MASKB_S() bfin_read16(FIO2_MASKB_S)
+#define bfin_write_FIO2_MASKB_S(val) bfin_write16(FIO2_MASKB_S,val)
+#define bfin_read_FIO2_MASKB_T() bfin_read16(FIO2_MASKB_T)
+#define bfin_write_FIO2_MASKB_T(val) bfin_write16(FIO2_MASKB_T,val)
+#define bfin_read_FIO2_DIR() bfin_read16(FIO2_DIR)
+#define bfin_write_FIO2_DIR(val) bfin_write16(FIO2_DIR,val)
+#define bfin_read_FIO2_POLAR() bfin_read16(FIO2_POLAR)
+#define bfin_write_FIO2_POLAR(val) bfin_write16(FIO2_POLAR,val)
+#define bfin_read_FIO2_EDGE() bfin_read16(FIO2_EDGE)
+#define bfin_write_FIO2_EDGE(val) bfin_write16(FIO2_EDGE,val)
+#define bfin_read_FIO2_BOTH() bfin_read16(FIO2_BOTH)
+#define bfin_write_FIO2_BOTH(val) bfin_write16(FIO2_BOTH,val)
+#define bfin_read_FIO2_INEN() bfin_read16(FIO2_INEN)
+#define bfin_write_FIO2_INEN(val) bfin_write16(FIO2_INEN,val)
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
+#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1,val)
+#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
+#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2,val)
+#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
+#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV,val)
+#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
+#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV,val)
+#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX,val)
+#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX,val)
+#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX,val)
+#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX,val)
+#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX)
+#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX,val)
+#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX)
+#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX,val)
+#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
+#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1,val)
+#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
+#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2,val)
+#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
+#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV,val)
+#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
+#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV,val)
+#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
+#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT,val)
+#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
+#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL,val)
+#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
+#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1,val)
+#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
+#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2,val)
+#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
+#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0,val)
+#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
+#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1,val)
+#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
+#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2,val)
+#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
+#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3,val)
+#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
+#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0,val)
+#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
+#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1,val)
+#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
+#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2,val)
+#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
+#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3,val)
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1)
+#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1,val)
+#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2)
+#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2,val)
+#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV)
+#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV,val)
+#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV)
+#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV,val)
+#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX,val)
+#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX,val)
+#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX,val)
+#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX,val)
+#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX)
+#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX,val)
+#define bfin_read_SPORT1_RX16() bfin_read16(SPORT1_RX)
+#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX,val)
+#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1)
+#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1,val)
+#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2)
+#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2,val)
+#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV)
+#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV,val)
+#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV)
+#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV,val)
+#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT)
+#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT,val)
+#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL)
+#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL,val)
+#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1)
+#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1,val)
+#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2)
+#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2,val)
+#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0)
+#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0,val)
+#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1)
+#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1,val)
+#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2)
+#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2,val)
+#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3)
+#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3,val)
+#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0)
+#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0,val)
+#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1)
+#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1,val)
+#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2)
+#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2,val)
+#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3)
+#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3,val)
+/* Asynchronous Memory Controller - External Bus Interface Unit */
+#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL)
+#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL,val)
+#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0)
+#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0,val)
+#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1)
+#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1,val)
+/* SDRAM Controller External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
+#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL)
+#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL,val)
+#define bfin_read_EBIU_SDBCTL() bfin_read32(EBIU_SDBCTL)
+#define bfin_write_EBIU_SDBCTL(val) bfin_write32(EBIU_SDBCTL,val)
+#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC)
+#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC,val)
+#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
+#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT,val)
+/* Parallel Peripheral Interface (PPI) 0 registers (0xFFC0 1000-0xFFC0 10FF) */
+#define bfin_read_PPI0_CONTROL() bfin_read16(PPI0_CONTROL)
+#define bfin_write_PPI0_CONTROL(val) bfin_write16(PPI0_CONTROL,val)
+#define bfin_read_PPI0_STATUS() bfin_read16(PPI0_STATUS)
+#define bfin_write_PPI0_STATUS(val) bfin_write16(PPI0_STATUS,val)
+#define bfin_read_PPI0_COUNT() bfin_read16(PPI0_COUNT)
+#define bfin_write_PPI0_COUNT(val) bfin_write16(PPI0_COUNT,val)
+#define bfin_read_PPI0_DELAY() bfin_read16(PPI0_DELAY)
+#define bfin_write_PPI0_DELAY(val) bfin_write16(PPI0_DELAY,val)
+#define bfin_read_PPI0_FRAME() bfin_read16(PPI0_FRAME)
+#define bfin_write_PPI0_FRAME(val) bfin_write16(PPI0_FRAME,val)
+/* Parallel Peripheral Interface (PPI) 1 registers (0xFFC0 1300-0xFFC0 13FF) */
+#define bfin_read_PPI1_CONTROL() bfin_read16(PPI1_CONTROL)
+#define bfin_write_PPI1_CONTROL(val) bfin_write16(PPI1_CONTROL,val)
+#define bfin_read_PPI1_STATUS() bfin_read16(PPI1_STATUS)
+#define bfin_write_PPI1_STATUS(val) bfin_write16(PPI1_STATUS,val)
+#define bfin_read_PPI1_COUNT() bfin_read16(PPI1_COUNT)
+#define bfin_write_PPI1_COUNT(val) bfin_write16(PPI1_COUNT,val)
+#define bfin_read_PPI1_DELAY() bfin_read16(PPI1_DELAY)
+#define bfin_write_PPI1_DELAY(val) bfin_write16(PPI1_DELAY,val)
+#define bfin_read_PPI1_FRAME() bfin_read16(PPI1_FRAME)
+#define bfin_write_PPI1_FRAME(val) bfin_write16(PPI1_FRAME,val)
+/*DMA traffic control registers */
+#define bfin_read_DMA1_TC_PER() bfin_read16(DMA1_TC_PER) +#define bfin_write_DMA1_TC_PER(val) bfin_write16(DMA1_TC_PER,val) +#define bfin_read_DMA1_TC_CNT() bfin_read16(DMA1_TC_CNT) +#define bfin_write_DMA1_TC_CNT(val) bfin_write16(DMA1_TC_CNT,val) +#define bfin_read_DMA2_TC_PER() bfin_read16(DMA2_TC_PER) +#define bfin_write_DMA2_TC_PER(val) bfin_write16(DMA2_TC_PER,val) +#define bfin_read_DMA2_TC_CNT() bfin_read16(DMA2_TC_CNT) +#define bfin_write_DMA2_TC_CNT(val) bfin_write16(DMA2_TC_CNT,val) +/* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */ +#define bfin_read_DMA1_0_CONFIG() bfin_read16(DMA1_0_CONFIG) +#define bfin_write_DMA1_0_CONFIG(val) bfin_write16(DMA1_0_CONFIG,val) +#define bfin_read_DMA1_0_NEXT_DESC_PTR() bfin_read32(DMA1_0_NEXT_DESC_PTR) +#define bfin_write_DMA1_0_NEXT_DESC_PTR(val) bfin_write32(DMA1_0_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_0_START_ADDR() bfin_read32(DMA1_0_START_ADDR) +#define bfin_write_DMA1_0_START_ADDR(val) bfin_write32(DMA1_0_START_ADDR,val) +#define bfin_read_DMA1_0_X_COUNT() bfin_read16(DMA1_0_X_COUNT) +#define bfin_write_DMA1_0_X_COUNT(val) bfin_write16(DMA1_0_X_COUNT,val) +#define bfin_read_DMA1_0_Y_COUNT() bfin_read16(DMA1_0_Y_COUNT) +#define bfin_write_DMA1_0_Y_COUNT(val) bfin_write16(DMA1_0_Y_COUNT,val) +#define bfin_read_DMA1_0_X_MODIFY() bfin_read16(DMA1_0_X_MODIFY) +#define bfin_write_DMA1_0_X_MODIFY(val) bfin_write16(DMA1_0_X_MODIFY,val) +#define bfin_read_DMA1_0_Y_MODIFY() bfin_read16(DMA1_0_Y_MODIFY) +#define bfin_write_DMA1_0_Y_MODIFY(val) bfin_write16(DMA1_0_Y_MODIFY,val) +#define bfin_read_DMA1_0_CURR_DESC_PTR() bfin_read32(DMA1_0_CURR_DESC_PTR) +#define bfin_write_DMA1_0_CURR_DESC_PTR(val) bfin_write32(DMA1_0_CURR_DESC_PTR,val) +#define bfin_read_DMA1_0_CURR_ADDR() bfin_read32(DMA1_0_CURR_ADDR) +#define bfin_write_DMA1_0_CURR_ADDR(val) bfin_write32(DMA1_0_CURR_ADDR,val) +#define bfin_read_DMA1_0_CURR_X_COUNT() bfin_read16(DMA1_0_CURR_X_COUNT) +#define bfin_write_DMA1_0_CURR_X_COUNT(val) bfin_write16(DMA1_0_CURR_X_COUNT,val) +#define bfin_read_DMA1_0_CURR_Y_COUNT() bfin_read16(DMA1_0_CURR_Y_COUNT) +#define bfin_write_DMA1_0_CURR_Y_COUNT(val) bfin_write16(DMA1_0_CURR_Y_COUNT,val) +#define bfin_read_DMA1_0_IRQ_STATUS() bfin_read16(DMA1_0_IRQ_STATUS) +#define bfin_write_DMA1_0_IRQ_STATUS(val) bfin_write16(DMA1_0_IRQ_STATUS,val) +#define bfin_read_DMA1_0_PERIPHERAL_MAP() bfin_read16(DMA1_0_PERIPHERAL_MAP) +#define bfin_write_DMA1_0_PERIPHERAL_MAP(val) bfin_write16(DMA1_0_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_1_CONFIG() bfin_read16(DMA1_1_CONFIG) +#define bfin_write_DMA1_1_CONFIG(val) bfin_write16(DMA1_1_CONFIG,val) +#define bfin_read_DMA1_1_NEXT_DESC_PTR() bfin_read32(DMA1_1_NEXT_DESC_PTR) +#define bfin_write_DMA1_1_NEXT_DESC_PTR(val) bfin_write32(DMA1_1_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_1_START_ADDR() bfin_read32(DMA1_1_START_ADDR) +#define bfin_write_DMA1_1_START_ADDR(val) bfin_write32(DMA1_1_START_ADDR,val) +#define bfin_read_DMA1_1_X_COUNT() bfin_read16(DMA1_1_X_COUNT) +#define bfin_write_DMA1_1_X_COUNT(val) bfin_write16(DMA1_1_X_COUNT,val) +#define bfin_read_DMA1_1_Y_COUNT() bfin_read16(DMA1_1_Y_COUNT) +#define bfin_write_DMA1_1_Y_COUNT(val) bfin_write16(DMA1_1_Y_COUNT,val) +#define bfin_read_DMA1_1_X_MODIFY() bfin_read16(DMA1_1_X_MODIFY) +#define bfin_write_DMA1_1_X_MODIFY(val) bfin_write16(DMA1_1_X_MODIFY,val) +#define bfin_read_DMA1_1_Y_MODIFY() bfin_read16(DMA1_1_Y_MODIFY) +#define bfin_write_DMA1_1_Y_MODIFY(val) bfin_write16(DMA1_1_Y_MODIFY,val) +#define bfin_read_DMA1_1_CURR_DESC_PTR() 
bfin_read32(DMA1_1_CURR_DESC_PTR) +#define bfin_write_DMA1_1_CURR_DESC_PTR(val) bfin_write32(DMA1_1_CURR_DESC_PTR,val) +#define bfin_read_DMA1_1_CURR_ADDR() bfin_read32(DMA1_1_CURR_ADDR) +#define bfin_write_DMA1_1_CURR_ADDR(val) bfin_write32(DMA1_1_CURR_ADDR,val) +#define bfin_read_DMA1_1_CURR_X_COUNT() bfin_read16(DMA1_1_CURR_X_COUNT) +#define bfin_write_DMA1_1_CURR_X_COUNT(val) bfin_write16(DMA1_1_CURR_X_COUNT,val) +#define bfin_read_DMA1_1_CURR_Y_COUNT() bfin_read16(DMA1_1_CURR_Y_COUNT) +#define bfin_write_DMA1_1_CURR_Y_COUNT(val) bfin_write16(DMA1_1_CURR_Y_COUNT,val) +#define bfin_read_DMA1_1_IRQ_STATUS() bfin_read16(DMA1_1_IRQ_STATUS) +#define bfin_write_DMA1_1_IRQ_STATUS(val) bfin_write16(DMA1_1_IRQ_STATUS,val) +#define bfin_read_DMA1_1_PERIPHERAL_MAP() bfin_read16(DMA1_1_PERIPHERAL_MAP) +#define bfin_write_DMA1_1_PERIPHERAL_MAP(val) bfin_write16(DMA1_1_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_2_CONFIG() bfin_read16(DMA1_2_CONFIG) +#define bfin_write_DMA1_2_CONFIG(val) bfin_write16(DMA1_2_CONFIG,val) +#define bfin_read_DMA1_2_NEXT_DESC_PTR() bfin_read32(DMA1_2_NEXT_DESC_PTR) +#define bfin_write_DMA1_2_NEXT_DESC_PTR(val) bfin_write32(DMA1_2_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_2_START_ADDR() bfin_read32(DMA1_2_START_ADDR) +#define bfin_write_DMA1_2_START_ADDR(val) bfin_write32(DMA1_2_START_ADDR,val) +#define bfin_read_DMA1_2_X_COUNT() bfin_read16(DMA1_2_X_COUNT) +#define bfin_write_DMA1_2_X_COUNT(val) bfin_write16(DMA1_2_X_COUNT,val) +#define bfin_read_DMA1_2_Y_COUNT() bfin_read16(DMA1_2_Y_COUNT) +#define bfin_write_DMA1_2_Y_COUNT(val) bfin_write16(DMA1_2_Y_COUNT,val) +#define bfin_read_DMA1_2_X_MODIFY() bfin_read16(DMA1_2_X_MODIFY) +#define bfin_write_DMA1_2_X_MODIFY(val) bfin_write16(DMA1_2_X_MODIFY,val) +#define bfin_read_DMA1_2_Y_MODIFY() bfin_read16(DMA1_2_Y_MODIFY) +#define bfin_write_DMA1_2_Y_MODIFY(val) bfin_write16(DMA1_2_Y_MODIFY,val) +#define bfin_read_DMA1_2_CURR_DESC_PTR() bfin_read32(DMA1_2_CURR_DESC_PTR) +#define bfin_write_DMA1_2_CURR_DESC_PTR(val) bfin_write32(DMA1_2_CURR_DESC_PTR,val) +#define bfin_read_DMA1_2_CURR_ADDR() bfin_read32(DMA1_2_CURR_ADDR) +#define bfin_write_DMA1_2_CURR_ADDR(val) bfin_write32(DMA1_2_CURR_ADDR,val) +#define bfin_read_DMA1_2_CURR_X_COUNT() bfin_read16(DMA1_2_CURR_X_COUNT) +#define bfin_write_DMA1_2_CURR_X_COUNT(val) bfin_write16(DMA1_2_CURR_X_COUNT,val) +#define bfin_read_DMA1_2_CURR_Y_COUNT() bfin_read16(DMA1_2_CURR_Y_COUNT) +#define bfin_write_DMA1_2_CURR_Y_COUNT(val) bfin_write16(DMA1_2_CURR_Y_COUNT,val) +#define bfin_read_DMA1_2_IRQ_STATUS() bfin_read16(DMA1_2_IRQ_STATUS) +#define bfin_write_DMA1_2_IRQ_STATUS(val) bfin_write16(DMA1_2_IRQ_STATUS,val) +#define bfin_read_DMA1_2_PERIPHERAL_MAP() bfin_read16(DMA1_2_PERIPHERAL_MAP) +#define bfin_write_DMA1_2_PERIPHERAL_MAP(val) bfin_write16(DMA1_2_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_3_CONFIG() bfin_read16(DMA1_3_CONFIG) +#define bfin_write_DMA1_3_CONFIG(val) bfin_write16(DMA1_3_CONFIG,val) +#define bfin_read_DMA1_3_NEXT_DESC_PTR() bfin_read32(DMA1_3_NEXT_DESC_PTR) +#define bfin_write_DMA1_3_NEXT_DESC_PTR(val) bfin_write32(DMA1_3_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_3_START_ADDR() bfin_read32(DMA1_3_START_ADDR) +#define bfin_write_DMA1_3_START_ADDR(val) bfin_write32(DMA1_3_START_ADDR,val) +#define bfin_read_DMA1_3_X_COUNT() bfin_read16(DMA1_3_X_COUNT) +#define bfin_write_DMA1_3_X_COUNT(val) bfin_write16(DMA1_3_X_COUNT,val) +#define bfin_read_DMA1_3_Y_COUNT() bfin_read16(DMA1_3_Y_COUNT) +#define bfin_write_DMA1_3_Y_COUNT(val) bfin_write16(DMA1_3_Y_COUNT,val) +#define 
bfin_read_DMA1_3_X_MODIFY() bfin_read16(DMA1_3_X_MODIFY) +#define bfin_write_DMA1_3_X_MODIFY(val) bfin_write16(DMA1_3_X_MODIFY,val) +#define bfin_read_DMA1_3_Y_MODIFY() bfin_read16(DMA1_3_Y_MODIFY) +#define bfin_write_DMA1_3_Y_MODIFY(val) bfin_write16(DMA1_3_Y_MODIFY,val) +#define bfin_read_DMA1_3_CURR_DESC_PTR() bfin_read32(DMA1_3_CURR_DESC_PTR) +#define bfin_write_DMA1_3_CURR_DESC_PTR(val) bfin_write32(DMA1_3_CURR_DESC_PTR,val) +#define bfin_read_DMA1_3_CURR_ADDR() bfin_read32(DMA1_3_CURR_ADDR) +#define bfin_write_DMA1_3_CURR_ADDR(val) bfin_write32(DMA1_3_CURR_ADDR,val) +#define bfin_read_DMA1_3_CURR_X_COUNT() bfin_read16(DMA1_3_CURR_X_COUNT) +#define bfin_write_DMA1_3_CURR_X_COUNT(val) bfin_write16(DMA1_3_CURR_X_COUNT,val) +#define bfin_read_DMA1_3_CURR_Y_COUNT() bfin_read16(DMA1_3_CURR_Y_COUNT) +#define bfin_write_DMA1_3_CURR_Y_COUNT(val) bfin_write16(DMA1_3_CURR_Y_COUNT,val) +#define bfin_read_DMA1_3_IRQ_STATUS() bfin_read16(DMA1_3_IRQ_STATUS) +#define bfin_write_DMA1_3_IRQ_STATUS(val) bfin_write16(DMA1_3_IRQ_STATUS,val) +#define bfin_read_DMA1_3_PERIPHERAL_MAP() bfin_read16(DMA1_3_PERIPHERAL_MAP) +#define bfin_write_DMA1_3_PERIPHERAL_MAP(val) bfin_write16(DMA1_3_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_4_CONFIG() bfin_read16(DMA1_4_CONFIG) +#define bfin_write_DMA1_4_CONFIG(val) bfin_write16(DMA1_4_CONFIG,val) +#define bfin_read_DMA1_4_NEXT_DESC_PTR() bfin_read32(DMA1_4_NEXT_DESC_PTR) +#define bfin_write_DMA1_4_NEXT_DESC_PTR(val) bfin_write32(DMA1_4_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_4_START_ADDR() bfin_read32(DMA1_4_START_ADDR) +#define bfin_write_DMA1_4_START_ADDR(val) bfin_write32(DMA1_4_START_ADDR,val) +#define bfin_read_DMA1_4_X_COUNT() bfin_read16(DMA1_4_X_COUNT) +#define bfin_write_DMA1_4_X_COUNT(val) bfin_write16(DMA1_4_X_COUNT,val) +#define bfin_read_DMA1_4_Y_COUNT() bfin_read16(DMA1_4_Y_COUNT) +#define bfin_write_DMA1_4_Y_COUNT(val) bfin_write16(DMA1_4_Y_COUNT,val) +#define bfin_read_DMA1_4_X_MODIFY() bfin_read16(DMA1_4_X_MODIFY) +#define bfin_write_DMA1_4_X_MODIFY(val) bfin_write16(DMA1_4_X_MODIFY,val) +#define bfin_read_DMA1_4_Y_MODIFY() bfin_read16(DMA1_4_Y_MODIFY) +#define bfin_write_DMA1_4_Y_MODIFY(val) bfin_write16(DMA1_4_Y_MODIFY,val) +#define bfin_read_DMA1_4_CURR_DESC_PTR() bfin_read32(DMA1_4_CURR_DESC_PTR) +#define bfin_write_DMA1_4_CURR_DESC_PTR(val) bfin_write32(DMA1_4_CURR_DESC_PTR,val) +#define bfin_read_DMA1_4_CURR_ADDR() bfin_read32(DMA1_4_CURR_ADDR) +#define bfin_write_DMA1_4_CURR_ADDR(val) bfin_write32(DMA1_4_CURR_ADDR,val) +#define bfin_read_DMA1_4_CURR_X_COUNT() bfin_read16(DMA1_4_CURR_X_COUNT) +#define bfin_write_DMA1_4_CURR_X_COUNT(val) bfin_write16(DMA1_4_CURR_X_COUNT,val) +#define bfin_read_DMA1_4_CURR_Y_COUNT() bfin_read16(DMA1_4_CURR_Y_COUNT) +#define bfin_write_DMA1_4_CURR_Y_COUNT(val) bfin_write16(DMA1_4_CURR_Y_COUNT,val) +#define bfin_read_DMA1_4_IRQ_STATUS() bfin_read16(DMA1_4_IRQ_STATUS) +#define bfin_write_DMA1_4_IRQ_STATUS(val) bfin_write16(DMA1_4_IRQ_STATUS,val) +#define bfin_read_DMA1_4_PERIPHERAL_MAP() bfin_read16(DMA1_4_PERIPHERAL_MAP) +#define bfin_write_DMA1_4_PERIPHERAL_MAP(val) bfin_write16(DMA1_4_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_5_CONFIG() bfin_read16(DMA1_5_CONFIG) +#define bfin_write_DMA1_5_CONFIG(val) bfin_write16(DMA1_5_CONFIG,val) +#define bfin_read_DMA1_5_NEXT_DESC_PTR() bfin_read32(DMA1_5_NEXT_DESC_PTR) +#define bfin_write_DMA1_5_NEXT_DESC_PTR(val) bfin_write32(DMA1_5_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_5_START_ADDR() bfin_read32(DMA1_5_START_ADDR) +#define bfin_write_DMA1_5_START_ADDR(val) 
bfin_write32(DMA1_5_START_ADDR,val) +#define bfin_read_DMA1_5_X_COUNT() bfin_read16(DMA1_5_X_COUNT) +#define bfin_write_DMA1_5_X_COUNT(val) bfin_write16(DMA1_5_X_COUNT,val) +#define bfin_read_DMA1_5_Y_COUNT() bfin_read16(DMA1_5_Y_COUNT) +#define bfin_write_DMA1_5_Y_COUNT(val) bfin_write16(DMA1_5_Y_COUNT,val) +#define bfin_read_DMA1_5_X_MODIFY() bfin_read16(DMA1_5_X_MODIFY) +#define bfin_write_DMA1_5_X_MODIFY(val) bfin_write16(DMA1_5_X_MODIFY,val) +#define bfin_read_DMA1_5_Y_MODIFY() bfin_read16(DMA1_5_Y_MODIFY) +#define bfin_write_DMA1_5_Y_MODIFY(val) bfin_write16(DMA1_5_Y_MODIFY,val) +#define bfin_read_DMA1_5_CURR_DESC_PTR() bfin_read32(DMA1_5_CURR_DESC_PTR) +#define bfin_write_DMA1_5_CURR_DESC_PTR(val) bfin_write32(DMA1_5_CURR_DESC_PTR,val) +#define bfin_read_DMA1_5_CURR_ADDR() bfin_read32(DMA1_5_CURR_ADDR) +#define bfin_write_DMA1_5_CURR_ADDR(val) bfin_write32(DMA1_5_CURR_ADDR,val) +#define bfin_read_DMA1_5_CURR_X_COUNT() bfin_read16(DMA1_5_CURR_X_COUNT) +#define bfin_write_DMA1_5_CURR_X_COUNT(val) bfin_write16(DMA1_5_CURR_X_COUNT,val) +#define bfin_read_DMA1_5_CURR_Y_COUNT() bfin_read16(DMA1_5_CURR_Y_COUNT) +#define bfin_write_DMA1_5_CURR_Y_COUNT(val) bfin_write16(DMA1_5_CURR_Y_COUNT,val) +#define bfin_read_DMA1_5_IRQ_STATUS() bfin_read16(DMA1_5_IRQ_STATUS) +#define bfin_write_DMA1_5_IRQ_STATUS(val) bfin_write16(DMA1_5_IRQ_STATUS,val) +#define bfin_read_DMA1_5_PERIPHERAL_MAP() bfin_read16(DMA1_5_PERIPHERAL_MAP) +#define bfin_write_DMA1_5_PERIPHERAL_MAP(val) bfin_write16(DMA1_5_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_6_CONFIG() bfin_read16(DMA1_6_CONFIG) +#define bfin_write_DMA1_6_CONFIG(val) bfin_write16(DMA1_6_CONFIG,val) +#define bfin_read_DMA1_6_NEXT_DESC_PTR() bfin_read32(DMA1_6_NEXT_DESC_PTR) +#define bfin_write_DMA1_6_NEXT_DESC_PTR(val) bfin_write32(DMA1_6_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_6_START_ADDR() bfin_read32(DMA1_6_START_ADDR) +#define bfin_write_DMA1_6_START_ADDR(val) bfin_write32(DMA1_6_START_ADDR,val) +#define bfin_read_DMA1_6_X_COUNT() bfin_read16(DMA1_6_X_COUNT) +#define bfin_write_DMA1_6_X_COUNT(val) bfin_write16(DMA1_6_X_COUNT,val) +#define bfin_read_DMA1_6_Y_COUNT() bfin_read16(DMA1_6_Y_COUNT) +#define bfin_write_DMA1_6_Y_COUNT(val) bfin_write16(DMA1_6_Y_COUNT,val) +#define bfin_read_DMA1_6_X_MODIFY() bfin_read16(DMA1_6_X_MODIFY) +#define bfin_write_DMA1_6_X_MODIFY(val) bfin_write16(DMA1_6_X_MODIFY,val) +#define bfin_read_DMA1_6_Y_MODIFY() bfin_read16(DMA1_6_Y_MODIFY) +#define bfin_write_DMA1_6_Y_MODIFY(val) bfin_write16(DMA1_6_Y_MODIFY,val) +#define bfin_read_DMA1_6_CURR_DESC_PTR() bfin_read32(DMA1_6_CURR_DESC_PTR) +#define bfin_write_DMA1_6_CURR_DESC_PTR(val) bfin_write32(DMA1_6_CURR_DESC_PTR,val) +#define bfin_read_DMA1_6_CURR_ADDR() bfin_read32(DMA1_6_CURR_ADDR) +#define bfin_write_DMA1_6_CURR_ADDR(val) bfin_write32(DMA1_6_CURR_ADDR,val) +#define bfin_read_DMA1_6_CURR_X_COUNT() bfin_read16(DMA1_6_CURR_X_COUNT) +#define bfin_write_DMA1_6_CURR_X_COUNT(val) bfin_write16(DMA1_6_CURR_X_COUNT,val) +#define bfin_read_DMA1_6_CURR_Y_COUNT() bfin_read16(DMA1_6_CURR_Y_COUNT) +#define bfin_write_DMA1_6_CURR_Y_COUNT(val) bfin_write16(DMA1_6_CURR_Y_COUNT,val) +#define bfin_read_DMA1_6_IRQ_STATUS() bfin_read16(DMA1_6_IRQ_STATUS) +#define bfin_write_DMA1_6_IRQ_STATUS(val) bfin_write16(DMA1_6_IRQ_STATUS,val) +#define bfin_read_DMA1_6_PERIPHERAL_MAP() bfin_read16(DMA1_6_PERIPHERAL_MAP) +#define bfin_write_DMA1_6_PERIPHERAL_MAP(val) bfin_write16(DMA1_6_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_7_CONFIG() bfin_read16(DMA1_7_CONFIG) +#define 
bfin_write_DMA1_7_CONFIG(val) bfin_write16(DMA1_7_CONFIG,val) +#define bfin_read_DMA1_7_NEXT_DESC_PTR() bfin_read32(DMA1_7_NEXT_DESC_PTR) +#define bfin_write_DMA1_7_NEXT_DESC_PTR(val) bfin_write32(DMA1_7_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_7_START_ADDR() bfin_read32(DMA1_7_START_ADDR) +#define bfin_write_DMA1_7_START_ADDR(val) bfin_write32(DMA1_7_START_ADDR,val) +#define bfin_read_DMA1_7_X_COUNT() bfin_read16(DMA1_7_X_COUNT) +#define bfin_write_DMA1_7_X_COUNT(val) bfin_write16(DMA1_7_X_COUNT,val) +#define bfin_read_DMA1_7_Y_COUNT() bfin_read16(DMA1_7_Y_COUNT) +#define bfin_write_DMA1_7_Y_COUNT(val) bfin_write16(DMA1_7_Y_COUNT,val) +#define bfin_read_DMA1_7_X_MODIFY() bfin_read16(DMA1_7_X_MODIFY) +#define bfin_write_DMA1_7_X_MODIFY(val) bfin_write16(DMA1_7_X_MODIFY,val) +#define bfin_read_DMA1_7_Y_MODIFY() bfin_read16(DMA1_7_Y_MODIFY) +#define bfin_write_DMA1_7_Y_MODIFY(val) bfin_write16(DMA1_7_Y_MODIFY,val) +#define bfin_read_DMA1_7_CURR_DESC_PTR() bfin_read32(DMA1_7_CURR_DESC_PTR) +#define bfin_write_DMA1_7_CURR_DESC_PTR(val) bfin_write32(DMA1_7_CURR_DESC_PTR,val) +#define bfin_read_DMA1_7_CURR_ADDR() bfin_read32(DMA1_7_CURR_ADDR) +#define bfin_write_DMA1_7_CURR_ADDR(val) bfin_write32(DMA1_7_CURR_ADDR,val) +#define bfin_read_DMA1_7_CURR_X_COUNT() bfin_read16(DMA1_7_CURR_X_COUNT) +#define bfin_write_DMA1_7_CURR_X_COUNT(val) bfin_write16(DMA1_7_CURR_X_COUNT,val) +#define bfin_read_DMA1_7_CURR_Y_COUNT() bfin_read16(DMA1_7_CURR_Y_COUNT) +#define bfin_write_DMA1_7_CURR_Y_COUNT(val) bfin_write16(DMA1_7_CURR_Y_COUNT,val) +#define bfin_read_DMA1_7_IRQ_STATUS() bfin_read16(DMA1_7_IRQ_STATUS) +#define bfin_write_DMA1_7_IRQ_STATUS(val) bfin_write16(DMA1_7_IRQ_STATUS,val) +#define bfin_read_DMA1_7_PERIPHERAL_MAP() bfin_read16(DMA1_7_PERIPHERAL_MAP) +#define bfin_write_DMA1_7_PERIPHERAL_MAP(val) bfin_write16(DMA1_7_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_8_CONFIG() bfin_read16(DMA1_8_CONFIG) +#define bfin_write_DMA1_8_CONFIG(val) bfin_write16(DMA1_8_CONFIG,val) +#define bfin_read_DMA1_8_NEXT_DESC_PTR() bfin_read32(DMA1_8_NEXT_DESC_PTR) +#define bfin_write_DMA1_8_NEXT_DESC_PTR(val) bfin_write32(DMA1_8_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_8_START_ADDR() bfin_read32(DMA1_8_START_ADDR) +#define bfin_write_DMA1_8_START_ADDR(val) bfin_write32(DMA1_8_START_ADDR,val) +#define bfin_read_DMA1_8_X_COUNT() bfin_read16(DMA1_8_X_COUNT) +#define bfin_write_DMA1_8_X_COUNT(val) bfin_write16(DMA1_8_X_COUNT,val) +#define bfin_read_DMA1_8_Y_COUNT() bfin_read16(DMA1_8_Y_COUNT) +#define bfin_write_DMA1_8_Y_COUNT(val) bfin_write16(DMA1_8_Y_COUNT,val) +#define bfin_read_DMA1_8_X_MODIFY() bfin_read16(DMA1_8_X_MODIFY) +#define bfin_write_DMA1_8_X_MODIFY(val) bfin_write16(DMA1_8_X_MODIFY,val) +#define bfin_read_DMA1_8_Y_MODIFY() bfin_read16(DMA1_8_Y_MODIFY) +#define bfin_write_DMA1_8_Y_MODIFY(val) bfin_write16(DMA1_8_Y_MODIFY,val) +#define bfin_read_DMA1_8_CURR_DESC_PTR() bfin_read32(DMA1_8_CURR_DESC_PTR) +#define bfin_write_DMA1_8_CURR_DESC_PTR(val) bfin_write32(DMA1_8_CURR_DESC_PTR,val) +#define bfin_read_DMA1_8_CURR_ADDR() bfin_read32(DMA1_8_CURR_ADDR) +#define bfin_write_DMA1_8_CURR_ADDR(val) bfin_write32(DMA1_8_CURR_ADDR,val) +#define bfin_read_DMA1_8_CURR_X_COUNT() bfin_read16(DMA1_8_CURR_X_COUNT) +#define bfin_write_DMA1_8_CURR_X_COUNT(val) bfin_write16(DMA1_8_CURR_X_COUNT,val) +#define bfin_read_DMA1_8_CURR_Y_COUNT() bfin_read16(DMA1_8_CURR_Y_COUNT) +#define bfin_write_DMA1_8_CURR_Y_COUNT(val) bfin_write16(DMA1_8_CURR_Y_COUNT,val) +#define bfin_read_DMA1_8_IRQ_STATUS() bfin_read16(DMA1_8_IRQ_STATUS) 
+#define bfin_write_DMA1_8_IRQ_STATUS(val) bfin_write16(DMA1_8_IRQ_STATUS,val) +#define bfin_read_DMA1_8_PERIPHERAL_MAP() bfin_read16(DMA1_8_PERIPHERAL_MAP) +#define bfin_write_DMA1_8_PERIPHERAL_MAP(val) bfin_write16(DMA1_8_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_9_CONFIG() bfin_read16(DMA1_9_CONFIG) +#define bfin_write_DMA1_9_CONFIG(val) bfin_write16(DMA1_9_CONFIG,val) +#define bfin_read_DMA1_9_NEXT_DESC_PTR() bfin_read32(DMA1_9_NEXT_DESC_PTR) +#define bfin_write_DMA1_9_NEXT_DESC_PTR(val) bfin_write32(DMA1_9_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_9_START_ADDR() bfin_read32(DMA1_9_START_ADDR) +#define bfin_write_DMA1_9_START_ADDR(val) bfin_write32(DMA1_9_START_ADDR,val) +#define bfin_read_DMA1_9_X_COUNT() bfin_read16(DMA1_9_X_COUNT) +#define bfin_write_DMA1_9_X_COUNT(val) bfin_write16(DMA1_9_X_COUNT,val) +#define bfin_read_DMA1_9_Y_COUNT() bfin_read16(DMA1_9_Y_COUNT) +#define bfin_write_DMA1_9_Y_COUNT(val) bfin_write16(DMA1_9_Y_COUNT,val) +#define bfin_read_DMA1_9_X_MODIFY() bfin_read16(DMA1_9_X_MODIFY) +#define bfin_write_DMA1_9_X_MODIFY(val) bfin_write16(DMA1_9_X_MODIFY,val) +#define bfin_read_DMA1_9_Y_MODIFY() bfin_read16(DMA1_9_Y_MODIFY) +#define bfin_write_DMA1_9_Y_MODIFY(val) bfin_write16(DMA1_9_Y_MODIFY,val) +#define bfin_read_DMA1_9_CURR_DESC_PTR() bfin_read32(DMA1_9_CURR_DESC_PTR) +#define bfin_write_DMA1_9_CURR_DESC_PTR(val) bfin_write32(DMA1_9_CURR_DESC_PTR,val) +#define bfin_read_DMA1_9_CURR_ADDR() bfin_read32(DMA1_9_CURR_ADDR) +#define bfin_write_DMA1_9_CURR_ADDR(val) bfin_write32(DMA1_9_CURR_ADDR,val) +#define bfin_read_DMA1_9_CURR_X_COUNT() bfin_read16(DMA1_9_CURR_X_COUNT) +#define bfin_write_DMA1_9_CURR_X_COUNT(val) bfin_write16(DMA1_9_CURR_X_COUNT,val) +#define bfin_read_DMA1_9_CURR_Y_COUNT() bfin_read16(DMA1_9_CURR_Y_COUNT) +#define bfin_write_DMA1_9_CURR_Y_COUNT(val) bfin_write16(DMA1_9_CURR_Y_COUNT,val) +#define bfin_read_DMA1_9_IRQ_STATUS() bfin_read16(DMA1_9_IRQ_STATUS) +#define bfin_write_DMA1_9_IRQ_STATUS(val) bfin_write16(DMA1_9_IRQ_STATUS,val) +#define bfin_read_DMA1_9_PERIPHERAL_MAP() bfin_read16(DMA1_9_PERIPHERAL_MAP) +#define bfin_write_DMA1_9_PERIPHERAL_MAP(val) bfin_write16(DMA1_9_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_10_CONFIG() bfin_read16(DMA1_10_CONFIG) +#define bfin_write_DMA1_10_CONFIG(val) bfin_write16(DMA1_10_CONFIG,val) +#define bfin_read_DMA1_10_NEXT_DESC_PTR() bfin_read32(DMA1_10_NEXT_DESC_PTR) +#define bfin_write_DMA1_10_NEXT_DESC_PTR(val) bfin_write32(DMA1_10_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_10_START_ADDR() bfin_read32(DMA1_10_START_ADDR) +#define bfin_write_DMA1_10_START_ADDR(val) bfin_write32(DMA1_10_START_ADDR,val) +#define bfin_read_DMA1_10_X_COUNT() bfin_read16(DMA1_10_X_COUNT) +#define bfin_write_DMA1_10_X_COUNT(val) bfin_write16(DMA1_10_X_COUNT,val) +#define bfin_read_DMA1_10_Y_COUNT() bfin_read16(DMA1_10_Y_COUNT) +#define bfin_write_DMA1_10_Y_COUNT(val) bfin_write16(DMA1_10_Y_COUNT,val) +#define bfin_read_DMA1_10_X_MODIFY() bfin_read16(DMA1_10_X_MODIFY) +#define bfin_write_DMA1_10_X_MODIFY(val) bfin_write16(DMA1_10_X_MODIFY,val) +#define bfin_read_DMA1_10_Y_MODIFY() bfin_read16(DMA1_10_Y_MODIFY) +#define bfin_write_DMA1_10_Y_MODIFY(val) bfin_write16(DMA1_10_Y_MODIFY,val) +#define bfin_read_DMA1_10_CURR_DESC_PTR() bfin_read32(DMA1_10_CURR_DESC_PTR) +#define bfin_write_DMA1_10_CURR_DESC_PTR(val) bfin_write32(DMA1_10_CURR_DESC_PTR,val) +#define bfin_read_DMA1_10_CURR_ADDR() bfin_read32(DMA1_10_CURR_ADDR) +#define bfin_write_DMA1_10_CURR_ADDR(val) bfin_write32(DMA1_10_CURR_ADDR,val) +#define 
bfin_read_DMA1_10_CURR_X_COUNT() bfin_read16(DMA1_10_CURR_X_COUNT) +#define bfin_write_DMA1_10_CURR_X_COUNT(val) bfin_write16(DMA1_10_CURR_X_COUNT,val) +#define bfin_read_DMA1_10_CURR_Y_COUNT() bfin_read16(DMA1_10_CURR_Y_COUNT) +#define bfin_write_DMA1_10_CURR_Y_COUNT(val) bfin_write16(DMA1_10_CURR_Y_COUNT,val) +#define bfin_read_DMA1_10_IRQ_STATUS() bfin_read16(DMA1_10_IRQ_STATUS) +#define bfin_write_DMA1_10_IRQ_STATUS(val) bfin_write16(DMA1_10_IRQ_STATUS,val) +#define bfin_read_DMA1_10_PERIPHERAL_MAP() bfin_read16(DMA1_10_PERIPHERAL_MAP) +#define bfin_write_DMA1_10_PERIPHERAL_MAP(val) bfin_write16(DMA1_10_PERIPHERAL_MAP,val) +#define bfin_read_DMA1_11_CONFIG() bfin_read16(DMA1_11_CONFIG) +#define bfin_write_DMA1_11_CONFIG(val) bfin_write16(DMA1_11_CONFIG,val) +#define bfin_read_DMA1_11_NEXT_DESC_PTR() bfin_read32(DMA1_11_NEXT_DESC_PTR) +#define bfin_write_DMA1_11_NEXT_DESC_PTR(val) bfin_write32(DMA1_11_NEXT_DESC_PTR,val) +#define bfin_read_DMA1_11_START_ADDR() bfin_read32(DMA1_11_START_ADDR) +#define bfin_write_DMA1_11_START_ADDR(val) bfin_write32(DMA1_11_START_ADDR,val) +#define bfin_read_DMA1_11_X_COUNT() bfin_read16(DMA1_11_X_COUNT) +#define bfin_write_DMA1_11_X_COUNT(val) bfin_write16(DMA1_11_X_COUNT,val) +#define bfin_read_DMA1_11_Y_COUNT() bfin_read16(DMA1_11_Y_COUNT) +#define bfin_write_DMA1_11_Y_COUNT(val) bfin_write16(DMA1_11_Y_COUNT,val) +#define bfin_read_DMA1_11_X_MODIFY() bfin_read16(DMA1_11_X_MODIFY) +#define bfin_write_DMA1_11_X_MODIFY(val) bfin_write16(DMA1_11_X_MODIFY,val) +#define bfin_read_DMA1_11_Y_MODIFY() bfin_read16(DMA1_11_Y_MODIFY) +#define bfin_write_DMA1_11_Y_MODIFY(val) bfin_write16(DMA1_11_Y_MODIFY,val) +#define bfin_read_DMA1_11_CURR_DESC_PTR() bfin_read32(DMA1_11_CURR_DESC_PTR) +#define bfin_write_DMA1_11_CURR_DESC_PTR(val) bfin_write32(DMA1_11_CURR_DESC_PTR,val) +#define bfin_read_DMA1_11_CURR_ADDR() bfin_read32(DMA1_11_CURR_ADDR) +#define bfin_write_DMA1_11_CURR_ADDR(val) bfin_write32(DMA1_11_CURR_ADDR,val) +#define bfin_read_DMA1_11_CURR_X_COUNT() bfin_read16(DMA1_11_CURR_X_COUNT) +#define bfin_write_DMA1_11_CURR_X_COUNT(val) bfin_write16(DMA1_11_CURR_X_COUNT,val) +#define bfin_read_DMA1_11_CURR_Y_COUNT() bfin_read16(DMA1_11_CURR_Y_COUNT) +#define bfin_write_DMA1_11_CURR_Y_COUNT(val) bfin_write16(DMA1_11_CURR_Y_COUNT,val) +#define bfin_read_DMA1_11_IRQ_STATUS() bfin_read16(DMA1_11_IRQ_STATUS) +#define bfin_write_DMA1_11_IRQ_STATUS(val) bfin_write16(DMA1_11_IRQ_STATUS,val) +#define bfin_read_DMA1_11_PERIPHERAL_MAP() bfin_read16(DMA1_11_PERIPHERAL_MAP) +#define bfin_write_DMA1_11_PERIPHERAL_MAP(val) bfin_write16(DMA1_11_PERIPHERAL_MAP,val) +/* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */ +#define bfin_read_MDMA1_D0_CONFIG() bfin_read16(MDMA1_D0_CONFIG) +#define bfin_write_MDMA1_D0_CONFIG(val) bfin_write16(MDMA1_D0_CONFIG,val) +#define bfin_read_MDMA1_D0_NEXT_DESC_PTR() bfin_read32(MDMA1_D0_NEXT_DESC_PTR) +#define bfin_write_MDMA1_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA1_D0_NEXT_DESC_PTR,val) +#define bfin_read_MDMA1_D0_START_ADDR() bfin_read32(MDMA1_D0_START_ADDR) +#define bfin_write_MDMA1_D0_START_ADDR(val) bfin_write32(MDMA1_D0_START_ADDR,val) +#define bfin_read_MDMA1_D0_X_COUNT() bfin_read16(MDMA1_D0_X_COUNT) +#define bfin_write_MDMA1_D0_X_COUNT(val) bfin_write16(MDMA1_D0_X_COUNT,val) +#define bfin_read_MDMA1_D0_Y_COUNT() bfin_read16(MDMA1_D0_Y_COUNT) +#define bfin_write_MDMA1_D0_Y_COUNT(val) bfin_write16(MDMA1_D0_Y_COUNT,val) +#define bfin_read_MDMA1_D0_X_MODIFY() bfin_read16(MDMA1_D0_X_MODIFY) +#define 
bfin_write_MDMA1_D0_X_MODIFY(val) bfin_write16(MDMA1_D0_X_MODIFY,val) +#define bfin_read_MDMA1_D0_Y_MODIFY() bfin_read16(MDMA1_D0_Y_MODIFY) +#define bfin_write_MDMA1_D0_Y_MODIFY(val) bfin_write16(MDMA1_D0_Y_MODIFY,val) +#define bfin_read_MDMA1_D0_CURR_DESC_PTR() bfin_read32(MDMA1_D0_CURR_DESC_PTR) +#define bfin_write_MDMA1_D0_CURR_DESC_PTR(val) bfin_write32(MDMA1_D0_CURR_DESC_PTR,val) +#define bfin_read_MDMA1_D0_CURR_ADDR() bfin_read32(MDMA1_D0_CURR_ADDR) +#define bfin_write_MDMA1_D0_CURR_ADDR(val) bfin_write32(MDMA1_D0_CURR_ADDR,val) +#define bfin_read_MDMA1_D0_CURR_X_COUNT() bfin_read16(MDMA1_D0_CURR_X_COUNT) +#define bfin_write_MDMA1_D0_CURR_X_COUNT(val) bfin_write16(MDMA1_D0_CURR_X_COUNT,val) +#define bfin_read_MDMA1_D0_CURR_Y_COUNT() bfin_read16(MDMA1_D0_CURR_Y_COUNT) +#define bfin_write_MDMA1_D0_CURR_Y_COUNT(val) bfin_write16(MDMA1_D0_CURR_Y_COUNT,val) +#define bfin_read_MDMA1_D0_IRQ_STATUS() bfin_read16(MDMA1_D0_IRQ_STATUS) +#define bfin_write_MDMA1_D0_IRQ_STATUS(val) bfin_write16(MDMA1_D0_IRQ_STATUS,val) +#define bfin_read_MDMA1_D0_PERIPHERAL_MAP() bfin_read16(MDMA1_D0_PERIPHERAL_MAP) +#define bfin_write_MDMA1_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D0_PERIPHERAL_MAP,val) +#define bfin_read_MDMA1_S0_CONFIG() bfin_read16(MDMA1_S0_CONFIG) +#define bfin_write_MDMA1_S0_CONFIG(val) bfin_write16(MDMA1_S0_CONFIG,val) +#define bfin_read_MDMA1_S0_NEXT_DESC_PTR() bfin_read32(MDMA1_S0_NEXT_DESC_PTR) +#define bfin_write_MDMA1_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA1_S0_NEXT_DESC_PTR,val) +#define bfin_read_MDMA1_S0_START_ADDR() bfin_read32(MDMA1_S0_START_ADDR) +#define bfin_write_MDMA1_S0_START_ADDR(val) bfin_write32(MDMA1_S0_START_ADDR,val) +#define bfin_read_MDMA1_S0_X_COUNT() bfin_read16(MDMA1_S0_X_COUNT) +#define bfin_write_MDMA1_S0_X_COUNT(val) bfin_write16(MDMA1_S0_X_COUNT,val) +#define bfin_read_MDMA1_S0_Y_COUNT() bfin_read16(MDMA1_S0_Y_COUNT) +#define bfin_write_MDMA1_S0_Y_COUNT(val) bfin_write16(MDMA1_S0_Y_COUNT,val) +#define bfin_read_MDMA1_S0_X_MODIFY() bfin_read16(MDMA1_S0_X_MODIFY) +#define bfin_write_MDMA1_S0_X_MODIFY(val) bfin_write16(MDMA1_S0_X_MODIFY,val) +#define bfin_read_MDMA1_S0_Y_MODIFY() bfin_read16(MDMA1_S0_Y_MODIFY) +#define bfin_write_MDMA1_S0_Y_MODIFY(val) bfin_write16(MDMA1_S0_Y_MODIFY,val) +#define bfin_read_MDMA1_S0_CURR_DESC_PTR() bfin_read32(MDMA1_S0_CURR_DESC_PTR) +#define bfin_write_MDMA1_S0_CURR_DESC_PTR(val) bfin_write32(MDMA1_S0_CURR_DESC_PTR,val) +#define bfin_read_MDMA1_S0_CURR_ADDR() bfin_read32(MDMA1_S0_CURR_ADDR) +#define bfin_write_MDMA1_S0_CURR_ADDR(val) bfin_write32(MDMA1_S0_CURR_ADDR,val) +#define bfin_read_MDMA1_S0_CURR_X_COUNT() bfin_read16(MDMA1_S0_CURR_X_COUNT) +#define bfin_write_MDMA1_S0_CURR_X_COUNT(val) bfin_write16(MDMA1_S0_CURR_X_COUNT,val) +#define bfin_read_MDMA1_S0_CURR_Y_COUNT() bfin_read16(MDMA1_S0_CURR_Y_COUNT) +#define bfin_write_MDMA1_S0_CURR_Y_COUNT(val) bfin_write16(MDMA1_S0_CURR_Y_COUNT,val) +#define bfin_read_MDMA1_S0_IRQ_STATUS() bfin_read16(MDMA1_S0_IRQ_STATUS) +#define bfin_write_MDMA1_S0_IRQ_STATUS(val) bfin_write16(MDMA1_S0_IRQ_STATUS,val) +#define bfin_read_MDMA1_S0_PERIPHERAL_MAP() bfin_read16(MDMA1_S0_PERIPHERAL_MAP) +#define bfin_write_MDMA1_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S0_PERIPHERAL_MAP,val) +#define bfin_read_MDMA1_D1_CONFIG() bfin_read16(MDMA1_D1_CONFIG) +#define bfin_write_MDMA1_D1_CONFIG(val) bfin_write16(MDMA1_D1_CONFIG,val) +#define bfin_read_MDMA1_D1_NEXT_DESC_PTR() bfin_read32(MDMA1_D1_NEXT_DESC_PTR) +#define bfin_write_MDMA1_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA1_D1_NEXT_DESC_PTR,val) 
+#define bfin_read_MDMA1_D1_START_ADDR() bfin_read32(MDMA1_D1_START_ADDR) +#define bfin_write_MDMA1_D1_START_ADDR(val) bfin_write32(MDMA1_D1_START_ADDR,val) +#define bfin_read_MDMA1_D1_X_COUNT() bfin_read16(MDMA1_D1_X_COUNT) +#define bfin_write_MDMA1_D1_X_COUNT(val) bfin_write16(MDMA1_D1_X_COUNT,val) +#define bfin_read_MDMA1_D1_Y_COUNT() bfin_read16(MDMA1_D1_Y_COUNT) +#define bfin_write_MDMA1_D1_Y_COUNT(val) bfin_write16(MDMA1_D1_Y_COUNT,val) +#define bfin_read_MDMA1_D1_X_MODIFY() bfin_read16(MDMA1_D1_X_MODIFY) +#define bfin_write_MDMA1_D1_X_MODIFY(val) bfin_write16(MDMA1_D1_X_MODIFY,val) +#define bfin_read_MDMA1_D1_Y_MODIFY() bfin_read16(MDMA1_D1_Y_MODIFY) +#define bfin_write_MDMA1_D1_Y_MODIFY(val) bfin_write16(MDMA1_D1_Y_MODIFY,val) +#define bfin_read_MDMA1_D1_CURR_DESC_PTR() bfin_read32(MDMA1_D1_CURR_DESC_PTR) +#define bfin_write_MDMA1_D1_CURR_DESC_PTR(val) bfin_write32(MDMA1_D1_CURR_DESC_PTR,val) +#define bfin_read_MDMA1_D1_CURR_ADDR() bfin_read32(MDMA1_D1_CURR_ADDR) +#define bfin_write_MDMA1_D1_CURR_ADDR(val) bfin_write32(MDMA1_D1_CURR_ADDR,val) +#define bfin_read_MDMA1_D1_CURR_X_COUNT() bfin_read16(MDMA1_D1_CURR_X_COUNT) +#define bfin_write_MDMA1_D1_CURR_X_COUNT(val) bfin_write16(MDMA1_D1_CURR_X_COUNT,val) +#define bfin_read_MDMA1_D1_CURR_Y_COUNT() bfin_read16(MDMA1_D1_CURR_Y_COUNT) +#define bfin_write_MDMA1_D1_CURR_Y_COUNT(val) bfin_write16(MDMA1_D1_CURR_Y_COUNT,val) +#define bfin_read_MDMA1_D1_IRQ_STATUS() bfin_read16(MDMA1_D1_IRQ_STATUS) +#define bfin_write_MDMA1_D1_IRQ_STATUS(val) bfin_write16(MDMA1_D1_IRQ_STATUS,val) +#define bfin_read_MDMA1_D1_PERIPHERAL_MAP() bfin_read16(MDMA1_D1_PERIPHERAL_MAP) +#define bfin_write_MDMA1_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D1_PERIPHERAL_MAP,val) +#define bfin_read_MDMA1_S1_CONFIG() bfin_read16(MDMA1_S1_CONFIG) +#define bfin_write_MDMA1_S1_CONFIG(val) bfin_write16(MDMA1_S1_CONFIG,val) +#define bfin_read_MDMA1_S1_NEXT_DESC_PTR() bfin_read32(MDMA1_S1_NEXT_DESC_PTR) +#define bfin_write_MDMA1_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA1_S1_NEXT_DESC_PTR,val) +#define bfin_read_MDMA1_S1_START_ADDR() bfin_read32(MDMA1_S1_START_ADDR) +#define bfin_write_MDMA1_S1_START_ADDR(val) bfin_write32(MDMA1_S1_START_ADDR,val) +#define bfin_read_MDMA1_S1_X_COUNT() bfin_read16(MDMA1_S1_X_COUNT) +#define bfin_write_MDMA1_S1_X_COUNT(val) bfin_write16(MDMA1_S1_X_COUNT,val) +#define bfin_read_MDMA1_S1_Y_COUNT() bfin_read16(MDMA1_S1_Y_COUNT) +#define bfin_write_MDMA1_S1_Y_COUNT(val) bfin_write16(MDMA1_S1_Y_COUNT,val) +#define bfin_read_MDMA1_S1_X_MODIFY() bfin_read16(MDMA1_S1_X_MODIFY) +#define bfin_write_MDMA1_S1_X_MODIFY(val) bfin_write16(MDMA1_S1_X_MODIFY,val) +#define bfin_read_MDMA1_S1_Y_MODIFY() bfin_read16(MDMA1_S1_Y_MODIFY) +#define bfin_write_MDMA1_S1_Y_MODIFY(val) bfin_write16(MDMA1_S1_Y_MODIFY,val) +#define bfin_read_MDMA1_S1_CURR_DESC_PTR() bfin_read32(MDMA1_S1_CURR_DESC_PTR) +#define bfin_write_MDMA1_S1_CURR_DESC_PTR(val) bfin_write32(MDMA1_S1_CURR_DESC_PTR,val) +#define bfin_read_MDMA1_S1_CURR_ADDR() bfin_read32(MDMA1_S1_CURR_ADDR) +#define bfin_write_MDMA1_S1_CURR_ADDR(val) bfin_write32(MDMA1_S1_CURR_ADDR,val) +#define bfin_read_MDMA1_S1_CURR_X_COUNT() bfin_read16(MDMA1_S1_CURR_X_COUNT) +#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT,val) +#define bfin_read_MDMA1_S1_CURR_Y_COUNT() bfin_read16(MDMA1_S1_CURR_Y_COUNT) +#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT,val) +#define bfin_read_MDMA1_S1_IRQ_STATUS() bfin_read16(MDMA1_S1_IRQ_STATUS) +#define bfin_write_MDMA1_S1_IRQ_STATUS(val) 
bfin_write16(MDMA1_S1_IRQ_STATUS,val) +#define bfin_read_MDMA1_S1_PERIPHERAL_MAP() bfin_read16(MDMA1_S1_PERIPHERAL_MAP) +#define bfin_write_MDMA1_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S1_PERIPHERAL_MAP,val) +/* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */ +#define bfin_read_DMA2_0_CONFIG() bfin_read16(DMA2_0_CONFIG) +#define bfin_write_DMA2_0_CONFIG(val) bfin_write16(DMA2_0_CONFIG,val) +#define bfin_read_DMA2_0_NEXT_DESC_PTR() bfin_read32(DMA2_0_NEXT_DESC_PTR) +#define bfin_write_DMA2_0_NEXT_DESC_PTR(val) bfin_write32(DMA2_0_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_0_START_ADDR() bfin_read32(DMA2_0_START_ADDR) +#define bfin_write_DMA2_0_START_ADDR(val) bfin_write32(DMA2_0_START_ADDR,val) +#define bfin_read_DMA2_0_X_COUNT() bfin_read16(DMA2_0_X_COUNT) +#define bfin_write_DMA2_0_X_COUNT(val) bfin_write16(DMA2_0_X_COUNT,val) +#define bfin_read_DMA2_0_Y_COUNT() bfin_read16(DMA2_0_Y_COUNT) +#define bfin_write_DMA2_0_Y_COUNT(val) bfin_write16(DMA2_0_Y_COUNT,val) +#define bfin_read_DMA2_0_X_MODIFY() bfin_read16(DMA2_0_X_MODIFY) +#define bfin_write_DMA2_0_X_MODIFY(val) bfin_write16(DMA2_0_X_MODIFY,val) +#define bfin_read_DMA2_0_Y_MODIFY() bfin_read16(DMA2_0_Y_MODIFY) +#define bfin_write_DMA2_0_Y_MODIFY(val) bfin_write16(DMA2_0_Y_MODIFY,val) +#define bfin_read_DMA2_0_CURR_DESC_PTR() bfin_read32(DMA2_0_CURR_DESC_PTR) +#define bfin_write_DMA2_0_CURR_DESC_PTR(val) bfin_write32(DMA2_0_CURR_DESC_PTR,val) +#define bfin_read_DMA2_0_CURR_ADDR() bfin_read32(DMA2_0_CURR_ADDR) +#define bfin_write_DMA2_0_CURR_ADDR(val) bfin_write32(DMA2_0_CURR_ADDR,val) +#define bfin_read_DMA2_0_CURR_X_COUNT() bfin_read16(DMA2_0_CURR_X_COUNT) +#define bfin_write_DMA2_0_CURR_X_COUNT(val) bfin_write16(DMA2_0_CURR_X_COUNT,val) +#define bfin_read_DMA2_0_CURR_Y_COUNT() bfin_read16(DMA2_0_CURR_Y_COUNT) +#define bfin_write_DMA2_0_CURR_Y_COUNT(val) bfin_write16(DMA2_0_CURR_Y_COUNT,val) +#define bfin_read_DMA2_0_IRQ_STATUS() bfin_read16(DMA2_0_IRQ_STATUS) +#define bfin_write_DMA2_0_IRQ_STATUS(val) bfin_write16(DMA2_0_IRQ_STATUS,val) +#define bfin_read_DMA2_0_PERIPHERAL_MAP() bfin_read16(DMA2_0_PERIPHERAL_MAP) +#define bfin_write_DMA2_0_PERIPHERAL_MAP(val) bfin_write16(DMA2_0_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_1_CONFIG() bfin_read16(DMA2_1_CONFIG) +#define bfin_write_DMA2_1_CONFIG(val) bfin_write16(DMA2_1_CONFIG,val) +#define bfin_read_DMA2_1_NEXT_DESC_PTR() bfin_read32(DMA2_1_NEXT_DESC_PTR) +#define bfin_write_DMA2_1_NEXT_DESC_PTR(val) bfin_write32(DMA2_1_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_1_START_ADDR() bfin_read32(DMA2_1_START_ADDR) +#define bfin_write_DMA2_1_START_ADDR(val) bfin_write32(DMA2_1_START_ADDR,val) +#define bfin_read_DMA2_1_X_COUNT() bfin_read16(DMA2_1_X_COUNT) +#define bfin_write_DMA2_1_X_COUNT(val) bfin_write16(DMA2_1_X_COUNT,val) +#define bfin_read_DMA2_1_Y_COUNT() bfin_read16(DMA2_1_Y_COUNT) +#define bfin_write_DMA2_1_Y_COUNT(val) bfin_write16(DMA2_1_Y_COUNT,val) +#define bfin_read_DMA2_1_X_MODIFY() bfin_read16(DMA2_1_X_MODIFY) +#define bfin_write_DMA2_1_X_MODIFY(val) bfin_write16(DMA2_1_X_MODIFY,val) +#define bfin_read_DMA2_1_Y_MODIFY() bfin_read16(DMA2_1_Y_MODIFY) +#define bfin_write_DMA2_1_Y_MODIFY(val) bfin_write16(DMA2_1_Y_MODIFY,val) +#define bfin_read_DMA2_1_CURR_DESC_PTR() bfin_read32(DMA2_1_CURR_DESC_PTR) +#define bfin_write_DMA2_1_CURR_DESC_PTR(val) bfin_write32(DMA2_1_CURR_DESC_PTR,val) +#define bfin_read_DMA2_1_CURR_ADDR() bfin_read32(DMA2_1_CURR_ADDR) +#define bfin_write_DMA2_1_CURR_ADDR(val) bfin_write32(DMA2_1_CURR_ADDR,val) +#define bfin_read_DMA2_1_CURR_X_COUNT() 
bfin_read16(DMA2_1_CURR_X_COUNT) +#define bfin_write_DMA2_1_CURR_X_COUNT(val) bfin_write16(DMA2_1_CURR_X_COUNT,val) +#define bfin_read_DMA2_1_CURR_Y_COUNT() bfin_read16(DMA2_1_CURR_Y_COUNT) +#define bfin_write_DMA2_1_CURR_Y_COUNT(val) bfin_write16(DMA2_1_CURR_Y_COUNT,val) +#define bfin_read_DMA2_1_IRQ_STATUS() bfin_read16(DMA2_1_IRQ_STATUS) +#define bfin_write_DMA2_1_IRQ_STATUS(val) bfin_write16(DMA2_1_IRQ_STATUS,val) +#define bfin_read_DMA2_1_PERIPHERAL_MAP() bfin_read16(DMA2_1_PERIPHERAL_MAP) +#define bfin_write_DMA2_1_PERIPHERAL_MAP(val) bfin_write16(DMA2_1_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_2_CONFIG() bfin_read16(DMA2_2_CONFIG) +#define bfin_write_DMA2_2_CONFIG(val) bfin_write16(DMA2_2_CONFIG,val) +#define bfin_read_DMA2_2_NEXT_DESC_PTR() bfin_read32(DMA2_2_NEXT_DESC_PTR) +#define bfin_write_DMA2_2_NEXT_DESC_PTR(val) bfin_write32(DMA2_2_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_2_START_ADDR() bfin_read32(DMA2_2_START_ADDR) +#define bfin_write_DMA2_2_START_ADDR(val) bfin_write32(DMA2_2_START_ADDR,val) +#define bfin_read_DMA2_2_X_COUNT() bfin_read16(DMA2_2_X_COUNT) +#define bfin_write_DMA2_2_X_COUNT(val) bfin_write16(DMA2_2_X_COUNT,val) +#define bfin_read_DMA2_2_Y_COUNT() bfin_read16(DMA2_2_Y_COUNT) +#define bfin_write_DMA2_2_Y_COUNT(val) bfin_write16(DMA2_2_Y_COUNT,val) +#define bfin_read_DMA2_2_X_MODIFY() bfin_read16(DMA2_2_X_MODIFY) +#define bfin_write_DMA2_2_X_MODIFY(val) bfin_write16(DMA2_2_X_MODIFY,val) +#define bfin_read_DMA2_2_Y_MODIFY() bfin_read16(DMA2_2_Y_MODIFY) +#define bfin_write_DMA2_2_Y_MODIFY(val) bfin_write16(DMA2_2_Y_MODIFY,val) +#define bfin_read_DMA2_2_CURR_DESC_PTR() bfin_read32(DMA2_2_CURR_DESC_PTR) +#define bfin_write_DMA2_2_CURR_DESC_PTR(val) bfin_write32(DMA2_2_CURR_DESC_PTR,val) +#define bfin_read_DMA2_2_CURR_ADDR() bfin_read32(DMA2_2_CURR_ADDR) +#define bfin_write_DMA2_2_CURR_ADDR(val) bfin_write32(DMA2_2_CURR_ADDR,val) +#define bfin_read_DMA2_2_CURR_X_COUNT() bfin_read16(DMA2_2_CURR_X_COUNT) +#define bfin_write_DMA2_2_CURR_X_COUNT(val) bfin_write16(DMA2_2_CURR_X_COUNT,val) +#define bfin_read_DMA2_2_CURR_Y_COUNT() bfin_read16(DMA2_2_CURR_Y_COUNT) +#define bfin_write_DMA2_2_CURR_Y_COUNT(val) bfin_write16(DMA2_2_CURR_Y_COUNT,val) +#define bfin_read_DMA2_2_IRQ_STATUS() bfin_read16(DMA2_2_IRQ_STATUS) +#define bfin_write_DMA2_2_IRQ_STATUS(val) bfin_write16(DMA2_2_IRQ_STATUS,val) +#define bfin_read_DMA2_2_PERIPHERAL_MAP() bfin_read16(DMA2_2_PERIPHERAL_MAP) +#define bfin_write_DMA2_2_PERIPHERAL_MAP(val) bfin_write16(DMA2_2_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_3_CONFIG() bfin_read16(DMA2_3_CONFIG) +#define bfin_write_DMA2_3_CONFIG(val) bfin_write16(DMA2_3_CONFIG,val) +#define bfin_read_DMA2_3_NEXT_DESC_PTR() bfin_read32(DMA2_3_NEXT_DESC_PTR) +#define bfin_write_DMA2_3_NEXT_DESC_PTR(val) bfin_write32(DMA2_3_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_3_START_ADDR() bfin_read32(DMA2_3_START_ADDR) +#define bfin_write_DMA2_3_START_ADDR(val) bfin_write32(DMA2_3_START_ADDR,val) +#define bfin_read_DMA2_3_X_COUNT() bfin_read16(DMA2_3_X_COUNT) +#define bfin_write_DMA2_3_X_COUNT(val) bfin_write16(DMA2_3_X_COUNT,val) +#define bfin_read_DMA2_3_Y_COUNT() bfin_read16(DMA2_3_Y_COUNT) +#define bfin_write_DMA2_3_Y_COUNT(val) bfin_write16(DMA2_3_Y_COUNT,val) +#define bfin_read_DMA2_3_X_MODIFY() bfin_read16(DMA2_3_X_MODIFY) +#define bfin_write_DMA2_3_X_MODIFY(val) bfin_write16(DMA2_3_X_MODIFY,val) +#define bfin_read_DMA2_3_Y_MODIFY() bfin_read16(DMA2_3_Y_MODIFY) +#define bfin_write_DMA2_3_Y_MODIFY(val) bfin_write16(DMA2_3_Y_MODIFY,val) +#define 
bfin_read_DMA2_3_CURR_DESC_PTR() bfin_read32(DMA2_3_CURR_DESC_PTR) +#define bfin_write_DMA2_3_CURR_DESC_PTR(val) bfin_write32(DMA2_3_CURR_DESC_PTR,val) +#define bfin_read_DMA2_3_CURR_ADDR() bfin_read32(DMA2_3_CURR_ADDR) +#define bfin_write_DMA2_3_CURR_ADDR(val) bfin_write32(DMA2_3_CURR_ADDR,val) +#define bfin_read_DMA2_3_CURR_X_COUNT() bfin_read16(DMA2_3_CURR_X_COUNT) +#define bfin_write_DMA2_3_CURR_X_COUNT(val) bfin_write16(DMA2_3_CURR_X_COUNT,val) +#define bfin_read_DMA2_3_CURR_Y_COUNT() bfin_read16(DMA2_3_CURR_Y_COUNT) +#define bfin_write_DMA2_3_CURR_Y_COUNT(val) bfin_write16(DMA2_3_CURR_Y_COUNT,val) +#define bfin_read_DMA2_3_IRQ_STATUS() bfin_read16(DMA2_3_IRQ_STATUS) +#define bfin_write_DMA2_3_IRQ_STATUS(val) bfin_write16(DMA2_3_IRQ_STATUS,val) +#define bfin_read_DMA2_3_PERIPHERAL_MAP() bfin_read16(DMA2_3_PERIPHERAL_MAP) +#define bfin_write_DMA2_3_PERIPHERAL_MAP(val) bfin_write16(DMA2_3_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_4_CONFIG() bfin_read16(DMA2_4_CONFIG) +#define bfin_write_DMA2_4_CONFIG(val) bfin_write16(DMA2_4_CONFIG,val) +#define bfin_read_DMA2_4_NEXT_DESC_PTR() bfin_read32(DMA2_4_NEXT_DESC_PTR) +#define bfin_write_DMA2_4_NEXT_DESC_PTR(val) bfin_write32(DMA2_4_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_4_START_ADDR() bfin_read32(DMA2_4_START_ADDR) +#define bfin_write_DMA2_4_START_ADDR(val) bfin_write32(DMA2_4_START_ADDR,val) +#define bfin_read_DMA2_4_X_COUNT() bfin_read16(DMA2_4_X_COUNT) +#define bfin_write_DMA2_4_X_COUNT(val) bfin_write16(DMA2_4_X_COUNT,val) +#define bfin_read_DMA2_4_Y_COUNT() bfin_read16(DMA2_4_Y_COUNT) +#define bfin_write_DMA2_4_Y_COUNT(val) bfin_write16(DMA2_4_Y_COUNT,val) +#define bfin_read_DMA2_4_X_MODIFY() bfin_read16(DMA2_4_X_MODIFY) +#define bfin_write_DMA2_4_X_MODIFY(val) bfin_write16(DMA2_4_X_MODIFY,val) +#define bfin_read_DMA2_4_Y_MODIFY() bfin_read16(DMA2_4_Y_MODIFY) +#define bfin_write_DMA2_4_Y_MODIFY(val) bfin_write16(DMA2_4_Y_MODIFY,val) +#define bfin_read_DMA2_4_CURR_DESC_PTR() bfin_read32(DMA2_4_CURR_DESC_PTR) +#define bfin_write_DMA2_4_CURR_DESC_PTR(val) bfin_write32(DMA2_4_CURR_DESC_PTR,val) +#define bfin_read_DMA2_4_CURR_ADDR() bfin_read32(DMA2_4_CURR_ADDR) +#define bfin_write_DMA2_4_CURR_ADDR(val) bfin_write32(DMA2_4_CURR_ADDR,val) +#define bfin_read_DMA2_4_CURR_X_COUNT() bfin_read16(DMA2_4_CURR_X_COUNT) +#define bfin_write_DMA2_4_CURR_X_COUNT(val) bfin_write16(DMA2_4_CURR_X_COUNT,val) +#define bfin_read_DMA2_4_CURR_Y_COUNT() bfin_read16(DMA2_4_CURR_Y_COUNT) +#define bfin_write_DMA2_4_CURR_Y_COUNT(val) bfin_write16(DMA2_4_CURR_Y_COUNT,val) +#define bfin_read_DMA2_4_IRQ_STATUS() bfin_read16(DMA2_4_IRQ_STATUS) +#define bfin_write_DMA2_4_IRQ_STATUS(val) bfin_write16(DMA2_4_IRQ_STATUS,val) +#define bfin_read_DMA2_4_PERIPHERAL_MAP() bfin_read16(DMA2_4_PERIPHERAL_MAP) +#define bfin_write_DMA2_4_PERIPHERAL_MAP(val) bfin_write16(DMA2_4_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_5_CONFIG() bfin_read16(DMA2_5_CONFIG) +#define bfin_write_DMA2_5_CONFIG(val) bfin_write16(DMA2_5_CONFIG,val) +#define bfin_read_DMA2_5_NEXT_DESC_PTR() bfin_read32(DMA2_5_NEXT_DESC_PTR) +#define bfin_write_DMA2_5_NEXT_DESC_PTR(val) bfin_write32(DMA2_5_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_5_START_ADDR() bfin_read32(DMA2_5_START_ADDR) +#define bfin_write_DMA2_5_START_ADDR(val) bfin_write32(DMA2_5_START_ADDR,val) +#define bfin_read_DMA2_5_X_COUNT() bfin_read16(DMA2_5_X_COUNT) +#define bfin_write_DMA2_5_X_COUNT(val) bfin_write16(DMA2_5_X_COUNT,val) +#define bfin_read_DMA2_5_Y_COUNT() bfin_read16(DMA2_5_Y_COUNT) +#define bfin_write_DMA2_5_Y_COUNT(val) 
bfin_write16(DMA2_5_Y_COUNT,val) +#define bfin_read_DMA2_5_X_MODIFY() bfin_read16(DMA2_5_X_MODIFY) +#define bfin_write_DMA2_5_X_MODIFY(val) bfin_write16(DMA2_5_X_MODIFY,val) +#define bfin_read_DMA2_5_Y_MODIFY() bfin_read16(DMA2_5_Y_MODIFY) +#define bfin_write_DMA2_5_Y_MODIFY(val) bfin_write16(DMA2_5_Y_MODIFY,val) +#define bfin_read_DMA2_5_CURR_DESC_PTR() bfin_read32(DMA2_5_CURR_DESC_PTR) +#define bfin_write_DMA2_5_CURR_DESC_PTR(val) bfin_write32(DMA2_5_CURR_DESC_PTR,val) +#define bfin_read_DMA2_5_CURR_ADDR() bfin_read32(DMA2_5_CURR_ADDR) +#define bfin_write_DMA2_5_CURR_ADDR(val) bfin_write32(DMA2_5_CURR_ADDR,val) +#define bfin_read_DMA2_5_CURR_X_COUNT() bfin_read16(DMA2_5_CURR_X_COUNT) +#define bfin_write_DMA2_5_CURR_X_COUNT(val) bfin_write16(DMA2_5_CURR_X_COUNT,val) +#define bfin_read_DMA2_5_CURR_Y_COUNT() bfin_read16(DMA2_5_CURR_Y_COUNT) +#define bfin_write_DMA2_5_CURR_Y_COUNT(val) bfin_write16(DMA2_5_CURR_Y_COUNT,val) +#define bfin_read_DMA2_5_IRQ_STATUS() bfin_read16(DMA2_5_IRQ_STATUS) +#define bfin_write_DMA2_5_IRQ_STATUS(val) bfin_write16(DMA2_5_IRQ_STATUS,val) +#define bfin_read_DMA2_5_PERIPHERAL_MAP() bfin_read16(DMA2_5_PERIPHERAL_MAP) +#define bfin_write_DMA2_5_PERIPHERAL_MAP(val) bfin_write16(DMA2_5_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_6_CONFIG() bfin_read16(DMA2_6_CONFIG) +#define bfin_write_DMA2_6_CONFIG(val) bfin_write16(DMA2_6_CONFIG,val) +#define bfin_read_DMA2_6_NEXT_DESC_PTR() bfin_read32(DMA2_6_NEXT_DESC_PTR) +#define bfin_write_DMA2_6_NEXT_DESC_PTR(val) bfin_write32(DMA2_6_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_6_START_ADDR() bfin_read32(DMA2_6_START_ADDR) +#define bfin_write_DMA2_6_START_ADDR(val) bfin_write32(DMA2_6_START_ADDR,val) +#define bfin_read_DMA2_6_X_COUNT() bfin_read16(DMA2_6_X_COUNT) +#define bfin_write_DMA2_6_X_COUNT(val) bfin_write16(DMA2_6_X_COUNT,val) +#define bfin_read_DMA2_6_Y_COUNT() bfin_read16(DMA2_6_Y_COUNT) +#define bfin_write_DMA2_6_Y_COUNT(val) bfin_write16(DMA2_6_Y_COUNT,val) +#define bfin_read_DMA2_6_X_MODIFY() bfin_read16(DMA2_6_X_MODIFY) +#define bfin_write_DMA2_6_X_MODIFY(val) bfin_write16(DMA2_6_X_MODIFY,val) +#define bfin_read_DMA2_6_Y_MODIFY() bfin_read16(DMA2_6_Y_MODIFY) +#define bfin_write_DMA2_6_Y_MODIFY(val) bfin_write16(DMA2_6_Y_MODIFY,val) +#define bfin_read_DMA2_6_CURR_DESC_PTR() bfin_read32(DMA2_6_CURR_DESC_PTR) +#define bfin_write_DMA2_6_CURR_DESC_PTR(val) bfin_write32(DMA2_6_CURR_DESC_PTR,val) +#define bfin_read_DMA2_6_CURR_ADDR() bfin_read32(DMA2_6_CURR_ADDR) +#define bfin_write_DMA2_6_CURR_ADDR(val) bfin_write32(DMA2_6_CURR_ADDR,val) +#define bfin_read_DMA2_6_CURR_X_COUNT() bfin_read16(DMA2_6_CURR_X_COUNT) +#define bfin_write_DMA2_6_CURR_X_COUNT(val) bfin_write16(DMA2_6_CURR_X_COUNT,val) +#define bfin_read_DMA2_6_CURR_Y_COUNT() bfin_read16(DMA2_6_CURR_Y_COUNT) +#define bfin_write_DMA2_6_CURR_Y_COUNT(val) bfin_write16(DMA2_6_CURR_Y_COUNT,val) +#define bfin_read_DMA2_6_IRQ_STATUS() bfin_read16(DMA2_6_IRQ_STATUS) +#define bfin_write_DMA2_6_IRQ_STATUS(val) bfin_write16(DMA2_6_IRQ_STATUS,val) +#define bfin_read_DMA2_6_PERIPHERAL_MAP() bfin_read16(DMA2_6_PERIPHERAL_MAP) +#define bfin_write_DMA2_6_PERIPHERAL_MAP(val) bfin_write16(DMA2_6_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_7_CONFIG() bfin_read16(DMA2_7_CONFIG) +#define bfin_write_DMA2_7_CONFIG(val) bfin_write16(DMA2_7_CONFIG,val) +#define bfin_read_DMA2_7_NEXT_DESC_PTR() bfin_read32(DMA2_7_NEXT_DESC_PTR) +#define bfin_write_DMA2_7_NEXT_DESC_PTR(val) bfin_write32(DMA2_7_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_7_START_ADDR() bfin_read32(DMA2_7_START_ADDR) +#define 
bfin_write_DMA2_7_START_ADDR(val) bfin_write32(DMA2_7_START_ADDR,val) +#define bfin_read_DMA2_7_X_COUNT() bfin_read16(DMA2_7_X_COUNT) +#define bfin_write_DMA2_7_X_COUNT(val) bfin_write16(DMA2_7_X_COUNT,val) +#define bfin_read_DMA2_7_Y_COUNT() bfin_read16(DMA2_7_Y_COUNT) +#define bfin_write_DMA2_7_Y_COUNT(val) bfin_write16(DMA2_7_Y_COUNT,val) +#define bfin_read_DMA2_7_X_MODIFY() bfin_read16(DMA2_7_X_MODIFY) +#define bfin_write_DMA2_7_X_MODIFY(val) bfin_write16(DMA2_7_X_MODIFY,val) +#define bfin_read_DMA2_7_Y_MODIFY() bfin_read16(DMA2_7_Y_MODIFY) +#define bfin_write_DMA2_7_Y_MODIFY(val) bfin_write16(DMA2_7_Y_MODIFY,val) +#define bfin_read_DMA2_7_CURR_DESC_PTR() bfin_read32(DMA2_7_CURR_DESC_PTR) +#define bfin_write_DMA2_7_CURR_DESC_PTR(val) bfin_write32(DMA2_7_CURR_DESC_PTR,val) +#define bfin_read_DMA2_7_CURR_ADDR() bfin_read32(DMA2_7_CURR_ADDR) +#define bfin_write_DMA2_7_CURR_ADDR(val) bfin_write32(DMA2_7_CURR_ADDR,val) +#define bfin_read_DMA2_7_CURR_X_COUNT() bfin_read16(DMA2_7_CURR_X_COUNT) +#define bfin_write_DMA2_7_CURR_X_COUNT(val) bfin_write16(DMA2_7_CURR_X_COUNT,val) +#define bfin_read_DMA2_7_CURR_Y_COUNT() bfin_read16(DMA2_7_CURR_Y_COUNT) +#define bfin_write_DMA2_7_CURR_Y_COUNT(val) bfin_write16(DMA2_7_CURR_Y_COUNT,val) +#define bfin_read_DMA2_7_IRQ_STATUS() bfin_read16(DMA2_7_IRQ_STATUS) +#define bfin_write_DMA2_7_IRQ_STATUS(val) bfin_write16(DMA2_7_IRQ_STATUS,val) +#define bfin_read_DMA2_7_PERIPHERAL_MAP() bfin_read16(DMA2_7_PERIPHERAL_MAP) +#define bfin_write_DMA2_7_PERIPHERAL_MAP(val) bfin_write16(DMA2_7_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_8_CONFIG() bfin_read16(DMA2_8_CONFIG) +#define bfin_write_DMA2_8_CONFIG(val) bfin_write16(DMA2_8_CONFIG,val) +#define bfin_read_DMA2_8_NEXT_DESC_PTR() bfin_read32(DMA2_8_NEXT_DESC_PTR) +#define bfin_write_DMA2_8_NEXT_DESC_PTR(val) bfin_write32(DMA2_8_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_8_START_ADDR() bfin_read32(DMA2_8_START_ADDR) +#define bfin_write_DMA2_8_START_ADDR(val) bfin_write32(DMA2_8_START_ADDR,val) +#define bfin_read_DMA2_8_X_COUNT() bfin_read16(DMA2_8_X_COUNT) +#define bfin_write_DMA2_8_X_COUNT(val) bfin_write16(DMA2_8_X_COUNT,val) +#define bfin_read_DMA2_8_Y_COUNT() bfin_read16(DMA2_8_Y_COUNT) +#define bfin_write_DMA2_8_Y_COUNT(val) bfin_write16(DMA2_8_Y_COUNT,val) +#define bfin_read_DMA2_8_X_MODIFY() bfin_read16(DMA2_8_X_MODIFY) +#define bfin_write_DMA2_8_X_MODIFY(val) bfin_write16(DMA2_8_X_MODIFY,val) +#define bfin_read_DMA2_8_Y_MODIFY() bfin_read16(DMA2_8_Y_MODIFY) +#define bfin_write_DMA2_8_Y_MODIFY(val) bfin_write16(DMA2_8_Y_MODIFY,val) +#define bfin_read_DMA2_8_CURR_DESC_PTR() bfin_read32(DMA2_8_CURR_DESC_PTR) +#define bfin_write_DMA2_8_CURR_DESC_PTR(val) bfin_write32(DMA2_8_CURR_DESC_PTR,val) +#define bfin_read_DMA2_8_CURR_ADDR() bfin_read32(DMA2_8_CURR_ADDR) +#define bfin_write_DMA2_8_CURR_ADDR(val) bfin_write32(DMA2_8_CURR_ADDR,val) +#define bfin_read_DMA2_8_CURR_X_COUNT() bfin_read16(DMA2_8_CURR_X_COUNT) +#define bfin_write_DMA2_8_CURR_X_COUNT(val) bfin_write16(DMA2_8_CURR_X_COUNT,val) +#define bfin_read_DMA2_8_CURR_Y_COUNT() bfin_read16(DMA2_8_CURR_Y_COUNT) +#define bfin_write_DMA2_8_CURR_Y_COUNT(val) bfin_write16(DMA2_8_CURR_Y_COUNT,val) +#define bfin_read_DMA2_8_IRQ_STATUS() bfin_read16(DMA2_8_IRQ_STATUS) +#define bfin_write_DMA2_8_IRQ_STATUS(val) bfin_write16(DMA2_8_IRQ_STATUS,val) +#define bfin_read_DMA2_8_PERIPHERAL_MAP() bfin_read16(DMA2_8_PERIPHERAL_MAP) +#define bfin_write_DMA2_8_PERIPHERAL_MAP(val) bfin_write16(DMA2_8_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_9_CONFIG() bfin_read16(DMA2_9_CONFIG) 
+#define bfin_write_DMA2_9_CONFIG(val) bfin_write16(DMA2_9_CONFIG,val) +#define bfin_read_DMA2_9_NEXT_DESC_PTR() bfin_read32(DMA2_9_NEXT_DESC_PTR) +#define bfin_write_DMA2_9_NEXT_DESC_PTR(val) bfin_write32(DMA2_9_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_9_START_ADDR() bfin_read32(DMA2_9_START_ADDR) +#define bfin_write_DMA2_9_START_ADDR(val) bfin_write32(DMA2_9_START_ADDR,val) +#define bfin_read_DMA2_9_X_COUNT() bfin_read16(DMA2_9_X_COUNT) +#define bfin_write_DMA2_9_X_COUNT(val) bfin_write16(DMA2_9_X_COUNT,val) +#define bfin_read_DMA2_9_Y_COUNT() bfin_read16(DMA2_9_Y_COUNT) +#define bfin_write_DMA2_9_Y_COUNT(val) bfin_write16(DMA2_9_Y_COUNT,val) +#define bfin_read_DMA2_9_X_MODIFY() bfin_read16(DMA2_9_X_MODIFY) +#define bfin_write_DMA2_9_X_MODIFY(val) bfin_write16(DMA2_9_X_MODIFY,val) +#define bfin_read_DMA2_9_Y_MODIFY() bfin_read16(DMA2_9_Y_MODIFY) +#define bfin_write_DMA2_9_Y_MODIFY(val) bfin_write16(DMA2_9_Y_MODIFY,val) +#define bfin_read_DMA2_9_CURR_DESC_PTR() bfin_read32(DMA2_9_CURR_DESC_PTR) +#define bfin_write_DMA2_9_CURR_DESC_PTR(val) bfin_write32(DMA2_9_CURR_DESC_PTR,val) +#define bfin_read_DMA2_9_CURR_ADDR() bfin_read32(DMA2_9_CURR_ADDR) +#define bfin_write_DMA2_9_CURR_ADDR(val) bfin_write32(DMA2_9_CURR_ADDR,val) +#define bfin_read_DMA2_9_CURR_X_COUNT() bfin_read16(DMA2_9_CURR_X_COUNT) +#define bfin_write_DMA2_9_CURR_X_COUNT(val) bfin_write16(DMA2_9_CURR_X_COUNT,val) +#define bfin_read_DMA2_9_CURR_Y_COUNT() bfin_read16(DMA2_9_CURR_Y_COUNT) +#define bfin_write_DMA2_9_CURR_Y_COUNT(val) bfin_write16(DMA2_9_CURR_Y_COUNT,val) +#define bfin_read_DMA2_9_IRQ_STATUS() bfin_read16(DMA2_9_IRQ_STATUS) +#define bfin_write_DMA2_9_IRQ_STATUS(val) bfin_write16(DMA2_9_IRQ_STATUS,val) +#define bfin_read_DMA2_9_PERIPHERAL_MAP() bfin_read16(DMA2_9_PERIPHERAL_MAP) +#define bfin_write_DMA2_9_PERIPHERAL_MAP(val) bfin_write16(DMA2_9_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_10_CONFIG() bfin_read16(DMA2_10_CONFIG) +#define bfin_write_DMA2_10_CONFIG(val) bfin_write16(DMA2_10_CONFIG,val) +#define bfin_read_DMA2_10_NEXT_DESC_PTR() bfin_read32(DMA2_10_NEXT_DESC_PTR) +#define bfin_write_DMA2_10_NEXT_DESC_PTR(val) bfin_write32(DMA2_10_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_10_START_ADDR() bfin_read32(DMA2_10_START_ADDR) +#define bfin_write_DMA2_10_START_ADDR(val) bfin_write32(DMA2_10_START_ADDR,val) +#define bfin_read_DMA2_10_X_COUNT() bfin_read16(DMA2_10_X_COUNT) +#define bfin_write_DMA2_10_X_COUNT(val) bfin_write16(DMA2_10_X_COUNT,val) +#define bfin_read_DMA2_10_Y_COUNT() bfin_read16(DMA2_10_Y_COUNT) +#define bfin_write_DMA2_10_Y_COUNT(val) bfin_write16(DMA2_10_Y_COUNT,val) +#define bfin_read_DMA2_10_X_MODIFY() bfin_read16(DMA2_10_X_MODIFY) +#define bfin_write_DMA2_10_X_MODIFY(val) bfin_write16(DMA2_10_X_MODIFY,val) +#define bfin_read_DMA2_10_Y_MODIFY() bfin_read16(DMA2_10_Y_MODIFY) +#define bfin_write_DMA2_10_Y_MODIFY(val) bfin_write16(DMA2_10_Y_MODIFY,val) +#define bfin_read_DMA2_10_CURR_DESC_PTR() bfin_read32(DMA2_10_CURR_DESC_PTR) +#define bfin_write_DMA2_10_CURR_DESC_PTR(val) bfin_write32(DMA2_10_CURR_DESC_PTR,val) +#define bfin_read_DMA2_10_CURR_ADDR() bfin_read32(DMA2_10_CURR_ADDR) +#define bfin_write_DMA2_10_CURR_ADDR(val) bfin_write32(DMA2_10_CURR_ADDR,val) +#define bfin_read_DMA2_10_CURR_X_COUNT() bfin_read16(DMA2_10_CURR_X_COUNT) +#define bfin_write_DMA2_10_CURR_X_COUNT(val) bfin_write16(DMA2_10_CURR_X_COUNT,val) +#define bfin_read_DMA2_10_CURR_Y_COUNT() bfin_read16(DMA2_10_CURR_Y_COUNT) +#define bfin_write_DMA2_10_CURR_Y_COUNT(val) bfin_write16(DMA2_10_CURR_Y_COUNT,val) +#define 
bfin_read_DMA2_10_IRQ_STATUS() bfin_read16(DMA2_10_IRQ_STATUS) +#define bfin_write_DMA2_10_IRQ_STATUS(val) bfin_write16(DMA2_10_IRQ_STATUS,val) +#define bfin_read_DMA2_10_PERIPHERAL_MAP() bfin_read16(DMA2_10_PERIPHERAL_MAP) +#define bfin_write_DMA2_10_PERIPHERAL_MAP(val) bfin_write16(DMA2_10_PERIPHERAL_MAP,val) +#define bfin_read_DMA2_11_CONFIG() bfin_read16(DMA2_11_CONFIG) +#define bfin_write_DMA2_11_CONFIG(val) bfin_write16(DMA2_11_CONFIG,val) +#define bfin_read_DMA2_11_NEXT_DESC_PTR() bfin_read32(DMA2_11_NEXT_DESC_PTR) +#define bfin_write_DMA2_11_NEXT_DESC_PTR(val) bfin_write32(DMA2_11_NEXT_DESC_PTR,val) +#define bfin_read_DMA2_11_START_ADDR() bfin_read32(DMA2_11_START_ADDR) +#define bfin_write_DMA2_11_START_ADDR(val) bfin_write32(DMA2_11_START_ADDR,val) +#define bfin_read_DMA2_11_X_COUNT() bfin_read16(DMA2_11_X_COUNT) +#define bfin_write_DMA2_11_X_COUNT(val) bfin_write16(DMA2_11_X_COUNT,val) +#define bfin_read_DMA2_11_Y_COUNT() bfin_read16(DMA2_11_Y_COUNT) +#define bfin_write_DMA2_11_Y_COUNT(val) bfin_write16(DMA2_11_Y_COUNT,val) +#define bfin_read_DMA2_11_X_MODIFY() bfin_read16(DMA2_11_X_MODIFY) +#define bfin_write_DMA2_11_X_MODIFY(val) bfin_write16(DMA2_11_X_MODIFY,val) +#define bfin_read_DMA2_11_Y_MODIFY() bfin_read16(DMA2_11_Y_MODIFY) +#define bfin_write_DMA2_11_Y_MODIFY(val) bfin_write16(DMA2_11_Y_MODIFY,val) +#define bfin_read_DMA2_11_CURR_DESC_PTR() bfin_read32(DMA2_11_CURR_DESC_PTR) +#define bfin_write_DMA2_11_CURR_DESC_PTR(val) bfin_write32(DMA2_11_CURR_DESC_PTR,val) +#define bfin_read_DMA2_11_CURR_ADDR() bfin_read32(DMA2_11_CURR_ADDR) +#define bfin_write_DMA2_11_CURR_ADDR(val) bfin_write32(DMA2_11_CURR_ADDR,val) +#define bfin_read_DMA2_11_CURR_X_COUNT() bfin_read16(DMA2_11_CURR_X_COUNT) +#define bfin_write_DMA2_11_CURR_X_COUNT(val) bfin_write16(DMA2_11_CURR_X_COUNT,val) +#define bfin_read_DMA2_11_CURR_Y_COUNT() bfin_read16(DMA2_11_CURR_Y_COUNT) +#define bfin_write_DMA2_11_CURR_Y_COUNT(val) bfin_write16(DMA2_11_CURR_Y_COUNT,val) +#define bfin_read_DMA2_11_IRQ_STATUS() bfin_read16(DMA2_11_IRQ_STATUS) +#define bfin_write_DMA2_11_IRQ_STATUS(val) bfin_write16(DMA2_11_IRQ_STATUS,val) +#define bfin_read_DMA2_11_PERIPHERAL_MAP() bfin_read16(DMA2_11_PERIPHERAL_MAP) +#define bfin_write_DMA2_11_PERIPHERAL_MAP(val) bfin_write16(DMA2_11_PERIPHERAL_MAP,val) +/* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */ +#define bfin_read_MDMA2_D0_CONFIG() bfin_read16(MDMA2_D0_CONFIG) +#define bfin_write_MDMA2_D0_CONFIG(val) bfin_write16(MDMA2_D0_CONFIG,val) +#define bfin_read_MDMA2_D0_NEXT_DESC_PTR() bfin_read32(MDMA2_D0_NEXT_DESC_PTR) +#define bfin_write_MDMA2_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA2_D0_NEXT_DESC_PTR,val) +#define bfin_read_MDMA2_D0_START_ADDR() bfin_read32(MDMA2_D0_START_ADDR) +#define bfin_write_MDMA2_D0_START_ADDR(val) bfin_write32(MDMA2_D0_START_ADDR,val) +#define bfin_read_MDMA2_D0_X_COUNT() bfin_read16(MDMA2_D0_X_COUNT) +#define bfin_write_MDMA2_D0_X_COUNT(val) bfin_write16(MDMA2_D0_X_COUNT,val) +#define bfin_read_MDMA2_D0_Y_COUNT() bfin_read16(MDMA2_D0_Y_COUNT) +#define bfin_write_MDMA2_D0_Y_COUNT(val) bfin_write16(MDMA2_D0_Y_COUNT,val) +#define bfin_read_MDMA2_D0_X_MODIFY() bfin_read16(MDMA2_D0_X_MODIFY) +#define bfin_write_MDMA2_D0_X_MODIFY(val) bfin_write16(MDMA2_D0_X_MODIFY,val) +#define bfin_read_MDMA2_D0_Y_MODIFY() bfin_read16(MDMA2_D0_Y_MODIFY) +#define bfin_write_MDMA2_D0_Y_MODIFY(val) bfin_write16(MDMA2_D0_Y_MODIFY,val) +#define bfin_read_MDMA2_D0_CURR_DESC_PTR() bfin_read32(MDMA2_D0_CURR_DESC_PTR) +#define bfin_write_MDMA2_D0_CURR_DESC_PTR(val) 
bfin_write32(MDMA2_D0_CURR_DESC_PTR,val) +#define bfin_read_MDMA2_D0_CURR_ADDR() bfin_read32(MDMA2_D0_CURR_ADDR) +#define bfin_write_MDMA2_D0_CURR_ADDR(val) bfin_write32(MDMA2_D0_CURR_ADDR,val) +#define bfin_read_MDMA2_D0_CURR_X_COUNT() bfin_read16(MDMA2_D0_CURR_X_COUNT) +#define bfin_write_MDMA2_D0_CURR_X_COUNT(val) bfin_write16(MDMA2_D0_CURR_X_COUNT,val) +#define bfin_read_MDMA2_D0_CURR_Y_COUNT() bfin_read16(MDMA2_D0_CURR_Y_COUNT) +#define bfin_write_MDMA2_D0_CURR_Y_COUNT(val) bfin_write16(MDMA2_D0_CURR_Y_COUNT,val) +#define bfin_read_MDMA2_D0_IRQ_STATUS() bfin_read16(MDMA2_D0_IRQ_STATUS) +#define bfin_write_MDMA2_D0_IRQ_STATUS(val) bfin_write16(MDMA2_D0_IRQ_STATUS,val) +#define bfin_read_MDMA2_D0_PERIPHERAL_MAP() bfin_read16(MDMA2_D0_PERIPHERAL_MAP) +#define bfin_write_MDMA2_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA2_D0_PERIPHERAL_MAP,val) +#define bfin_read_MDMA2_S0_CONFIG() bfin_read16(MDMA2_S0_CONFIG) +#define bfin_write_MDMA2_S0_CONFIG(val) bfin_write16(MDMA2_S0_CONFIG,val) +#define bfin_read_MDMA2_S0_NEXT_DESC_PTR() bfin_read32(MDMA2_S0_NEXT_DESC_PTR) +#define bfin_write_MDMA2_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA2_S0_NEXT_DESC_PTR,val) +#define bfin_read_MDMA2_S0_START_ADDR() bfin_read32(MDMA2_S0_START_ADDR) +#define bfin_write_MDMA2_S0_START_ADDR(val) bfin_write32(MDMA2_S0_START_ADDR,val) +#define bfin_read_MDMA2_S0_X_COUNT() bfin_read16(MDMA2_S0_X_COUNT) +#define bfin_write_MDMA2_S0_X_COUNT(val) bfin_write16(MDMA2_S0_X_COUNT,val) +#define bfin_read_MDMA2_S0_Y_COUNT() bfin_read16(MDMA2_S0_Y_COUNT) +#define bfin_write_MDMA2_S0_Y_COUNT(val) bfin_write16(MDMA2_S0_Y_COUNT,val) +#define bfin_read_MDMA2_S0_X_MODIFY() bfin_read16(MDMA2_S0_X_MODIFY) +#define bfin_write_MDMA2_S0_X_MODIFY(val) bfin_write16(MDMA2_S0_X_MODIFY,val) +#define bfin_read_MDMA2_S0_Y_MODIFY() bfin_read16(MDMA2_S0_Y_MODIFY) +#define bfin_write_MDMA2_S0_Y_MODIFY(val) bfin_write16(MDMA2_S0_Y_MODIFY,val) +#define bfin_read_MDMA2_S0_CURR_DESC_PTR() bfin_read32(MDMA2_S0_CURR_DESC_PTR) +#define bfin_write_MDMA2_S0_CURR_DESC_PTR(val) bfin_write32(MDMA2_S0_CURR_DESC_PTR,val) +#define bfin_read_MDMA2_S0_CURR_ADDR() bfin_read32(MDMA2_S0_CURR_ADDR) +#define bfin_write_MDMA2_S0_CURR_ADDR(val) bfin_write32(MDMA2_S0_CURR_ADDR,val) +#define bfin_read_MDMA2_S0_CURR_X_COUNT() bfin_read16(MDMA2_S0_CURR_X_COUNT) +#define bfin_write_MDMA2_S0_CURR_X_COUNT(val) bfin_write16(MDMA2_S0_CURR_X_COUNT,val) +#define bfin_read_MDMA2_S0_CURR_Y_COUNT() bfin_read16(MDMA2_S0_CURR_Y_COUNT) +#define bfin_write_MDMA2_S0_CURR_Y_COUNT(val) bfin_write16(MDMA2_S0_CURR_Y_COUNT,val) +#define bfin_read_MDMA2_S0_IRQ_STATUS() bfin_read16(MDMA2_S0_IRQ_STATUS) +#define bfin_write_MDMA2_S0_IRQ_STATUS(val) bfin_write16(MDMA2_S0_IRQ_STATUS,val) +#define bfin_read_MDMA2_S0_PERIPHERAL_MAP() bfin_read16(MDMA2_S0_PERIPHERAL_MAP) +#define bfin_write_MDMA2_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA2_S0_PERIPHERAL_MAP,val) +#define bfin_read_MDMA2_D1_CONFIG() bfin_read16(MDMA2_D1_CONFIG) +#define bfin_write_MDMA2_D1_CONFIG(val) bfin_write16(MDMA2_D1_CONFIG,val) +#define bfin_read_MDMA2_D1_NEXT_DESC_PTR() bfin_read32(MDMA2_D1_NEXT_DESC_PTR) +#define bfin_write_MDMA2_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA2_D1_NEXT_DESC_PTR,val) +#define bfin_read_MDMA2_D1_START_ADDR() bfin_read32(MDMA2_D1_START_ADDR) +#define bfin_write_MDMA2_D1_START_ADDR(val) bfin_write32(MDMA2_D1_START_ADDR,val) +#define bfin_read_MDMA2_D1_X_COUNT() bfin_read16(MDMA2_D1_X_COUNT) +#define bfin_write_MDMA2_D1_X_COUNT(val) bfin_write16(MDMA2_D1_X_COUNT,val) +#define bfin_read_MDMA2_D1_Y_COUNT() 
bfin_read16(MDMA2_D1_Y_COUNT) +#define bfin_write_MDMA2_D1_Y_COUNT(val) bfin_write16(MDMA2_D1_Y_COUNT,val) +#define bfin_read_MDMA2_D1_X_MODIFY() bfin_read16(MDMA2_D1_X_MODIFY) +#define bfin_write_MDMA2_D1_X_MODIFY(val) bfin_write16(MDMA2_D1_X_MODIFY,val) +#define bfin_read_MDMA2_D1_Y_MODIFY() bfin_read16(MDMA2_D1_Y_MODIFY) +#define bfin_write_MDMA2_D1_Y_MODIFY(val) bfin_write16(MDMA2_D1_Y_MODIFY,val) +#define bfin_read_MDMA2_D1_CURR_DESC_PTR() bfin_read32(MDMA2_D1_CURR_DESC_PTR) +#define bfin_write_MDMA2_D1_CURR_DESC_PTR(val) bfin_write32(MDMA2_D1_CURR_DESC_PTR,val) +#define bfin_read_MDMA2_D1_CURR_ADDR() bfin_read32(MDMA2_D1_CURR_ADDR) +#define bfin_write_MDMA2_D1_CURR_ADDR(val) bfin_write32(MDMA2_D1_CURR_ADDR,val) +#define bfin_read_MDMA2_D1_CURR_X_COUNT() bfin_read16(MDMA2_D1_CURR_X_COUNT) +#define bfin_write_MDMA2_D1_CURR_X_COUNT(val) bfin_write16(MDMA2_D1_CURR_X_COUNT,val) +#define bfin_read_MDMA2_D1_CURR_Y_COUNT() bfin_read16(MDMA2_D1_CURR_Y_COUNT) +#define bfin_write_MDMA2_D1_CURR_Y_COUNT(val) bfin_write16(MDMA2_D1_CURR_Y_COUNT,val) +#define bfin_read_MDMA2_D1_IRQ_STATUS() bfin_read16(MDMA2_D1_IRQ_STATUS) +#define bfin_write_MDMA2_D1_IRQ_STATUS(val) bfin_write16(MDMA2_D1_IRQ_STATUS,val) +#define bfin_read_MDMA2_D1_PERIPHERAL_MAP() bfin_read16(MDMA2_D1_PERIPHERAL_MAP) +#define bfin_write_MDMA2_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA2_D1_PERIPHERAL_MAP,val) +#define bfin_read_MDMA2_S1_CONFIG() bfin_read16(MDMA2_S1_CONFIG) +#define bfin_write_MDMA2_S1_CONFIG(val) bfin_write16(MDMA2_S1_CONFIG,val) +#define bfin_read_MDMA2_S1_NEXT_DESC_PTR() bfin_read32(MDMA2_S1_NEXT_DESC_PTR) +#define bfin_write_MDMA2_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA2_S1_NEXT_DESC_PTR,val) +#define bfin_read_MDMA2_S1_START_ADDR() bfin_read32(MDMA2_S1_START_ADDR) +#define bfin_write_MDMA2_S1_START_ADDR(val) bfin_write32(MDMA2_S1_START_ADDR,val) +#define bfin_read_MDMA2_S1_X_COUNT() bfin_read16(MDMA2_S1_X_COUNT) +#define bfin_write_MDMA2_S1_X_COUNT(val) bfin_write16(MDMA2_S1_X_COUNT,val) +#define bfin_read_MDMA2_S1_Y_COUNT() bfin_read16(MDMA2_S1_Y_COUNT) +#define bfin_write_MDMA2_S1_Y_COUNT(val) bfin_write16(MDMA2_S1_Y_COUNT,val) +#define bfin_read_MDMA2_S1_X_MODIFY() bfin_read16(MDMA2_S1_X_MODIFY) +#define bfin_write_MDMA2_S1_X_MODIFY(val) bfin_write16(MDMA2_S1_X_MODIFY,val) +#define bfin_read_MDMA2_S1_Y_MODIFY() bfin_read16(MDMA2_S1_Y_MODIFY) +#define bfin_write_MDMA2_S1_Y_MODIFY(val) bfin_write16(MDMA2_S1_Y_MODIFY,val) +#define bfin_read_MDMA2_S1_CURR_DESC_PTR() bfin_read32(MDMA2_S1_CURR_DESC_PTR) +#define bfin_write_MDMA2_S1_CURR_DESC_PTR(val) bfin_write32(MDMA2_S1_CURR_DESC_PTR,val) +#define bfin_read_MDMA2_S1_CURR_ADDR() bfin_read32(MDMA2_S1_CURR_ADDR) +#define bfin_write_MDMA2_S1_CURR_ADDR(val) bfin_write32(MDMA2_S1_CURR_ADDR,val) +#define bfin_read_MDMA2_S1_CURR_X_COUNT() bfin_read16(MDMA2_S1_CURR_X_COUNT) +#define bfin_write_MDMA2_S1_CURR_X_COUNT(val) bfin_write16(MDMA2_S1_CURR_X_COUNT,val) +#define bfin_read_MDMA2_S1_CURR_Y_COUNT() bfin_read16(MDMA2_S1_CURR_Y_COUNT) +#define bfin_write_MDMA2_S1_CURR_Y_COUNT(val) bfin_write16(MDMA2_S1_CURR_Y_COUNT,val) +#define bfin_read_MDMA2_S1_IRQ_STATUS() bfin_read16(MDMA2_S1_IRQ_STATUS) +#define bfin_write_MDMA2_S1_IRQ_STATUS(val) bfin_write16(MDMA2_S1_IRQ_STATUS,val) +#define bfin_read_MDMA2_S1_PERIPHERAL_MAP() bfin_read16(MDMA2_S1_PERIPHERAL_MAP) +#define bfin_write_MDMA2_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA2_S1_PERIPHERAL_MAP,val) +/* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */ +#define bfin_read_IMDMA_D0_CONFIG() bfin_read16(IMDMA_D0_CONFIG) 
+#define bfin_write_IMDMA_D0_CONFIG(val) bfin_write16(IMDMA_D0_CONFIG,val) +#define bfin_read_IMDMA_D0_NEXT_DESC_PTR() bfin_read32(IMDMA_D0_NEXT_DESC_PTR) +#define bfin_write_IMDMA_D0_NEXT_DESC_PTR(val) bfin_write32(IMDMA_D0_NEXT_DESC_PTR,val) +#define bfin_read_IMDMA_D0_START_ADDR() bfin_read32(IMDMA_D0_START_ADDR) +#define bfin_write_IMDMA_D0_START_ADDR(val) bfin_write32(IMDMA_D0_START_ADDR,val) +#define bfin_read_IMDMA_D0_X_COUNT() bfin_read16(IMDMA_D0_X_COUNT) +#define bfin_write_IMDMA_D0_X_COUNT(val) bfin_write16(IMDMA_D0_X_COUNT,val) +#define bfin_read_IMDMA_D0_Y_COUNT() bfin_read16(IMDMA_D0_Y_COUNT) +#define bfin_write_IMDMA_D0_Y_COUNT(val) bfin_write16(IMDMA_D0_Y_COUNT,val) +#define bfin_read_IMDMA_D0_X_MODIFY() bfin_read16(IMDMA_D0_X_MODIFY) +#define bfin_write_IMDMA_D0_X_MODIFY(val) bfin_write16(IMDMA_D0_X_MODIFY,val) +#define bfin_read_IMDMA_D0_Y_MODIFY() bfin_read16(IMDMA_D0_Y_MODIFY) +#define bfin_write_IMDMA_D0_Y_MODIFY(val) bfin_write16(IMDMA_D0_Y_MODIFY,val) +#define bfin_read_IMDMA_D0_CURR_DESC_PTR() bfin_read32(IMDMA_D0_CURR_DESC_PTR) +#define bfin_write_IMDMA_D0_CURR_DESC_PTR(val) bfin_write32(IMDMA_D0_CURR_DESC_PTR,val) +#define bfin_read_IMDMA_D0_CURR_ADDR() bfin_read32(IMDMA_D0_CURR_ADDR) +#define bfin_write_IMDMA_D0_CURR_ADDR(val) bfin_write32(IMDMA_D0_CURR_ADDR,val) +#define bfin_read_IMDMA_D0_CURR_X_COUNT() bfin_read16(IMDMA_D0_CURR_X_COUNT) +#define bfin_write_IMDMA_D0_CURR_X_COUNT(val) bfin_write16(IMDMA_D0_CURR_X_COUNT,val) +#define bfin_read_IMDMA_D0_CURR_Y_COUNT() bfin_read16(IMDMA_D0_CURR_Y_COUNT) +#define bfin_write_IMDMA_D0_CURR_Y_COUNT(val) bfin_write16(IMDMA_D0_CURR_Y_COUNT,val) +#define bfin_read_IMDMA_D0_IRQ_STATUS() bfin_read16(IMDMA_D0_IRQ_STATUS) +#define bfin_write_IMDMA_D0_IRQ_STATUS(val) bfin_write16(IMDMA_D0_IRQ_STATUS,val) +#define bfin_read_IMDMA_S0_CONFIG() bfin_read16(IMDMA_S0_CONFIG) +#define bfin_write_IMDMA_S0_CONFIG(val) bfin_write16(IMDMA_S0_CONFIG,val) +#define bfin_read_IMDMA_S0_NEXT_DESC_PTR() bfin_read32(IMDMA_S0_NEXT_DESC_PTR) +#define bfin_write_IMDMA_S0_NEXT_DESC_PTR(val) bfin_write32(IMDMA_S0_NEXT_DESC_PTR,val) +#define bfin_read_IMDMA_S0_START_ADDR() bfin_read32(IMDMA_S0_START_ADDR) +#define bfin_write_IMDMA_S0_START_ADDR(val) bfin_write32(IMDMA_S0_START_ADDR,val) +#define bfin_read_IMDMA_S0_X_COUNT() bfin_read16(IMDMA_S0_X_COUNT) +#define bfin_write_IMDMA_S0_X_COUNT(val) bfin_write16(IMDMA_S0_X_COUNT,val) +#define bfin_read_IMDMA_S0_Y_COUNT() bfin_read16(IMDMA_S0_Y_COUNT) +#define bfin_write_IMDMA_S0_Y_COUNT(val) bfin_write16(IMDMA_S0_Y_COUNT,val) +#define bfin_read_IMDMA_S0_X_MODIFY() bfin_read16(IMDMA_S0_X_MODIFY) +#define bfin_write_IMDMA_S0_X_MODIFY(val) bfin_write16(IMDMA_S0_X_MODIFY,val) +#define bfin_read_IMDMA_S0_Y_MODIFY() bfin_read16(IMDMA_S0_Y_MODIFY) +#define bfin_write_IMDMA_S0_Y_MODIFY(val) bfin_write16(IMDMA_S0_Y_MODIFY,val) +#define bfin_read_IMDMA_S0_CURR_DESC_PTR() bfin_read32(IMDMA_S0_CURR_DESC_PTR) +#define bfin_write_IMDMA_S0_CURR_DESC_PTR(val) bfin_write32(IMDMA_S0_CURR_DESC_PTR,val) +#define bfin_read_IMDMA_S0_CURR_ADDR() bfin_read32(IMDMA_S0_CURR_ADDR) +#define bfin_write_IMDMA_S0_CURR_ADDR(val) bfin_write32(IMDMA_S0_CURR_ADDR,val) +#define bfin_read_IMDMA_S0_CURR_X_COUNT() bfin_read16(IMDMA_S0_CURR_X_COUNT) +#define bfin_write_IMDMA_S0_CURR_X_COUNT(val) bfin_write16(IMDMA_S0_CURR_X_COUNT,val) +#define bfin_read_IMDMA_S0_CURR_Y_COUNT() bfin_read16(IMDMA_S0_CURR_Y_COUNT) +#define bfin_write_IMDMA_S0_CURR_Y_COUNT(val) bfin_write16(IMDMA_S0_CURR_Y_COUNT,val) +#define bfin_read_IMDMA_S0_IRQ_STATUS() 
bfin_read16(IMDMA_S0_IRQ_STATUS) +#define bfin_write_IMDMA_S0_IRQ_STATUS(val) bfin_write16(IMDMA_S0_IRQ_STATUS,val) +#define bfin_read_IMDMA_D1_CONFIG() bfin_read16(IMDMA_D1_CONFIG) +#define bfin_write_IMDMA_D1_CONFIG(val) bfin_write16(IMDMA_D1_CONFIG,val) +#define bfin_read_IMDMA_D1_NEXT_DESC_PTR() bfin_read32(IMDMA_D1_NEXT_DESC_PTR) +#define bfin_write_IMDMA_D1_NEXT_DESC_PTR(val) bfin_write32(IMDMA_D1_NEXT_DESC_PTR,val) +#define bfin_read_IMDMA_D1_START_ADDR() bfin_read32(IMDMA_D1_START_ADDR) +#define bfin_write_IMDMA_D1_START_ADDR(val) bfin_write32(IMDMA_D1_START_ADDR,val) +#define bfin_read_IMDMA_D1_X_COUNT() bfin_read16(IMDMA_D1_X_COUNT) +#define bfin_write_IMDMA_D1_X_COUNT(val) bfin_write16(IMDMA_D1_X_COUNT,val) +#define bfin_read_IMDMA_D1_Y_COUNT() bfin_read16(IMDMA_D1_Y_COUNT) +#define bfin_write_IMDMA_D1_Y_COUNT(val) bfin_write16(IMDMA_D1_Y_COUNT,val) +#define bfin_read_IMDMA_D1_X_MODIFY() bfin_read16(IMDMA_D1_X_MODIFY) +#define bfin_write_IMDMA_D1_X_MODIFY(val) bfin_write16(IMDMA_D1_X_MODIFY,val) +#define bfin_read_IMDMA_D1_Y_MODIFY() bfin_read16(IMDMA_D1_Y_MODIFY) +#define bfin_write_IMDMA_D1_Y_MODIFY(val) bfin_write16(IMDMA_D1_Y_MODIFY,val) +#define bfin_read_IMDMA_D1_CURR_DESC_PTR() bfin_read32(IMDMA_D1_CURR_DESC_PTR) +#define bfin_write_IMDMA_D1_CURR_DESC_PTR(val) bfin_write32(IMDMA_D1_CURR_DESC_PTR,val) +#define bfin_read_IMDMA_D1_CURR_ADDR() bfin_read32(IMDMA_D1_CURR_ADDR) +#define bfin_write_IMDMA_D1_CURR_ADDR(val) bfin_write32(IMDMA_D1_CURR_ADDR,val) +#define bfin_read_IMDMA_D1_CURR_X_COUNT() bfin_read16(IMDMA_D1_CURR_X_COUNT) +#define bfin_write_IMDMA_D1_CURR_X_COUNT(val) bfin_write16(IMDMA_D1_CURR_X_COUNT,val) +#define bfin_read_IMDMA_D1_CURR_Y_COUNT() bfin_read16(IMDMA_D1_CURR_Y_COUNT) +#define bfin_write_IMDMA_D1_CURR_Y_COUNT(val) bfin_write16(IMDMA_D1_CURR_Y_COUNT,val) +#define bfin_read_IMDMA_D1_IRQ_STATUS() bfin_read16(IMDMA_D1_IRQ_STATUS) +#define bfin_write_IMDMA_D1_IRQ_STATUS(val) bfin_write16(IMDMA_D1_IRQ_STATUS,val) +#define bfin_read_IMDMA_S1_CONFIG() bfin_read16(IMDMA_S1_CONFIG) +#define bfin_write_IMDMA_S1_CONFIG(val) bfin_write16(IMDMA_S1_CONFIG,val) +#define bfin_read_IMDMA_S1_NEXT_DESC_PTR() bfin_read32(IMDMA_S1_NEXT_DESC_PTR) +#define bfin_write_IMDMA_S1_NEXT_DESC_PTR(val) bfin_write32(IMDMA_S1_NEXT_DESC_PTR,val) +#define bfin_read_IMDMA_S1_START_ADDR() bfin_read32(IMDMA_S1_START_ADDR) +#define bfin_write_IMDMA_S1_START_ADDR(val) bfin_write32(IMDMA_S1_START_ADDR,val) +#define bfin_read_IMDMA_S1_X_COUNT() bfin_read16(IMDMA_S1_X_COUNT) +#define bfin_write_IMDMA_S1_X_COUNT(val) bfin_write16(IMDMA_S1_X_COUNT,val) +#define bfin_read_IMDMA_S1_Y_COUNT() bfin_read16(IMDMA_S1_Y_COUNT) +#define bfin_write_IMDMA_S1_Y_COUNT(val) bfin_write16(IMDMA_S1_Y_COUNT,val) +#define bfin_read_IMDMA_S1_X_MODIFY() bfin_read16(IMDMA_S1_X_MODIFY) +#define bfin_write_IMDMA_S1_X_MODIFY(val) bfin_write16(IMDMA_S1_X_MODIFY,val) +#define bfin_read_IMDMA_S1_Y_MODIFY() bfin_read16(IMDMA_S1_Y_MODIFY) +#define bfin_write_IMDMA_S1_Y_MODIFY(val) bfin_write16(IMDMA_S1_Y_MODIFY,val) +#define bfin_read_IMDMA_S1_CURR_DESC_PTR() bfin_read32(IMDMA_S1_CURR_DESC_PTR) +#define bfin_write_IMDMA_S1_CURR_DESC_PTR(val) bfin_write32(IMDMA_S1_CURR_DESC_PTR,val) +#define bfin_read_IMDMA_S1_CURR_ADDR() bfin_read32(IMDMA_S1_CURR_ADDR) +#define bfin_write_IMDMA_S1_CURR_ADDR(val) bfin_write32(IMDMA_S1_CURR_ADDR,val) +#define bfin_read_IMDMA_S1_CURR_X_COUNT() bfin_read16(IMDMA_S1_CURR_X_COUNT) +#define bfin_write_IMDMA_S1_CURR_X_COUNT(val) bfin_write16(IMDMA_S1_CURR_X_COUNT,val) +#define 
bfin_read_IMDMA_S1_CURR_Y_COUNT() bfin_read16(IMDMA_S1_CURR_Y_COUNT) +#define bfin_write_IMDMA_S1_CURR_Y_COUNT(val) bfin_write16(IMDMA_S1_CURR_Y_COUNT,val) +#define bfin_read_IMDMA_S1_IRQ_STATUS() bfin_read16(IMDMA_S1_IRQ_STATUS) +#define bfin_write_IMDMA_S1_IRQ_STATUS(val) bfin_write16(IMDMA_S1_IRQ_STATUS,val) + +#define bfin_read_MDMA_S0_CONFIG() bfin_read_MDMA1_S0_CONFIG() +#define bfin_write_MDMA_S0_CONFIG(val) bfin_write_MDMA1_S0_CONFIG(val) +#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read_MDMA1_S0_IRQ_STATUS() +#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write_MDMA1_S0_IRQ_STATUS(val) +#define bfin_read_MDMA_S0_X_MODIFY() bfin_read_MDMA1_S0_X_MODIFY() +#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write_MDMA1_S0_X_MODIFY(val) +#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read_MDMA1_S0_Y_MODIFY() +#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write_MDMA1_S0_Y_MODIFY(val) +#define bfin_read_MDMA_S0_X_COUNT() bfin_read_MDMA1_S0_X_COUNT() +#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write_MDMA1_S0_X_COUNT(val) +#define bfin_read_MDMA_S0_Y_COUNT() bfin_read_MDMA1_S0_Y_COUNT() +#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write_MDMA1_S0_Y_COUNT(val) +#define bfin_read_MDMA_S0_START_ADDR() bfin_read_MDMA1_S0_START_ADDR() +#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write_MDMA1_S0_START_ADDR(val) +#define bfin_read_MDMA_D0_CONFIG() bfin_read_MDMA1_D0_CONFIG() +#define bfin_write_MDMA_D0_CONFIG(val) bfin_write_MDMA1_D0_CONFIG(val) +#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read_MDMA1_D0_IRQ_STATUS() +#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write_MDMA1_D0_IRQ_STATUS(val) +#define bfin_read_MDMA_D0_X_MODIFY() bfin_read_MDMA1_D0_X_MODIFY() +#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write_MDMA1_D0_X_MODIFY(val) +#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read_MDMA1_D0_Y_MODIFY() +#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write_MDMA1_D0_Y_MODIFY(val) +#define bfin_read_MDMA_D0_X_COUNT() bfin_read_MDMA1_D0_X_COUNT() +#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write_MDMA1_D0_X_COUNT(val) +#define bfin_read_MDMA_D0_Y_COUNT() bfin_read_MDMA1_D0_Y_COUNT() +#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write_MDMA1_D0_Y_COUNT(val) +#define bfin_read_MDMA_D0_START_ADDR() bfin_read_MDMA1_D0_START_ADDR() +#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA1_D0_START_ADDR(val) + +#endif /* _CDEF_BF561_H */ diff --git a/include/asm-blackfin/mach-bf561/defBF561.h b/include/asm-blackfin/mach-bf561/defBF561.h new file mode 100644 index 000000000000..a6de4c69ba55 --- /dev/null +++ b/include/asm-blackfin/mach-bf561/defBF561.h @@ -0,0 +1,1717 @@ + +/* + * File: include/asm-blackfin/mach-bf561/defBF561.h + * Based on: + * Author: + * + * Created: + * Description: + * SYSTEM MMR REGISTER AND MEMORY MAP FOR ADSP-BF561 + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. 
+ * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _DEF_BF561_H +#define _DEF_BF561_H +/* +#if !defined(__ADSPBF561__) +#warning defBF561.h should only be included for BF561 chip. +#endif +*/ +/* include all Core registers and bit definitions */ +#include + +/*********************************************************************************** */ +/* System MMR Register Map */ +/*********************************************************************************** */ + +/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */ + +#define PLL_CTL 0xFFC00000 /* PLL Control register (16-bit) */ +#define PLL_DIV 0xFFC00004 /* PLL Divide Register (16-bit) */ +#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register (16-bit) */ +#define PLL_STAT 0xFFC0000C /* PLL Status register (16-bit) */ +#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count register (16-bit) */ +#define CHIPID 0xFFC00014 /* Chip ID Register */ + +/* System Reset and Interrupt Controller registers for core A (0xFFC0 0100-0xFFC0 01FF) */ +#define SICA_SWRST 0xFFC00100 /* Software Reset register */ +#define SICA_SYSCR 0xFFC00104 /* System Reset Configuration register */ +#define SICA_RVECT 0xFFC00108 /* SIC Reset Vector Address Register */ +#define SICA_IMASK 0xFFC0010C /* SIC Interrupt Mask register 0 - hack to fix old tests */ +#define SICA_IMASK0 0xFFC0010C /* SIC Interrupt Mask register 0 */ +#define SICA_IMASK1 0xFFC00110 /* SIC Interrupt Mask register 1 */ +#define SICA_IAR0 0xFFC00124 /* SIC Interrupt Assignment Register 0 */ +#define SICA_IAR1 0xFFC00128 /* SIC Interrupt Assignment Register 1 */ +#define SICA_IAR2 0xFFC0012C /* SIC Interrupt Assignment Register 2 */ +#define SICA_IAR3 0xFFC00130 /* SIC Interrupt Assignment Register 3 */ +#define SICA_IAR4 0xFFC00134 /* SIC Interrupt Assignment Register 4 */ +#define SICA_IAR5 0xFFC00138 /* SIC Interrupt Assignment Register 5 */ +#define SICA_IAR6 0xFFC0013C /* SIC Interrupt Assignment Register 6 */ +#define SICA_IAR7 0xFFC00140 /* SIC Interrupt Assignment Register 7 */ +#define SICA_ISR0 0xFFC00114 /* SIC Interrupt Status register 0 */ +#define SICA_ISR1 0xFFC00118 /* SIC Interrupt Status register 1 */ +#define SICA_IWR0 0xFFC0011C /* SIC Interrupt Wakeup-Enable register 0 */ +#define SICA_IWR1 0xFFC00120 /* SIC Interrupt Wakeup-Enable register 1 */ + +/* System Reset and Interrupt Controller registers for Core B (0xFFC0 1100-0xFFC0 11FF) */ +#define SICB_SWRST 0xFFC01100 /* reserved */ +#define SICB_SYSCR 0xFFC01104 /* reserved */ +#define SICB_RVECT 0xFFC01108 /* SIC Reset Vector Address Register */ +#define SICB_IMASK0 0xFFC0110C /* SIC Interrupt Mask register 0 */ +#define SICB_IMASK1 0xFFC01110 /* SIC Interrupt Mask register 1 */ +#define SICB_IAR0 0xFFC01124 /* SIC Interrupt Assignment Register 0 */ +#define SICB_IAR1 0xFFC01128 /* SIC Interrupt Assignment Register 1 */ +#define SICB_IAR2 0xFFC0112C /* SIC Interrupt Assignment Register 2 */ +#define SICB_IAR3 0xFFC01130 /* SIC Interrupt Assignment Register 3 */ +#define SICB_IAR4 0xFFC01134 /* SIC Interrupt Assignment Register 4 */ +#define SICB_IAR5 0xFFC01138 /* SIC Interrupt Assignment Register 5 */ +#define SICB_IAR6 0xFFC0113C /* SIC Interrupt Assignment Register 6 */ +#define SICB_IAR7 0xFFC01140 /* SIC Interrupt Assignment Register 7 */ +#define SICB_ISR0 0xFFC01114 /* SIC Interrupt Status register 0 */ +#define SICB_ISR1 0xFFC01118 /* SIC Interrupt Status register 1 */ +#define SICB_IWR0 0xFFC0111C /* SIC Interrupt Wakeup-Enable 
register 0 */ +#define SICB_IWR1 0xFFC01120 /* SIC Interrupt Wakeup-Enable register 1 */ + +/* Watchdog Timer registers for Core A (0xFFC0 0200-0xFFC0 02FF) */ +#define WDOGA_CTL 0xFFC00200 /* Watchdog Control register */ +#define WDOGA_CNT 0xFFC00204 /* Watchdog Count register */ +#define WDOGA_STAT 0xFFC00208 /* Watchdog Status register */ + +/* Watchdog Timer registers for Core B (0xFFC0 1200-0xFFC0 12FF) */ +#define WDOGB_CTL 0xFFC01200 /* Watchdog Control register */ +#define WDOGB_CNT 0xFFC01204 /* Watchdog Count register */ +#define WDOGB_STAT 0xFFC01208 /* Watchdog Status register */ + +/* UART Controller (0xFFC00400 - 0xFFC004FF) */ +#define UART_THR 0xFFC00400 /* Transmit Holding register */ +#define UART_RBR 0xFFC00400 /* Receive Buffer register */ +#define UART_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */ +#define UART_IER 0xFFC00404 /* Interrupt Enable Register */ +#define UART_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */ +#define UART_IIR 0xFFC00408 /* Interrupt Identification Register */ +#define UART_LCR 0xFFC0040C /* Line Control Register */ +#define UART_MCR 0xFFC00410 /* Modem Control Register */ +#define UART_LSR 0xFFC00414 /* Line Status Register */ +#define UART_MSR 0xFFC00418 /* Modem Status Register */ +#define UART_SCR 0xFFC0041C /* SCR Scratch Register */ +#define UART_GCTL 0xFFC00424 /* Global Control Register */ + +/* SPI Controller (0xFFC00500 - 0xFFC005FF) */ +#define SPI_CTL 0xFFC00500 /* SPI Control Register */ +#define SPI_FLG 0xFFC00504 /* SPI Flag register */ +#define SPI_STAT 0xFFC00508 /* SPI Status register */ +#define SPI_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */ +#define SPI_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */ +#define SPI_BAUD 0xFFC00514 /* SPI Baud rate Register */ +#define SPI_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */ + +/* Timer 0-7 registers (0xFFC0 0600-0xFFC0 06FF) */ +#define TIMER0_CONFIG 0xFFC00600 /* Timer0 Configuration register */ +#define TIMER0_COUNTER 0xFFC00604 /* Timer0 Counter register */ +#define TIMER0_PERIOD 0xFFC00608 /* Timer0 Period register */ +#define TIMER0_WIDTH 0xFFC0060C /* Timer0 Width register */ + +#define TIMER1_CONFIG 0xFFC00610 /* Timer1 Configuration register */ +#define TIMER1_COUNTER 0xFFC00614 /* Timer1 Counter register */ +#define TIMER1_PERIOD 0xFFC00618 /* Timer1 Period register */ +#define TIMER1_WIDTH 0xFFC0061C /* Timer1 Width register */ + +#define TIMER2_CONFIG 0xFFC00620 /* Timer2 Configuration register */ +#define TIMER2_COUNTER 0xFFC00624 /* Timer2 Counter register */ +#define TIMER2_PERIOD 0xFFC00628 /* Timer2 Period register */ +#define TIMER2_WIDTH 0xFFC0062C /* Timer2 Width register */ + +#define TIMER3_CONFIG 0xFFC00630 /* Timer3 Configuration register */ +#define TIMER3_COUNTER 0xFFC00634 /* Timer3 Counter register */ +#define TIMER3_PERIOD 0xFFC00638 /* Timer3 Period register */ +#define TIMER3_WIDTH 0xFFC0063C /* Timer3 Width register */ + +#define TIMER4_CONFIG 0xFFC00640 /* Timer4 Configuration register */ +#define TIMER4_COUNTER 0xFFC00644 /* Timer4 Counter register */ +#define TIMER4_PERIOD 0xFFC00648 /* Timer4 Period register */ +#define TIMER4_WIDTH 0xFFC0064C /* Timer4 Width register */ + +#define TIMER5_CONFIG 0xFFC00650 /* Timer5 Configuration register */ +#define TIMER5_COUNTER 0xFFC00654 /* Timer5 Counter register */ +#define TIMER5_PERIOD 0xFFC00658 /* Timer5 Period register */ +#define TIMER5_WIDTH 0xFFC0065C /* Timer5 Width register */ + +#define TIMER6_CONFIG 0xFFC00660 /* Timer6 Configuration register */ +#define TIMER6_COUNTER 
0xFFC00664 /* Timer6 Counter register */ +#define TIMER6_PERIOD 0xFFC00668 /* Timer6 Period register */ +#define TIMER6_WIDTH 0xFFC0066C /* Timer6 Width register */ + +#define TIMER7_CONFIG 0xFFC00670 /* Timer7 Configuration register */ +#define TIMER7_COUNTER 0xFFC00674 /* Timer7 Counter register */ +#define TIMER7_PERIOD 0xFFC00678 /* Timer7 Period register */ +#define TIMER7_WIDTH 0xFFC0067C /* Timer7 Width register */ + +#define TMRS8_ENABLE 0xFFC00680 /* Timer Enable Register */ +#define TMRS8_DISABLE 0xFFC00684 /* Timer Disable register */ +#define TMRS8_STATUS 0xFFC00688 /* Timer Status register */ + +/* Timer registers 8-11 (0xFFC0 1600-0xFFC0 16FF) */ +#define TIMER8_CONFIG 0xFFC01600 /* Timer8 Configuration register */ +#define TIMER8_COUNTER 0xFFC01604 /* Timer8 Counter register */ +#define TIMER8_PERIOD 0xFFC01608 /* Timer8 Period register */ +#define TIMER8_WIDTH 0xFFC0160C /* Timer8 Width register */ + +#define TIMER9_CONFIG 0xFFC01610 /* Timer9 Configuration register */ +#define TIMER9_COUNTER 0xFFC01614 /* Timer9 Counter register */ +#define TIMER9_PERIOD 0xFFC01618 /* Timer9 Period register */ +#define TIMER9_WIDTH 0xFFC0161C /* Timer9 Width register */ + +#define TIMER10_CONFIG 0xFFC01620 /* Timer10 Configuration register */ +#define TIMER10_COUNTER 0xFFC01624 /* Timer10 Counter register */ +#define TIMER10_PERIOD 0xFFC01628 /* Timer10 Period register */ +#define TIMER10_WIDTH 0xFFC0162C /* Timer10 Width register */ + +#define TIMER11_CONFIG 0xFFC01630 /* Timer11 Configuration register */ +#define TIMER11_COUNTER 0xFFC01634 /* Timer11 Counter register */ +#define TIMER11_PERIOD 0xFFC01638 /* Timer11 Period register */ +#define TIMER11_WIDTH 0xFFC0163C /* Timer11 Width register */ + +#define TMRS4_ENABLE 0xFFC01640 /* Timer Enable Register */ +#define TMRS4_DISABLE 0xFFC01644 /* Timer Disable register */ +#define TMRS4_STATUS 0xFFC01648 /* Timer Status register */ + +/* Programmable Flag 0 registers (0xFFC0 0700-0xFFC0 07FF) */ +#define FIO0_FLAG_D 0xFFC00700 /* Flag Data register */ +#define FIO0_FLAG_C 0xFFC00704 /* Flag Clear register */ +#define FIO0_FLAG_S 0xFFC00708 /* Flag Set register */ +#define FIO0_FLAG_T 0xFFC0070C /* Flag Toggle register */ +#define FIO0_MASKA_D 0xFFC00710 /* Flag Mask Interrupt A Data register */ +#define FIO0_MASKA_C 0xFFC00714 /* Flag Mask Interrupt A Clear register */ +#define FIO0_MASKA_S 0xFFC00718 /* Flag Mask Interrupt A Set register */ +#define FIO0_MASKA_T 0xFFC0071C /* Flag Mask Interrupt A Toggle register */ +#define FIO0_MASKB_D 0xFFC00720 /* Flag Mask Interrupt B Data register */ +#define FIO0_MASKB_C 0xFFC00724 /* Flag Mask Interrupt B Clear register */ +#define FIO0_MASKB_S 0xFFC00728 /* Flag Mask Interrupt B Set register */ +#define FIO0_MASKB_T 0xFFC0072C /* Flag Mask Interrupt B Toggle register */ +#define FIO0_DIR 0xFFC00730 /* Flag Direction register */ +#define FIO0_POLAR 0xFFC00734 /* Flag Polarity register */ +#define FIO0_EDGE 0xFFC00738 /* Flag Interrupt Sensitivity register */ +#define FIO0_BOTH 0xFFC0073C /* Flag Set on Both Edges register */ +#define FIO0_INEN 0xFFC00740 /* Flag Input Enable register */ + +/* Programmable Flag 1 registers (0xFFC0 1500-0xFFC0 15FF) */ +#define FIO1_FLAG_D 0xFFC01500 /* Flag Data register (mask used to directly */ +#define FIO1_FLAG_C 0xFFC01504 /* Flag Clear register */ +#define FIO1_FLAG_S 0xFFC01508 /* Flag Set register */ +#define FIO1_FLAG_T 0xFFC0150C /* Flag Toggle register (mask used to */ +#define FIO1_MASKA_D 0xFFC01510 /* Flag Mask Interrupt A Data register */ +#define 
FIO1_MASKA_C 0xFFC01514 /* Flag Mask Interrupt A Clear register */ +#define FIO1_MASKA_S 0xFFC01518 /* Flag Mask Interrupt A Set register */ +#define FIO1_MASKA_T 0xFFC0151C /* Flag Mask Interrupt A Toggle register */ +#define FIO1_MASKB_D 0xFFC01520 /* Flag Mask Interrupt B Data register */ +#define FIO1_MASKB_C 0xFFC01524 /* Flag Mask Interrupt B Clear register */ +#define FIO1_MASKB_S 0xFFC01528 /* Flag Mask Interrupt B Set register */ +#define FIO1_MASKB_T 0xFFC0152C /* Flag Mask Interrupt B Toggle register */ +#define FIO1_DIR 0xFFC01530 /* Flag Direction register */ +#define FIO1_POLAR 0xFFC01534 /* Flag Polarity register */ +#define FIO1_EDGE 0xFFC01538 /* Flag Interrupt Sensitivity register */ +#define FIO1_BOTH 0xFFC0153C /* Flag Set on Both Edges register */ +#define FIO1_INEN 0xFFC01540 /* Flag Input Enable register */ + +/* Programmable Flag registers (0xFFC0 1700-0xFFC0 17FF) */ +#define FIO2_FLAG_D 0xFFC01700 /* Flag Data register (mask used to directly */ +#define FIO2_FLAG_C 0xFFC01704 /* Flag Clear register */ +#define FIO2_FLAG_S 0xFFC01708 /* Flag Set register */ +#define FIO2_FLAG_T 0xFFC0170C /* Flag Toggle register (mask used to */ +#define FIO2_MASKA_D 0xFFC01710 /* Flag Mask Interrupt A Data register */ +#define FIO2_MASKA_C 0xFFC01714 /* Flag Mask Interrupt A Clear register */ +#define FIO2_MASKA_S 0xFFC01718 /* Flag Mask Interrupt A Set register */ +#define FIO2_MASKA_T 0xFFC0171C /* Flag Mask Interrupt A Toggle register */ +#define FIO2_MASKB_D 0xFFC01720 /* Flag Mask Interrupt B Data register */ +#define FIO2_MASKB_C 0xFFC01724 /* Flag Mask Interrupt B Clear register */ +#define FIO2_MASKB_S 0xFFC01728 /* Flag Mask Interrupt B Set register */ +#define FIO2_MASKB_T 0xFFC0172C /* Flag Mask Interrupt B Toggle register */ +#define FIO2_DIR 0xFFC01730 /* Flag Direction register */ +#define FIO2_POLAR 0xFFC01734 /* Flag Polarity register */ +#define FIO2_EDGE 0xFFC01738 /* Flag Interrupt Sensitivity register */ +#define FIO2_BOTH 0xFFC0173C /* Flag Set on Both Edges register */ +#define FIO2_INEN 0xFFC01740 /* Flag Input Enable register */ + +/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */ +#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */ +#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */ +#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */ +#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */ +#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */ +#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */ +#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Transmit Configuration 1 Register */ +#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Transmit Configuration 2 Register */ +#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */ +#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */ +#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */ +#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */ +#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */ +#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */ +#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */ +#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */ +#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */ +#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */ +#define SPORT0_MRCS0 
0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */ +#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */ +#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */ +#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */ + +/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */ +#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */ +#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */ +#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */ +#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */ +#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */ +#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */ +#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Transmit Configuration 1 Register */ +#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Transmit Configuration 2 Register */ +#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */ +#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */ +#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */ +#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */ +#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */ +#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */ +#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */ +#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */ +#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */ +#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */ +#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */ +#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */ +#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */ +#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */ + +/* Asynchronous Memory Controller - External Bus Interface Unit */ +#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */ +#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */ +#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */ + +/* SDRAM Controller External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */ +#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */ +#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */ +#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */ +#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */ + +/* Parallel Peripheral Interface (PPI) 0 registers (0xFFC0 1000-0xFFC0 10FF) */ +#define PPI0_CONTROL 0xFFC01000 /* PPI0 Control register */ +#define PPI0_STATUS 0xFFC01004 /* PPI0 Status register */ +#define PPI0_COUNT 0xFFC01008 /* PPI0 Transfer Count register */ +#define PPI0_DELAY 0xFFC0100C /* PPI0 Delay Count register */ +#define PPI0_FRAME 0xFFC01010 /* PPI0 Frame Length register */ + +/*Parallel Peripheral Interface (PPI) 1 registers (0xFFC0 1300-0xFFC0 13FF) */ +#define PPI1_CONTROL 0xFFC01300 /* PPI1 Control register */ +#define PPI1_STATUS 0xFFC01304 /* PPI1 Status register */ +#define PPI1_COUNT 0xFFC01308 /* PPI1 Transfer Count register */ +#define PPI1_DELAY 0xFFC0130C /* PPI1 Delay Count register */ +#define PPI1_FRAME 0xFFC01310 /* PPI1 Frame Length register */ + 
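
[Editorial note, not part of the patch] The register map above only pins down MMR addresses; the accessor half of this commit (cdefBF561.h, earlier in the patch) is what turns each address into a width-correct MMIO access through bfin_read16()/bfin_write16() and their 32-bit counterparts. As a rough, hypothetical sketch of how the two halves are meant to combine, the snippet below pulses programmable flag PF5 on port FIO0 using only addresses defined in this file plus those generic accessors. The PF5 bit position, the 16-bit register width, and the <asm/blackfin.h> include are illustrative assumptions, not something this patch defines.

#include <linux/types.h>
#include <asm/blackfin.h>	/* assumed wrapper pulling in def/cdefBF561.h */

static void fio0_pulse_pf5(void)
{
	u16 mask = 1 << 5;	/* PF5: bit assignment assumed for illustration */

	/* Configure PF5 as an output and disable its input buffer. */
	bfin_write16(FIO0_DIR, bfin_read16(FIO0_DIR) | mask);
	bfin_write16(FIO0_INEN, bfin_read16(FIO0_INEN) & ~mask);

	/* The dedicated set/clear registers avoid a read-modify-write. */
	bfin_write16(FIO0_FLAG_S, mask);	/* drive PF5 high */
	bfin_write16(FIO0_FLAG_C, mask);	/* drive PF5 low */
}
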
+/*DMA traffic control registers */ +#define DMA1_TC_PER 0xFFC01B0C /* Traffic control periods */ +#define DMA1_TC_CNT 0xFFC01B10 /* Traffic control current counts */ +#define DMA2_TC_PER 0xFFC00B0C /* Traffic control periods */ +#define DMA2_TC_CNT 0xFFC00B10 /* Traffic control current counts */ + +/* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */ +#define DMA1_0_CONFIG 0xFFC01C08 /* DMA1 Channel 0 Configuration register */ +#define DMA1_0_NEXT_DESC_PTR 0xFFC01C00 /* DMA1 Channel 0 Next Descripter Ptr Reg */ +#define DMA1_0_START_ADDR 0xFFC01C04 /* DMA1 Channel 0 Start Address */ +#define DMA1_0_X_COUNT 0xFFC01C10 /* DMA1 Channel 0 Inner Loop Count */ +#define DMA1_0_Y_COUNT 0xFFC01C18 /* DMA1 Channel 0 Outer Loop Count */ +#define DMA1_0_X_MODIFY 0xFFC01C14 /* DMA1 Channel 0 Inner Loop Addr Increment */ +#define DMA1_0_Y_MODIFY 0xFFC01C1C /* DMA1 Channel 0 Outer Loop Addr Increment */ +#define DMA1_0_CURR_DESC_PTR 0xFFC01C20 /* DMA1 Channel 0 Current Descriptor Pointer */ +#define DMA1_0_CURR_ADDR 0xFFC01C24 /* DMA1 Channel 0 Current Address Pointer */ +#define DMA1_0_CURR_X_COUNT 0xFFC01C30 /* DMA1 Channel 0 Current Inner Loop Count */ +#define DMA1_0_CURR_Y_COUNT 0xFFC01C38 /* DMA1 Channel 0 Current Outer Loop Count */ +#define DMA1_0_IRQ_STATUS 0xFFC01C28 /* DMA1 Channel 0 Interrupt/Status Register */ +#define DMA1_0_PERIPHERAL_MAP 0xFFC01C2C /* DMA1 Channel 0 Peripheral Map Register */ + +#define DMA1_1_CONFIG 0xFFC01C48 /* DMA1 Channel 1 Configuration register */ +#define DMA1_1_NEXT_DESC_PTR 0xFFC01C40 /* DMA1 Channel 1 Next Descripter Ptr Reg */ +#define DMA1_1_START_ADDR 0xFFC01C44 /* DMA1 Channel 1 Start Address */ +#define DMA1_1_X_COUNT 0xFFC01C50 /* DMA1 Channel 1 Inner Loop Count */ +#define DMA1_1_Y_COUNT 0xFFC01C58 /* DMA1 Channel 1 Outer Loop Count */ +#define DMA1_1_X_MODIFY 0xFFC01C54 /* DMA1 Channel 1 Inner Loop Addr Increment */ +#define DMA1_1_Y_MODIFY 0xFFC01C5C /* DMA1 Channel 1 Outer Loop Addr Increment */ +#define DMA1_1_CURR_DESC_PTR 0xFFC01C60 /* DMA1 Channel 1 Current Descriptor Pointer */ +#define DMA1_1_CURR_ADDR 0xFFC01C64 /* DMA1 Channel 1 Current Address Pointer */ +#define DMA1_1_CURR_X_COUNT 0xFFC01C70 /* DMA1 Channel 1 Current Inner Loop Count */ +#define DMA1_1_CURR_Y_COUNT 0xFFC01C78 /* DMA1 Channel 1 Current Outer Loop Count */ +#define DMA1_1_IRQ_STATUS 0xFFC01C68 /* DMA1 Channel 1 Interrupt/Status Register */ +#define DMA1_1_PERIPHERAL_MAP 0xFFC01C6C /* DMA1 Channel 1 Peripheral Map Register */ + +#define DMA1_2_CONFIG 0xFFC01C88 /* DMA1 Channel 2 Configuration register */ +#define DMA1_2_NEXT_DESC_PTR 0xFFC01C80 /* DMA1 Channel 2 Next Descripter Ptr Reg */ +#define DMA1_2_START_ADDR 0xFFC01C84 /* DMA1 Channel 2 Start Address */ +#define DMA1_2_X_COUNT 0xFFC01C90 /* DMA1 Channel 2 Inner Loop Count */ +#define DMA1_2_Y_COUNT 0xFFC01C98 /* DMA1 Channel 2 Outer Loop Count */ +#define DMA1_2_X_MODIFY 0xFFC01C94 /* DMA1 Channel 2 Inner Loop Addr Increment */ +#define DMA1_2_Y_MODIFY 0xFFC01C9C /* DMA1 Channel 2 Outer Loop Addr Increment */ +#define DMA1_2_CURR_DESC_PTR 0xFFC01CA0 /* DMA1 Channel 2 Current Descriptor Pointer */ +#define DMA1_2_CURR_ADDR 0xFFC01CA4 /* DMA1 Channel 2 Current Address Pointer */ +#define DMA1_2_CURR_X_COUNT 0xFFC01CB0 /* DMA1 Channel 2 Current Inner Loop Count */ +#define DMA1_2_CURR_Y_COUNT 0xFFC01CB8 /* DMA1 Channel 2 Current Outer Loop Count */ +#define DMA1_2_IRQ_STATUS 0xFFC01CA8 /* DMA1 Channel 2 Interrupt/Status Register */ +#define DMA1_2_PERIPHERAL_MAP 0xFFC01CAC /* DMA1 Channel 2 Peripheral Map Register */ 
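
[Editorial note, not part of the patch] Before the remaining channels scroll past, note the regularity in the DMA1 (and, further below, DMA2) maps: each channel occupies a 0x40-byte block with an identical internal layout, NEXT_DESC_PTR at +0x00, START_ADDR at +0x04, CONFIG at +0x08, X_COUNT at +0x10, X_MODIFY at +0x14, Y_COUNT at +0x18, Y_MODIFY at +0x1C, CURR_DESC_PTR at +0x20, CURR_ADDR at +0x24, IRQ_STATUS at +0x28, PERIPHERAL_MAP at +0x2C, CURR_X_COUNT at +0x30 and CURR_Y_COUNT at +0x38. The patch itself only defines flat per-channel macros; the hypothetical helpers below (the helper names and the stride constant are invented for illustration, the accessors and register widths follow the cdefBF561.h convention shown earlier, and the same <linux/types.h>/<asm/blackfin.h> assumption as the previous sketch applies) merely show how that regularity could be exploited.

#define BF561_DMA1_CH_SPACING	0x40	/* e.g. DMA1_1_CONFIG - DMA1_0_CONFIG */

/* Read the interrupt/status register of DMA1 channel 'ch' (sketch only). */
static inline u16 bf561_dma1_irq_status(int ch)
{
	return bfin_read16(DMA1_0_IRQ_STATUS + ch * BF561_DMA1_CH_SPACING);
}

/* Program the start address of DMA1 channel 'ch' (sketch only). */
static inline void bf561_dma1_set_start_addr(int ch, u32 addr)
{
	bfin_write32(DMA1_0_START_ADDR + ch * BF561_DMA1_CH_SPACING, addr);
}
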
+ +#define DMA1_3_CONFIG 0xFFC01CC8 /* DMA1 Channel 3 Configuration register */ +#define DMA1_3_NEXT_DESC_PTR 0xFFC01CC0 /* DMA1 Channel 3 Next Descripter Ptr Reg */ +#define DMA1_3_START_ADDR 0xFFC01CC4 /* DMA1 Channel 3 Start Address */ +#define DMA1_3_X_COUNT 0xFFC01CD0 /* DMA1 Channel 3 Inner Loop Count */ +#define DMA1_3_Y_COUNT 0xFFC01CD8 /* DMA1 Channel 3 Outer Loop Count */ +#define DMA1_3_X_MODIFY 0xFFC01CD4 /* DMA1 Channel 3 Inner Loop Addr Increment */ +#define DMA1_3_Y_MODIFY 0xFFC01CDC /* DMA1 Channel 3 Outer Loop Addr Increment */ +#define DMA1_3_CURR_DESC_PTR 0xFFC01CE0 /* DMA1 Channel 3 Current Descriptor Pointer */ +#define DMA1_3_CURR_ADDR 0xFFC01CE4 /* DMA1 Channel 3 Current Address Pointer */ +#define DMA1_3_CURR_X_COUNT 0xFFC01CF0 /* DMA1 Channel 3 Current Inner Loop Count */ +#define DMA1_3_CURR_Y_COUNT 0xFFC01CF8 /* DMA1 Channel 3 Current Outer Loop Count */ +#define DMA1_3_IRQ_STATUS 0xFFC01CE8 /* DMA1 Channel 3 Interrupt/Status Register */ +#define DMA1_3_PERIPHERAL_MAP 0xFFC01CEC /* DMA1 Channel 3 Peripheral Map Register */ + +#define DMA1_4_CONFIG 0xFFC01D08 /* DMA1 Channel 4 Configuration register */ +#define DMA1_4_NEXT_DESC_PTR 0xFFC01D00 /* DMA1 Channel 4 Next Descripter Ptr Reg */ +#define DMA1_4_START_ADDR 0xFFC01D04 /* DMA1 Channel 4 Start Address */ +#define DMA1_4_X_COUNT 0xFFC01D10 /* DMA1 Channel 4 Inner Loop Count */ +#define DMA1_4_Y_COUNT 0xFFC01D18 /* DMA1 Channel 4 Outer Loop Count */ +#define DMA1_4_X_MODIFY 0xFFC01D14 /* DMA1 Channel 4 Inner Loop Addr Increment */ +#define DMA1_4_Y_MODIFY 0xFFC01D1C /* DMA1 Channel 4 Outer Loop Addr Increment */ +#define DMA1_4_CURR_DESC_PTR 0xFFC01D20 /* DMA1 Channel 4 Current Descriptor Pointer */ +#define DMA1_4_CURR_ADDR 0xFFC01D24 /* DMA1 Channel 4 Current Address Pointer */ +#define DMA1_4_CURR_X_COUNT 0xFFC01D30 /* DMA1 Channel 4 Current Inner Loop Count */ +#define DMA1_4_CURR_Y_COUNT 0xFFC01D38 /* DMA1 Channel 4 Current Outer Loop Count */ +#define DMA1_4_IRQ_STATUS 0xFFC01D28 /* DMA1 Channel 4 Interrupt/Status Register */ +#define DMA1_4_PERIPHERAL_MAP 0xFFC01D2C /* DMA1 Channel 4 Peripheral Map Register */ + +#define DMA1_5_CONFIG 0xFFC01D48 /* DMA1 Channel 5 Configuration register */ +#define DMA1_5_NEXT_DESC_PTR 0xFFC01D40 /* DMA1 Channel 5 Next Descripter Ptr Reg */ +#define DMA1_5_START_ADDR 0xFFC01D44 /* DMA1 Channel 5 Start Address */ +#define DMA1_5_X_COUNT 0xFFC01D50 /* DMA1 Channel 5 Inner Loop Count */ +#define DMA1_5_Y_COUNT 0xFFC01D58 /* DMA1 Channel 5 Outer Loop Count */ +#define DMA1_5_X_MODIFY 0xFFC01D54 /* DMA1 Channel 5 Inner Loop Addr Increment */ +#define DMA1_5_Y_MODIFY 0xFFC01D5C /* DMA1 Channel 5 Outer Loop Addr Increment */ +#define DMA1_5_CURR_DESC_PTR 0xFFC01D60 /* DMA1 Channel 5 Current Descriptor Pointer */ +#define DMA1_5_CURR_ADDR 0xFFC01D64 /* DMA1 Channel 5 Current Address Pointer */ +#define DMA1_5_CURR_X_COUNT 0xFFC01D70 /* DMA1 Channel 5 Current Inner Loop Count */ +#define DMA1_5_CURR_Y_COUNT 0xFFC01D78 /* DMA1 Channel 5 Current Outer Loop Count */ +#define DMA1_5_IRQ_STATUS 0xFFC01D68 /* DMA1 Channel 5 Interrupt/Status Register */ +#define DMA1_5_PERIPHERAL_MAP 0xFFC01D6C /* DMA1 Channel 5 Peripheral Map Register */ + +#define DMA1_6_CONFIG 0xFFC01D88 /* DMA1 Channel 6 Configuration register */ +#define DMA1_6_NEXT_DESC_PTR 0xFFC01D80 /* DMA1 Channel 6 Next Descripter Ptr Reg */ +#define DMA1_6_START_ADDR 0xFFC01D84 /* DMA1 Channel 6 Start Address */ +#define DMA1_6_X_COUNT 0xFFC01D90 /* DMA1 Channel 6 Inner Loop Count */ +#define DMA1_6_Y_COUNT 0xFFC01D98 /* DMA1 
Channel 6 Outer Loop Count */ +#define DMA1_6_X_MODIFY 0xFFC01D94 /* DMA1 Channel 6 Inner Loop Addr Increment */ +#define DMA1_6_Y_MODIFY 0xFFC01D9C /* DMA1 Channel 6 Outer Loop Addr Increment */ +#define DMA1_6_CURR_DESC_PTR 0xFFC01DA0 /* DMA1 Channel 6 Current Descriptor Pointer */ +#define DMA1_6_CURR_ADDR 0xFFC01DA4 /* DMA1 Channel 6 Current Address Pointer */ +#define DMA1_6_CURR_X_COUNT 0xFFC01DB0 /* DMA1 Channel 6 Current Inner Loop Count */ +#define DMA1_6_CURR_Y_COUNT 0xFFC01DB8 /* DMA1 Channel 6 Current Outer Loop Count */ +#define DMA1_6_IRQ_STATUS 0xFFC01DA8 /* DMA1 Channel 6 Interrupt/Status Register */ +#define DMA1_6_PERIPHERAL_MAP 0xFFC01DAC /* DMA1 Channel 6 Peripheral Map Register */ + +#define DMA1_7_CONFIG 0xFFC01DC8 /* DMA1 Channel 7 Configuration register */ +#define DMA1_7_NEXT_DESC_PTR 0xFFC01DC0 /* DMA1 Channel 7 Next Descripter Ptr Reg */ +#define DMA1_7_START_ADDR 0xFFC01DC4 /* DMA1 Channel 7 Start Address */ +#define DMA1_7_X_COUNT 0xFFC01DD0 /* DMA1 Channel 7 Inner Loop Count */ +#define DMA1_7_Y_COUNT 0xFFC01DD8 /* DMA1 Channel 7 Outer Loop Count */ +#define DMA1_7_X_MODIFY 0xFFC01DD4 /* DMA1 Channel 7 Inner Loop Addr Increment */ +#define DMA1_7_Y_MODIFY 0xFFC01DDC /* DMA1 Channel 7 Outer Loop Addr Increment */ +#define DMA1_7_CURR_DESC_PTR 0xFFC01DE0 /* DMA1 Channel 7 Current Descriptor Pointer */ +#define DMA1_7_CURR_ADDR 0xFFC01DE4 /* DMA1 Channel 7 Current Address Pointer */ +#define DMA1_7_CURR_X_COUNT 0xFFC01DF0 /* DMA1 Channel 7 Current Inner Loop Count */ +#define DMA1_7_CURR_Y_COUNT 0xFFC01DF8 /* DMA1 Channel 7 Current Outer Loop Count */ +#define DMA1_7_IRQ_STATUS 0xFFC01DE8 /* DMA1 Channel 7 Interrupt/Status Register */ +#define DMA1_7_PERIPHERAL_MAP 0xFFC01DEC /* DMA1 Channel 7 Peripheral Map Register */ + +#define DMA1_8_CONFIG 0xFFC01E08 /* DMA1 Channel 8 Configuration register */ +#define DMA1_8_NEXT_DESC_PTR 0xFFC01E00 /* DMA1 Channel 8 Next Descripter Ptr Reg */ +#define DMA1_8_START_ADDR 0xFFC01E04 /* DMA1 Channel 8 Start Address */ +#define DMA1_8_X_COUNT 0xFFC01E10 /* DMA1 Channel 8 Inner Loop Count */ +#define DMA1_8_Y_COUNT 0xFFC01E18 /* DMA1 Channel 8 Outer Loop Count */ +#define DMA1_8_X_MODIFY 0xFFC01E14 /* DMA1 Channel 8 Inner Loop Addr Increment */ +#define DMA1_8_Y_MODIFY 0xFFC01E1C /* DMA1 Channel 8 Outer Loop Addr Increment */ +#define DMA1_8_CURR_DESC_PTR 0xFFC01E20 /* DMA1 Channel 8 Current Descriptor Pointer */ +#define DMA1_8_CURR_ADDR 0xFFC01E24 /* DMA1 Channel 8 Current Address Pointer */ +#define DMA1_8_CURR_X_COUNT 0xFFC01E30 /* DMA1 Channel 8 Current Inner Loop Count */ +#define DMA1_8_CURR_Y_COUNT 0xFFC01E38 /* DMA1 Channel 8 Current Outer Loop Count */ +#define DMA1_8_IRQ_STATUS 0xFFC01E28 /* DMA1 Channel 8 Interrupt/Status Register */ +#define DMA1_8_PERIPHERAL_MAP 0xFFC01E2C /* DMA1 Channel 8 Peripheral Map Register */ + +#define DMA1_9_CONFIG 0xFFC01E48 /* DMA1 Channel 9 Configuration register */ +#define DMA1_9_NEXT_DESC_PTR 0xFFC01E40 /* DMA1 Channel 9 Next Descripter Ptr Reg */ +#define DMA1_9_START_ADDR 0xFFC01E44 /* DMA1 Channel 9 Start Address */ +#define DMA1_9_X_COUNT 0xFFC01E50 /* DMA1 Channel 9 Inner Loop Count */ +#define DMA1_9_Y_COUNT 0xFFC01E58 /* DMA1 Channel 9 Outer Loop Count */ +#define DMA1_9_X_MODIFY 0xFFC01E54 /* DMA1 Channel 9 Inner Loop Addr Increment */ +#define DMA1_9_Y_MODIFY 0xFFC01E5C /* DMA1 Channel 9 Outer Loop Addr Increment */ +#define DMA1_9_CURR_DESC_PTR 0xFFC01E60 /* DMA1 Channel 9 Current Descriptor Pointer */ +#define DMA1_9_CURR_ADDR 0xFFC01E64 /* DMA1 Channel 9 Current Address 
Pointer */ +#define DMA1_9_CURR_X_COUNT 0xFFC01E70 /* DMA1 Channel 9 Current Inner Loop Count */ +#define DMA1_9_CURR_Y_COUNT 0xFFC01E78 /* DMA1 Channel 9 Current Outer Loop Count */ +#define DMA1_9_IRQ_STATUS 0xFFC01E68 /* DMA1 Channel 9 Interrupt/Status Register */ +#define DMA1_9_PERIPHERAL_MAP 0xFFC01E6C /* DMA1 Channel 9 Peripheral Map Register */ + +#define DMA1_10_CONFIG 0xFFC01E88 /* DMA1 Channel 10 Configuration register */ +#define DMA1_10_NEXT_DESC_PTR 0xFFC01E80 /* DMA1 Channel 10 Next Descripter Ptr Reg */ +#define DMA1_10_START_ADDR 0xFFC01E84 /* DMA1 Channel 10 Start Address */ +#define DMA1_10_X_COUNT 0xFFC01E90 /* DMA1 Channel 10 Inner Loop Count */ +#define DMA1_10_Y_COUNT 0xFFC01E98 /* DMA1 Channel 10 Outer Loop Count */ +#define DMA1_10_X_MODIFY 0xFFC01E94 /* DMA1 Channel 10 Inner Loop Addr Increment */ +#define DMA1_10_Y_MODIFY 0xFFC01E9C /* DMA1 Channel 10 Outer Loop Addr Increment */ +#define DMA1_10_CURR_DESC_PTR 0xFFC01EA0 /* DMA1 Channel 10 Current Descriptor Pointer */ +#define DMA1_10_CURR_ADDR 0xFFC01EA4 /* DMA1 Channel 10 Current Address Pointer */ +#define DMA1_10_CURR_X_COUNT 0xFFC01EB0 /* DMA1 Channel 10 Current Inner Loop Count */ +#define DMA1_10_CURR_Y_COUNT 0xFFC01EB8 /* DMA1 Channel 10 Current Outer Loop Count */ +#define DMA1_10_IRQ_STATUS 0xFFC01EA8 /* DMA1 Channel 10 Interrupt/Status Register */ +#define DMA1_10_PERIPHERAL_MAP 0xFFC01EAC /* DMA1 Channel 10 Peripheral Map Register */ + +#define DMA1_11_CONFIG 0xFFC01EC8 /* DMA1 Channel 11 Configuration register */ +#define DMA1_11_NEXT_DESC_PTR 0xFFC01EC0 /* DMA1 Channel 11 Next Descripter Ptr Reg */ +#define DMA1_11_START_ADDR 0xFFC01EC4 /* DMA1 Channel 11 Start Address */ +#define DMA1_11_X_COUNT 0xFFC01ED0 /* DMA1 Channel 11 Inner Loop Count */ +#define DMA1_11_Y_COUNT 0xFFC01ED8 /* DMA1 Channel 11 Outer Loop Count */ +#define DMA1_11_X_MODIFY 0xFFC01ED4 /* DMA1 Channel 11 Inner Loop Addr Increment */ +#define DMA1_11_Y_MODIFY 0xFFC01EDC /* DMA1 Channel 11 Outer Loop Addr Increment */ +#define DMA1_11_CURR_DESC_PTR 0xFFC01EE0 /* DMA1 Channel 11 Current Descriptor Pointer */ +#define DMA1_11_CURR_ADDR 0xFFC01EE4 /* DMA1 Channel 11 Current Address Pointer */ +#define DMA1_11_CURR_X_COUNT 0xFFC01EF0 /* DMA1 Channel 11 Current Inner Loop Count */ +#define DMA1_11_CURR_Y_COUNT 0xFFC01EF8 /* DMA1 Channel 11 Current Outer Loop Count */ +#define DMA1_11_IRQ_STATUS 0xFFC01EE8 /* DMA1 Channel 11 Interrupt/Status Register */ +#define DMA1_11_PERIPHERAL_MAP 0xFFC01EEC /* DMA1 Channel 11 Peripheral Map Register */ + +/* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */ +#define MDMA1_D0_CONFIG 0xFFC01F08 /*MemDMA1 Stream 0 Destination Configuration */ +#define MDMA1_D0_NEXT_DESC_PTR 0xFFC01F00 /*MemDMA1 Stream 0 Destination Next Descriptor Ptr Reg */ +#define MDMA1_D0_START_ADDR 0xFFC01F04 /*MemDMA1 Stream 0 Destination Start Address */ +#define MDMA1_D0_X_COUNT 0xFFC01F10 /*MemDMA1 Stream 0 Destination Inner-Loop Count */ +#define MDMA1_D0_Y_COUNT 0xFFC01F18 /*MemDMA1 Stream 0 Destination Outer-Loop Count */ +#define MDMA1_D0_X_MODIFY 0xFFC01F14 /*MemDMA1 Stream 0 Dest Inner-Loop Address-Increment */ +#define MDMA1_D0_Y_MODIFY 0xFFC01F1C /*MemDMA1 Stream 0 Dest Outer-Loop Address-Increment */ +#define MDMA1_D0_CURR_DESC_PTR 0xFFC01F20 /*MemDMA1 Stream 0 Dest Current Descriptor Ptr reg */ +#define MDMA1_D0_CURR_ADDR 0xFFC01F24 /*MemDMA1 Stream 0 Destination Current Address */ +#define MDMA1_D0_CURR_X_COUNT 0xFFC01F30 /*MemDMA1 Stream 0 Dest Current Inner-Loop Count */ +#define MDMA1_D0_CURR_Y_COUNT 
0xFFC01F38 /*MemDMA1 Stream 0 Dest Current Outer-Loop Count */ +#define MDMA1_D0_IRQ_STATUS 0xFFC01F28 /*MemDMA1 Stream 0 Destination Interrupt/Status */ +#define MDMA1_D0_PERIPHERAL_MAP 0xFFC01F2C /*MemDMA1 Stream 0 Destination Peripheral Map */ + +#define MDMA1_S0_CONFIG 0xFFC01F48 /*MemDMA1 Stream 0 Source Configuration */ +#define MDMA1_S0_NEXT_DESC_PTR 0xFFC01F40 /*MemDMA1 Stream 0 Source Next Descriptor Ptr Reg */ +#define MDMA1_S0_START_ADDR 0xFFC01F44 /*MemDMA1 Stream 0 Source Start Address */ +#define MDMA1_S0_X_COUNT 0xFFC01F50 /*MemDMA1 Stream 0 Source Inner-Loop Count */ +#define MDMA1_S0_Y_COUNT 0xFFC01F58 /*MemDMA1 Stream 0 Source Outer-Loop Count */ +#define MDMA1_S0_X_MODIFY 0xFFC01F54 /*MemDMA1 Stream 0 Source Inner-Loop Address-Increment */ +#define MDMA1_S0_Y_MODIFY 0xFFC01F5C /*MemDMA1 Stream 0 Source Outer-Loop Address-Increment */ +#define MDMA1_S0_CURR_DESC_PTR 0xFFC01F60 /*MemDMA1 Stream 0 Source Current Descriptor Ptr reg */ +#define MDMA1_S0_CURR_ADDR 0xFFC01F64 /*MemDMA1 Stream 0 Source Current Address */ +#define MDMA1_S0_CURR_X_COUNT 0xFFC01F70 /*MemDMA1 Stream 0 Source Current Inner-Loop Count */ +#define MDMA1_S0_CURR_Y_COUNT 0xFFC01F78 /*MemDMA1 Stream 0 Source Current Outer-Loop Count */ +#define MDMA1_S0_IRQ_STATUS 0xFFC01F68 /*MemDMA1 Stream 0 Source Interrupt/Status */ +#define MDMA1_S0_PERIPHERAL_MAP 0xFFC01F6C /*MemDMA1 Stream 0 Source Peripheral Map */ + +#define MDMA1_D1_CONFIG 0xFFC01F88 /*MemDMA1 Stream 1 Destination Configuration */ +#define MDMA1_D1_NEXT_DESC_PTR 0xFFC01F80 /*MemDMA1 Stream 1 Destination Next Descriptor Ptr Reg */ +#define MDMA1_D1_START_ADDR 0xFFC01F84 /*MemDMA1 Stream 1 Destination Start Address */ +#define MDMA1_D1_X_COUNT 0xFFC01F90 /*MemDMA1 Stream 1 Destination Inner-Loop Count */ +#define MDMA1_D1_Y_COUNT 0xFFC01F98 /*MemDMA1 Stream 1 Destination Outer-Loop Count */ +#define MDMA1_D1_X_MODIFY 0xFFC01F94 /*MemDMA1 Stream 1 Dest Inner-Loop Address-Increment */ +#define MDMA1_D1_Y_MODIFY 0xFFC01F9C /*MemDMA1 Stream 1 Dest Outer-Loop Address-Increment */ +#define MDMA1_D1_CURR_DESC_PTR 0xFFC01FA0 /*MemDMA1 Stream 1 Dest Current Descriptor Ptr reg */ +#define MDMA1_D1_CURR_ADDR 0xFFC01FA4 /*MemDMA1 Stream 1 Dest Current Address */ +#define MDMA1_D1_CURR_X_COUNT 0xFFC01FB0 /*MemDMA1 Stream 1 Dest Current Inner-Loop Count */ +#define MDMA1_D1_CURR_Y_COUNT 0xFFC01FB8 /*MemDMA1 Stream 1 Dest Current Outer-Loop Count */ +#define MDMA1_D1_IRQ_STATUS 0xFFC01FA8 /*MemDMA1 Stream 1 Dest Interrupt/Status */ +#define MDMA1_D1_PERIPHERAL_MAP 0xFFC01FAC /*MemDMA1 Stream 1 Dest Peripheral Map */ + +#define MDMA1_S1_CONFIG 0xFFC01FC8 /*MemDMA1 Stream 1 Source Configuration */ +#define MDMA1_S1_NEXT_DESC_PTR 0xFFC01FC0 /*MemDMA1 Stream 1 Source Next Descriptor Ptr Reg */ +#define MDMA1_S1_START_ADDR 0xFFC01FC4 /*MemDMA1 Stream 1 Source Start Address */ +#define MDMA1_S1_X_COUNT 0xFFC01FD0 /*MemDMA1 Stream 1 Source Inner-Loop Count */ +#define MDMA1_S1_Y_COUNT 0xFFC01FD8 /*MemDMA1 Stream 1 Source Outer-Loop Count */ +#define MDMA1_S1_X_MODIFY 0xFFC01FD4 /*MemDMA1 Stream 1 Source Inner-Loop Address-Increment */ +#define MDMA1_S1_Y_MODIFY 0xFFC01FDC /*MemDMA1 Stream 1 Source Outer-Loop Address-Increment */ +#define MDMA1_S1_CURR_DESC_PTR 0xFFC01FE0 /*MemDMA1 Stream 1 Source Current Descriptor Ptr reg */ +#define MDMA1_S1_CURR_ADDR 0xFFC01FE4 /*MemDMA1 Stream 1 Source Current Address */ +#define MDMA1_S1_CURR_X_COUNT 0xFFC01FF0 /*MemDMA1 Stream 1 Source Current Inner-Loop Count */ +#define MDMA1_S1_CURR_Y_COUNT 0xFFC01FF8 /*MemDMA1 Stream 1 Source 
Current Outer-Loop Count */ +#define MDMA1_S1_IRQ_STATUS 0xFFC01FE8 /*MemDMA1 Stream 1 Source Interrupt/Status */ +#define MDMA1_S1_PERIPHERAL_MAP 0xFFC01FEC /*MemDMA1 Stream 1 Source Peripheral Map */ + +/* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */ +#define DMA2_0_CONFIG 0xFFC00C08 /* DMA2 Channel 0 Configuration register */ +#define DMA2_0_NEXT_DESC_PTR 0xFFC00C00 /* DMA2 Channel 0 Next Descripter Ptr Reg */ +#define DMA2_0_START_ADDR 0xFFC00C04 /* DMA2 Channel 0 Start Address */ +#define DMA2_0_X_COUNT 0xFFC00C10 /* DMA2 Channel 0 Inner Loop Count */ +#define DMA2_0_Y_COUNT 0xFFC00C18 /* DMA2 Channel 0 Outer Loop Count */ +#define DMA2_0_X_MODIFY 0xFFC00C14 /* DMA2 Channel 0 Inner Loop Addr Increment */ +#define DMA2_0_Y_MODIFY 0xFFC00C1C /* DMA2 Channel 0 Outer Loop Addr Increment */ +#define DMA2_0_CURR_DESC_PTR 0xFFC00C20 /* DMA2 Channel 0 Current Descriptor Pointer */ +#define DMA2_0_CURR_ADDR 0xFFC00C24 /* DMA2 Channel 0 Current Address Pointer */ +#define DMA2_0_CURR_X_COUNT 0xFFC00C30 /* DMA2 Channel 0 Current Inner Loop Count */ +#define DMA2_0_CURR_Y_COUNT 0xFFC00C38 /* DMA2 Channel 0 Current Outer Loop Count */ +#define DMA2_0_IRQ_STATUS 0xFFC00C28 /* DMA2 Channel 0 Interrupt/Status Register */ +#define DMA2_0_PERIPHERAL_MAP 0xFFC00C2C /* DMA2 Channel 0 Peripheral Map Register */ + +#define DMA2_1_CONFIG 0xFFC00C48 /* DMA2 Channel 1 Configuration register */ +#define DMA2_1_NEXT_DESC_PTR 0xFFC00C40 /* DMA2 Channel 1 Next Descripter Ptr Reg */ +#define DMA2_1_START_ADDR 0xFFC00C44 /* DMA2 Channel 1 Start Address */ +#define DMA2_1_X_COUNT 0xFFC00C50 /* DMA2 Channel 1 Inner Loop Count */ +#define DMA2_1_Y_COUNT 0xFFC00C58 /* DMA2 Channel 1 Outer Loop Count */ +#define DMA2_1_X_MODIFY 0xFFC00C54 /* DMA2 Channel 1 Inner Loop Addr Increment */ +#define DMA2_1_Y_MODIFY 0xFFC00C5C /* DMA2 Channel 1 Outer Loop Addr Increment */ +#define DMA2_1_CURR_DESC_PTR 0xFFC00C60 /* DMA2 Channel 1 Current Descriptor Pointer */ +#define DMA2_1_CURR_ADDR 0xFFC00C64 /* DMA2 Channel 1 Current Address Pointer */ +#define DMA2_1_CURR_X_COUNT 0xFFC00C70 /* DMA2 Channel 1 Current Inner Loop Count */ +#define DMA2_1_CURR_Y_COUNT 0xFFC00C78 /* DMA2 Channel 1 Current Outer Loop Count */ +#define DMA2_1_IRQ_STATUS 0xFFC00C68 /* DMA2 Channel 1 Interrupt/Status Register */ +#define DMA2_1_PERIPHERAL_MAP 0xFFC00C6C /* DMA2 Channel 1 Peripheral Map Register */ + +#define DMA2_2_CONFIG 0xFFC00C88 /* DMA2 Channel 2 Configuration register */ +#define DMA2_2_NEXT_DESC_PTR 0xFFC00C80 /* DMA2 Channel 2 Next Descripter Ptr Reg */ +#define DMA2_2_START_ADDR 0xFFC00C84 /* DMA2 Channel 2 Start Address */ +#define DMA2_2_X_COUNT 0xFFC00C90 /* DMA2 Channel 2 Inner Loop Count */ +#define DMA2_2_Y_COUNT 0xFFC00C98 /* DMA2 Channel 2 Outer Loop Count */ +#define DMA2_2_X_MODIFY 0xFFC00C94 /* DMA2 Channel 2 Inner Loop Addr Increment */ +#define DMA2_2_Y_MODIFY 0xFFC00C9C /* DMA2 Channel 2 Outer Loop Addr Increment */ +#define DMA2_2_CURR_DESC_PTR 0xFFC00CA0 /* DMA2 Channel 2 Current Descriptor Pointer */ +#define DMA2_2_CURR_ADDR 0xFFC00CA4 /* DMA2 Channel 2 Current Address Pointer */ +#define DMA2_2_CURR_X_COUNT 0xFFC00CB0 /* DMA2 Channel 2 Current Inner Loop Count */ +#define DMA2_2_CURR_Y_COUNT 0xFFC00CB8 /* DMA2 Channel 2 Current Outer Loop Count */ +#define DMA2_2_IRQ_STATUS 0xFFC00CA8 /* DMA2 Channel 2 Interrupt/Status Register */ +#define DMA2_2_PERIPHERAL_MAP 0xFFC00CAC /* DMA2 Channel 2 Peripheral Map Register */ + +#define DMA2_3_CONFIG 0xFFC00CC8 /* DMA2 Channel 3 Configuration register */ +#define 
DMA2_3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA2 Channel 3 Next Descripter Ptr Reg */ +#define DMA2_3_START_ADDR 0xFFC00CC4 /* DMA2 Channel 3 Start Address */ +#define DMA2_3_X_COUNT 0xFFC00CD0 /* DMA2 Channel 3 Inner Loop Count */ +#define DMA2_3_Y_COUNT 0xFFC00CD8 /* DMA2 Channel 3 Outer Loop Count */ +#define DMA2_3_X_MODIFY 0xFFC00CD4 /* DMA2 Channel 3 Inner Loop Addr Increment */ +#define DMA2_3_Y_MODIFY 0xFFC00CDC /* DMA2 Channel 3 Outer Loop Addr Increment */ +#define DMA2_3_CURR_DESC_PTR 0xFFC00CE0 /* DMA2 Channel 3 Current Descriptor Pointer */ +#define DMA2_3_CURR_ADDR 0xFFC00CE4 /* DMA2 Channel 3 Current Address Pointer */ +#define DMA2_3_CURR_X_COUNT 0xFFC00CF0 /* DMA2 Channel 3 Current Inner Loop Count */ +#define DMA2_3_CURR_Y_COUNT 0xFFC00CF8 /* DMA2 Channel 3 Current Outer Loop Count */ +#define DMA2_3_IRQ_STATUS 0xFFC00CE8 /* DMA2 Channel 3 Interrupt/Status Register */ +#define DMA2_3_PERIPHERAL_MAP 0xFFC00CEC /* DMA2 Channel 3 Peripheral Map Register */ + +#define DMA2_4_CONFIG 0xFFC00D08 /* DMA2 Channel 4 Configuration register */ +#define DMA2_4_NEXT_DESC_PTR 0xFFC00D00 /* DMA2 Channel 4 Next Descripter Ptr Reg */ +#define DMA2_4_START_ADDR 0xFFC00D04 /* DMA2 Channel 4 Start Address */ +#define DMA2_4_X_COUNT 0xFFC00D10 /* DMA2 Channel 4 Inner Loop Count */ +#define DMA2_4_Y_COUNT 0xFFC00D18 /* DMA2 Channel 4 Outer Loop Count */ +#define DMA2_4_X_MODIFY 0xFFC00D14 /* DMA2 Channel 4 Inner Loop Addr Increment */ +#define DMA2_4_Y_MODIFY 0xFFC00D1C /* DMA2 Channel 4 Outer Loop Addr Increment */ +#define DMA2_4_CURR_DESC_PTR 0xFFC00D20 /* DMA2 Channel 4 Current Descriptor Pointer */ +#define DMA2_4_CURR_ADDR 0xFFC00D24 /* DMA2 Channel 4 Current Address Pointer */ +#define DMA2_4_CURR_X_COUNT 0xFFC00D30 /* DMA2 Channel 4 Current Inner Loop Count */ +#define DMA2_4_CURR_Y_COUNT 0xFFC00D38 /* DMA2 Channel 4 Current Outer Loop Count */ +#define DMA2_4_IRQ_STATUS 0xFFC00D28 /* DMA2 Channel 4 Interrupt/Status Register */ +#define DMA2_4_PERIPHERAL_MAP 0xFFC00D2C /* DMA2 Channel 4 Peripheral Map Register */ + +#define DMA2_5_CONFIG 0xFFC00D48 /* DMA2 Channel 5 Configuration register */ +#define DMA2_5_NEXT_DESC_PTR 0xFFC00D40 /* DMA2 Channel 5 Next Descripter Ptr Reg */ +#define DMA2_5_START_ADDR 0xFFC00D44 /* DMA2 Channel 5 Start Address */ +#define DMA2_5_X_COUNT 0xFFC00D50 /* DMA2 Channel 5 Inner Loop Count */ +#define DMA2_5_Y_COUNT 0xFFC00D58 /* DMA2 Channel 5 Outer Loop Count */ +#define DMA2_5_X_MODIFY 0xFFC00D54 /* DMA2 Channel 5 Inner Loop Addr Increment */ +#define DMA2_5_Y_MODIFY 0xFFC00D5C /* DMA2 Channel 5 Outer Loop Addr Increment */ +#define DMA2_5_CURR_DESC_PTR 0xFFC00D60 /* DMA2 Channel 5 Current Descriptor Pointer */ +#define DMA2_5_CURR_ADDR 0xFFC00D64 /* DMA2 Channel 5 Current Address Pointer */ +#define DMA2_5_CURR_X_COUNT 0xFFC00D70 /* DMA2 Channel 5 Current Inner Loop Count */ +#define DMA2_5_CURR_Y_COUNT 0xFFC00D78 /* DMA2 Channel 5 Current Outer Loop Count */ +#define DMA2_5_IRQ_STATUS 0xFFC00D68 /* DMA2 Channel 5 Interrupt/Status Register */ +#define DMA2_5_PERIPHERAL_MAP 0xFFC00D6C /* DMA2 Channel 5 Peripheral Map Register */ + +#define DMA2_6_CONFIG 0xFFC00D88 /* DMA2 Channel 6 Configuration register */ +#define DMA2_6_NEXT_DESC_PTR 0xFFC00D80 /* DMA2 Channel 6 Next Descripter Ptr Reg */ +#define DMA2_6_START_ADDR 0xFFC00D84 /* DMA2 Channel 6 Start Address */ +#define DMA2_6_X_COUNT 0xFFC00D90 /* DMA2 Channel 6 Inner Loop Count */ +#define DMA2_6_Y_COUNT 0xFFC00D98 /* DMA2 Channel 6 Outer Loop Count */ +#define DMA2_6_X_MODIFY 0xFFC00D94 /* DMA2 Channel 6 Inner 
Loop Addr Increment */ +#define DMA2_6_Y_MODIFY 0xFFC00D9C /* DMA2 Channel 6 Outer Loop Addr Increment */ +#define DMA2_6_CURR_DESC_PTR 0xFFC00DA0 /* DMA2 Channel 6 Current Descriptor Pointer */ +#define DMA2_6_CURR_ADDR 0xFFC00DA4 /* DMA2 Channel 6 Current Address Pointer */ +#define DMA2_6_CURR_X_COUNT 0xFFC00DB0 /* DMA2 Channel 6 Current Inner Loop Count */ +#define DMA2_6_CURR_Y_COUNT 0xFFC00DB8 /* DMA2 Channel 6 Current Outer Loop Count */ +#define DMA2_6_IRQ_STATUS 0xFFC00DA8 /* DMA2 Channel 6 Interrupt/Status Register */ +#define DMA2_6_PERIPHERAL_MAP 0xFFC00DAC /* DMA2 Channel 6 Peripheral Map Register */ + +#define DMA2_7_CONFIG 0xFFC00DC8 /* DMA2 Channel 7 Configuration register */ +#define DMA2_7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA2 Channel 7 Next Descripter Ptr Reg */ +#define DMA2_7_START_ADDR 0xFFC00DC4 /* DMA2 Channel 7 Start Address */ +#define DMA2_7_X_COUNT 0xFFC00DD0 /* DMA2 Channel 7 Inner Loop Count */ +#define DMA2_7_Y_COUNT 0xFFC00DD8 /* DMA2 Channel 7 Outer Loop Count */ +#define DMA2_7_X_MODIFY 0xFFC00DD4 /* DMA2 Channel 7 Inner Loop Addr Increment */ +#define DMA2_7_Y_MODIFY 0xFFC00DDC /* DMA2 Channel 7 Outer Loop Addr Increment */ +#define DMA2_7_CURR_DESC_PTR 0xFFC00DE0 /* DMA2 Channel 7 Current Descriptor Pointer */ +#define DMA2_7_CURR_ADDR 0xFFC00DE4 /* DMA2 Channel 7 Current Address Pointer */ +#define DMA2_7_CURR_X_COUNT 0xFFC00DF0 /* DMA2 Channel 7 Current Inner Loop Count */ +#define DMA2_7_CURR_Y_COUNT 0xFFC00DF8 /* DMA2 Channel 7 Current Outer Loop Count */ +#define DMA2_7_IRQ_STATUS 0xFFC00DE8 /* DMA2 Channel 7 Interrupt/Status Register */ +#define DMA2_7_PERIPHERAL_MAP 0xFFC00DEC /* DMA2 Channel 7 Peripheral Map Register */ + +#define DMA2_8_CONFIG 0xFFC00E08 /* DMA2 Channel 8 Configuration register */ +#define DMA2_8_NEXT_DESC_PTR 0xFFC00E00 /* DMA2 Channel 8 Next Descripter Ptr Reg */ +#define DMA2_8_START_ADDR 0xFFC00E04 /* DMA2 Channel 8 Start Address */ +#define DMA2_8_X_COUNT 0xFFC00E10 /* DMA2 Channel 8 Inner Loop Count */ +#define DMA2_8_Y_COUNT 0xFFC00E18 /* DMA2 Channel 8 Outer Loop Count */ +#define DMA2_8_X_MODIFY 0xFFC00E14 /* DMA2 Channel 8 Inner Loop Addr Increment */ +#define DMA2_8_Y_MODIFY 0xFFC00E1C /* DMA2 Channel 8 Outer Loop Addr Increment */ +#define DMA2_8_CURR_DESC_PTR 0xFFC00E20 /* DMA2 Channel 8 Current Descriptor Pointer */ +#define DMA2_8_CURR_ADDR 0xFFC00E24 /* DMA2 Channel 8 Current Address Pointer */ +#define DMA2_8_CURR_X_COUNT 0xFFC00E30 /* DMA2 Channel 8 Current Inner Loop Count */ +#define DMA2_8_CURR_Y_COUNT 0xFFC00E38 /* DMA2 Channel 8 Current Outer Loop Count */ +#define DMA2_8_IRQ_STATUS 0xFFC00E28 /* DMA2 Channel 8 Interrupt/Status Register */ +#define DMA2_8_PERIPHERAL_MAP 0xFFC00E2C /* DMA2 Channel 8 Peripheral Map Register */ + +#define DMA2_9_CONFIG 0xFFC00E48 /* DMA2 Channel 9 Configuration register */ +#define DMA2_9_NEXT_DESC_PTR 0xFFC00E40 /* DMA2 Channel 9 Next Descripter Ptr Reg */ +#define DMA2_9_START_ADDR 0xFFC00E44 /* DMA2 Channel 9 Start Address */ +#define DMA2_9_X_COUNT 0xFFC00E50 /* DMA2 Channel 9 Inner Loop Count */ +#define DMA2_9_Y_COUNT 0xFFC00E58 /* DMA2 Channel 9 Outer Loop Count */ +#define DMA2_9_X_MODIFY 0xFFC00E54 /* DMA2 Channel 9 Inner Loop Addr Increment */ +#define DMA2_9_Y_MODIFY 0xFFC00E5C /* DMA2 Channel 9 Outer Loop Addr Increment */ +#define DMA2_9_CURR_DESC_PTR 0xFFC00E60 /* DMA2 Channel 9 Current Descriptor Pointer */ +#define DMA2_9_CURR_ADDR 0xFFC00E64 /* DMA2 Channel 9 Current Address Pointer */ +#define DMA2_9_CURR_X_COUNT 0xFFC00E70 /* DMA2 Channel 9 Current Inner Loop 
Count */ +#define DMA2_9_CURR_Y_COUNT 0xFFC00E78 /* DMA2 Channel 9 Current Outer Loop Count */ +#define DMA2_9_IRQ_STATUS 0xFFC00E68 /* DMA2 Channel 9 Interrupt/Status Register */ +#define DMA2_9_PERIPHERAL_MAP 0xFFC00E6C /* DMA2 Channel 9 Peripheral Map Register */ + +#define DMA2_10_CONFIG 0xFFC00E88 /* DMA2 Channel 10 Configuration register */ +#define DMA2_10_NEXT_DESC_PTR 0xFFC00E80 /* DMA2 Channel 10 Next Descripter Ptr Reg */ +#define DMA2_10_START_ADDR 0xFFC00E84 /* DMA2 Channel 10 Start Address */ +#define DMA2_10_X_COUNT 0xFFC00E90 /* DMA2 Channel 10 Inner Loop Count */ +#define DMA2_10_Y_COUNT 0xFFC00E98 /* DMA2 Channel 10 Outer Loop Count */ +#define DMA2_10_X_MODIFY 0xFFC00E94 /* DMA2 Channel 10 Inner Loop Addr Increment */ +#define DMA2_10_Y_MODIFY 0xFFC00E9C /* DMA2 Channel 10 Outer Loop Addr Increment */ +#define DMA2_10_CURR_DESC_PTR 0xFFC00EA0 /* DMA2 Channel 10 Current Descriptor Pointer */ +#define DMA2_10_CURR_ADDR 0xFFC00EA4 /* DMA2 Channel 10 Current Address Pointer */ +#define DMA2_10_CURR_X_COUNT 0xFFC00EB0 /* DMA2 Channel 10 Current Inner Loop Count */ +#define DMA2_10_CURR_Y_COUNT 0xFFC00EB8 /* DMA2 Channel 10 Current Outer Loop Count */ +#define DMA2_10_IRQ_STATUS 0xFFC00EA8 /* DMA2 Channel 10 Interrupt/Status Register */ +#define DMA2_10_PERIPHERAL_MAP 0xFFC00EAC /* DMA2 Channel 10 Peripheral Map Register */ + +#define DMA2_11_CONFIG 0xFFC00EC8 /* DMA2 Channel 11 Configuration register */ +#define DMA2_11_NEXT_DESC_PTR 0xFFC00EC0 /* DMA2 Channel 11 Next Descripter Ptr Reg */ +#define DMA2_11_START_ADDR 0xFFC00EC4 /* DMA2 Channel 11 Start Address */ +#define DMA2_11_X_COUNT 0xFFC00ED0 /* DMA2 Channel 11 Inner Loop Count */ +#define DMA2_11_Y_COUNT 0xFFC00ED8 /* DMA2 Channel 11 Outer Loop Count */ +#define DMA2_11_X_MODIFY 0xFFC00ED4 /* DMA2 Channel 11 Inner Loop Addr Increment */ +#define DMA2_11_Y_MODIFY 0xFFC00EDC /* DMA2 Channel 11 Outer Loop Addr Increment */ +#define DMA2_11_CURR_DESC_PTR 0xFFC00EE0 /* DMA2 Channel 11 Current Descriptor Pointer */ +#define DMA2_11_CURR_ADDR 0xFFC00EE4 /* DMA2 Channel 11 Current Address Pointer */ +#define DMA2_11_CURR_X_COUNT 0xFFC00EF0 /* DMA2 Channel 11 Current Inner Loop Count */ +#define DMA2_11_CURR_Y_COUNT 0xFFC00EF8 /* DMA2 Channel 11 Current Outer Loop Count */ +#define DMA2_11_IRQ_STATUS 0xFFC00EE8 /* DMA2 Channel 11 Interrupt/Status Register */ +#define DMA2_11_PERIPHERAL_MAP 0xFFC00EEC /* DMA2 Channel 11 Peripheral Map Register */ + +/* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */ +#define MDMA2_D0_CONFIG 0xFFC00F08 /*MemDMA2 Stream 0 Destination Configuration register */ +#define MDMA2_D0_NEXT_DESC_PTR 0xFFC00F00 /*MemDMA2 Stream 0 Destination Next Descriptor Ptr Reg */ +#define MDMA2_D0_START_ADDR 0xFFC00F04 /*MemDMA2 Stream 0 Destination Start Address */ +#define MDMA2_D0_X_COUNT 0xFFC00F10 /*MemDMA2 Stream 0 Dest Inner-Loop Count register */ +#define MDMA2_D0_Y_COUNT 0xFFC00F18 /*MemDMA2 Stream 0 Dest Outer-Loop Count register */ +#define MDMA2_D0_X_MODIFY 0xFFC00F14 /*MemDMA2 Stream 0 Dest Inner-Loop Address-Increment */ +#define MDMA2_D0_Y_MODIFY 0xFFC00F1C /*MemDMA2 Stream 0 Dest Outer-Loop Address-Increment */ +#define MDMA2_D0_CURR_DESC_PTR 0xFFC00F20 /*MemDMA2 Stream 0 Dest Current Descriptor Ptr reg */ +#define MDMA2_D0_CURR_ADDR 0xFFC00F24 /*MemDMA2 Stream 0 Destination Current Address */ +#define MDMA2_D0_CURR_X_COUNT 0xFFC00F30 /*MemDMA2 Stream 0 Dest Current Inner-Loop Count reg */ +#define MDMA2_D0_CURR_Y_COUNT 0xFFC00F38 /*MemDMA2 Stream 0 Dest Current Outer-Loop Count reg */ 
+#define MDMA2_D0_IRQ_STATUS 0xFFC00F28 /*MemDMA2 Stream 0 Dest Interrupt/Status Register */ +#define MDMA2_D0_PERIPHERAL_MAP 0xFFC00F2C /*MemDMA2 Stream 0 Destination Peripheral Map register */ + +#define MDMA2_S0_CONFIG 0xFFC00F48 /*MemDMA2 Stream 0 Source Configuration register */ +#define MDMA2_S0_NEXT_DESC_PTR 0xFFC00F40 /*MemDMA2 Stream 0 Source Next Descriptor Ptr Reg */ +#define MDMA2_S0_START_ADDR 0xFFC00F44 /*MemDMA2 Stream 0 Source Start Address */ +#define MDMA2_S0_X_COUNT 0xFFC00F50 /*MemDMA2 Stream 0 Source Inner-Loop Count register */ +#define MDMA2_S0_Y_COUNT 0xFFC00F58 /*MemDMA2 Stream 0 Source Outer-Loop Count register */ +#define MDMA2_S0_X_MODIFY 0xFFC00F54 /*MemDMA2 Stream 0 Src Inner-Loop Addr-Increment reg */ +#define MDMA2_S0_Y_MODIFY 0xFFC00F5C /*MemDMA2 Stream 0 Src Outer-Loop Addr-Increment reg */ +#define MDMA2_S0_CURR_DESC_PTR 0xFFC00F60 /*MemDMA2 Stream 0 Source Current Descriptor Ptr reg */ +#define MDMA2_S0_CURR_ADDR 0xFFC00F64 /*MemDMA2 Stream 0 Source Current Address */ +#define MDMA2_S0_CURR_X_COUNT 0xFFC00F70 /*MemDMA2 Stream 0 Src Current Inner-Loop Count reg */ +#define MDMA2_S0_CURR_Y_COUNT 0xFFC00F78 /*MemDMA2 Stream 0 Src Current Outer-Loop Count reg */ +#define MDMA2_S0_IRQ_STATUS 0xFFC00F68 /*MemDMA2 Stream 0 Source Interrupt/Status Register */ +#define MDMA2_S0_PERIPHERAL_MAP 0xFFC00F6C /*MemDMA2 Stream 0 Source Peripheral Map register */ + +#define MDMA2_D1_CONFIG 0xFFC00F88 /*MemDMA2 Stream 1 Destination Configuration register */ +#define MDMA2_D1_NEXT_DESC_PTR 0xFFC00F80 /*MemDMA2 Stream 1 Destination Next Descriptor Ptr Reg */ +#define MDMA2_D1_START_ADDR 0xFFC00F84 /*MemDMA2 Stream 1 Destination Start Address */ +#define MDMA2_D1_X_COUNT 0xFFC00F90 /*MemDMA2 Stream 1 Dest Inner-Loop Count register */ +#define MDMA2_D1_Y_COUNT 0xFFC00F98 /*MemDMA2 Stream 1 Dest Outer-Loop Count register */ +#define MDMA2_D1_X_MODIFY 0xFFC00F94 /*MemDMA2 Stream 1 Dest Inner-Loop Address-Increment */ +#define MDMA2_D1_Y_MODIFY 0xFFC00F9C /*MemDMA2 Stream 1 Dest Outer-Loop Address-Increment */ +#define MDMA2_D1_CURR_DESC_PTR 0xFFC00FA0 /*MemDMA2 Stream 1 Destination Current Descriptor Ptr */ +#define MDMA2_D1_CURR_ADDR 0xFFC00FA4 /*MemDMA2 Stream 1 Destination Current Address reg */ +#define MDMA2_D1_CURR_X_COUNT 0xFFC00FB0 /*MemDMA2 Stream 1 Dest Current Inner-Loop Count reg */ +#define MDMA2_D1_CURR_Y_COUNT 0xFFC00FB8 /*MemDMA2 Stream 1 Dest Current Outer-Loop Count reg */ +#define MDMA2_D1_IRQ_STATUS 0xFFC00FA8 /*MemDMA2 Stream 1 Destination Interrupt/Status Reg */ +#define MDMA2_D1_PERIPHERAL_MAP 0xFFC00FAC /*MemDMA2 Stream 1 Destination Peripheral Map register */ + +#define MDMA2_S1_CONFIG 0xFFC00FC8 /*MemDMA2 Stream 1 Source Configuration register */ +#define MDMA2_S1_NEXT_DESC_PTR 0xFFC00FC0 /*MemDMA2 Stream 1 Source Next Descriptor Ptr Reg */ +#define MDMA2_S1_START_ADDR 0xFFC00FC4 /*MemDMA2 Stream 1 Source Start Address */ +#define MDMA2_S1_X_COUNT 0xFFC00FD0 /*MemDMA2 Stream 1 Source Inner-Loop Count register */ +#define MDMA2_S1_Y_COUNT 0xFFC00FD8 /*MemDMA2 Stream 1 Source Outer-Loop Count register */ +#define MDMA2_S1_X_MODIFY 0xFFC00FD4 /*MemDMA2 Stream 1 Src Inner-Loop Address-Increment */ +#define MDMA2_S1_Y_MODIFY 0xFFC00FDC /*MemDMA2 Stream 1 Source Outer-Loop Address-Increment */ +#define MDMA2_S1_CURR_DESC_PTR 0xFFC00FE0 /*MemDMA2 Stream 1 Source Current Descriptor Ptr reg */ +#define MDMA2_S1_CURR_ADDR 0xFFC00FE4 /*MemDMA2 Stream 1 Source Current Address */ +#define MDMA2_S1_CURR_X_COUNT 0xFFC00FF0 /*MemDMA2 Stream 1 Source Current Inner-Loop 
Count */ +#define MDMA2_S1_CURR_Y_COUNT 0xFFC00FF8 /*MemDMA2 Stream 1 Source Current Outer-Loop Count */ +#define MDMA2_S1_IRQ_STATUS 0xFFC00FE8 /*MemDMA2 Stream 1 Source Interrupt/Status Register */ +#define MDMA2_S1_PERIPHERAL_MAP 0xFFC00FEC /*MemDMA2 Stream 1 Source Peripheral Map register */ + +/* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */ +#define IMDMA_D0_CONFIG 0xFFC01808 /*IMDMA Stream 0 Destination Configuration */ +#define IMDMA_D0_NEXT_DESC_PTR 0xFFC01800 /*IMDMA Stream 0 Destination Next Descriptor Ptr Reg */ +#define IMDMA_D0_START_ADDR 0xFFC01804 /*IMDMA Stream 0 Destination Start Address */ +#define IMDMA_D0_X_COUNT 0xFFC01810 /*IMDMA Stream 0 Destination Inner-Loop Count */ +#define IMDMA_D0_Y_COUNT 0xFFC01818 /*IMDMA Stream 0 Destination Outer-Loop Count */ +#define IMDMA_D0_X_MODIFY 0xFFC01814 /*IMDMA Stream 0 Dest Inner-Loop Address-Increment */ +#define IMDMA_D0_Y_MODIFY 0xFFC0181C /*IMDMA Stream 0 Dest Outer-Loop Address-Increment */ +#define IMDMA_D0_CURR_DESC_PTR 0xFFC01820 /*IMDMA Stream 0 Destination Current Descriptor Ptr */ +#define IMDMA_D0_CURR_ADDR 0xFFC01824 /*IMDMA Stream 0 Destination Current Address */ +#define IMDMA_D0_CURR_X_COUNT 0xFFC01830 /*IMDMA Stream 0 Destination Current Inner-Loop Count */ +#define IMDMA_D0_CURR_Y_COUNT 0xFFC01838 /*IMDMA Stream 0 Destination Current Outer-Loop Count */ +#define IMDMA_D0_IRQ_STATUS 0xFFC01828 /*IMDMA Stream 0 Destination Interrupt/Status */ + +#define IMDMA_S0_CONFIG 0xFFC01848 /*IMDMA Stream 0 Source Configuration */ +#define IMDMA_S0_NEXT_DESC_PTR 0xFFC01840 /*IMDMA Stream 0 Source Next Descriptor Ptr Reg */ +#define IMDMA_S0_START_ADDR 0xFFC01844 /*IMDMA Stream 0 Source Start Address */ +#define IMDMA_S0_X_COUNT 0xFFC01850 /*IMDMA Stream 0 Source Inner-Loop Count */ +#define IMDMA_S0_Y_COUNT 0xFFC01858 /*IMDMA Stream 0 Source Outer-Loop Count */ +#define IMDMA_S0_X_MODIFY 0xFFC01854 /*IMDMA Stream 0 Source Inner-Loop Address-Increment */ +#define IMDMA_S0_Y_MODIFY 0xFFC0185C /*IMDMA Stream 0 Source Outer-Loop Address-Increment */ +#define IMDMA_S0_CURR_DESC_PTR 0xFFC01860 /*IMDMA Stream 0 Source Current Descriptor Ptr reg */ +#define IMDMA_S0_CURR_ADDR 0xFFC01864 /*IMDMA Stream 0 Source Current Address */ +#define IMDMA_S0_CURR_X_COUNT 0xFFC01870 /*IMDMA Stream 0 Source Current Inner-Loop Count */ +#define IMDMA_S0_CURR_Y_COUNT 0xFFC01878 /*IMDMA Stream 0 Source Current Outer-Loop Count */ +#define IMDMA_S0_IRQ_STATUS 0xFFC01868 /*IMDMA Stream 0 Source Interrupt/Status */ + +#define IMDMA_D1_CONFIG 0xFFC01888 /*IMDMA Stream 1 Destination Configuration */ +#define IMDMA_D1_NEXT_DESC_PTR 0xFFC01880 /*IMDMA Stream 1 Destination Next Descriptor Ptr Reg */ +#define IMDMA_D1_START_ADDR 0xFFC01884 /*IMDMA Stream 1 Destination Start Address */ +#define IMDMA_D1_X_COUNT 0xFFC01890 /*IMDMA Stream 1 Destination Inner-Loop Count */ +#define IMDMA_D1_Y_COUNT 0xFFC01898 /*IMDMA Stream 1 Destination Outer-Loop Count */ +#define IMDMA_D1_X_MODIFY 0xFFC01894 /*IMDMA Stream 1 Dest Inner-Loop Address-Increment */ +#define IMDMA_D1_Y_MODIFY 0xFFC0189C /*IMDMA Stream 1 Dest Outer-Loop Address-Increment */ +#define IMDMA_D1_CURR_DESC_PTR 0xFFC018A0 /*IMDMA Stream 1 Destination Current Descriptor Ptr */ +#define IMDMA_D1_CURR_ADDR 0xFFC018A4 /*IMDMA Stream 1 Destination Current Address */ +#define IMDMA_D1_CURR_X_COUNT 0xFFC018B0 /*IMDMA Stream 1 Destination Current Inner-Loop Count */ +#define IMDMA_D1_CURR_Y_COUNT 0xFFC018B8 /*IMDMA Stream 1 Destination Current Outer-Loop Count */ +#define IMDMA_D1_IRQ_STATUS 
0xFFC018A8 /*IMDMA Stream 1 Destination Interrupt/Status */
+
+#define IMDMA_S1_CONFIG 0xFFC018C8 /*IMDMA Stream 1 Source Configuration */
+#define IMDMA_S1_NEXT_DESC_PTR 0xFFC018C0 /*IMDMA Stream 1 Source Next Descriptor Ptr Reg */
+#define IMDMA_S1_START_ADDR 0xFFC018C4 /*IMDMA Stream 1 Source Start Address */
+#define IMDMA_S1_X_COUNT 0xFFC018D0 /*IMDMA Stream 1 Source Inner-Loop Count */
+#define IMDMA_S1_Y_COUNT 0xFFC018D8 /*IMDMA Stream 1 Source Outer-Loop Count */
+#define IMDMA_S1_X_MODIFY 0xFFC018D4 /*IMDMA Stream 1 Source Inner-Loop Address-Increment */
+#define IMDMA_S1_Y_MODIFY 0xFFC018DC /*IMDMA Stream 1 Source Outer-Loop Address-Increment */
+#define IMDMA_S1_CURR_DESC_PTR 0xFFC018E0 /*IMDMA Stream 1 Source Current Descriptor Ptr reg */
+#define IMDMA_S1_CURR_ADDR 0xFFC018E4 /*IMDMA Stream 1 Source Current Address */
+#define IMDMA_S1_CURR_X_COUNT 0xFFC018F0 /*IMDMA Stream 1 Source Current Inner-Loop Count */
+#define IMDMA_S1_CURR_Y_COUNT 0xFFC018F8 /*IMDMA Stream 1 Source Current Outer-Loop Count */
+#define IMDMA_S1_IRQ_STATUS 0xFFC018E8 /*IMDMA Stream 1 Source Interrupt/Status */
+
+/*********************************************************************************** */
+/* System MMR Register Bits */
+/******************************************************************************* */
+
+/* ********************* PLL AND RESET MASKS ************************ */
+
+/* PLL_CTL Masks */
+#define PLL_CLKIN 0x00000000 /* Pass CLKIN to PLL */
+#define PLL_CLKIN_DIV2 0x00000001 /* Pass CLKIN/2 to PLL */
+#define PLL_OFF 0x00000002 /* Shut off PLL clocks */
+#define STOPCK_OFF 0x00000008 /* Core clock off */
+#define PDWN 0x00000020 /* Put the PLL in a Deep Sleep state */
+#define BYPASS 0x00000100 /* Bypass the PLL */
+
+/* CHIPID Masks */
+#define CHIPID_VERSION 0xF0000000
+#define CHIPID_FAMILY 0x0FFFF000
+#define CHIPID_MANUFACTURE 0x00000FFE
+
+/* PLL_DIV Masks */
+#define SCLK_DIV(x) (x) /* SCLK = VCO / x */
+
+#define CCLK_DIV1 0x00000000 /* CCLK = VCO / 1 */
+#define CCLK_DIV2 0x00000010 /* CCLK = VCO / 2 */
+#define CCLK_DIV4 0x00000020 /* CCLK = VCO / 4 */
+#define CCLK_DIV8 0x00000030 /* CCLK = VCO / 8 */
+
+/* PLL_STAT Masks */
+#define ACTIVE_PLLENABLED 0x0001 /* Processor In Active Mode With PLL Enabled */
+#define FULL_ON 0x0002 /* Processor In Full On Mode */
+#define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */
+#define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */
+
+/* SWRST Mask */
+#define SYSTEM_RESET 0x00000007 /* Initiates a system software reset */
+#define SWRST_DBL_FAULT_B 0x00000800 /* SWRST Core B Double Fault */
+#define SWRST_DBL_FAULT_A 0x00001000 /* SWRST Core A Double Fault */
+#define SWRST_WDT_B 0x00002000 /* SWRST Watchdog B */
+#define SWRST_WDT_A 0x00004000 /* SWRST Watchdog A */
+#define SWRST_OCCURRED 0x00008000 /* SWRST Status */
+
+/* ************* SYSTEM INTERRUPT CONTROLLER MASKS ***************** */
+
+/* SICu_IARv Masks */
+/* u = A or B */
+/* v = 0 to 7 */
+/* w = 0 or 1 */
+
+/* Per_number = 0 to 63 */
+/* IVG_number = 7 to 15 */
+#define Peripheral_IVG(Per_number, IVG_number) \
+ (((IVG_number) - 7) << (((Per_number) % 8) * 4)) /* Peripheral #Per_number assigned IVG #IVG_number */
+ /* Usage: r0.l = lo(Peripheral_IVG(62, 10)); */
+ /* r0.h = hi(Peripheral_IVG(62, 10)); */
+
+/* SICx_IMASKw Masks */
+/* masks are 32 bits wide, so two writes are required for "64 bit" wide registers */
+#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
+#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all
peripheral interrupts */ +#define SIC_MASK(x) (1 << (x)) /* Mask Peripheral #x interrupt */ +#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << (x))) /* Unmask Peripheral #x interrupt */ + +/* SIC_IWR Masks */ +#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */ +#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */ +/* x = pos 0 to 31, for 32-63 use value-32 */ +#define IWR_ENABLE(x) (1 << (x)) /* Wakeup Enable Peripheral #x */ +#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x))) /* Wakeup Disable Peripheral #x */ + +/* ********* WATCHDOG TIMER MASKS ********************8 */ + +/* Watchdog Timer WDOG_CTL Register */ +#define ICTL(x) ((x<<1) & 0x0006) +#define ENABLE_RESET 0x00000000 /* Set Watchdog Timer to generate reset */ +#define ENABLE_NMI 0x00000002 /* Set Watchdog Timer to generate non-maskable interrupt */ +#define ENABLE_GPI 0x00000004 /* Set Watchdog Timer to generate general-purpose interrupt */ +#define DISABLE_EVT 0x00000006 /* Disable Watchdog Timer interrupts */ + +#define TMR_EN 0x0000 +#define TMR_DIS 0x0AD0 +#define TRO 0x8000 + +#define ICTL_P0 0x01 +#define ICTL_P1 0x02 +#define TRO_P 0x0F + +/* ***************************** UART CONTROLLER MASKS ********************** */ + +/* UART_LCR Register */ + +#define DLAB 0x80 +#define SB 0x40 +#define STP 0x20 +#define EPS 0x10 +#define PEN 0x08 +#define STB 0x04 +#define WLS(x) ((x-5) & 0x03) + +#define DLAB_P 0x07 +#define SB_P 0x06 +#define STP_P 0x05 +#define EPS_P 0x04 +#define PEN_P 0x03 +#define STB_P 0x02 +#define WLS_P1 0x01 +#define WLS_P0 0x00 + +/* UART_MCR Register */ +#define LOOP_ENA 0x10 +#define LOOP_ENA_P 0x04 + +/* UART_LSR Register */ +#define TEMT 0x40 +#define THRE 0x20 +#define BI 0x10 +#define FE 0x08 +#define PE 0x04 +#define OE 0x02 +#define DR 0x01 + +#define TEMP_P 0x06 +#define THRE_P 0x05 +#define BI_P 0x04 +#define FE_P 0x03 +#define PE_P 0x02 +#define OE_P 0x01 +#define DR_P 0x00 + +/* UART_IER Register */ +#define ELSI 0x04 +#define ETBEI 0x02 +#define ERBFI 0x01 + +#define ELSI_P 0x02 +#define ETBEI_P 0x01 +#define ERBFI_P 0x00 + +/* UART_IIR Register */ +#define STATUS(x) ((x << 1) & 0x06) +#define NINT 0x01 +#define STATUS_P1 0x02 +#define STATUS_P0 0x01 +#define NINT_P 0x00 +#define IIR_TX_READY 0x02 /* UART_THR empty */ +#define IIR_RX_READY 0x04 /* Receive data ready */ +#define IIR_LINE_CHANGE 0x06 /* Receive line status */ +#define IIR_STATUS 0x06 + +/* UART_GCTL Register */ +#define FFE 0x20 +#define FPE 0x10 +#define RPOLC 0x08 +#define TPOLC 0x04 +#define IREN 0x02 +#define UCEN 0x01 + +#define FFE_P 0x05 +#define FPE_P 0x04 +#define RPOLC_P 0x03 +#define TPOLC_P 0x02 +#define IREN_P 0x01 +#define UCEN_P 0x00 + +/* ********** SERIAL PORT MASKS ********************** */ + +/* SPORTx_TCR1 Masks */ +#define TSPEN 0x0001 /* TX enable */ +#define ITCLK 0x0002 /* Internal TX Clock Select */ +#define TDTYPE 0x000C /* TX Data Formatting Select */ +#define TLSBIT 0x0010 /* TX Bit Order */ +#define ITFS 0x0200 /* Internal TX Frame Sync Select */ +#define TFSR 0x0400 /* TX Frame Sync Required Select */ +#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */ +#define LTFS 0x1000 /* Low TX Frame Sync Select */ +#define LATFS 0x2000 /* Late TX Frame Sync Select */ +#define TCKFE 0x4000 /* TX Clock Falling Edge Select */ + +/* SPORTx_TCR2 Masks */ +#define SLEN 0x001F /*TX Word Length */ +#define TXSE 0x0100 /*TX Secondary Enable */ +#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */ +#define TRFST 0x0400 /*TX Right-First Data Order */ + +/* SPORTx_RCR1 Masks 
*/ +#define RSPEN 0x0001 /* RX enable */ +#define IRCLK 0x0002 /* Internal RX Clock Select */ +#define RDTYPE 0x000C /* RX Data Formatting Select */ +#define RULAW 0x0008 /* u-Law enable */ +#define RALAW 0x000C /* A-Law enable */ +#define RLSBIT 0x0010 /* RX Bit Order */ +#define IRFS 0x0200 /* Internal RX Frame Sync Select */ +#define RFSR 0x0400 /* RX Frame Sync Required Select */ +#define LRFS 0x1000 /* Low RX Frame Sync Select */ +#define LARFS 0x2000 /* Late RX Frame Sync Select */ +#define RCKFE 0x4000 /* RX Clock Falling Edge Select */ + +/* SPORTx_RCR2 Masks */ +#define SLEN 0x001F /*RX Word Length */ +#define RXSE 0x0100 /*RX Secondary Enable */ +#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */ +#define RRFST 0x0400 /*Right-First Data Order */ + +/*SPORTx_STAT Masks */ +#define RXNE 0x0001 /*RX FIFO Not Empty Status */ +#define RUVF 0x0002 /*RX Underflow Status */ +#define ROVF 0x0004 /*RX Overflow Status */ +#define TXF 0x0008 /*TX FIFO Full Status */ +#define TUVF 0x0010 /*TX Underflow Status */ +#define TOVF 0x0020 /*TX Overflow Status */ +#define TXHRE 0x0040 /*TX Hold Register Empty */ + +/*SPORTx_MCMC1 Masks */ +#define SP_WSIZE 0x0000F000 /*Multichannel Window Size Field */ +#define SP_WOFF 0x000003FF /*Multichannel Window Offset Field */ + +/*SPORTx_MCMC2 Masks */ +#define MCCRM 0x00000003 /*Multichannel Clock Recovery Mode */ +#define MCDTXPE 0x00000004 /*Multichannel DMA Transmit Packing */ +#define MCDRXPE 0x00000008 /*Multichannel DMA Receive Packing */ +#define MCMEN 0x00000010 /*Multichannel Frame Mode Enable */ +#define FSDR 0x00000080 /*Multichannel Frame Sync to Data Relationship */ +#define MFD 0x0000F000 /*Multichannel Frame Delay */ + +/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */ + +/* PPI_CONTROL Masks */ +#define PORT_EN 0x00000001 /* PPI Port Enable */ +#define PORT_DIR 0x00000002 /* PPI Port Direction */ +#define XFR_TYPE 0x0000000C /* PPI Transfer Type */ +#define PORT_CFG 0x00000030 /* PPI Port Configuration */ +#define FLD_SEL 0x00000040 /* PPI Active Field Select */ +#define PACK_EN 0x00000080 /* PPI Packing Mode */ +#define DMA32 0x00000100 /* PPI 32-bit DMA Enable */ +#define SKIP_EN 0x00000200 /* PPI Skip Element Enable */ +#define SKIP_EO 0x00000400 /* PPI Skip Even/Odd Elements */ +#define DLENGTH 0x00003800 /* PPI Data Length */ +#define DLEN_8 0x0 /* PPI Data Length mask for DLEN=8 */ +#define DLEN(x) (((x-9) & 0x07) << 11) /* PPI Data Length (only works for x=10-->x=16) */ +#define POL 0x0000C000 /* PPI Signal Polarities */ + +/* PPI_STATUS Masks */ +#define FLD 0x00000400 /* Field Indicator */ +#define FT_ERR 0x00000800 /* Frame Track Error */ +#define OVR 0x00001000 /* FIFO Overflow Error */ +#define UNDR 0x00002000 /* FIFO Underrun Error */ +#define ERR_DET 0x00004000 /* Error Detected Indicator */ +#define ERR_NCOR 0x00008000 /* Error Not Corrected Indicator */ + +/* ********** DMA CONTROLLER MASKS *********************8 */ + +/* DMAx_CONFIG, MDMA_yy_CONFIG, IMDMA_yy_CONFIG Masks */ +#define DMAEN 0x00000001 /* Channel Enable */ +#define WNR 0x00000002 /* Channel Direction (W/R*) */ +#define WDSIZE_8 0x00000000 /* Word Size 8 bits */ +#define WDSIZE_16 0x00000004 /* Word Size 16 bits */ +#define WDSIZE_32 0x00000008 /* Word Size 32 bits */ +#define DMA2D 0x00000010 /* 2D/1D* Mode */ +#define RESTART 0x00000020 /* Restart */ +#define DI_SEL 0x00000040 /* Data Interrupt Select */ +#define DI_EN 0x00000080 /* Data Interrupt Enable */ +#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */ 
+#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */ +#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */ +#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */ +#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */ +#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */ +#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */ +#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */ +#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */ +#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */ +#define NDSIZE 0x00000900 /* Next Descriptor Size */ +#define DMAFLOW 0x00007000 /* Flow Control */ +#define DMAFLOW_STOP 0x0000 /* Stop Mode */ +#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */ +#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */ +#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */ +#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */ + +#define DMAEN_P 0 /* Channel Enable */ +#define WNR_P 1 /* Channel Direction (W/R*) */ +#define DMA2D_P 4 /* 2D/1D* Mode */ +#define RESTART_P 5 /* Restart */ +#define DI_SEL_P 6 /* Data Interrupt Select */ +#define DI_EN_P 7 /* Data Interrupt Enable */ + +/* DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS, IMDMA_yy_IRQ_STATUS Masks */ + +#define DMA_DONE 0x00000001 /* DMA Done Indicator */ +#define DMA_ERR 0x00000002 /* DMA Error Indicator */ +#define DFETCH 0x00000004 /* Descriptor Fetch Indicator */ +#define DMA_RUN 0x00000008 /* DMA Running Indicator */ + +#define DMA_DONE_P 0 /* DMA Done Indicator */ +#define DMA_ERR_P 1 /* DMA Error Indicator */ +#define DFETCH_P 2 /* Descriptor Fetch Indicator */ +#define DMA_RUN_P 3 /* DMA Running Indicator */ + +/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP, IMDMA_yy_PERIPHERAL_MAP Masks */ + +#define CTYPE 0x00000040 /* DMA Channel Type Indicator */ +#define CTYPE_P 6 /* DMA Channel Type Indicator BIT POSITION */ +#define PCAP8 0x00000080 /* DMA 8-bit Operation Indicator */ +#define PCAP16 0x00000100 /* DMA 16-bit Operation Indicator */ +#define PCAP32 0x00000200 /* DMA 32-bit Operation Indicator */ +#define PCAPWR 0x00000400 /* DMA Write Operation Indicator */ +#define PCAPRD 0x00000800 /* DMA Read Operation Indicator */ +#define PMAP 0x00007000 /* DMA Peripheral Map Field */ + +/* ************* GENERAL PURPOSE TIMER MASKS ******************** */ + +/* PWM Timer bit definitions */ + +/* TIMER_ENABLE Register */ +#define TIMEN0 0x0001 +#define TIMEN1 0x0002 +#define TIMEN2 0x0004 +#define TIMEN3 0x0008 +#define TIMEN4 0x0010 +#define TIMEN5 0x0020 +#define TIMEN6 0x0040 +#define TIMEN7 0x0080 +#define TIMEN8 0x0001 +#define TIMEN9 0x0002 +#define TIMEN10 0x0004 +#define TIMEN11 0x0008 + +#define TIMEN0_P 0x00 +#define TIMEN1_P 0x01 +#define TIMEN2_P 0x02 +#define TIMEN3_P 0x03 +#define TIMEN4_P 0x04 +#define TIMEN5_P 0x05 +#define TIMEN6_P 0x06 +#define TIMEN7_P 0x07 +#define TIMEN8_P 0x00 +#define TIMEN9_P 0x01 +#define TIMEN10_P 0x02 +#define TIMEN11_P 0x03 + +/* TIMER_DISABLE Register */ +#define TIMDIS0 0x0001 +#define TIMDIS1 0x0002 +#define TIMDIS2 0x0004 +#define TIMDIS3 0x0008 +#define TIMDIS4 0x0010 +#define TIMDIS5 0x0020 +#define TIMDIS6 0x0040 +#define TIMDIS7 0x0080 +#define TIMDIS8 0x0001 +#define TIMDIS9 0x0002 +#define TIMDIS10 0x0004 +#define TIMDIS11 0x0008 + +#define TIMDIS0_P 0x00 +#define TIMDIS1_P 0x01 +#define TIMDIS2_P 0x02 +#define TIMDIS3_P 0x03 +#define TIMDIS4_P 0x04 +#define TIMDIS5_P 0x05 +#define TIMDIS6_P 0x06 +#define TIMDIS7_P 0x07 +#define TIMDIS8_P 0x00 +#define TIMDIS9_P 0x01 +#define TIMDIS10_P 0x02 +#define TIMDIS11_P 0x03 + +/* 
TIMER_STATUS Register */ +#define TIMIL0 0x00000001 +#define TIMIL1 0x00000002 +#define TIMIL2 0x00000004 +#define TIMIL3 0x00000008 +#define TIMIL4 0x00010000 +#define TIMIL5 0x00020000 +#define TIMIL6 0x00040000 +#define TIMIL7 0x00080000 +#define TIMIL8 0x0001 +#define TIMIL9 0x0002 +#define TIMIL10 0x0004 +#define TIMIL11 0x0008 +#define TOVL_ERR0 0x00000010 +#define TOVL_ERR1 0x00000020 +#define TOVL_ERR2 0x00000040 +#define TOVL_ERR3 0x00000080 +#define TOVL_ERR4 0x00100000 +#define TOVL_ERR5 0x00200000 +#define TOVL_ERR6 0x00400000 +#define TOVL_ERR7 0x00800000 +#define TOVL_ERR8 0x0010 +#define TOVL_ERR9 0x0020 +#define TOVL_ERR10 0x0040 +#define TOVL_ERR11 0x0080 +#define TRUN0 0x00001000 +#define TRUN1 0x00002000 +#define TRUN2 0x00004000 +#define TRUN3 0x00008000 +#define TRUN4 0x10000000 +#define TRUN5 0x20000000 +#define TRUN6 0x40000000 +#define TRUN7 0x80000000 +#define TRUN8 0x1000 +#define TRUN9 0x2000 +#define TRUN10 0x4000 +#define TRUN11 0x8000 + +#define TIMIL0_P 0x00 +#define TIMIL1_P 0x01 +#define TIMIL2_P 0x02 +#define TIMIL3_P 0x03 +#define TIMIL4_P 0x10 +#define TIMIL5_P 0x11 +#define TIMIL6_P 0x12 +#define TIMIL7_P 0x13 +#define TIMIL8_P 0x00 +#define TIMIL9_P 0x01 +#define TIMIL10_P 0x02 +#define TIMIL11_P 0x03 +#define TOVL_ERR0_P 0x04 +#define TOVL_ERR1_P 0x05 +#define TOVL_ERR2_P 0x06 +#define TOVL_ERR3_P 0x07 +#define TOVL_ERR4_P 0x14 +#define TOVL_ERR5_P 0x15 +#define TOVL_ERR6_P 0x16 +#define TOVL_ERR7_P 0x17 +#define TOVL_ERR8_P 0x04 +#define TOVL_ERR9_P 0x05 +#define TOVL_ERR10_P 0x06 +#define TOVL_ERR11_P 0x07 +#define TRUN0_P 0x0C +#define TRUN1_P 0x0D +#define TRUN2_P 0x0E +#define TRUN3_P 0x0F +#define TRUN4_P 0x1C +#define TRUN5_P 0x1D +#define TRUN6_P 0x1E +#define TRUN7_P 0x1F +#define TRUN8_P 0x0C +#define TRUN9_P 0x0D +#define TRUN10_P 0x0E +#define TRUN11_P 0x0F + +/* TIMERx_CONFIG Registers */ +#define PWM_OUT 0x0001 +#define WDTH_CAP 0x0002 +#define EXT_CLK 0x0003 +#define PULSE_HI 0x0004 +#define PERIOD_CNT 0x0008 +#define IRQ_ENA 0x0010 +#define TIN_SEL 0x0020 +#define OUT_DIS 0x0040 +#define CLK_SEL 0x0080 +#define TOGGLE_HI 0x0100 +#define EMU_RUN 0x0200 +#define ERR_TYP(x) ((x & 0x03) << 14) + +#define TMODE_P0 0x00 +#define TMODE_P1 0x01 +#define PULSE_HI_P 0x02 +#define PERIOD_CNT_P 0x03 +#define IRQ_ENA_P 0x04 +#define TIN_SEL_P 0x05 +#define OUT_DIS_P 0x06 +#define CLK_SEL_P 0x07 +#define TOGGLE_HI_P 0x08 +#define EMU_RUN_P 0x09 +#define ERR_TYP_P0 0x0E +#define ERR_TYP_P1 0x0F + +/*/ ****************** PROGRAMMABLE FLAG MASKS ********************* */ + +/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */ +#define PF0 0x0001 +#define PF1 0x0002 +#define PF2 0x0004 +#define PF3 0x0008 +#define PF4 0x0010 +#define PF5 0x0020 +#define PF6 0x0040 +#define PF7 0x0080 +#define PF8 0x0100 +#define PF9 0x0200 +#define PF10 0x0400 +#define PF11 0x0800 +#define PF12 0x1000 +#define PF13 0x2000 +#define PF14 0x4000 +#define PF15 0x8000 + +/* General Purpose IO (0xFFC00700 - 0xFFC007FF) BIT POSITIONS */ +#define PF0_P 0 +#define PF1_P 1 +#define PF2_P 2 +#define PF3_P 3 +#define PF4_P 4 +#define PF5_P 5 +#define PF6_P 6 +#define PF7_P 7 +#define PF8_P 8 +#define PF9_P 9 +#define PF10_P 10 +#define PF11_P 11 +#define PF12_P 12 +#define PF13_P 13 +#define PF14_P 14 +#define PF15_P 15 + +/* *********** SERIAL PERIPHERAL INTERFACE (SPI) MASKS **************** */ + +/* SPI_CTL Masks */ +#define TIMOD 0x00000003 /* Transfer initiation mode and interrupt generation */ +#define SZ 0x00000004 /* Send Zero (=0) or last (=1) word when TDBR empty. 
*/ +#define GM 0x00000008 /* When RDBR full, get more (=1) data or discard (=0) incoming Data */ +#define PSSE 0x00000010 /* Enable (=1) Slave-Select input for Master. */ +#define EMISO 0x00000020 /* Enable (=1) MISO pin as an output. */ +#define SIZE 0x00000100 /* Word length (0 => 8 bits, 1 => 16 bits) */ +#define LSBF 0x00000200 /* Data format (0 => MSB sent/received first 1 => LSB sent/received first) */ +#define CPHA 0x00000400 /* Clock phase (0 => SPICLK starts toggling in middle of xfer, 1 => SPICLK toggles at the beginning of xfer. */ +#define CPOL 0x00000800 /* Clock polarity (0 => active-high, 1 => active-low) */ +#define MSTR 0x00001000 /* Configures SPI as master (=1) or slave (=0) */ +#define WOM 0x00002000 /* Open drain (=1) data output enable (for MOSI and MISO) */ +#define SPE 0x00004000 /* SPI module enable (=1), disable (=0) */ + +/* SPI_FLG Masks */ +#define FLS1 0x00000002 /* Enables (=1) SPI_FLOUT1 as flag output for SPI Slave-select */ +#define FLS2 0x00000004 /* Enables (=1) SPI_FLOUT2 as flag output for SPI Slave-select */ +#define FLS3 0x00000008 /* Enables (=1) SPI_FLOUT3 as flag output for SPI Slave-select */ +#define FLS4 0x00000010 /* Enables (=1) SPI_FLOUT4 as flag output for SPI Slave-select */ +#define FLS5 0x00000020 /* Enables (=1) SPI_FLOUT5 as flag output for SPI Slave-select */ +#define FLS6 0x00000040 /* Enables (=1) SPI_FLOUT6 as flag output for SPI Slave-select */ +#define FLS7 0x00000080 /* Enables (=1) SPI_FLOUT7 as flag output for SPI Slave-select */ +#define FLG1 0x00000200 /* Activates (=0) SPI_FLOUT1 as flag output for SPI Slave-select */ +#define FLG2 0x00000400 /* Activates (=0) SPI_FLOUT2 as flag output for SPI Slave-select */ +#define FLG3 0x00000800 /* Activates (=0) SPI_FLOUT3 as flag output for SPI Slave-select */ +#define FLG4 0x00001000 /* Activates (=0) SPI_FLOUT4 as flag output for SPI Slave-select */ +#define FLG5 0x00002000 /* Activates (=0) SPI_FLOUT5 as flag output for SPI Slave-select */ +#define FLG6 0x00004000 /* Activates (=0) SPI_FLOUT6 as flag output for SPI Slave-select */ +#define FLG7 0x00008000 /* Activates (=0) SPI_FLOUT7 as flag output for SPI Slave-select */ + +/* SPI_FLG Bit Positions */ +#define FLS1_P 0x00000001 /* Enables (=1) SPI_FLOUT1 as flag output for SPI Slave-select */ +#define FLS2_P 0x00000002 /* Enables (=1) SPI_FLOUT2 as flag output for SPI Slave-select */ +#define FLS3_P 0x00000003 /* Enables (=1) SPI_FLOUT3 as flag output for SPI Slave-select */ +#define FLS4_P 0x00000004 /* Enables (=1) SPI_FLOUT4 as flag output for SPI Slave-select */ +#define FLS5_P 0x00000005 /* Enables (=1) SPI_FLOUT5 as flag output for SPI Slave-select */ +#define FLS6_P 0x00000006 /* Enables (=1) SPI_FLOUT6 as flag output for SPI Slave-select */ +#define FLS7_P 0x00000007 /* Enables (=1) SPI_FLOUT7 as flag output for SPI Slave-select */ +#define FLG1_P 0x00000009 /* Activates (=0) SPI_FLOUT1 as flag output for SPI Slave-select */ +#define FLG2_P 0x0000000A /* Activates (=0) SPI_FLOUT2 as flag output for SPI Slave-select */ +#define FLG3_P 0x0000000B /* Activates (=0) SPI_FLOUT3 as flag output for SPI Slave-select */ +#define FLG4_P 0x0000000C /* Activates (=0) SPI_FLOUT4 as flag output for SPI Slave-select */ +#define FLG5_P 0x0000000D /* Activates (=0) SPI_FLOUT5 as flag output for SPI Slave-select */ +#define FLG6_P 0x0000000E /* Activates (=0) SPI_FLOUT6 as flag output for SPI Slave-select */ +#define FLG7_P 0x0000000F /* Activates (=0) SPI_FLOUT7 as flag output for SPI Slave-select */ + +/* SPI_STAT Masks */ +#define SPIF 
0x00000001 /* Set (=1) when SPI single-word transfer complete */ +#define MODF 0x00000002 /* Set (=1) in a master device when some other device tries to become master */ +#define TXE 0x00000004 /* Set (=1) when transmission occurs with no new data in SPI_TDBR */ +#define TXS 0x00000008 /* SPI_TDBR Data Buffer Status (0=Empty, 1=Full) */ +#define RBSY 0x00000010 /* Set (=1) when data is received with RDBR full */ +#define RXS 0x00000020 /* SPI_RDBR Data Buffer Status (0=Empty, 1=Full) */ +#define TXCOL 0x00000040 /* When set (=1), corrupt data may have been transmitted */ + +/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */ + +/* AMGCTL Masks */ +#define AMCKEN 0x0001 /* Enable CLKOUT */ +#define AMBEN_B0 0x0002 /* Enable Asynchronous Memory Bank 0 only */ +#define AMBEN_B0_B1 0x0004 /* Enable Asynchronous Memory Banks 0 & 1 only */ +#define AMBEN_B0_B1_B2 0x0006 /* Enable Asynchronous Memory Banks 0, 1, and 2 */ +#define AMBEN_ALL 0x0008 /* Enable Asynchronous Memory Banks (all) 0, 1, 2, and 3 */ +#define B0_PEN 0x0010 /* Enable 16-bit packing Bank 0 */ +#define B1_PEN 0x0020 /* Enable 16-bit packing Bank 1 */ +#define B2_PEN 0x0040 /* Enable 16-bit packing Bank 2 */ +#define B3_PEN 0x0080 /* Enable 16-bit packing Bank 3 */ + +/* AMGCTL Bit Positions */ +#define AMCKEN_P 0x00000000 /* Enable CLKOUT */ +#define AMBEN_P0 0x00000001 /* Asynchronous Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */ +#define AMBEN_P1 0x00000002 /* Asynchronous Memory Enable, 010 - banks 0&1 enabled, 011 - banks 0-3 enabled */ +#define AMBEN_P2 0x00000003 /* Asynchronous Memory Enable, 1xx - All banks (bank 0, 1, 2, and 3) enabled */ +#define B0_PEN_P 0x004 /* Enable 16-bit packing Bank 0 */ +#define B1_PEN_P 0x005 /* Enable 16-bit packing Bank 1 */ +#define B2_PEN_P 0x006 /* Enable 16-bit packing Bank 2 */ +#define B3_PEN_P 0x007 /* Enable 16-bit packing Bank 3 */ + +/* AMBCTL0 Masks */ +#define B0RDYEN 0x00000001 /* Bank 0 RDY Enable, 0=disable, 1=enable */ +#define B0RDYPOL 0x00000002 /* Bank 0 RDY Active high, 0=active low, 1=active high */ +#define B0TT_1 0x00000004 /* Bank 0 Transition Time from Read to Write = 1 cycle */ +#define B0TT_2 0x00000008 /* Bank 0 Transition Time from Read to Write = 2 cycles */ +#define B0TT_3 0x0000000C /* Bank 0 Transition Time from Read to Write = 3 cycles */ +#define B0TT_4 0x00000000 /* Bank 0 Transition Time from Read to Write = 4 cycles */ +#define B0ST_1 0x00000010 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */ +#define B0ST_2 0x00000020 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */ +#define B0ST_3 0x00000030 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */ +#define B0ST_4 0x00000000 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */ +#define B0HT_1 0x00000040 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 1 cycle */ +#define B0HT_2 0x00000080 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 2 cycles */ +#define B0HT_3 0x000000C0 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 3 cycles */ +#define B0HT_0 0x00000000 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 0 cycles */ +#define B0RAT_1 0x00000100 /* Bank 0 Read Access Time = 1 cycle */ +#define B0RAT_2 0x00000200 /* Bank 0 Read Access Time = 2 cycles */ +#define B0RAT_3 0x00000300 /* Bank 0 Read Access Time = 3 cycles */ +#define B0RAT_4 0x00000400 /* Bank 0 Read Access Time = 4 cycles */ +#define 
B0RAT_5 0x00000500 /* Bank 0 Read Access Time = 5 cycles */ +#define B0RAT_6 0x00000600 /* Bank 0 Read Access Time = 6 cycles */ +#define B0RAT_7 0x00000700 /* Bank 0 Read Access Time = 7 cycles */ +#define B0RAT_8 0x00000800 /* Bank 0 Read Access Time = 8 cycles */ +#define B0RAT_9 0x00000900 /* Bank 0 Read Access Time = 9 cycles */ +#define B0RAT_10 0x00000A00 /* Bank 0 Read Access Time = 10 cycles */ +#define B0RAT_11 0x00000B00 /* Bank 0 Read Access Time = 11 cycles */ +#define B0RAT_12 0x00000C00 /* Bank 0 Read Access Time = 12 cycles */ +#define B0RAT_13 0x00000D00 /* Bank 0 Read Access Time = 13 cycles */ +#define B0RAT_14 0x00000E00 /* Bank 0 Read Access Time = 14 cycles */ +#define B0RAT_15 0x00000F00 /* Bank 0 Read Access Time = 15 cycles */ +#define B0WAT_1 0x00001000 /* Bank 0 Write Access Time = 1 cycle */ +#define B0WAT_2 0x00002000 /* Bank 0 Write Access Time = 2 cycles */ +#define B0WAT_3 0x00003000 /* Bank 0 Write Access Time = 3 cycles */ +#define B0WAT_4 0x00004000 /* Bank 0 Write Access Time = 4 cycles */ +#define B0WAT_5 0x00005000 /* Bank 0 Write Access Time = 5 cycles */ +#define B0WAT_6 0x00006000 /* Bank 0 Write Access Time = 6 cycles */ +#define B0WAT_7 0x00007000 /* Bank 0 Write Access Time = 7 cycles */ +#define B0WAT_8 0x00008000 /* Bank 0 Write Access Time = 8 cycles */ +#define B0WAT_9 0x00009000 /* Bank 0 Write Access Time = 9 cycles */ +#define B0WAT_10 0x0000A000 /* Bank 0 Write Access Time = 10 cycles */ +#define B0WAT_11 0x0000B000 /* Bank 0 Write Access Time = 11 cycles */ +#define B0WAT_12 0x0000C000 /* Bank 0 Write Access Time = 12 cycles */ +#define B0WAT_13 0x0000D000 /* Bank 0 Write Access Time = 13 cycles */ +#define B0WAT_14 0x0000E000 /* Bank 0 Write Access Time = 14 cycles */ +#define B0WAT_15 0x0000F000 /* Bank 0 Write Access Time = 15 cycles */ +#define B1RDYEN 0x00010000 /* Bank 1 RDY enable, 0=disable, 1=enable */ +#define B1RDYPOL 0x00020000 /* Bank 1 RDY Active high, 0=active low, 1=active high */ +#define B1TT_1 0x00040000 /* Bank 1 Transition Time from Read to Write = 1 cycle */ +#define B1TT_2 0x00080000 /* Bank 1 Transition Time from Read to Write = 2 cycles */ +#define B1TT_3 0x000C0000 /* Bank 1 Transition Time from Read to Write = 3 cycles */ +#define B1TT_4 0x00000000 /* Bank 1 Transition Time from Read to Write = 4 cycles */ +#define B1ST_1 0x00100000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */ +#define B1ST_2 0x00200000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */ +#define B1ST_3 0x00300000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */ +#define B1ST_4 0x00000000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */ +#define B1HT_1 0x00400000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */ +#define B1HT_2 0x00800000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */ +#define B1HT_3 0x00C00000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */ +#define B1HT_0 0x00000000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */ +#define B1RAT_1 0x01000000 /* Bank 1 Read Access Time = 1 cycle */ +#define B1RAT_2 0x02000000 /* Bank 1 Read Access Time = 2 cycles */ +#define B1RAT_3 0x03000000 /* Bank 1 Read Access Time = 3 cycles */ +#define B1RAT_4 0x04000000 /* Bank 1 Read Access Time = 4 cycles */ +#define B1RAT_5 0x05000000 /* Bank 1 Read Access Time = 5 cycles */ +#define B1RAT_6 
0x06000000 /* Bank 1 Read Access Time = 6 cycles */ +#define B1RAT_7 0x07000000 /* Bank 1 Read Access Time = 7 cycles */ +#define B1RAT_8 0x08000000 /* Bank 1 Read Access Time = 8 cycles */ +#define B1RAT_9 0x09000000 /* Bank 1 Read Access Time = 9 cycles */ +#define B1RAT_10 0x0A000000 /* Bank 1 Read Access Time = 10 cycles */ +#define B1RAT_11 0x0B000000 /* Bank 1 Read Access Time = 11 cycles */ +#define B1RAT_12 0x0C000000 /* Bank 1 Read Access Time = 12 cycles */ +#define B1RAT_13 0x0D000000 /* Bank 1 Read Access Time = 13 cycles */ +#define B1RAT_14 0x0E000000 /* Bank 1 Read Access Time = 14 cycles */ +#define B1RAT_15 0x0F000000 /* Bank 1 Read Access Time = 15 cycles */ +#define B1WAT_1 0x10000000 /* Bank 1 Write Access Time = 1 cycle */ +#define B1WAT_2 0x20000000 /* Bank 1 Write Access Time = 2 cycles */ +#define B1WAT_3 0x30000000 /* Bank 1 Write Access Time = 3 cycles */ +#define B1WAT_4 0x40000000 /* Bank 1 Write Access Time = 4 cycles */ +#define B1WAT_5 0x50000000 /* Bank 1 Write Access Time = 5 cycles */ +#define B1WAT_6 0x60000000 /* Bank 1 Write Access Time = 6 cycles */ +#define B1WAT_7 0x70000000 /* Bank 1 Write Access Time = 7 cycles */ +#define B1WAT_8 0x80000000 /* Bank 1 Write Access Time = 8 cycles */ +#define B1WAT_9 0x90000000 /* Bank 1 Write Access Time = 9 cycles */ +#define B1WAT_10 0xA0000000 /* Bank 1 Write Access Time = 10 cycles */ +#define B1WAT_11 0xB0000000 /* Bank 1 Write Access Time = 11 cycles */ +#define B1WAT_12 0xC0000000 /* Bank 1 Write Access Time = 12 cycles */ +#define B1WAT_13 0xD0000000 /* Bank 1 Write Access Time = 13 cycles */ +#define B1WAT_14 0xE0000000 /* Bank 1 Write Access Time = 14 cycles */ +#define B1WAT_15 0xF0000000 /* Bank 1 Write Access Time = 15 cycles */ + +/* AMBCTL1 Masks */ +#define B2RDYEN 0x00000001 /* Bank 2 RDY Enable, 0=disable, 1=enable */ +#define B2RDYPOL 0x00000002 /* Bank 2 RDY Active high, 0=active low, 1=active high */ +#define B2TT_1 0x00000004 /* Bank 2 Transition Time from Read to Write = 1 cycle */ +#define B2TT_2 0x00000008 /* Bank 2 Transition Time from Read to Write = 2 cycles */ +#define B2TT_3 0x0000000C /* Bank 2 Transition Time from Read to Write = 3 cycles */ +#define B2TT_4 0x00000000 /* Bank 2 Transition Time from Read to Write = 4 cycles */ +#define B2ST_1 0x00000010 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */ +#define B2ST_2 0x00000020 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */ +#define B2ST_3 0x00000030 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */ +#define B2ST_4 0x00000000 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */ +#define B2HT_1 0x00000040 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */ +#define B2HT_2 0x00000080 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */ +#define B2HT_3 0x000000C0 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */ +#define B2HT_0 0x00000000 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */ +#define B2RAT_1 0x00000100 /* Bank 2 Read Access Time = 1 cycle */ +#define B2RAT_2 0x00000200 /* Bank 2 Read Access Time = 2 cycles */ +#define B2RAT_3 0x00000300 /* Bank 2 Read Access Time = 3 cycles */ +#define B2RAT_4 0x00000400 /* Bank 2 Read Access Time = 4 cycles */ +#define B2RAT_5 0x00000500 /* Bank 2 Read Access Time = 5 cycles */ +#define B2RAT_6 0x00000600 /* Bank 2 Read Access Time = 6 cycles */ +#define 
B2RAT_7 0x00000700 /* Bank 2 Read Access Time = 7 cycles */ +#define B2RAT_8 0x00000800 /* Bank 2 Read Access Time = 8 cycles */ +#define B2RAT_9 0x00000900 /* Bank 2 Read Access Time = 9 cycles */ +#define B2RAT_10 0x00000A00 /* Bank 2 Read Access Time = 10 cycles */ +#define B2RAT_11 0x00000B00 /* Bank 2 Read Access Time = 11 cycles */ +#define B2RAT_12 0x00000C00 /* Bank 2 Read Access Time = 12 cycles */ +#define B2RAT_13 0x00000D00 /* Bank 2 Read Access Time = 13 cycles */ +#define B2RAT_14 0x00000E00 /* Bank 2 Read Access Time = 14 cycles */ +#define B2RAT_15 0x00000F00 /* Bank 2 Read Access Time = 15 cycles */ +#define B2WAT_1 0x00001000 /* Bank 2 Write Access Time = 1 cycle */ +#define B2WAT_2 0x00002000 /* Bank 2 Write Access Time = 2 cycles */ +#define B2WAT_3 0x00003000 /* Bank 2 Write Access Time = 3 cycles */ +#define B2WAT_4 0x00004000 /* Bank 2 Write Access Time = 4 cycles */ +#define B2WAT_5 0x00005000 /* Bank 2 Write Access Time = 5 cycles */ +#define B2WAT_6 0x00006000 /* Bank 2 Write Access Time = 6 cycles */ +#define B2WAT_7 0x00007000 /* Bank 2 Write Access Time = 7 cycles */ +#define B2WAT_8 0x00008000 /* Bank 2 Write Access Time = 8 cycles */ +#define B2WAT_9 0x00009000 /* Bank 2 Write Access Time = 9 cycles */ +#define B2WAT_10 0x0000A000 /* Bank 2 Write Access Time = 10 cycles */ +#define B2WAT_11 0x0000B000 /* Bank 2 Write Access Time = 11 cycles */ +#define B2WAT_12 0x0000C000 /* Bank 2 Write Access Time = 12 cycles */ +#define B2WAT_13 0x0000D000 /* Bank 2 Write Access Time = 13 cycles */ +#define B2WAT_14 0x0000E000 /* Bank 2 Write Access Time = 14 cycles */ +#define B2WAT_15 0x0000F000 /* Bank 2 Write Access Time = 15 cycles */ +#define B3RDYEN 0x00010000 /* Bank 3 RDY enable, 0=disable, 1=enable */ +#define B3RDYPOL 0x00020000 /* Bank 3 RDY Active high, 0=active low, 1=active high */ +#define B3TT_1 0x00040000 /* Bank 3 Transition Time from Read to Write = 1 cycle */ +#define B3TT_2 0x00080000 /* Bank 3 Transition Time from Read to Write = 2 cycles */ +#define B3TT_3 0x000C0000 /* Bank 3 Transition Time from Read to Write = 3 cycles */ +#define B3TT_4 0x00000000 /* Bank 3 Transition Time from Read to Write = 4 cycles */ +#define B3ST_1 0x00100000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */ +#define B3ST_2 0x00200000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */ +#define B3ST_3 0x00300000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */ +#define B3ST_4 0x00000000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */ +#define B3HT_1 0x00400000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */ +#define B3HT_2 0x00800000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */ +#define B3HT_3 0x00C00000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */ +#define B3HT_0 0x00000000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */ +#define B3RAT_1 0x01000000 /* Bank 3 Read Access Time = 1 cycle */ +#define B3RAT_2 0x02000000 /* Bank 3 Read Access Time = 2 cycles */ +#define B3RAT_3 0x03000000 /* Bank 3 Read Access Time = 3 cycles */ +#define B3RAT_4 0x04000000 /* Bank 3 Read Access Time = 4 cycles */ +#define B3RAT_5 0x05000000 /* Bank 3 Read Access Time = 5 cycles */ +#define B3RAT_6 0x06000000 /* Bank 3 Read Access Time = 6 cycles */ +#define B3RAT_7 0x07000000 /* Bank 3 Read Access Time = 7 cycles */ +#define B3RAT_8 
0x08000000 /* Bank 3 Read Access Time = 8 cycles */ +#define B3RAT_9 0x09000000 /* Bank 3 Read Access Time = 9 cycles */ +#define B3RAT_10 0x0A000000 /* Bank 3 Read Access Time = 10 cycles */ +#define B3RAT_11 0x0B000000 /* Bank 3 Read Access Time = 11 cycles */ +#define B3RAT_12 0x0C000000 /* Bank 3 Read Access Time = 12 cycles */ +#define B3RAT_13 0x0D000000 /* Bank 3 Read Access Time = 13 cycles */ +#define B3RAT_14 0x0E000000 /* Bank 3 Read Access Time = 14 cycles */ +#define B3RAT_15 0x0F000000 /* Bank 3 Read Access Time = 15 cycles */ +#define B3WAT_1 0x10000000 /* Bank 3 Write Access Time = 1 cycle */ +#define B3WAT_2 0x20000000 /* Bank 3 Write Access Time = 2 cycles */ +#define B3WAT_3 0x30000000 /* Bank 3 Write Access Time = 3 cycles */ +#define B3WAT_4 0x40000000 /* Bank 3 Write Access Time = 4 cycles */ +#define B3WAT_5 0x50000000 /* Bank 3 Write Access Time = 5 cycles */ +#define B3WAT_6 0x60000000 /* Bank 3 Write Access Time = 6 cycles */ +#define B3WAT_7 0x70000000 /* Bank 3 Write Access Time = 7 cycles */ +#define B3WAT_8 0x80000000 /* Bank 3 Write Access Time = 8 cycles */ +#define B3WAT_9 0x90000000 /* Bank 3 Write Access Time = 9 cycles */ +#define B3WAT_10 0xA0000000 /* Bank 3 Write Access Time = 10 cycles */ +#define B3WAT_11 0xB0000000 /* Bank 3 Write Access Time = 11 cycles */ +#define B3WAT_12 0xC0000000 /* Bank 3 Write Access Time = 12 cycles */ +#define B3WAT_13 0xD0000000 /* Bank 3 Write Access Time = 13 cycles */ +#define B3WAT_14 0xE0000000 /* Bank 3 Write Access Time = 14 cycles */ +#define B3WAT_15 0xF0000000 /* Bank 3 Write Access Time = 15 cycles */ + +/* ********************** SDRAM CONTROLLER MASKS *************************** */ + +/* EBIU_SDGCTL Masks */ +#define SCTLE 0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */ +#define CL_2 0x00000008 /* SDRAM CAS latency = 2 cycles */ +#define CL_3 0x0000000C /* SDRAM CAS latency = 3 cycles */ +#define PFE 0x00000010 /* Enable SDRAM prefetch */ +#define PFP 0x00000020 /* Prefetch has priority over AMC requests */ +#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */ +#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */ +#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */ +#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */ +#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */ +#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */ +#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */ +#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */ +#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */ +#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */ +#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */ +#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */ +#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */ +#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */ +#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */ +#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */ +#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */ +#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */ +#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */ +#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */ +#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */ +#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */ +#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */ +#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */ +#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */ +#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */ +#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */ +#define TRCD_6 0x00030000 /* SDRAM 
tRCD = 6 cycles */
+#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
+#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
+#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
+#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
+#define PUPSD 0x00200000 /* Power-up start delay */
+#define PSM 0x00400000 /* SDRAM power-up sequence = Precharge, mode register set, 8 CBR refresh cycles */
+#define PSS 0x00800000 /* Enable SDRAM power-up sequence on next SDRAM access */
+#define SRFS 0x01000000 /* Start SDRAM self-refresh mode */
+#define EBUFE 0x02000000 /* Enable external buffering timing */
+#define FBBRW 0x04000000 /* Fast back-to-back read/write enable */
+#define EMREN 0x10000000 /* Extended mode register enable */
+#define TCSR 0x20000000 /* Temp compensated self refresh value 85 deg C */
+#define CDDBG 0x40000000 /* Tristate SDRAM controls during bus grant */
+
+/* EBIU_SDBCTL Masks */
+#define EB0_E 0x00000001 /* Enable SDRAM external bank 0 */
+#define EB0_SZ_16 0x00000000 /* SDRAM external bank size = 16MB */
+#define EB0_SZ_32 0x00000002 /* SDRAM external bank size = 32MB */
+#define EB0_SZ_64 0x00000004 /* SDRAM external bank size = 64MB */
+#define EB0_SZ_128 0x00000006 /* SDRAM external bank size = 128MB */
+#define EB0_CAW_8 0x00000000 /* SDRAM external bank column address width = 8 bits */
+#define EB0_CAW_9 0x00000010 /* SDRAM external bank column address width = 9 bits */
+#define EB0_CAW_10 0x00000020 /* SDRAM external bank column address width = 10 bits */
+#define EB0_CAW_11 0x00000030 /* SDRAM external bank column address width = 11 bits */
+
+#define EB1_E 0x00000100 /* Enable SDRAM external bank 1 */
+#define EB1__SZ_16 0x00000000 /* SDRAM external bank size = 16MB */
+#define EB1__SZ_32 0x00000200 /* SDRAM external bank size = 32MB */
+#define EB1__SZ_64 0x00000400 /* SDRAM external bank size = 64MB */
+#define EB1__SZ_128 0x00000600 /* SDRAM external bank size = 128MB */
+#define EB1__CAW_8 0x00000000 /* SDRAM external bank column address width = 8 bits */
+#define EB1__CAW_9 0x00001000 /* SDRAM external bank column address width = 9 bits */
+#define EB1__CAW_10 0x00002000 /* SDRAM external bank column address width = 10 bits */
+#define EB1__CAW_11 0x00003000 /* SDRAM external bank column address width = 11 bits */
+
+#define EB2__E 0x00010000 /* Enable SDRAM external bank 2 */
+#define EB2__SZ_16 0x00000000 /* SDRAM external bank size = 16MB */
+#define EB2__SZ_32 0x00020000 /* SDRAM external bank size = 32MB */
+#define EB2__SZ_64 0x00040000 /* SDRAM external bank size = 64MB */
+#define EB2__SZ_128 0x00060000 /* SDRAM external bank size = 128MB */
+#define EB2__CAW_8 0x00000000 /* SDRAM external bank column address width = 8 bits */
+#define EB2__CAW_9 0x00100000 /* SDRAM external bank column address width = 9 bits */
+#define EB2__CAW_10 0x00200000 /* SDRAM external bank column address width = 10 bits */
+#define EB2__CAW_11 0x00300000 /* SDRAM external bank column address width = 11 bits */
+
+#define EB3__E 0x01000000 /* Enable SDRAM external bank 3 */
+#define EB3__SZ_16 0x00000000 /* SDRAM external bank size = 16MB */
+#define EB3__SZ_32 0x02000000 /* SDRAM external bank size = 32MB */
+#define EB3__SZ_64 0x04000000 /* SDRAM external bank size = 64MB */
+#define EB3__SZ_128 0x06000000 /* SDRAM external bank size = 128MB */
+#define EB3__CAW_8 0x00000000 /* SDRAM external bank column address width = 8 bits */
+#define EB3__CAW_9 0x10000000 /* SDRAM external bank column address width = 9 bits */
+#define EB3__CAW_10 0x20000000 /* SDRAM external bank column address width = 10 bits */
+#define EB3__CAW_11 0x30000000 /* SDRAM external bank column address width = 11 bits */
+
+/* EBIU_SDSTAT Masks */
+#define SDCI 0x00000001 /* SDRAM controller is idle */
+#define SDSRA 0x00000002 /* SDRAM self-refresh is active */
+#define SDPUA 0x00000004 /* SDRAM power up active */
+#define SDRS 0x00000008 /* SDRAM is in reset state */
+#define SDEASE 0x00000010 /* SDRAM EAB sticky error status - W1C */
+#define BGSTAT 0x00000020 /* Bus granted */
+
+/* VR_CTL Masks */
+#define WAKE 0x100
+#define VLEV_6 0x60
+#define VLEV_7 0x70
+#define VLEV_8 0x80
+#define VLEV_9 0x90
+#define VLEV_10 0xA0
+#define VLEV_11 0xB0
+#define VLEV_12 0xC0
+#define VLEV_13 0xD0
+#define VLEV_14 0xE0
+#define VLEV_15 0xF0
+#define FREQ_3 0x03
+
+#endif /* _DEF_BF561_H */
diff --git a/include/asm-blackfin/mach-bf561/dma.h b/include/asm-blackfin/mach-bf561/dma.h
new file mode 100644
index 000000000000..21d982003e75
--- /dev/null
+++ b/include/asm-blackfin/mach-bf561/dma.h
@@ -0,0 +1,35 @@
+/*****************************************************************************
+*
+* BF-561 Specific Declarations
+*
+****************************************************************************/
+
+#ifndef _MACH_DMA_H_
+#define _MACH_DMA_H_
+
+#define MAX_BLACKFIN_DMA_CHANNEL 36
+
+#define CH_PPI0 0
+#define CH_PPI (CH_PPI0)
+#define CH_PPI1 1
+#define CH_SPORT0_RX 12
+#define CH_SPORT0_TX 13
+#define CH_SPORT1_RX 14
+#define CH_SPORT1_TX 15
+#define CH_SPI 16
+#define CH_UART_RX 17
+#define CH_UART_TX 18
+#define CH_MEM_STREAM0_DEST 24 /* TX */
+#define CH_MEM_STREAM0_SRC 25 /* RX */
+#define CH_MEM_STREAM1_DEST 26 /* TX */
+#define CH_MEM_STREAM1_SRC 27 /* RX */
+#define CH_MEM_STREAM2_DEST 28
+#define CH_MEM_STREAM2_SRC 29
+#define CH_MEM_STREAM3_SRC 30
+#define CH_MEM_STREAM3_DEST 31
+#define CH_IMEM_STREAM0_DEST 32
+#define CH_IMEM_STREAM0_SRC 33
+#define CH_IMEM_STREAM1_SRC 34
+#define CH_IMEM_STREAM1_DEST 35
+
+#endif
diff --git a/include/asm-blackfin/mach-bf561/irq.h b/include/asm-blackfin/mach-bf561/irq.h
new file mode 100644
index 000000000000..a753ce720d74
--- /dev/null
+++ b/include/asm-blackfin/mach-bf561/irq.h
@@ -0,0 +1,450 @@
+
+/*
+ * File: include/asm-blackfin/mach-bf561/irq.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ + +#ifndef _BF561_IRQ_H_ +#define _BF561_IRQ_H_ + +/*********************************************************************** + * Interrupt source definitions: + Event Source Core Event Name IRQ No + (highest priority) + Emulation Events EMU 0 + Reset RST 1 + NMI NMI 2 + Exception EVX 3 + Reserved -- 4 + Hardware Error IVHW 5 + Core Timer IVTMR 6 * + + PLL Wakeup Interrupt IVG7 7 + DMA1 Error (generic) IVG7 8 + DMA2 Error (generic) IVG7 9 + IMDMA Error (generic) IVG7 10 + PPI1 Error Interrupt IVG7 11 + PPI2 Error Interrupt IVG7 12 + SPORT0 Error Interrupt IVG7 13 + SPORT1 Error Interrupt IVG7 14 + SPI Error Interrupt IVG7 15 + UART Error Interrupt IVG7 16 + Reserved Interrupt IVG7 17 + + DMA1 0 Interrupt(PPI1) IVG8 18 + DMA1 1 Interrupt(PPI2) IVG8 19 + DMA1 2 Interrupt IVG8 20 + DMA1 3 Interrupt IVG8 21 + DMA1 4 Interrupt IVG8 22 + DMA1 5 Interrupt IVG8 23 + DMA1 6 Interrupt IVG8 24 + DMA1 7 Interrupt IVG8 25 + DMA1 8 Interrupt IVG8 26 + DMA1 9 Interrupt IVG8 27 + DMA1 10 Interrupt IVG8 28 + DMA1 11 Interrupt IVG8 29 + + DMA2 0 (SPORT0 RX) IVG9 30 + DMA2 1 (SPORT0 TX) IVG9 31 + DMA2 2 (SPORT1 RX) IVG9 32 + DMA2 3 (SPORT2 TX) IVG9 33 + DMA2 4 (SPI) IVG9 34 + DMA2 5 (UART RX) IVG9 35 + DMA2 6 (UART TX) IVG9 36 + DMA2 7 Interrupt IVG9 37 + DMA2 8 Interrupt IVG9 38 + DMA2 9 Interrupt IVG9 39 + DMA2 10 Interrupt IVG9 40 + DMA2 11 Interrupt IVG9 41 + + TIMER 0 Interrupt IVG10 42 + TIMER 1 Interrupt IVG10 43 + TIMER 2 Interrupt IVG10 44 + TIMER 3 Interrupt IVG10 45 + TIMER 4 Interrupt IVG10 46 + TIMER 5 Interrupt IVG10 47 + TIMER 6 Interrupt IVG10 48 + TIMER 7 Interrupt IVG10 49 + TIMER 8 Interrupt IVG10 50 + TIMER 9 Interrupt IVG10 51 + TIMER 10 Interrupt IVG10 52 + TIMER 11 Interrupt IVG10 53 + + Programmable Flags0 A (8) IVG11 54 + Programmable Flags0 B (8) IVG11 55 + Programmable Flags1 A (8) IVG11 56 + Programmable Flags1 B (8) IVG11 57 + Programmable Flags2 A (8) IVG11 58 + Programmable Flags2 B (8) IVG11 59 + + MDMA1 0 write/read INT IVG8 60 + MDMA1 1 write/read INT IVG8 61 + + MDMA2 0 write/read INT IVG9 62 + MDMA2 1 write/read INT IVG9 63 + + IMDMA 0 write/read INT IVG12 64 + IMDMA 1 write/read INT IVG12 65 + + Watch Dog Timer IVG13 66 + + Reserved interrupt IVG7 67 + Reserved interrupt IVG7 68 + Supplemental interrupt 0 IVG7 69 + supplemental interrupt 1 IVG7 70 + + Software Interrupt 1 IVG14 71 + Software Interrupt 2 IVG15 72 * + (lowest priority) + **********************************************************************/ + +#define SYS_IRQS 72 +#define NR_PERI_INTS 64 + +/* + * The ABSTRACT IRQ definitions + * the first seven of the following are fixed, + * the rest you change if you need to. 
+ */
+/* IVG 0-6 */
+#define IRQ_EMU 0 /* Emulation */
+#define IRQ_RST 1 /* Reset */
+#define IRQ_NMI 2 /* Non Maskable Interrupt */
+#define IRQ_EVX 3 /* Exception */
+#define IRQ_UNUSED 4 /* Reserved interrupt */
+#define IRQ_HWERR 5 /* Hardware Error */
+#define IRQ_CORETMR 6 /* Core timer */
+
+#define IVG_BASE 7
+/* IVG 7 */
+#define IRQ_PLL_WAKEUP (IVG_BASE + 0) /* PLL Wakeup Interrupt */
+#define IRQ_DMA1_ERROR (IVG_BASE + 1) /* DMA1 Error (general) */
+#define IRQ_DMA_ERROR IRQ_DMA1_ERROR /* DMA1 Error (general) */
+#define IRQ_DMA2_ERROR (IVG_BASE + 2) /* DMA2 Error (general) */
+#define IRQ_IMDMA_ERROR (IVG_BASE + 3) /* IMDMA Error Interrupt */
+#define IRQ_PPI1_ERROR (IVG_BASE + 4) /* PPI1 Error Interrupt */
+#define IRQ_PPI_ERROR IRQ_PPI1_ERROR /* PPI1 Error Interrupt */
+#define IRQ_PPI2_ERROR (IVG_BASE + 5) /* PPI2 Error Interrupt */
+#define IRQ_SPORT0_ERROR (IVG_BASE + 6) /* SPORT0 Error Interrupt */
+#define IRQ_SPORT1_ERROR (IVG_BASE + 7) /* SPORT1 Error Interrupt */
+#define IRQ_SPI_ERROR (IVG_BASE + 8) /* SPI Error Interrupt */
+#define IRQ_UART_ERROR (IVG_BASE + 9) /* UART Error Interrupt */
+#define IRQ_RESERVED_ERROR (IVG_BASE + 10) /* Reserved Interrupt */
+/* IVG 8 */
+#define IRQ_DMA1_0 (IVG_BASE + 11) /* DMA1 0 Interrupt(PPI1) */
+#define IRQ_PPI IRQ_DMA1_0 /* DMA1 0 Interrupt(PPI1) */
+#define IRQ_PPI0 IRQ_DMA1_0 /* DMA1 0 Interrupt(PPI1) */
+#define IRQ_DMA1_1 (IVG_BASE + 12) /* DMA1 1 Interrupt(PPI2) */
+#define IRQ_PPI1 IRQ_DMA1_1 /* DMA1 1 Interrupt(PPI2) */
+#define IRQ_DMA1_2 (IVG_BASE + 13) /* DMA1 2 Interrupt */
+#define IRQ_DMA1_3 (IVG_BASE + 14) /* DMA1 3 Interrupt */
+#define IRQ_DMA1_4 (IVG_BASE + 15) /* DMA1 4 Interrupt */
+#define IRQ_DMA1_5 (IVG_BASE + 16) /* DMA1 5 Interrupt */
+#define IRQ_DMA1_6 (IVG_BASE + 17) /* DMA1 6 Interrupt */
+#define IRQ_DMA1_7 (IVG_BASE + 18) /* DMA1 7 Interrupt */
+#define IRQ_DMA1_8 (IVG_BASE + 19) /* DMA1 8 Interrupt */
+#define IRQ_DMA1_9 (IVG_BASE + 20) /* DMA1 9 Interrupt */
+#define IRQ_DMA1_10 (IVG_BASE + 21) /* DMA1 10 Interrupt */
+#define IRQ_DMA1_11 (IVG_BASE + 22) /* DMA1 11 Interrupt */
+/* IVG 9 */
+#define IRQ_DMA2_0 (IVG_BASE + 23) /* DMA2 0 (SPORT0 RX) */
+#define IRQ_SPORT0_RX IRQ_DMA2_0 /* DMA2 0 (SPORT0 RX) */
+#define IRQ_DMA2_1 (IVG_BASE + 24) /* DMA2 1 (SPORT0 TX) */
+#define IRQ_SPORT0_TX IRQ_DMA2_1 /* DMA2 1 (SPORT0 TX) */
+#define IRQ_DMA2_2 (IVG_BASE + 25) /* DMA2 2 (SPORT1 RX) */
+#define IRQ_SPORT1_RX IRQ_DMA2_2 /* DMA2 2 (SPORT1 RX) */
+#define IRQ_DMA2_3 (IVG_BASE + 26) /* DMA2 3 (SPORT1 TX) */
+#define IRQ_SPORT1_TX IRQ_DMA2_3 /* DMA2 3 (SPORT1 TX) */
+#define IRQ_DMA2_4 (IVG_BASE + 27) /* DMA2 4 (SPI) */
+#define IRQ_SPI IRQ_DMA2_4 /* DMA2 4 (SPI) */
+#define IRQ_DMA2_5 (IVG_BASE + 28) /* DMA2 5 (UART RX) */
+#define IRQ_UART_RX IRQ_DMA2_5 /* DMA2 5 (UART RX) */
+#define IRQ_DMA2_6 (IVG_BASE + 29) /* DMA2 6 (UART TX) */
+#define IRQ_UART_TX IRQ_DMA2_6 /* DMA2 6 (UART TX) */
+#define IRQ_DMA2_7 (IVG_BASE + 30) /* DMA2 7 Interrupt */
+#define IRQ_DMA2_8 (IVG_BASE + 31) /* DMA2 8 Interrupt */
+#define IRQ_DMA2_9 (IVG_BASE + 32) /* DMA2 9 Interrupt */
+#define IRQ_DMA2_10 (IVG_BASE + 33) /* DMA2 10 Interrupt */
+#define IRQ_DMA2_11 (IVG_BASE + 34) /* DMA2 11 Interrupt */
+/* IVG 10 */
+#define IRQ_TIMER0 (IVG_BASE + 35) /* TIMER 0 Interrupt */
+#define IRQ_TIMER1 (IVG_BASE + 36) /* TIMER 1 Interrupt */
+#define IRQ_TIMER2 (IVG_BASE + 37) /* TIMER 2 Interrupt */
+#define IRQ_TIMER3 (IVG_BASE + 38) /* TIMER 3 Interrupt */
+#define IRQ_TIMER4 (IVG_BASE + 39) /* TIMER 4 Interrupt */
+#define
IRQ_TIMER5 (IVG_BASE + 40) /* TIMER 5 Interrupt */ +#define IRQ_TIMER6 (IVG_BASE + 41) /* TIMER 6 Interrupt */ +#define IRQ_TIMER7 (IVG_BASE + 42) /* TIMER 7 Interrupt */ +#define IRQ_TIMER8 (IVG_BASE + 43) /* TIMER 8 Interrupt */ +#define IRQ_TIMER9 (IVG_BASE + 44) /* TIMER 9 Interrupt */ +#define IRQ_TIMER10 (IVG_BASE + 45) /* TIMER 10 Interrupt */ +#define IRQ_TIMER11 (IVG_BASE + 46) /* TIMER 11 Interrupt */ +/* IVG 11 */ +#define IRQ_PROG0_INTA (IVG_BASE + 47) /* Programmable Flags0 A (8) */ +#define IRQ_PROG_INTA IRQ_PROG0_INTA /* Programmable Flags0 A (8) */ +#define IRQ_PROG0_INTB (IVG_BASE + 48) /* Programmable Flags0 B (8) */ +#define IRQ_PROG_INTB IRQ_PROG0_INTB /* Programmable Flags0 B (8) */ +#define IRQ_PROG1_INTA (IVG_BASE + 49) /* Programmable Flags1 A (8) */ +#define IRQ_PROG1_INTB (IVG_BASE + 50) /* Programmable Flags1 B (8) */ +#define IRQ_PROG2_INTA (IVG_BASE + 51) /* Programmable Flags2 A (8) */ +#define IRQ_PROG2_INTB (IVG_BASE + 52) /* Programmable Flags2 B (8) */ +/* IVG 8 */ +#define IRQ_DMA1_WRRD0 (IVG_BASE + 53) /* MDMA1 0 write/read INT */ +#define IRQ_DMA_WRRD0 IRQ_DMA1_WRRD0 /* MDMA1 0 write/read INT */ +#define IRQ_MEM_DMA0 IRQ_DMA1_WRRD0 +#define IRQ_DMA1_WRRD1 (IVG_BASE + 54) /* MDMA1 1 write/read INT */ +#define IRQ_DMA_WRRD1 IRQ_DMA1_WRRD1 /* MDMA1 1 write/read INT */ +#define IRQ_MEM_DMA1 IRQ_DMA1_WRRD1 +/* IVG 9 */ +#define IRQ_DMA2_WRRD0 (IVG_BASE + 55) /* MDMA2 0 write/read INT */ +#define IRQ_MEM_DMA2 IRQ_DMA2_WRRD0 +#define IRQ_DMA2_WRRD1 (IVG_BASE + 56) /* MDMA2 1 write/read INT */ +#define IRQ_MEM_DMA3 IRQ_DMA2_WRRD1 +/* IVG 12 */ +#define IRQ_IMDMA_WRRD0 (IVG_BASE + 57) /* IMDMA 0 write/read INT */ +#define IRQ_IMEM_DMA0 IRQ_IMDMA_WRRD0 +#define IRQ_IMDMA_WRRD1 (IVG_BASE + 58) /* IMDMA 1 write/read INT */ +#define IRQ_IMEM_DMA1 IRQ_IMDMA_WRRD1 +/* IVG 13 */ +#define IRQ_WATCH (IVG_BASE + 59) /* Watch Dog Timer */ +/* IVG 7 */ +#define IRQ_RESERVED_1 (IVG_BASE + 60) /* Reserved interrupt */ +#define IRQ_RESERVED_2 (IVG_BASE + 61) /* Reserved interrupt */ +#define IRQ_SUPPLE_0 (IVG_BASE + 62) /* Supplemental interrupt 0 */ +#define IRQ_SUPPLE_1 (IVG_BASE + 63) /* supplemental interrupt 1 */ +#define IRQ_SW_INT1 71 /* Software Interrupt 1 */ +#define IRQ_SW_INT2 72 /* Software Interrupt 2 */ + /* reserved for SYSCALL */ +#define IRQ_PF0 73 +#define IRQ_PF1 74 +#define IRQ_PF2 75 +#define IRQ_PF3 76 +#define IRQ_PF4 77 +#define IRQ_PF5 78 +#define IRQ_PF6 79 +#define IRQ_PF7 80 +#define IRQ_PF8 81 +#define IRQ_PF9 82 +#define IRQ_PF10 83 +#define IRQ_PF11 84 +#define IRQ_PF12 85 +#define IRQ_PF13 86 +#define IRQ_PF14 87 +#define IRQ_PF15 88 +#define IRQ_PF16 89 +#define IRQ_PF17 90 +#define IRQ_PF18 91 +#define IRQ_PF19 92 +#define IRQ_PF20 93 +#define IRQ_PF21 94 +#define IRQ_PF22 95 +#define IRQ_PF23 96 +#define IRQ_PF24 97 +#define IRQ_PF25 98 +#define IRQ_PF26 99 +#define IRQ_PF27 100 +#define IRQ_PF28 101 +#define IRQ_PF29 102 +#define IRQ_PF30 103 +#define IRQ_PF31 104 +#define IRQ_PF32 105 +#define IRQ_PF33 106 +#define IRQ_PF34 107 +#define IRQ_PF35 108 +#define IRQ_PF36 109 +#define IRQ_PF37 110 +#define IRQ_PF38 111 +#define IRQ_PF39 112 +#define IRQ_PF40 113 +#define IRQ_PF41 114 +#define IRQ_PF42 115 +#define IRQ_PF43 116 +#define IRQ_PF44 117 +#define IRQ_PF45 118 +#define IRQ_PF46 119 +#define IRQ_PF47 120 + +#ifdef CONFIG_IRQCHIP_DEMUX_GPIO +#define NR_IRQS (IRQ_PF47 + 1) +#else +#define NR_IRQS SYS_IRQS +#endif + +#define IVG7 7 +#define IVG8 8 +#define IVG9 9 +#define IVG10 10 +#define IVG11 11 +#define IVG12 12 +#define IVG13 13 
+#define IVG14 14 +#define IVG15 15 + +/* + * DEFAULT PRIORITIES: + */ + +#define CONFIG_DEF_PLL_WAKEUP 7 +#define CONFIG_DEF_DMA1_ERROR 7 +#define CONFIG_DEF_DMA2_ERROR 7 +#define CONFIG_DEF_IMDMA_ERROR 7 +#define CONFIG_DEF_PPI1_ERROR 7 +#define CONFIG_DEF_PPI2_ERROR 7 +#define CONFIG_DEF_SPORT0_ERROR 7 +#define CONFIG_DEF_SPORT1_ERROR 7 +#define CONFIG_DEF_SPI_ERROR 7 +#define CONFIG_DEF_UART_ERROR 7 +#define CONFIG_DEF_RESERVED_ERROR 7 +#define CONFIG_DEF_DMA1_0 8 +#define CONFIG_DEF_DMA1_1 8 +#define CONFIG_DEF_DMA1_2 8 +#define CONFIG_DEF_DMA1_3 8 +#define CONFIG_DEF_DMA1_4 8 +#define CONFIG_DEF_DMA1_5 8 +#define CONFIG_DEF_DMA1_6 8 +#define CONFIG_DEF_DMA1_7 8 +#define CONFIG_DEF_DMA1_8 8 +#define CONFIG_DEF_DMA1_9 8 +#define CONFIG_DEF_DMA1_10 8 +#define CONFIG_DEF_DMA1_11 8 +#define CONFIG_DEF_DMA2_0 9 +#define CONFIG_DEF_DMA2_1 9 +#define CONFIG_DEF_DMA2_2 9 +#define CONFIG_DEF_DMA2_3 9 +#define CONFIG_DEF_DMA2_4 9 +#define CONFIG_DEF_DMA2_5 9 +#define CONFIG_DEF_DMA2_6 9 +#define CONFIG_DEF_DMA2_7 9 +#define CONFIG_DEF_DMA2_8 9 +#define CONFIG_DEF_DMA2_9 9 +#define CONFIG_DEF_DMA2_10 9 +#define CONFIG_DEF_DMA2_11 9 +#define CONFIG_DEF_TIMER0 10 +#define CONFIG_DEF_TIMER1 10 +#define CONFIG_DEF_TIMER2 10 +#define CONFIG_DEF_TIMER3 10 +#define CONFIG_DEF_TIMER4 10 +#define CONFIG_DEF_TIMER5 10 +#define CONFIG_DEF_TIMER6 10 +#define CONFIG_DEF_TIMER7 10 +#define CONFIG_DEF_TIMER8 10 +#define CONFIG_DEF_TIMER9 10 +#define CONFIG_DEF_TIMER10 10 +#define CONFIG_DEF_TIMER11 10 +#define CONFIG_DEF_PROG0_INTA 11 +#define CONFIG_DEF_PROG0_INTB 11 +#define CONFIG_DEF_PROG1_INTA 11 +#define CONFIG_DEF_PROG1_INTB 11 +#define CONFIG_DEF_PROG2_INTA 11 +#define CONFIG_DEF_PROG2_INTB 11 +#define CONFIG_DEF_DMA1_WRRD0 8 +#define CONFIG_DEF_DMA1_WRRD1 8 +#define CONFIG_DEF_DMA2_WRRD0 9 +#define CONFIG_DEF_DMA2_WRRD1 9 +#define CONFIG_DEF_IMDMA_WRRD0 12 +#define CONFIG_DEF_IMDMA_WRRD1 12 +#define CONFIG_DEF_WATCH 13 +#define CONFIG_DEF_RESERVED_1 7 +#define CONFIG_DEF_RESERVED_2 7 +#define CONFIG_DEF_SUPPLE_0 7 +#define CONFIG_DEF_SUPPLE_1 7 + +/* IAR0 BIT FIELDS */ +#define IRQ_PLL_WAKEUP_POS 0 +#define IRQ_DMA1_ERROR_POS 4 +#define IRQ_DMA2_ERROR_POS 8 +#define IRQ_IMDMA_ERROR_POS 12 +#define IRQ_PPI0_ERROR_POS 16 +#define IRQ_PPI1_ERROR_POS 20 +#define IRQ_SPORT0_ERROR_POS 24 +#define IRQ_SPORT1_ERROR_POS 28 +/* IAR1 BIT FIELDS */ +#define IRQ_SPI_ERROR_POS 0 +#define IRQ_UART_ERROR_POS 4 +#define IRQ_RESERVED_ERROR_POS 8 +#define IRQ_DMA1_0_POS 12 +#define IRQ_DMA1_1_POS 16 +#define IRQ_DMA1_2_POS 20 +#define IRQ_DMA1_3_POS 24 +#define IRQ_DMA1_4_POS 28 +/* IAR2 BIT FIELDS */ +#define IRQ_DMA1_5_POS 0 +#define IRQ_DMA1_6_POS 4 +#define IRQ_DMA1_7_POS 8 +#define IRQ_DMA1_8_POS 12 +#define IRQ_DMA1_9_POS 16 +#define IRQ_DMA1_10_POS 20 +#define IRQ_DMA1_11_POS 24 +#define IRQ_DMA2_0_POS 28 +/* IAR3 BIT FIELDS */ +#define IRQ_DMA2_1_POS 0 +#define IRQ_DMA2_2_POS 4 +#define IRQ_DMA2_3_POS 8 +#define IRQ_DMA2_4_POS 12 +#define IRQ_DMA2_5_POS 16 +#define IRQ_DMA2_6_POS 20 +#define IRQ_DMA2_7_POS 24 +#define IRQ_DMA2_8_POS 28 +/* IAR4 BIT FIELDS */ +#define IRQ_DMA2_9_POS 0 +#define IRQ_DMA2_10_POS 4 +#define IRQ_DMA2_11_POS 8 +#define IRQ_TIMER0_POS 12 +#define IRQ_TIMER1_POS 16 +#define IRQ_TIMER2_POS 20 +#define IRQ_TIMER3_POS 24 +#define IRQ_TIMER4_POS 28 +/* IAR5 BIT FIELDS */ +#define IRQ_TIMER5_POS 0 +#define IRQ_TIMER6_POS 4 +#define IRQ_TIMER7_POS 8 +#define IRQ_TIMER8_POS 12 +#define IRQ_TIMER9_POS 16 +#define IRQ_TIMER10_POS 20 +#define IRQ_TIMER11_POS 24 +#define IRQ_PROG0_INTA_POS 28 +/* 
IAR6 BIT FIELDS */ +#define IRQ_PROG0_INTB_POS 0 +#define IRQ_PROG1_INTA_POS 4 +#define IRQ_PROG1_INTB_POS 8 +#define IRQ_PROG2_INTA_POS 12 +#define IRQ_PROG2_INTB_POS 16 +#define IRQ_DMA1_WRRD0_POS 20 +#define IRQ_DMA1_WRRD1_POS 24 +#define IRQ_DMA2_WRRD0_POS 28 +/* IAR7 BIT FIELDS */ +#define IRQ_DMA2_WRRD1_POS 0 +#define IRQ_IMDMA_WRRD0_POS 4 +#define IRQ_IMDMA_WRRD1_POS 8 +#define IRQ_WDTIMER_POS 12 +#define IRQ_RESERVED_1_POS 16 +#define IRQ_RESERVED_2_POS 20 +#define IRQ_SUPPLE_0_POS 24 +#define IRQ_SUPPLE_1_POS 28 + +#endif /* _BF561_IRQ_H_ */ diff --git a/include/asm-blackfin/mach-bf561/mem_init.h b/include/asm-blackfin/mach-bf561/mem_init.h new file mode 100644 index 000000000000..439a5895b346 --- /dev/null +++ b/include/asm-blackfin/mach-bf561/mem_init.h @@ -0,0 +1,322 @@ +/* + * File: include/asm-blackfin/mach-bf561/mem_init.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Rev: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#if (CONFIG_MEM_MT48LC16M16A2TG_75 || CONFIG_MEM_MT48LC64M4A2FB_7E || CONFIG_MEM_GENERIC_BOARD || CONFIG_MEM_MT48LC8M32B2B5_7) +#if (CONFIG_SCLK_HZ > 119402985) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_7 +#define SDRAM_tRAS_num 7 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_6 +#define SDRAM_tRAS_num 6 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_5 +#define SDRAM_tRAS_num 5 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_4 +#define SDRAM_tRAS_num 4 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866) +#define SDRAM_tRP TRP_2 +#define SDRAM_tRP_num 2 +#define SDRAM_tRAS TRAS_3 +#define SDRAM_tRAS_num 3 +#define SDRAM_tRCD TRCD_2 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_4 +#define SDRAM_tRAS_num 3 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_3 +#define SDRAM_tRAS_num 3 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 
+#define SDRAM_tRAS TRAS_2 +#define SDRAM_tRAS_num 2 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#if (CONFIG_SCLK_HZ <= 29850746) +#define SDRAM_tRP TRP_1 +#define SDRAM_tRP_num 1 +#define SDRAM_tRAS TRAS_1 +#define SDRAM_tRAS_num 1 +#define SDRAM_tRCD TRCD_1 +#define SDRAM_tWR TWR_2 +#endif +#endif + +#if (CONFIG_MEM_MT48LC16M16A2TG_75) + /*SDRAM INFORMATION: */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_MT48LC64M4A2FB_7E) + /*SDRAM INFORMATION: */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_MT48LC8M32B2B5_7) + /*SDRAM INFORMATION: */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 4096 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_GENERIC_BOARD) + /*SDRAM INFORMATION: Modify this for your board */ +#define SDRAM_Tref 64 /* Refresh period in milliseconds */ +#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ +#define SDRAM_CL CL_3 +#endif + +#if (CONFIG_MEM_SIZE == 128) +#define SDRAM_SIZE EB0_SZ_128 +#endif +#if (CONFIG_MEM_SIZE == 64) +#define SDRAM_SIZE EB0_SZ_64 +#endif +#if ( CONFIG_MEM_SIZE == 32) +#define SDRAM_SIZE EB0_SZ_32 +#endif +#if (CONFIG_MEM_SIZE == 16) +#define SDRAM_SIZE EB0_SZ_16 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 11) +#define SDRAM_WIDTH EB0_CAW_11 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 10) +#define SDRAM_WIDTH EB0_CAW_10 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 9) +#define SDRAM_WIDTH EB0_CAW_9 +#endif +#if (CONFIG_MEM_ADD_WIDTH == 8) +#define SDRAM_WIDTH EB0_CAW_8 +#endif + +#define mem_SDBCTL (SDRAM_WIDTH | SDRAM_SIZE | EB0_E) + +/* Equation from section 17 (p17-46) of BF533 HRM */ +#define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) + +/* Enable SCLK Out */ +#define mem_SDGCTL (SCTLE | SDRAM_CL | SDRAM_tRAS | SDRAM_tRP | SDRAM_tRCD | SDRAM_tWR | PSS) + +#if defined CONFIG_CLKIN_HALF +#define CLKIN_HALF 1 +#else +#define CLKIN_HALF 0 +#endif + +#if defined CONFIG_PLL_BYPASS +#define PLL_BYPASS 1 +#else +#define PLL_BYPASS 0 +#endif + +/***************************************Currently Not Being Used *********************************/ +#define flash_EBIU_AMBCTL_WAT ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 +#define flash_EBIU_AMBCTL_RAT ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 +#define flash_EBIU_AMBCTL_HT ((CONFIG_FLASH_SPEED_BHT * 4) / (4000000000 / CONFIG_SCLK_HZ)) +#define flash_EBIU_AMBCTL_ST ((CONFIG_FLASH_SPEED_BST * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 +#define flash_EBIU_AMBCTL_TT ((CONFIG_FLASH_SPEED_BTT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 + +#if (flash_EBIU_AMBCTL_TT > 3) +#define flash_EBIU_AMBCTL0_TT B0TT_4 +#endif +#if (flash_EBIU_AMBCTL_TT == 3) +#define flash_EBIU_AMBCTL0_TT B0TT_3 +#endif +#if (flash_EBIU_AMBCTL_TT == 2) +#define flash_EBIU_AMBCTL0_TT B0TT_2 +#endif +#if (flash_EBIU_AMBCTL_TT < 2) +#define flash_EBIU_AMBCTL0_TT B0TT_1 +#endif + +#if (flash_EBIU_AMBCTL_ST > 3) +#define flash_EBIU_AMBCTL0_ST B0ST_4 +#endif +#if (flash_EBIU_AMBCTL_ST == 3) +#define flash_EBIU_AMBCTL0_ST B0ST_3 +#endif +#if (flash_EBIU_AMBCTL_ST == 2) +#define flash_EBIU_AMBCTL0_ST B0ST_2 +#endif +#if (flash_EBIU_AMBCTL_ST < 2) +#define flash_EBIU_AMBCTL0_ST B0ST_1 +#endif + +#if (flash_EBIU_AMBCTL_HT > 
2) +#define flash_EBIU_AMBCTL0_HT B0HT_3 +#endif +#if (flash_EBIU_AMBCTL_HT == 2) +#define flash_EBIU_AMBCTL0_HT B0HT_2 +#endif +#if (flash_EBIU_AMBCTL_HT == 1) +#define flash_EBIU_AMBCTL0_HT B0HT_1 +#endif +#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0) +#define flash_EBIU_AMBCTL0_HT B0HT_0 +#endif +#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0) +#define flash_EBIU_AMBCTL0_HT B0HT_1 +#endif + +#if (flash_EBIU_AMBCTL_WAT > 14) +#define flash_EBIU_AMBCTL0_WAT B0WAT_15 +#endif +#if (flash_EBIU_AMBCTL_WAT == 14) +#define flash_EBIU_AMBCTL0_WAT B0WAT_14 +#endif +#if (flash_EBIU_AMBCTL_WAT == 13) +#define flash_EBIU_AMBCTL0_WAT B0WAT_13 +#endif +#if (flash_EBIU_AMBCTL_WAT == 12) +#define flash_EBIU_AMBCTL0_WAT B0WAT_12 +#endif +#if (flash_EBIU_AMBCTL_WAT == 11) +#define flash_EBIU_AMBCTL0_WAT B0WAT_11 +#endif +#if (flash_EBIU_AMBCTL_WAT == 10) +#define flash_EBIU_AMBCTL0_WAT B0WAT_10 +#endif +#if (flash_EBIU_AMBCTL_WAT == 9) +#define flash_EBIU_AMBCTL0_WAT B0WAT_9 +#endif +#if (flash_EBIU_AMBCTL_WAT == 8) +#define flash_EBIU_AMBCTL0_WAT B0WAT_8 +#endif +#if (flash_EBIU_AMBCTL_WAT == 7) +#define flash_EBIU_AMBCTL0_WAT B0WAT_7 +#endif +#if (flash_EBIU_AMBCTL_WAT == 6) +#define flash_EBIU_AMBCTL0_WAT B0WAT_6 +#endif +#if (flash_EBIU_AMBCTL_WAT == 5) +#define flash_EBIU_AMBCTL0_WAT B0WAT_5 +#endif +#if (flash_EBIU_AMBCTL_WAT == 4) +#define flash_EBIU_AMBCTL0_WAT B0WAT_4 +#endif +#if (flash_EBIU_AMBCTL_WAT == 3) +#define flash_EBIU_AMBCTL0_WAT B0WAT_3 +#endif +#if (flash_EBIU_AMBCTL_WAT == 2) +#define flash_EBIU_AMBCTL0_WAT B0WAT_2 +#endif +#if (flash_EBIU_AMBCTL_WAT == 1) +#define flash_EBIU_AMBCTL0_WAT B0WAT_1 +#endif + +#if (flash_EBIU_AMBCTL_RAT > 14) +#define flash_EBIU_AMBCTL0_RAT B0RAT_15 +#endif +#if (flash_EBIU_AMBCTL_RAT == 14) +#define flash_EBIU_AMBCTL0_RAT B0RAT_14 +#endif +#if (flash_EBIU_AMBCTL_RAT == 13) +#define flash_EBIU_AMBCTL0_RAT B0RAT_13 +#endif +#if (flash_EBIU_AMBCTL_RAT == 12) +#define flash_EBIU_AMBCTL0_RAT B0RAT_12 +#endif +#if (flash_EBIU_AMBCTL_RAT == 11) +#define flash_EBIU_AMBCTL0_RAT B0RAT_11 +#endif +#if (flash_EBIU_AMBCTL_RAT == 10) +#define flash_EBIU_AMBCTL0_RAT B0RAT_10 +#endif +#if (flash_EBIU_AMBCTL_RAT == 9) +#define flash_EBIU_AMBCTL0_RAT B0RAT_9 +#endif +#if (flash_EBIU_AMBCTL_RAT == 8) +#define flash_EBIU_AMBCTL0_RAT B0RAT_8 +#endif +#if (flash_EBIU_AMBCTL_RAT == 7) +#define flash_EBIU_AMBCTL0_RAT B0RAT_7 +#endif +#if (flash_EBIU_AMBCTL_RAT == 6) +#define flash_EBIU_AMBCTL0_RAT B0RAT_6 +#endif +#if (flash_EBIU_AMBCTL_RAT == 5) +#define flash_EBIU_AMBCTL0_RAT B0RAT_5 +#endif +#if (flash_EBIU_AMBCTL_RAT == 4) +#define flash_EBIU_AMBCTL0_RAT B0RAT_4 +#endif +#if (flash_EBIU_AMBCTL_RAT == 3) +#define flash_EBIU_AMBCTL0_RAT B0RAT_3 +#endif +#if (flash_EBIU_AMBCTL_RAT == 2) +#define flash_EBIU_AMBCTL0_RAT B0RAT_2 +#endif +#if (flash_EBIU_AMBCTL_RAT == 1) +#define flash_EBIU_AMBCTL0_RAT B0RAT_1 +#endif + +#define flash_EBIU_AMBCTL0 \ + (flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \ + flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN) diff --git a/include/asm-blackfin/mach-bf561/mem_map.h b/include/asm-blackfin/mach-bf561/mem_map.h new file mode 100644 index 000000000000..ebac9a8d838d --- /dev/null +++ b/include/asm-blackfin/mach-bf561/mem_map.h @@ -0,0 +1,75 @@ +/* + * Memory MAP + * Common header file for blackfin BF561 of processors. 
+ */ + +#ifndef _MEM_MAP_561_H_ +#define _MEM_MAP_561_H_ + +#define COREMMR_BASE 0xFFE00000 /* Core MMRs */ +#define SYSMMR_BASE 0xFFC00000 /* System MMRs */ + +/* Async Memory Banks */ +#define ASYNC_BANK3_BASE 0x2C000000 /* Async Bank 3 */ +#define ASYNC_BANK3_SIZE 0x04000000 /* 64M */ +#define ASYNC_BANK2_BASE 0x28000000 /* Async Bank 2 */ +#define ASYNC_BANK2_SIZE 0x04000000 /* 64M */ +#define ASYNC_BANK1_BASE 0x24000000 /* Async Bank 1 */ +#define ASYNC_BANK1_SIZE 0x04000000 /* 64M */ +#define ASYNC_BANK0_BASE 0x20000000 /* Async Bank 0 */ +#define ASYNC_BANK0_SIZE 0x04000000 /* 64M */ + +/* Level 1 Memory */ + +#ifdef CONFIG_BLKFIN_CACHE +#define BLKFIN_ICACHESIZE (16*1024) +#else +#define BLKFIN_ICACHESIZE (0*1024) +#endif + +/* Memory Map for ADSP-BF561 processors */ + +#ifdef CONFIG_BF561 +#define L1_CODE_START 0xFFA00000 +#define L1_DATA_A_START 0xFF800000 +#define L1_DATA_B_START 0xFF900000 + +#define L1_CODE_LENGTH 0x4000 + +#ifdef CONFIG_BLKFIN_DCACHE + +#ifdef CONFIG_BLKFIN_DCACHE_BANKA +#define DMEM_CNTR (ACACHE_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x8000 - 0x4000) +#define L1_DATA_B_LENGTH 0x8000 +#define BLKFIN_DCACHESIZE (16*1024) +#define BLKFIN_DSUPBANKS 1 +#else +#define DMEM_CNTR (ACACHE_BCACHE | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH (0x8000 - 0x4000) +#define L1_DATA_B_LENGTH (0x8000 - 0x4000) +#define BLKFIN_DCACHESIZE (32*1024) +#define BLKFIN_DSUPBANKS 2 +#endif + +#else +#define DMEM_CNTR (ASRAM_BSRAM | ENDCPLB | PORT_PREF0) +#define L1_DATA_A_LENGTH 0x8000 +#define L1_DATA_B_LENGTH 0x8000 +#define BLKFIN_DCACHESIZE (0*1024) +#define BLKFIN_DSUPBANKS 0 +#endif /*CONFIG_BLKFIN_DCACHE*/ +#endif + +/* Level 2 Memory */ +#define L2_START 0xFEB00000 +#define L2_LENGTH 0x20000 + +/* Scratch Pad Memory */ + +#if defined(CONFIG_BF561) +#define L1_SCRATCH_START 0xFFB00000 +#define L1_SCRATCH_LENGTH 0x1000 +#endif + +#endif /* _MEM_MAP_533_H_ */ diff --git a/include/asm-blackfin/mach-common/cdef_LPBlackfin.h b/include/asm-blackfin/mach-common/cdef_LPBlackfin.h new file mode 100644 index 000000000000..22aa5e637993 --- /dev/null +++ b/include/asm-blackfin/mach-common/cdef_LPBlackfin.h @@ -0,0 +1,471 @@ + /* + * File: include/asm-blackfin/mach-common/cdef_LPBlackfin.h + * Based on: + * Author: unknown + * COPYRIGHT 2005 Analog Devices + * Created: ? + * Description: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _CDEF_LPBLACKFIN_H +#define _CDEF_LPBLACKFIN_H + +/*#if !defined(__ADSPLPBLACKFIN__) +#warning cdef_LPBlackfin.h should only be included for 532 compatible chips. 
+#endif +*/ +#include + +/*Cache & SRAM Memory*/ +#define pSRAM_BASE_ADDRESS ((volatile void **)SRAM_BASE_ADDRESS) +#define bfin_read_SRAM_BASE_ADDRESS() bfin_read32(SRAM_BASE_ADDRESS) +#define bfin_write_SRAM_BASE_ADDRESS(val) bfin_write32(SRAM_BASE_ADDRESS,val) +#define pDMEM_CONTROL ((volatile unsigned long *)DMEM_CONTROL) +#define bfin_read_DMEM_CONTROL() bfin_read32(DMEM_CONTROL) +#define bfin_write_DMEM_CONTROL(val) bfin_write32(DMEM_CONTROL,val) +#define pDCPLB_STATUS ((volatile unsigned long *)DCPLB_STATUS) +#define bfin_read_DCPLB_STATUS() bfin_read32(DCPLB_STATUS) +#define bfin_write_DCPLB_STATUS(val) bfin_write32(DCPLB_STATUS,val) +#define pDCPLB_FAULT_ADDR ((volatile void **)DCPLB_FAULT_ADDR) +#define bfin_read_DCPLB_FAULT_ADDR() bfin_read32(DCPLB_FAULT_ADDR) +#define bfin_write_DCPLB_FAULT_ADDR(val) bfin_write32(DCPLB_FAULT_ADDR,val) +/* +#define MMR_TIMEOUT 0xFFE00010 +*/ +#define pDCPLB_ADDR0 ((volatile void **)DCPLB_ADDR0) +#define bfin_read_DCPLB_ADDR0() bfin_read32(DCPLB_ADDR0) +#define bfin_write_DCPLB_ADDR0(val) bfin_write32(DCPLB_ADDR0,val) +#define pDCPLB_ADDR1 ((volatile void **)DCPLB_ADDR1) +#define bfin_read_DCPLB_ADDR1() bfin_read32(DCPLB_ADDR1) +#define bfin_write_DCPLB_ADDR1(val) bfin_write32(DCPLB_ADDR1,val) +#define pDCPLB_ADDR2 ((volatile void **)DCPLB_ADDR2) +#define bfin_read_DCPLB_ADDR2() bfin_read32(DCPLB_ADDR2) +#define bfin_write_DCPLB_ADDR2(val) bfin_write32(DCPLB_ADDR2,val) +#define pDCPLB_ADDR3 ((volatile void **)DCPLB_ADDR3) +#define bfin_read_DCPLB_ADDR3() bfin_read32(DCPLB_ADDR3) +#define bfin_write_DCPLB_ADDR3(val) bfin_write32(DCPLB_ADDR3,val) +#define pDCPLB_ADDR4 ((volatile void **)DCPLB_ADDR4) +#define bfin_read_DCPLB_ADDR4() bfin_read32(DCPLB_ADDR4) +#define bfin_write_DCPLB_ADDR4(val) bfin_write32(DCPLB_ADDR4,val) +#define pDCPLB_ADDR5 ((volatile void **)DCPLB_ADDR5) +#define bfin_read_DCPLB_ADDR5() bfin_read32(DCPLB_ADDR5) +#define bfin_write_DCPLB_ADDR5(val) bfin_write32(DCPLB_ADDR5,val) +#define pDCPLB_ADDR6 ((volatile void **)DCPLB_ADDR6) +#define bfin_read_DCPLB_ADDR6() bfin_read32(DCPLB_ADDR6) +#define bfin_write_DCPLB_ADDR6(val) bfin_write32(DCPLB_ADDR6,val) +#define pDCPLB_ADDR7 ((volatile void **)DCPLB_ADDR7) +#define bfin_read_DCPLB_ADDR7() bfin_read32(DCPLB_ADDR7) +#define bfin_write_DCPLB_ADDR7(val) bfin_write32(DCPLB_ADDR7,val) +#define pDCPLB_ADDR8 ((volatile void **)DCPLB_ADDR8) +#define bfin_read_DCPLB_ADDR8() bfin_read32(DCPLB_ADDR8) +#define bfin_write_DCPLB_ADDR8(val) bfin_write32(DCPLB_ADDR8,val) +#define pDCPLB_ADDR9 ((volatile void **)DCPLB_ADDR9) +#define bfin_read_DCPLB_ADDR9() bfin_read32(DCPLB_ADDR9) +#define bfin_write_DCPLB_ADDR9(val) bfin_write32(DCPLB_ADDR9,val) +#define pDCPLB_ADDR10 ((volatile void **)DCPLB_ADDR10) +#define bfin_read_DCPLB_ADDR10() bfin_read32(DCPLB_ADDR10) +#define bfin_write_DCPLB_ADDR10(val) bfin_write32(DCPLB_ADDR10,val) +#define pDCPLB_ADDR11 ((volatile void **)DCPLB_ADDR11) +#define bfin_read_DCPLB_ADDR11() bfin_read32(DCPLB_ADDR11) +#define bfin_write_DCPLB_ADDR11(val) bfin_write32(DCPLB_ADDR11,val) +#define pDCPLB_ADDR12 ((volatile void **)DCPLB_ADDR12) +#define bfin_read_DCPLB_ADDR12() bfin_read32(DCPLB_ADDR12) +#define bfin_write_DCPLB_ADDR12(val) bfin_write32(DCPLB_ADDR12,val) +#define pDCPLB_ADDR13 ((volatile void **)DCPLB_ADDR13) +#define bfin_read_DCPLB_ADDR13() bfin_read32(DCPLB_ADDR13) +#define bfin_write_DCPLB_ADDR13(val) bfin_write32(DCPLB_ADDR13,val) +#define pDCPLB_ADDR14 ((volatile void **)DCPLB_ADDR14) +#define bfin_read_DCPLB_ADDR14() bfin_read32(DCPLB_ADDR14) +#define 
bfin_write_DCPLB_ADDR14(val) bfin_write32(DCPLB_ADDR14,val) +#define pDCPLB_ADDR15 ((volatile void **)DCPLB_ADDR15) +#define bfin_read_DCPLB_ADDR15() bfin_read32(DCPLB_ADDR15) +#define bfin_write_DCPLB_ADDR15(val) bfin_write32(DCPLB_ADDR15,val) +#define pDCPLB_DATA0 ((volatile unsigned long *)DCPLB_DATA0) +#define bfin_read_DCPLB_DATA0() bfin_read32(DCPLB_DATA0) +#define bfin_write_DCPLB_DATA0(val) bfin_write32(DCPLB_DATA0,val) +#define pDCPLB_DATA1 ((volatile unsigned long *)DCPLB_DATA1) +#define bfin_read_DCPLB_DATA1() bfin_read32(DCPLB_DATA1) +#define bfin_write_DCPLB_DATA1(val) bfin_write32(DCPLB_DATA1,val) +#define pDCPLB_DATA2 ((volatile unsigned long *)DCPLB_DATA2) +#define bfin_read_DCPLB_DATA2() bfin_read32(DCPLB_DATA2) +#define bfin_write_DCPLB_DATA2(val) bfin_write32(DCPLB_DATA2,val) +#define pDCPLB_DATA3 ((volatile unsigned long *)DCPLB_DATA3) +#define bfin_read_DCPLB_DATA3() bfin_read32(DCPLB_DATA3) +#define bfin_write_DCPLB_DATA3(val) bfin_write32(DCPLB_DATA3,val) +#define pDCPLB_DATA4 ((volatile unsigned long *)DCPLB_DATA4) +#define bfin_read_DCPLB_DATA4() bfin_read32(DCPLB_DATA4) +#define bfin_write_DCPLB_DATA4(val) bfin_write32(DCPLB_DATA4,val) +#define pDCPLB_DATA5 ((volatile unsigned long *)DCPLB_DATA5) +#define bfin_read_DCPLB_DATA5() bfin_read32(DCPLB_DATA5) +#define bfin_write_DCPLB_DATA5(val) bfin_write32(DCPLB_DATA5,val) +#define pDCPLB_DATA6 ((volatile unsigned long *)DCPLB_DATA6) +#define bfin_read_DCPLB_DATA6() bfin_read32(DCPLB_DATA6) +#define bfin_write_DCPLB_DATA6(val) bfin_write32(DCPLB_DATA6,val) +#define pDCPLB_DATA7 ((volatile unsigned long *)DCPLB_DATA7) +#define bfin_read_DCPLB_DATA7() bfin_read32(DCPLB_DATA7) +#define bfin_write_DCPLB_DATA7(val) bfin_write32(DCPLB_DATA7,val) +#define pDCPLB_DATA8 ((volatile unsigned long *)DCPLB_DATA8) +#define bfin_read_DCPLB_DATA8() bfin_read32(DCPLB_DATA8) +#define bfin_write_DCPLB_DATA8(val) bfin_write32(DCPLB_DATA8,val) +#define pDCPLB_DATA9 ((volatile unsigned long *)DCPLB_DATA9) +#define bfin_read_DCPLB_DATA9() bfin_read32(DCPLB_DATA9) +#define bfin_write_DCPLB_DATA9(val) bfin_write32(DCPLB_DATA9,val) +#define pDCPLB_DATA10 ((volatile unsigned long *)DCPLB_DATA10) +#define bfin_read_DCPLB_DATA10() bfin_read32(DCPLB_DATA10) +#define bfin_write_DCPLB_DATA10(val) bfin_write32(DCPLB_DATA10,val) +#define pDCPLB_DATA11 ((volatile unsigned long *)DCPLB_DATA11) +#define bfin_read_DCPLB_DATA11() bfin_read32(DCPLB_DATA11) +#define bfin_write_DCPLB_DATA11(val) bfin_write32(DCPLB_DATA11,val) +#define pDCPLB_DATA12 ((volatile unsigned long *)DCPLB_DATA12) +#define bfin_read_DCPLB_DATA12() bfin_read32(DCPLB_DATA12) +#define bfin_write_DCPLB_DATA12(val) bfin_write32(DCPLB_DATA12,val) +#define pDCPLB_DATA13 ((volatile unsigned long *)DCPLB_DATA13) +#define bfin_read_DCPLB_DATA13() bfin_read32(DCPLB_DATA13) +#define bfin_write_DCPLB_DATA13(val) bfin_write32(DCPLB_DATA13,val) +#define pDCPLB_DATA14 ((volatile unsigned long *)DCPLB_DATA14) +#define bfin_read_DCPLB_DATA14() bfin_read32(DCPLB_DATA14) +#define bfin_write_DCPLB_DATA14(val) bfin_write32(DCPLB_DATA14,val) +#define pDCPLB_DATA15 ((volatile unsigned long *)DCPLB_DATA15) +#define bfin_read_DCPLB_DATA15() bfin_read32(DCPLB_DATA15) +#define bfin_write_DCPLB_DATA15(val) bfin_write32(DCPLB_DATA15,val) +#define pDTEST_COMMAND ((volatile unsigned long *)DTEST_COMMAND) +#define bfin_read_DTEST_COMMAND() bfin_read32(DTEST_COMMAND) +#define bfin_write_DTEST_COMMAND(val) bfin_write32(DTEST_COMMAND,val) +/* +#define DTEST_INDEX 0xFFE00304 +*/ +#define pDTEST_DATA0 ((volatile unsigned 
long *)DTEST_DATA0) +#define bfin_read_DTEST_DATA0() bfin_read32(DTEST_DATA0) +#define bfin_write_DTEST_DATA0(val) bfin_write32(DTEST_DATA0,val) +#define pDTEST_DATA1 ((volatile unsigned long *)DTEST_DATA1) +#define bfin_read_DTEST_DATA1() bfin_read32(DTEST_DATA1) +#define bfin_write_DTEST_DATA1(val) bfin_write32(DTEST_DATA1,val) +/* +#define DTEST_DATA2 0xFFE00408 +#define DTEST_DATA3 0xFFE0040C +*/ +#define pIMEM_CONTROL ((volatile unsigned long *)IMEM_CONTROL) +#define bfin_read_IMEM_CONTROL() bfin_read32(IMEM_CONTROL) +#define bfin_write_IMEM_CONTROL(val) bfin_write32(IMEM_CONTROL,val) +#define pICPLB_STATUS ((volatile unsigned long *)ICPLB_STATUS) +#define bfin_read_ICPLB_STATUS() bfin_read32(ICPLB_STATUS) +#define bfin_write_ICPLB_STATUS(val) bfin_write32(ICPLB_STATUS,val) +#define pICPLB_FAULT_ADDR ((volatile void **)ICPLB_FAULT_ADDR) +#define bfin_read_ICPLB_FAULT_ADDR() bfin_read32(ICPLB_FAULT_ADDR) +#define bfin_write_ICPLB_FAULT_ADDR(val) bfin_write32(ICPLB_FAULT_ADDR,val) +#define pICPLB_ADDR0 ((volatile void **)ICPLB_ADDR0) +#define bfin_read_ICPLB_ADDR0() bfin_read32(ICPLB_ADDR0) +#define bfin_write_ICPLB_ADDR0(val) bfin_write32(ICPLB_ADDR0,val) +#define pICPLB_ADDR1 ((volatile void **)ICPLB_ADDR1) +#define bfin_read_ICPLB_ADDR1() bfin_read32(ICPLB_ADDR1) +#define bfin_write_ICPLB_ADDR1(val) bfin_write32(ICPLB_ADDR1,val) +#define pICPLB_ADDR2 ((volatile void **)ICPLB_ADDR2) +#define bfin_read_ICPLB_ADDR2() bfin_read32(ICPLB_ADDR2) +#define bfin_write_ICPLB_ADDR2(val) bfin_write32(ICPLB_ADDR2,val) +#define pICPLB_ADDR3 ((volatile void **)ICPLB_ADDR3) +#define bfin_read_ICPLB_ADDR3() bfin_read32(ICPLB_ADDR3) +#define bfin_write_ICPLB_ADDR3(val) bfin_write32(ICPLB_ADDR3,val) +#define pICPLB_ADDR4 ((volatile void **)ICPLB_ADDR4) +#define bfin_read_ICPLB_ADDR4() bfin_read32(ICPLB_ADDR4) +#define bfin_write_ICPLB_ADDR4(val) bfin_write32(ICPLB_ADDR4,val) +#define pICPLB_ADDR5 ((volatile void **)ICPLB_ADDR5) +#define bfin_read_ICPLB_ADDR5() bfin_read32(ICPLB_ADDR5) +#define bfin_write_ICPLB_ADDR5(val) bfin_write32(ICPLB_ADDR5,val) +#define pICPLB_ADDR6 ((volatile void **)ICPLB_ADDR6) +#define bfin_read_ICPLB_ADDR6() bfin_read32(ICPLB_ADDR6) +#define bfin_write_ICPLB_ADDR6(val) bfin_write32(ICPLB_ADDR6,val) +#define pICPLB_ADDR7 ((volatile void **)ICPLB_ADDR7) +#define bfin_read_ICPLB_ADDR7() bfin_read32(ICPLB_ADDR7) +#define bfin_write_ICPLB_ADDR7(val) bfin_write32(ICPLB_ADDR7,val) +#define pICPLB_ADDR8 ((volatile void **)ICPLB_ADDR8) +#define bfin_read_ICPLB_ADDR8() bfin_read32(ICPLB_ADDR8) +#define bfin_write_ICPLB_ADDR8(val) bfin_write32(ICPLB_ADDR8,val) +#define pICPLB_ADDR9 ((volatile void **)ICPLB_ADDR9) +#define bfin_read_ICPLB_ADDR9() bfin_read32(ICPLB_ADDR9) +#define bfin_write_ICPLB_ADDR9(val) bfin_write32(ICPLB_ADDR9,val) +#define pICPLB_ADDR10 ((volatile void **)ICPLB_ADDR10) +#define bfin_read_ICPLB_ADDR10() bfin_read32(ICPLB_ADDR10) +#define bfin_write_ICPLB_ADDR10(val) bfin_write32(ICPLB_ADDR10,val) +#define pICPLB_ADDR11 ((volatile void **)ICPLB_ADDR11) +#define bfin_read_ICPLB_ADDR11() bfin_read32(ICPLB_ADDR11) +#define bfin_write_ICPLB_ADDR11(val) bfin_write32(ICPLB_ADDR11,val) +#define pICPLB_ADDR12 ((volatile void **)ICPLB_ADDR12) +#define bfin_read_ICPLB_ADDR12() bfin_read32(ICPLB_ADDR12) +#define bfin_write_ICPLB_ADDR12(val) bfin_write32(ICPLB_ADDR12,val) +#define pICPLB_ADDR13 ((volatile void **)ICPLB_ADDR13) +#define bfin_read_ICPLB_ADDR13() bfin_read32(ICPLB_ADDR13) +#define bfin_write_ICPLB_ADDR13(val) bfin_write32(ICPLB_ADDR13,val) +#define pICPLB_ADDR14 
((volatile void **)ICPLB_ADDR14) +#define bfin_read_ICPLB_ADDR14() bfin_read32(ICPLB_ADDR14) +#define bfin_write_ICPLB_ADDR14(val) bfin_write32(ICPLB_ADDR14,val) +#define pICPLB_ADDR15 ((volatile void **)ICPLB_ADDR15) +#define bfin_read_ICPLB_ADDR15() bfin_read32(ICPLB_ADDR15) +#define bfin_write_ICPLB_ADDR15(val) bfin_write32(ICPLB_ADDR15,val) +#define pICPLB_DATA0 ((volatile unsigned long *)ICPLB_DATA0) +#define bfin_read_ICPLB_DATA0() bfin_read32(ICPLB_DATA0) +#define bfin_write_ICPLB_DATA0(val) bfin_write32(ICPLB_DATA0,val) +#define pICPLB_DATA1 ((volatile unsigned long *)ICPLB_DATA1) +#define bfin_read_ICPLB_DATA1() bfin_read32(ICPLB_DATA1) +#define bfin_write_ICPLB_DATA1(val) bfin_write32(ICPLB_DATA1,val) +#define pICPLB_DATA2 ((volatile unsigned long *)ICPLB_DATA2) +#define bfin_read_ICPLB_DATA2() bfin_read32(ICPLB_DATA2) +#define bfin_write_ICPLB_DATA2(val) bfin_write32(ICPLB_DATA2,val) +#define pICPLB_DATA3 ((volatile unsigned long *)ICPLB_DATA3) +#define bfin_read_ICPLB_DATA3() bfin_read32(ICPLB_DATA3) +#define bfin_write_ICPLB_DATA3(val) bfin_write32(ICPLB_DATA3,val) +#define pICPLB_DATA4 ((volatile unsigned long *)ICPLB_DATA4) +#define bfin_read_ICPLB_DATA4() bfin_read32(ICPLB_DATA4) +#define bfin_write_ICPLB_DATA4(val) bfin_write32(ICPLB_DATA4,val) +#define pICPLB_DATA5 ((volatile unsigned long *)ICPLB_DATA5) +#define bfin_read_ICPLB_DATA5() bfin_read32(ICPLB_DATA5) +#define bfin_write_ICPLB_DATA5(val) bfin_write32(ICPLB_DATA5,val) +#define pICPLB_DATA6 ((volatile unsigned long *)ICPLB_DATA6) +#define bfin_read_ICPLB_DATA6() bfin_read32(ICPLB_DATA6) +#define bfin_write_ICPLB_DATA6(val) bfin_write32(ICPLB_DATA6,val) +#define pICPLB_DATA7 ((volatile unsigned long *)ICPLB_DATA7) +#define bfin_read_ICPLB_DATA7() bfin_read32(ICPLB_DATA7) +#define bfin_write_ICPLB_DATA7(val) bfin_write32(ICPLB_DATA7,val) +#define pICPLB_DATA8 ((volatile unsigned long *)ICPLB_DATA8) +#define bfin_read_ICPLB_DATA8() bfin_read32(ICPLB_DATA8) +#define bfin_write_ICPLB_DATA8(val) bfin_write32(ICPLB_DATA8,val) +#define pICPLB_DATA9 ((volatile unsigned long *)ICPLB_DATA9) +#define bfin_read_ICPLB_DATA9() bfin_read32(ICPLB_DATA9) +#define bfin_write_ICPLB_DATA9(val) bfin_write32(ICPLB_DATA9,val) +#define pICPLB_DATA10 ((volatile unsigned long *)ICPLB_DATA10) +#define bfin_read_ICPLB_DATA10() bfin_read32(ICPLB_DATA10) +#define bfin_write_ICPLB_DATA10(val) bfin_write32(ICPLB_DATA10,val) +#define pICPLB_DATA11 ((volatile unsigned long *)ICPLB_DATA11) +#define bfin_read_ICPLB_DATA11() bfin_read32(ICPLB_DATA11) +#define bfin_write_ICPLB_DATA11(val) bfin_write32(ICPLB_DATA11,val) +#define pICPLB_DATA12 ((volatile unsigned long *)ICPLB_DATA12) +#define bfin_read_ICPLB_DATA12() bfin_read32(ICPLB_DATA12) +#define bfin_write_ICPLB_DATA12(val) bfin_write32(ICPLB_DATA12,val) +#define pICPLB_DATA13 ((volatile unsigned long *)ICPLB_DATA13) +#define bfin_read_ICPLB_DATA13() bfin_read32(ICPLB_DATA13) +#define bfin_write_ICPLB_DATA13(val) bfin_write32(ICPLB_DATA13,val) +#define pICPLB_DATA14 ((volatile unsigned long *)ICPLB_DATA14) +#define bfin_read_ICPLB_DATA14() bfin_read32(ICPLB_DATA14) +#define bfin_write_ICPLB_DATA14(val) bfin_write32(ICPLB_DATA14,val) +#define pICPLB_DATA15 ((volatile unsigned long *)ICPLB_DATA15) +#define bfin_read_ICPLB_DATA15() bfin_read32(ICPLB_DATA15) +#define bfin_write_ICPLB_DATA15(val) bfin_write32(ICPLB_DATA15,val) +#define pITEST_COMMAND ((volatile unsigned long *)ITEST_COMMAND) +#define bfin_read_ITEST_COMMAND() bfin_read32(ITEST_COMMAND) +#define bfin_write_ITEST_COMMAND(val) 
bfin_write32(ITEST_COMMAND,val) +#if 0 +#define ITEST_INDEX 0xFFE01304 /* Instruction Test Index Register */ +#endif +#define pITEST_DATA0 ((volatile unsigned long *)ITEST_DATA0) +#define bfin_read_ITEST_DATA0() bfin_read32(ITEST_DATA0) +#define bfin_write_ITEST_DATA0(val) bfin_write32(ITEST_DATA0,val) +#define pITEST_DATA1 ((volatile unsigned long *)ITEST_DATA1) +#define bfin_read_ITEST_DATA1() bfin_read32(ITEST_DATA1) +#define bfin_write_ITEST_DATA1(val) bfin_write32(ITEST_DATA1,val) + +/* Event/Interrupt Registers*/ + +#define pEVT0 ((volatile void **)EVT0) +#define bfin_read_EVT0() bfin_read32(EVT0) +#define bfin_write_EVT0(val) bfin_write32(EVT0,val) +#define pEVT1 ((volatile void **)EVT1) +#define bfin_read_EVT1() bfin_read32(EVT1) +#define bfin_write_EVT1(val) bfin_write32(EVT1,val) +#define pEVT2 ((volatile void **)EVT2) +#define bfin_read_EVT2() bfin_read32(EVT2) +#define bfin_write_EVT2(val) bfin_write32(EVT2,val) +#define pEVT3 ((volatile void **)EVT3) +#define bfin_read_EVT3() bfin_read32(EVT3) +#define bfin_write_EVT3(val) bfin_write32(EVT3,val) +#define pEVT4 ((volatile void **)EVT4) +#define bfin_read_EVT4() bfin_read32(EVT4) +#define bfin_write_EVT4(val) bfin_write32(EVT4,val) +#define pEVT5 ((volatile void **)EVT5) +#define bfin_read_EVT5() bfin_read32(EVT5) +#define bfin_write_EVT5(val) bfin_write32(EVT5,val) +#define pEVT6 ((volatile void **)EVT6) +#define bfin_read_EVT6() bfin_read32(EVT6) +#define bfin_write_EVT6(val) bfin_write32(EVT6,val) +#define pEVT7 ((volatile void **)EVT7) +#define bfin_read_EVT7() bfin_read32(EVT7) +#define bfin_write_EVT7(val) bfin_write32(EVT7,val) +#define pEVT8 ((volatile void **)EVT8) +#define bfin_read_EVT8() bfin_read32(EVT8) +#define bfin_write_EVT8(val) bfin_write32(EVT8,val) +#define pEVT9 ((volatile void **)EVT9) +#define bfin_read_EVT9() bfin_read32(EVT9) +#define bfin_write_EVT9(val) bfin_write32(EVT9,val) +#define pEVT10 ((volatile void **)EVT10) +#define bfin_read_EVT10() bfin_read32(EVT10) +#define bfin_write_EVT10(val) bfin_write32(EVT10,val) +#define pEVT11 ((volatile void **)EVT11) +#define bfin_read_EVT11() bfin_read32(EVT11) +#define bfin_write_EVT11(val) bfin_write32(EVT11,val) +#define pEVT12 ((volatile void **)EVT12) +#define bfin_read_EVT12() bfin_read32(EVT12) +#define bfin_write_EVT12(val) bfin_write32(EVT12,val) +#define pEVT13 ((volatile void **)EVT13) +#define bfin_read_EVT13() bfin_read32(EVT13) +#define bfin_write_EVT13(val) bfin_write32(EVT13,val) +#define pEVT14 ((volatile void **)EVT14) +#define bfin_read_EVT14() bfin_read32(EVT14) +#define bfin_write_EVT14(val) bfin_write32(EVT14,val) +#define pEVT15 ((volatile void **)EVT15) +#define bfin_read_EVT15() bfin_read32(EVT15) +#define bfin_write_EVT15(val) bfin_write32(EVT15,val) +#define pIMASK ((volatile unsigned long *)IMASK) +#define bfin_read_IMASK() bfin_read32(IMASK) +#define bfin_write_IMASK(val) bfin_write32(IMASK,val) +#define pIPEND ((volatile unsigned long *)IPEND) +#define bfin_read_IPEND() bfin_read32(IPEND) +#define bfin_write_IPEND(val) bfin_write32(IPEND,val) +#define pILAT ((volatile unsigned long *)ILAT) +#define bfin_read_ILAT() bfin_read32(ILAT) +#define bfin_write_ILAT(val) bfin_write32(ILAT,val) + +/*Core Timer Registers*/ +#define pTCNTL ((volatile unsigned long *)TCNTL) +#define bfin_read_TCNTL() bfin_read32(TCNTL) +#define bfin_write_TCNTL(val) bfin_write32(TCNTL,val) +#define pTPERIOD ((volatile unsigned long *)TPERIOD) +#define bfin_read_TPERIOD() bfin_read32(TPERIOD) +#define bfin_write_TPERIOD(val) bfin_write32(TPERIOD,val) +#define 
pTSCALE ((volatile unsigned long *)TSCALE) +#define bfin_read_TSCALE() bfin_read32(TSCALE) +#define bfin_write_TSCALE(val) bfin_write32(TSCALE,val) +#define pTCOUNT ((volatile unsigned long *)TCOUNT) +#define bfin_read_TCOUNT() bfin_read32(TCOUNT) +#define bfin_write_TCOUNT(val) bfin_write32(TCOUNT,val) + +/*Debug/MP/Emulation Registers*/ +#define pDSPID ((volatile unsigned long *)DSPID) +#define bfin_read_DSPID() bfin_read32(DSPID) +#define bfin_write_DSPID(val) bfin_write32(DSPID,val) +#define pDBGCTL ((volatile unsigned long *)DBGCTL) +#define bfin_read_DBGCTL() bfin_read32(DBGCTL) +#define bfin_write_DBGCTL(val) bfin_write32(DBGCTL,val) +#define pDBGSTAT ((volatile unsigned long *)DBGSTAT) +#define bfin_read_DBGSTAT() bfin_read32(DBGSTAT) +#define bfin_write_DBGSTAT(val) bfin_write32(DBGSTAT,val) +#define pEMUDAT ((volatile unsigned long *)EMUDAT) +#define bfin_read_EMUDAT() bfin_read32(EMUDAT) +#define bfin_write_EMUDAT(val) bfin_write32(EMUDAT,val) + +/*Trace Buffer Registers*/ +#define pTBUFCTL ((volatile unsigned long *)TBUFCTL) +#define bfin_read_TBUFCTL() bfin_read32(TBUFCTL) +#define bfin_write_TBUFCTL(val) bfin_write32(TBUFCTL,val) +#define pTBUFSTAT ((volatile unsigned long *)TBUFSTAT) +#define bfin_read_TBUFSTAT() bfin_read32(TBUFSTAT) +#define bfin_write_TBUFSTAT(val) bfin_write32(TBUFSTAT,val) +#define pTBUF ((volatile void **)TBUF) +#define bfin_read_TBUF() bfin_read32(TBUF) +#define bfin_write_TBUF(val) bfin_write32(TBUF,val) + +/*Watch Point Control Registers*/ +#define pWPIACTL ((volatile unsigned long *)WPIACTL) +#define bfin_read_WPIACTL() bfin_read32(WPIACTL) +#define bfin_write_WPIACTL(val) bfin_write32(WPIACTL,val) +#define pWPIA0 ((volatile void **)WPIA0) +#define bfin_read_WPIA0() bfin_read32(WPIA0) +#define bfin_write_WPIA0(val) bfin_write32(WPIA0,val) +#define pWPIA1 ((volatile void **)WPIA1) +#define bfin_read_WPIA1() bfin_read32(WPIA1) +#define bfin_write_WPIA1(val) bfin_write32(WPIA1,val) +#define pWPIA2 ((volatile void **)WPIA2) +#define bfin_read_WPIA2() bfin_read32(WPIA2) +#define bfin_write_WPIA2(val) bfin_write32(WPIA2,val) +#define pWPIA3 ((volatile void **)WPIA3) +#define bfin_read_WPIA3() bfin_read32(WPIA3) +#define bfin_write_WPIA3(val) bfin_write32(WPIA3,val) +#define pWPIA4 ((volatile void **)WPIA4) +#define bfin_read_WPIA4() bfin_read32(WPIA4) +#define bfin_write_WPIA4(val) bfin_write32(WPIA4,val) +#define pWPIA5 ((volatile void **)WPIA5) +#define bfin_read_WPIA5() bfin_read32(WPIA5) +#define bfin_write_WPIA5(val) bfin_write32(WPIA5,val) +#define pWPIACNT0 ((volatile unsigned long *)WPIACNT0) +#define bfin_read_WPIACNT0() bfin_read32(WPIACNT0) +#define bfin_write_WPIACNT0(val) bfin_write32(WPIACNT0,val) +#define pWPIACNT1 ((volatile unsigned long *)WPIACNT1) +#define bfin_read_WPIACNT1() bfin_read32(WPIACNT1) +#define bfin_write_WPIACNT1(val) bfin_write32(WPIACNT1,val) +#define pWPIACNT2 ((volatile unsigned long *)WPIACNT2) +#define bfin_read_WPIACNT2() bfin_read32(WPIACNT2) +#define bfin_write_WPIACNT2(val) bfin_write32(WPIACNT2,val) +#define pWPIACNT3 ((volatile unsigned long *)WPIACNT3) +#define bfin_read_WPIACNT3() bfin_read32(WPIACNT3) +#define bfin_write_WPIACNT3(val) bfin_write32(WPIACNT3,val) +#define pWPIACNT4 ((volatile unsigned long *)WPIACNT4) +#define bfin_read_WPIACNT4() bfin_read32(WPIACNT4) +#define bfin_write_WPIACNT4(val) bfin_write32(WPIACNT4,val) +#define pWPIACNT5 ((volatile unsigned long *)WPIACNT5) +#define bfin_read_WPIACNT5() bfin_read32(WPIACNT5) +#define bfin_write_WPIACNT5(val) bfin_write32(WPIACNT5,val) +#define 
pWPDACTL ((volatile unsigned long *)WPDACTL) +#define bfin_read_WPDACTL() bfin_read32(WPDACTL) +#define bfin_write_WPDACTL(val) bfin_write32(WPDACTL,val) +#define pWPDA0 ((volatile void **)WPDA0) +#define bfin_read_WPDA0() bfin_read32(WPDA0) +#define bfin_write_WPDA0(val) bfin_write32(WPDA0,val) +#define pWPDA1 ((volatile void **)WPDA1) +#define bfin_read_WPDA1() bfin_read32(WPDA1) +#define bfin_write_WPDA1(val) bfin_write32(WPDA1,val) +#define pWPDACNT0 ((volatile unsigned long *)WPDACNT0) +#define bfin_read_WPDACNT0() bfin_read32(WPDACNT0) +#define bfin_write_WPDACNT0(val) bfin_write32(WPDACNT0,val) +#define pWPDACNT1 ((volatile unsigned long *)WPDACNT1) +#define bfin_read_WPDACNT1() bfin_read32(WPDACNT1) +#define bfin_write_WPDACNT1(val) bfin_write32(WPDACNT1,val) +#define pWPSTAT ((volatile unsigned long *)WPSTAT) +#define bfin_read_WPSTAT() bfin_read32(WPSTAT) +#define bfin_write_WPSTAT(val) bfin_write32(WPSTAT,val) + +/*Performance Monitor Registers*/ +#define pPFCTL ((volatile unsigned long *)PFCTL) +#define bfin_read_PFCTL() bfin_read32(PFCTL) +#define bfin_write_PFCTL(val) bfin_write32(PFCTL,val) +#define pPFCNTR0 ((volatile unsigned long *)PFCNTR0) +#define bfin_read_PFCNTR0() bfin_read32(PFCNTR0) +#define bfin_write_PFCNTR0(val) bfin_write32(PFCNTR0,val) +#define pPFCNTR1 ((volatile unsigned long *)PFCNTR1) +#define bfin_read_PFCNTR1() bfin_read32(PFCNTR1) +#define bfin_write_PFCNTR1(val) bfin_write32(PFCNTR1,val) + +/* +#define IPRIO 0xFFE02110 +*/ + +#if defined(CONFIG_BFIN_ALIVE_LED) +#define pCONFIG_BFIN_ALIVE_LED_DPORT \ + (volatile unsigned short *)CONFIG_BFIN_ALIVE_LED_DPORT +#define pCONFIG_BFIN_ALIVE_LED_PORT \ + (volatile unsigned short *)CONFIG_BFIN_ALIVE_LED_PORT +#endif + +#if defined(CONFIG_BFIN_IDLE_LED) +#define pCONFIG_BFIN_IDLE_LED_DPORT \ + (volatile unsigned short *)CONFIG_BFIN_IDLE_LED_DPORT +#define pCONFIG_BFIN_IDLE_LED_PORT \ + (volatile unsigned short *)CONFIG_BFIN_IDLE_LED_PORT +#endif + +#endif /* _CDEF_LPBLACKFIN_H */ diff --git a/include/asm-blackfin/mach-common/context.S b/include/asm-blackfin/mach-common/context.S new file mode 100644 index 000000000000..fd0ebe1862b8 --- /dev/null +++ b/include/asm-blackfin/mach-common/context.S @@ -0,0 +1,350 @@ +/* + * File: arch/blackfin/kernel/context.S + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2007 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* + * Code to save processor context. 
+ * We even save the register which are preserved by a function call + * - r4, r5, r6, r7, p3, p4, p5 + */ +.macro save_context_with_interrupts + [--sp] = SYSCFG; + + [--sp] = P0; /*orig_p0*/ + [--sp] = R0; /*orig_r0*/ + + [--sp] = ( R7:0, P5:0 ); + [--sp] = fp; + [--sp] = usp; + + [--sp] = i0; + [--sp] = i1; + [--sp] = i2; + [--sp] = i3; + + [--sp] = m0; + [--sp] = m1; + [--sp] = m2; + [--sp] = m3; + + [--sp] = l0; + [--sp] = l1; + [--sp] = l2; + [--sp] = l3; + + [--sp] = b0; + [--sp] = b1; + [--sp] = b2; + [--sp] = b3; + [--sp] = a0.x; + [--sp] = a0.w; + [--sp] = a1.x; + [--sp] = a1.w; + + [--sp] = LC0; + [--sp] = LC1; + [--sp] = LT0; + [--sp] = LT1; + [--sp] = LB0; + [--sp] = LB1; + + [--sp] = ASTAT; + + [--sp] = r0; /* Skip reserved */ + [--sp] = RETS; + r0 = RETI; + [--sp] = r0; + [--sp] = RETX; + [--sp] = RETN; + [--sp] = RETE; + [--sp] = SEQSTAT; + [--sp] = r0; /* Skip IPEND as well. */ + /* Switch to other method of keeping interrupts disabled. */ +#ifdef CONFIG_DEBUG_HWERR + r0 = 0x3f; + sti r0; +#else + cli r0; +#endif + [--sp] = RETI; /*orig_pc*/ + /* Clear all L registers. */ + r0 = 0 (x); + l0 = r0; + l1 = r0; + l2 = r0; + l3 = r0; +.endm + +.macro save_context_syscall + [--sp] = SYSCFG; + + [--sp] = P0; /*orig_p0*/ + [--sp] = R0; /*orig_r0*/ + [--sp] = ( R7:0, P5:0 ); + [--sp] = fp; + [--sp] = usp; + + [--sp] = i0; + [--sp] = i1; + [--sp] = i2; + [--sp] = i3; + + [--sp] = m0; + [--sp] = m1; + [--sp] = m2; + [--sp] = m3; + + [--sp] = l0; + [--sp] = l1; + [--sp] = l2; + [--sp] = l3; + + [--sp] = b0; + [--sp] = b1; + [--sp] = b2; + [--sp] = b3; + [--sp] = a0.x; + [--sp] = a0.w; + [--sp] = a1.x; + [--sp] = a1.w; + + [--sp] = LC0; + [--sp] = LC1; + [--sp] = LT0; + [--sp] = LT1; + [--sp] = LB0; + [--sp] = LB1; + + [--sp] = ASTAT; + + [--sp] = r0; /* Skip reserved */ + [--sp] = RETS; + r0 = RETI; + [--sp] = r0; + [--sp] = RETX; + [--sp] = RETN; + [--sp] = RETE; + [--sp] = SEQSTAT; + [--sp] = r0; /* Skip IPEND as well. */ + [--sp] = RETI; /*orig_pc*/ + /* Clear all L registers. */ + r0 = 0 (x); + l0 = r0; + l1 = r0; + l2 = r0; + l3 = r0; +.endm + +.macro save_context_no_interrupts + [--sp] = SYSCFG; + [--sp] = P0; /* orig_p0 */ + [--sp] = R0; /* orig_r0 */ + [--sp] = ( R7:0, P5:0 ); + [--sp] = fp; + [--sp] = usp; + + [--sp] = i0; + [--sp] = i1; + [--sp] = i2; + [--sp] = i3; + + [--sp] = m0; + [--sp] = m1; + [--sp] = m2; + [--sp] = m3; + + [--sp] = l0; + [--sp] = l1; + [--sp] = l2; + [--sp] = l3; + + [--sp] = b0; + [--sp] = b1; + [--sp] = b2; + [--sp] = b3; + [--sp] = a0.x; + [--sp] = a0.w; + [--sp] = a1.x; + [--sp] = a1.w; + + [--sp] = LC0; + [--sp] = LC1; + [--sp] = LT0; + [--sp] = LT1; + [--sp] = LB0; + [--sp] = LB1; + + [--sp] = ASTAT; + +#ifdef CONFIG_KGDB + fp = 0(Z); + r1 = sp; + r1 += 60; + r1 += 60; + r1 += 60; + [--sp] = r1; +#else + [--sp] = r0; /* Skip reserved */ +#endif + [--sp] = RETS; + r0 = RETI; + [--sp] = r0; + [--sp] = RETX; + [--sp] = RETN; + [--sp] = RETE; + [--sp] = SEQSTAT; +#ifdef CONFIG_KGDB + r1.l = lo(IPEND); + r1.h = hi(IPEND); + [--sp] = r1; +#else + [--sp] = r0; /* Skip IPEND as well. */ +#endif + [--sp] = r0; /*orig_pc*/ + /* Clear all L registers. 
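+	 * (the DAG length registers select circular addressing for the
+	 * corresponding index registers; compiled code assumes linear
+	 * addressing, so they must be zero while running kernel C code)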
*/ + r0 = 0 (x); + l0 = r0; + l1 = r0; + l2 = r0; + l3 = r0; +.endm + +.macro restore_context_no_interrupts + sp += 4; /* Skip orig_pc */ + sp += 4; /* Skip IPEND */ + SEQSTAT = [sp++]; + RETE = [sp++]; + RETN = [sp++]; + RETX = [sp++]; + r0 = [sp++]; + RETI = r0; /* Restore RETI indirectly when in exception */ + RETS = [sp++]; + + sp += 4; /* Skip Reserved */ + + ASTAT = [sp++]; + + LB1 = [sp++]; + LB0 = [sp++]; + LT1 = [sp++]; + LT0 = [sp++]; + LC1 = [sp++]; + LC0 = [sp++]; + + a1.w = [sp++]; + a1.x = [sp++]; + a0.w = [sp++]; + a0.x = [sp++]; + b3 = [sp++]; + b2 = [sp++]; + b1 = [sp++]; + b0 = [sp++]; + + l3 = [sp++]; + l2 = [sp++]; + l1 = [sp++]; + l0 = [sp++]; + + m3 = [sp++]; + m2 = [sp++]; + m1 = [sp++]; + m0 = [sp++]; + + i3 = [sp++]; + i2 = [sp++]; + i1 = [sp++]; + i0 = [sp++]; + + sp += 4; + fp = [sp++]; + + ( R7 : 0, P5 : 0) = [ SP ++ ]; + sp += 8; /* Skip orig_r0/orig_p0 */ + SYSCFG = [sp++]; +.endm + +.macro restore_context_with_interrupts + sp += 4; /* Skip orig_pc */ + sp += 4; /* Skip IPEND */ + SEQSTAT = [sp++]; + RETE = [sp++]; + RETN = [sp++]; + RETX = [sp++]; + RETI = [sp++]; + RETS = [sp++]; + + p0.h = _irq_flags; + p0.l = _irq_flags; + r0 = [p0]; + sti r0; + + sp += 4; /* Skip Reserved */ + + ASTAT = [sp++]; + + LB1 = [sp++]; + LB0 = [sp++]; + LT1 = [sp++]; + LT0 = [sp++]; + LC1 = [sp++]; + LC0 = [sp++]; + + a1.w = [sp++]; + a1.x = [sp++]; + a0.w = [sp++]; + a0.x = [sp++]; + b3 = [sp++]; + b2 = [sp++]; + b1 = [sp++]; + b0 = [sp++]; + + l3 = [sp++]; + l2 = [sp++]; + l1 = [sp++]; + l0 = [sp++]; + + m3 = [sp++]; + m2 = [sp++]; + m1 = [sp++]; + m0 = [sp++]; + + i3 = [sp++]; + i2 = [sp++]; + i1 = [sp++]; + i0 = [sp++]; + + sp += 4; + fp = [sp++]; + + ( R7 : 0, P5 : 0) = [ SP ++ ]; + sp += 8; /* Skip orig_r0/orig_p0 */ + csync; + SYSCFG = [sp++]; + csync; +.endm + diff --git a/include/asm-blackfin/mach-common/def_LPBlackfin.h b/include/asm-blackfin/mach-common/def_LPBlackfin.h new file mode 100644 index 000000000000..76103526aec1 --- /dev/null +++ b/include/asm-blackfin/mach-common/def_LPBlackfin.h @@ -0,0 +1,691 @@ + /* + * File: include/asm-blackfin/mach-common/def_LPBlackfin.h + * Based on: + * Author: unknown + * COPYRIGHT 2005 Analog Devices + * Created: ? + * Description: + * + * Modified: + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* LP Blackfin CORE REGISTER BIT & ADDRESS DEFINITIONS FOR ADSP-BF532/33 */ + +#ifndef _DEF_LPBLACKFIN_H +#define _DEF_LPBLACKFIN_H + +#include + +/*#if !defined(__ADSPLPBLACKFIN__) +#warning def_LPBlackfin.h should only be included for 532 compatible chips. 
+#endif +*/ + +#define MK_BMSK_(x) (1<31 */ +#define TEST_WAY0 0x00000000 /* Access Way0 */ +#define TEST_WAY1 0x04000000 /* Access Way1 */ +/* ITEST_COMMAND only */ +#define TEST_WAY2 0x08000000 /* Access Way2 */ +#define TEST_WAY3 0x0C000000 /* Access Way3 */ +/* DTEST_COMMAND only */ +#define TEST_BNKSELA 0x00000000 /* Access SuperBank A */ +#define TEST_BNKSELB 0x00800000 /* Access SuperBank B */ + +#endif /* _DEF_LPBLACKFIN_H */ diff --git a/include/asm-blackfin/macros.h b/include/asm-blackfin/macros.h new file mode 100644 index 000000000000..c0c04a2f2dd5 --- /dev/null +++ b/include/asm-blackfin/macros.h @@ -0,0 +1,95 @@ +/************************************************************************ + * + * macros.h + * + * (c) Copyright 2001-2003 Analog Devices, Inc. All rights reserved. + * + ************************************************************************/ + +/* Defines various assembly macros. */ + +#ifndef _MACROS_H +#define _MACROS_H + +#define LO(con32) ((con32) & 0xFFFF) +#define lo(con32) ((con32) & 0xFFFF) +#define HI(con32) (((con32) >> 16) & 0xFFFF) +#define hi(con32) (((con32) >> 16) & 0xFFFF) + +/* + * Set the corresponding bits in a System Register (SR); + * All bits set in "mask" will be set in the system register + * specified by "sys_reg" bitset_SR(sys_reg, mask), where + * sys_reg is the system register and mask are the bits to be set. + */ +#define bitset_SR(sys_reg, mask)\ + [--SP] = (R7:6);\ + r7 = sys_reg;\ + r6.l = (mask) & 0xffff;\ + r6.h = (mask) >> 16;\ + r7 = r7 | r6;\ + sys_reg = r7;\ + csync;\ + (R7:6) = [SP++] + +/* + * Clear the corresponding bits in a System Register (SR); + * All bits set in "mask" will be cleared in the SR + * specified by "sys_reg" bitclr_SR(sys_reg, mask), where + * sys_reg is the SR and mask are the bits to be cleared. + */ +#define bitclr_SR(sys_reg, mask)\ + [--SP] = (R7:6);\ + r7 = sys_reg;\ + r7 =~ r7;\ + r6.l = (mask) & 0xffff;\ + r6.h = (mask) >> 16;\ + r7 = r7 | r6;\ + r7 =~ r7;\ + sys_reg = r7;\ + csync;\ + (R7:6) = [SP++] + +/* + * Set the corresponding bits in a Memory Mapped Register (MMR); + * All bits set in "mask" will be set in the MMR specified by "mmr_reg" + * bitset_MMR(mmr_reg, mask), where mmr_reg is the MMR and mask are + * the bits to be set. + */ +#define bitset_MMR(mmr_reg, mask)\ + [--SP] = (R7:6);\ + [--SP] = P5;\ + p5.l = mmr_reg & 0xffff;\ + p5.h = mmr_reg >> 16;\ + r7 = [p5];\ + r6.l = (mask) & 0xffff;\ + r6.h = (mask) >> 16;\ + r7 = r7 | r6;\ + [p5] = r7;\ + csync;\ + p5 = [SP++];\ + (R7:6) = [SP++] + +/* + * Clear the corresponding bits in a Memory Mapped Register (MMR); + * All bits set in "mask" will be cleared in the MMR specified by "mmr_reg" + * bitclr_MMRreg(mmr_reg, mask), where sys_reg is the MMR and mask are + * the bits to be cleared. + */ +#define bitclr_MMR(mmr_reg, mask)\ + [--SP] = (R7:6);\ + [--SP] = P5;\ + p5.l = mmr_reg & 0xffff;\ + p5.h = mmr_reg >> 16;\ + r7 = [p5];\ + r7 =~ r7;\ + r6.l = (mask) & 0xffff;\ + r6.h = (mask) >> 16;\ + r7 = r7 | r6;\ + r7 =~ r7;\ + [p5] = r7;\ + csync;\ + p5 = [SP++];\ + (R7:6) = [SP++] + +#endif /* _MACROS_H */ diff --git a/include/asm-blackfin/mem_map.h b/include/asm-blackfin/mem_map.h new file mode 100644 index 000000000000..42d1f37f6d9c --- /dev/null +++ b/include/asm-blackfin/mem_map.h @@ -0,0 +1,12 @@ +/* + * mem_map.h + * Common header file for blackfin family of processors. 
+ * + */ + +#ifndef _MEM_MAP_H_ +#define _MEM_MAP_H_ + +#include + +#endif /* _MEM_MAP_H_ */ diff --git a/include/asm-blackfin/mman.h b/include/asm-blackfin/mman.h new file mode 100644 index 000000000000..4d504f908c0c --- /dev/null +++ b/include/asm-blackfin/mman.h @@ -0,0 +1,45 @@ +#ifndef __BFIN_MMAN_H__ +#define __BFIN_MMAN_H__ + +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ +#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ + +#define MAP_SHARED 0x01 /* Share changes */ +#define MAP_PRIVATE 0x02 /* Changes are private */ +#define MAP_TYPE 0x0f /* Mask for type of mapping */ +#define MAP_FIXED 0x10 /* Interpret addr exactly */ +#define MAP_ANONYMOUS 0x20 /* don't use a file */ + +#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ +#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ +#define MAP_LOCKED 0x2000 /* pages are locked */ +#define MAP_NORESERVE 0x4000 /* don't check for reservations */ +#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_UNINITIALIZE 0x4000000 /* For anonymous mmap, memory could + be uninitialized. */ + +#define MS_ASYNC 1 /* sync memory asynchronously */ +#define MS_INVALIDATE 2 /* invalidate the caches */ +#define MS_SYNC 4 /* synchronous memory sync */ + +#define MCL_CURRENT 1 /* lock all current mappings */ +#define MCL_FUTURE 2 /* lock all future mappings */ + +#define MADV_NORMAL 0x0 /* default page-in behavior */ +#define MADV_RANDOM 0x1 /* page-in minimum required */ +#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ +#define MADV_WILLNEED 0x3 /* pre-fault pages */ +#define MADV_DONTNEED 0x4 /* discard these pages */ + +/* compatibility flags */ +#define MAP_ANON MAP_ANONYMOUS +#define MAP_FILE 0 + +#endif /* __BFIN_MMAN_H__ */ diff --git a/include/asm-blackfin/mmu.h b/include/asm-blackfin/mmu.h new file mode 100644 index 000000000000..11d52f1167d0 --- /dev/null +++ b/include/asm-blackfin/mmu.h @@ -0,0 +1,30 @@ +#ifndef __MMU_H +#define __MMU_H + +/* Copyright (C) 2002, David McCullough */ + +struct sram_list_struct { + struct sram_list_struct *next; + void *addr; + size_t length; +}; + +typedef struct { + struct vm_list_struct *vmlist; + unsigned long end_brk; + unsigned long stack_start; + + /* Points to the location in SDRAM where the L1 stack is normally + saved, or NULL if the stack is always in SDRAM. */ + void *l1_stack_save; + + struct sram_list_struct *sram_list; + +#ifdef CONFIG_BINFMT_ELF_FDPIC + unsigned long exec_fdpic_loadmap; + unsigned long interp_fdpic_loadmap; +#endif + +} mm_context_t; + +#endif diff --git a/include/asm-blackfin/mmu_context.h b/include/asm-blackfin/mmu_context.h new file mode 100644 index 000000000000..c5c71a6aaf19 --- /dev/null +++ b/include/asm-blackfin/mmu_context.h @@ -0,0 +1,129 @@ +/* + * File: include/asm-blackfin/mmu_context.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. 
+ * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __BLACKFIN_MMU_CONTEXT_H__ +#define __BLACKFIN_MMU_CONTEXT_H__ + +#include +#include +#include + +extern void *current_l1_stack_save; +extern int nr_l1stack_tasks; +extern void *l1_stack_base; +extern unsigned long l1_stack_len; + +extern int l1sram_free(const void*); +extern void *l1sram_alloc_max(void*); + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +} + +/* Called when creating a new context during fork() or execve(). */ +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + return 0; +} + +static inline void free_l1stack(void) +{ + nr_l1stack_tasks--; + if (nr_l1stack_tasks == 0) + l1sram_free(l1_stack_base); +} +static inline void destroy_context(struct mm_struct *mm) +{ + struct sram_list_struct *tmp; + + if (current_l1_stack_save == mm->context.l1_stack_save) + current_l1_stack_save = 0; + if (mm->context.l1_stack_save) + free_l1stack(); + + while ((tmp = mm->context.sram_list)) { + mm->context.sram_list = tmp->next; + sram_free(tmp->addr); + kfree(tmp); + } +} + +static inline unsigned long +alloc_l1stack(unsigned long length, unsigned long *stack_base) +{ + if (nr_l1stack_tasks == 0) { + l1_stack_base = l1sram_alloc_max(&l1_stack_len); + if (!l1_stack_base) + return 0; + } + + if (l1_stack_len < length) { + if (nr_l1stack_tasks == 0) + l1sram_free(l1_stack_base); + return 0; + } + *stack_base = (unsigned long)l1_stack_base; + nr_l1stack_tasks++; + return l1_stack_len; +} + +static inline int +activate_l1stack(struct mm_struct *mm, unsigned long sp_base) +{ + if (current_l1_stack_save) + memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len); + mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base; + memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len); + return 1; +} + +#define deactivate_mm(tsk,mm) do { } while (0) + +static inline void activate_mm(struct mm_struct *prev_mm, + struct mm_struct *next_mm) +{ + if (!next_mm->context.l1_stack_save) + return; + if (next_mm->context.l1_stack_save == current_l1_stack_save) + return; + if (current_l1_stack_save) { + memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len); + } + current_l1_stack_save = next_mm->context.l1_stack_save; + memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len); +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + activate_mm(prev, next); +} + +#endif diff --git a/include/asm-blackfin/module.h b/include/asm-blackfin/module.h new file mode 100644 index 000000000000..3c7ce1644280 --- /dev/null +++ b/include/asm-blackfin/module.h @@ -0,0 +1,19 @@ +#ifndef _ASM_BFIN_MODULE_H +#define _ASM_BFIN_MODULE_H + +#define MODULE_SYMBOL_PREFIX 
"_" + +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Ehdr Elf32_Ehdr +#define FLG_CODE_IN_L1 0x10 +#define FLG_DATA_IN_L1 0x20 + +struct mod_arch_specific { + Elf_Shdr *text_l1; + Elf_Shdr *data_a_l1; + Elf_Shdr *bss_a_l1; + Elf_Shdr *data_b_l1; + Elf_Shdr *bss_b_l1; +}; +#endif /* _ASM_BFIN_MODULE_H */ diff --git a/include/asm-blackfin/msgbuf.h b/include/asm-blackfin/msgbuf.h new file mode 100644 index 000000000000..6fcbe8cd801d --- /dev/null +++ b/include/asm-blackfin/msgbuf.h @@ -0,0 +1,31 @@ +#ifndef _BFIN_MSGBUF_H +#define _BFIN_MSGBUF_H + +/* + * The msqid64_ds structure for bfin architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + unsigned long __unused1; + __kernel_time_t msg_rtime; /* last msgrcv time */ + unsigned long __unused2; + __kernel_time_t msg_ctime; /* last change time */ + unsigned long __unused3; + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused4; + unsigned long __unused5; +}; + +#endif /* _BFIN_MSGBUF_H */ diff --git a/include/asm-blackfin/mutex.h b/include/asm-blackfin/mutex.h new file mode 100644 index 000000000000..458c1f7fbc18 --- /dev/null +++ b/include/asm-blackfin/mutex.h @@ -0,0 +1,9 @@ +/* + * Pull in the generic implementation for the mutex fastpath. + * + * TODO: implement optimized primitives instead, or leave the generic + * implementation in place, or pick the atomic_xchg() based generic + * implementation. (see asm-generic/mutex-xchg.h for details) + */ + +#include diff --git a/include/asm-blackfin/namei.h b/include/asm-blackfin/namei.h new file mode 100644 index 000000000000..8b89a2d65cb4 --- /dev/null +++ b/include/asm-blackfin/namei.h @@ -0,0 +1,19 @@ +/* + * linux/include/asm/namei.h + * + * Included from linux/fs/namei.c + * + * Changes made by Lineo Inc. May 2001 + */ + +#ifndef __BFIN_NAMEI_H +#define __BFIN_NAMEI_H + +/* This dummy routine maybe changed to something useful + * for /usr/gnemul/ emulation stuff. + * Look at asm-sparc/namei.h for details. + */ + +#define __emul_prefix() NULL + +#endif diff --git a/include/asm-blackfin/page.h b/include/asm-blackfin/page.h new file mode 100644 index 000000000000..ffad947f1b2a --- /dev/null +++ b/include/asm-blackfin/page.h @@ -0,0 +1,89 @@ +#ifndef _BLACKFIN_PAGE_H +#define _BLACKFIN_PAGE_H + +/* PAGE_SHIFT determines the page size */ + +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#ifdef __KERNEL__ + +#include + +#ifndef __ASSEMBLY__ + +#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) +#define free_user_page(page, addr) free_page(addr) + +#define clear_page(page) memset((page), 0, PAGE_SIZE) +#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) + +#define clear_user_page(page, vaddr,pg) clear_page(page) +#define copy_user_page(to, from, vaddr,pg) copy_page(to, from) + +/* + * These are used to make use of C type-checking.. 
+ */ +typedef struct { + unsigned long pte; +} pte_t; +typedef struct { + unsigned long pmd[16]; +} pmd_t; +typedef struct { + unsigned long pgd; +} pgd_t; +typedef struct { + unsigned long pgprot; +} pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((&x)->pmd[0]) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) + +extern unsigned long memory_start; +extern unsigned long memory_end; + +#endif /* !__ASSEMBLY__ */ + +#include +#include + +#define PAGE_OFFSET (PAGE_OFFSET_RAW) + +#ifndef __ASSEMBLY__ + +#define __pa(vaddr) virt_to_phys((void *)(vaddr)) +#define __va(paddr) phys_to_virt((unsigned long)(paddr)) + +#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) +#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) +#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) +#define VALID_PAGE(page) ((page - mem_map) < max_mapnr) + +#define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn)) +#define page_to_pfn(page) virt_to_pfn(page_to_virt(page)) +#define pfn_valid(pfn) ((pfn) < max_mapnr) + +#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ + ((void *)(kaddr) < (void *)memory_end)) + +#include + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* _BLACKFIN_PAGE_H */ diff --git a/include/asm-blackfin/page_offset.h b/include/asm-blackfin/page_offset.h new file mode 100644 index 000000000000..3b671d5fd70d --- /dev/null +++ b/include/asm-blackfin/page_offset.h @@ -0,0 +1,6 @@ + +/* This handles the memory map.. */ + +#ifdef CONFIG_BFIN +#define PAGE_OFFSET_RAW 0x00000000 +#endif diff --git a/include/asm-blackfin/param.h b/include/asm-blackfin/param.h new file mode 100644 index 000000000000..41564a6347f8 --- /dev/null +++ b/include/asm-blackfin/param.h @@ -0,0 +1,22 @@ +#ifndef _BLACKFIN_PARAM_H +#define _BLACKFIN_PARAM_H + +#ifdef __KERNEL__ +#define HZ CONFIG_HZ +#define USER_HZ 100 +#define CLOCKS_PER_SEC (USER_HZ) +#endif + +#ifndef HZ +#define HZ 100 +#endif + +#define EXEC_PAGESIZE 4096 + +#ifndef NOGROUP +#define NOGROUP (-1) +#endif + +#define MAXHOSTNAMELEN 64 /* max length of hostname */ + +#endif /* _BLACKFIN_PARAM_H */ diff --git a/include/asm-blackfin/pci.h b/include/asm-blackfin/pci.h new file mode 100644 index 000000000000..61277358c865 --- /dev/null +++ b/include/asm-blackfin/pci.h @@ -0,0 +1,148 @@ +/* Changed from asm-m68k version, Lineo Inc. May 2001 */ + +#ifndef _ASM_BFIN_PCI_H +#define _ASM_BFIN_PCI_H + +#include + +/* + * + * Written by Wout Klaren. + */ + +/* Added by Chang Junxiao */ +#define PCIBIOS_MIN_IO 0x00001000 +#define PCIBIOS_MIN_MEM 0x10000000 + +#define PCI_DMA_BUS_IS_PHYS (1) +struct pci_ops; + +/* + * Structure with hardware dependent information and functions of the + * PCI bus. + */ +struct pci_bus_info { + + /* + * Resources of the PCI bus. + */ + struct resource mem_space; + struct resource io_space; + + /* + * System dependent functions. 
+ */ + struct pci_ops *bfin_pci_ops; + void (*fixup) (int pci_modify); + void (*conf_device) (unsigned char bus, unsigned char device_fn); +}; + +#define pcibios_assign_all_busses() 0 +static inline void pcibios_set_master(struct pci_dev *dev) +{ + + /* No special bus mastering setup handling */ +} +static inline void pcibios_penalize_isa_irq(int irq) +{ + + /* We don't do dynamic PCI IRQ allocation */ +} +static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, + size_t size, int direction) +{ + if (direction == PCI_DMA_NONE) + BUG(); + + /* return virt_to_bus(ptr); */ + return (dma_addr_t) ptr; +} + +/* Unmap a single streaming mode DMA translation. The dma_addr and size + * must match what was provided for in a previous pci_map_single call. All + * other usages are undefined. + * + * After this call, reads by the cpu to the buffer are guarenteed to see + * whatever the device wrote there. + */ +static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, + size_t size, int direction) +{ + if (direction == PCI_DMA_NONE) + BUG(); + + /* Nothing to do */ +} + +/* Map a set of buffers described by scatterlist in streaming + * mode for DMA. This is the scather-gather version of the + * above pci_map_single interface. Here the scatter gather list + * elements are each tagged with the appropriate dma address + * and length. They are obtained via sg_dma_{address,length}(SG). + * + * NOTE: An implementation may be able to use a smaller number of + * DMA address/length pairs than there are SG table elements. + * (for example via virtual mapping capabilities) + * The routine returns the number of addr/length pairs actually + * used, at most nents. + * + * Device ownership issues as mentioned above for pci_map_single are + * the same here. + */ +static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, + int nents, int direction) +{ + if (direction == PCI_DMA_NONE) + BUG(); + return nents; +} + +/* Unmap a set of streaming mode DMA translations. + * Again, cpu read rules concerning calls here are the same as for + * pci_unmap_single() above. + */ +static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, + int nents, int direction) +{ + if (direction == PCI_DMA_NONE) + BUG(); + + /* Nothing to do */ +} + +/* Make physical memory consistent for a single + * streaming mode DMA translation after a transfer. + * + * If you perform a pci_map_single() but wish to interrogate the + * buffer using the cpu, yet do not wish to teardown the PCI dma + * mapping, you must call this function before doing so. At the + * next point you give the PCI dma address back to the card, the + * device again owns the buffer. + */ +static inline void pci_dma_sync_single(struct pci_dev *hwdev, + dma_addr_t dma_handle, size_t size, + int direction) +{ + if (direction == PCI_DMA_NONE) + BUG(); + + /* Nothing to do */ +} + +/* Make physical memory consistent for a set of streaming + * mode DMA translations after a transfer. + * + * The same as pci_dma_sync_single but for a scatter-gather list, + * same rules and usage. 
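+ *
+ * As an illustrative sketch (pdev, sg, nents and n are placeholder
+ * driver variables, not part of this header), the intended call
+ * sequence is:
+ *
+ *	n = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
+ *	... device performs the DMA transfer ...
+ *	pci_dma_sync_sg(pdev, sg, n, PCI_DMA_FROMDEVICE);
+ *	... cpu may now look at the data ...
+ *	pci_unmap_sg(pdev, sg, n, PCI_DMA_FROMDEVICE);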
+ */ +static inline void pci_dma_sync_sg(struct pci_dev *hwdev, + struct scatterlist *sg, int nelems, + int direction) +{ + if (direction == PCI_DMA_NONE) + BUG(); + + /* Nothing to do */ +} + +#endif /* _ASM_BFIN_PCI_H */ diff --git a/include/asm-blackfin/percpu.h b/include/asm-blackfin/percpu.h new file mode 100644 index 000000000000..78dd61f6b39f --- /dev/null +++ b/include/asm-blackfin/percpu.h @@ -0,0 +1,6 @@ +#ifndef __ARCH_BLACKFIN_PERCPU__ +#define __ARCH_BLACKFIN_PERCPU__ + +#include + +#endif /* __ARCH_BLACKFIN_PERCPU__ */ diff --git a/include/asm-blackfin/pgalloc.h b/include/asm-blackfin/pgalloc.h new file mode 100644 index 000000000000..c686e0542fd0 --- /dev/null +++ b/include/asm-blackfin/pgalloc.h @@ -0,0 +1,8 @@ +#ifndef _BLACKFIN_PGALLOC_H +#define _BLACKFIN_PGALLOC_H + +#include + +#define check_pgt_cache() do { } while (0) + +#endif /* _BLACKFIN_PGALLOC_H */ diff --git a/include/asm-blackfin/pgtable.h b/include/asm-blackfin/pgtable.h new file mode 100644 index 000000000000..5a8f9e431c40 --- /dev/null +++ b/include/asm-blackfin/pgtable.h @@ -0,0 +1,96 @@ +#ifndef _BLACKFIN_PGTABLE_H +#define _BLACKFIN_PGTABLE_H + +#include + +#include +#include + +typedef pte_t *pte_addr_t; +/* +* Trivial page table functions. +*/ +#define pgd_present(pgd) (1) +#define pgd_none(pgd) (0) +#define pgd_bad(pgd) (0) +#define pgd_clear(pgdp) +#define kern_addr_valid(addr) (1) + +#define pmd_offset(a, b) ((void *)0) +#define pmd_none(x) (!pmd_val(x)) +#define pmd_present(x) (pmd_val(x)) +#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) +#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK) + +#define kern_addr_valid(addr) (1) + +#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */ +#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */ +#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */ +#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */ +#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */ + +extern void paging_init(void); + +#define __swp_type(x) (0) +#define __swp_offset(x) (0) +#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +static inline int pte_file(pte_t pte) +{ + return 0; +} + +#define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) + +/* + * Page assess control based on Blackfin CPLB management + */ +#define _PAGE_RD (CPLB_USER_RD) +#define _PAGE_WR (CPLB_USER_WR) +#define _PAGE_USER (CPLB_USER_RD | CPLB_USER_WR) +#define _PAGE_ACCESSED CPLB_ALL_ACCESS +#define _PAGE_DIRTY (CPLB_DIRTY) + +#define PTE_BIT_FUNC(fn, op) \ + static inline pte_t pte_##fn(pte_t _pte) { _pte.pte op; return _pte; } + +PTE_BIT_FUNC(rdprotect, &= ~_PAGE_RD); +PTE_BIT_FUNC(mkread, |= _PAGE_RD); +PTE_BIT_FUNC(wrprotect, &= ~_PAGE_WR); +PTE_BIT_FUNC(mkwrite, |= _PAGE_WR); +PTE_BIT_FUNC(exprotect, &= ~_PAGE_USER); +PTE_BIT_FUNC(mkexec, |= _PAGE_USER); +PTE_BIT_FUNC(mkclean, &= ~_PAGE_DIRTY); +PTE_BIT_FUNC(mkdirty, |= _PAGE_DIRTY); +PTE_BIT_FUNC(mkold, &= ~_PAGE_ACCESSED); +PTE_BIT_FUNC(mkyoung, |= _PAGE_ACCESSED); + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +#define ZERO_PAGE(vaddr) (virt_to_page(0)) + +extern unsigned int kobjsize(const void *objp); + +#define swapper_pg_dir ((pgd_t *) 0) +/* + * No page table caches to initialise. 
+ */ +#define pgtable_cache_init() do { } while (0) +#define io_remap_pfn_range remap_pfn_range + +/* + * All 32bit addresses are effectively valid for vmalloc... + * Sort of meaningless for non-VM targets. + */ +#define VMALLOC_START 0 +#define VMALLOC_END 0xffffffff + +#include + +#endif /* _BLACKFIN_PGTABLE_H */ diff --git a/include/asm-blackfin/poll.h b/include/asm-blackfin/poll.h new file mode 100644 index 000000000000..94cc2636e0e2 --- /dev/null +++ b/include/asm-blackfin/poll.h @@ -0,0 +1,24 @@ +#ifndef __BFIN_POLL_H +#define __BFIN_POLL_H + +#define POLLIN 1 +#define POLLPRI 2 +#define POLLOUT 4 +#define POLLERR 8 +#define POLLHUP 16 +#define POLLNVAL 32 +#define POLLRDNORM 64 +#define POLLWRNORM POLLOUT +#define POLLRDBAND 128 +#define POLLWRBAND 256 +#define POLLMSG 0x0400 +#define POLLREMOVE 0x1000 +#define POLLRDHUP 0x2000 + +struct pollfd { + int fd; + short events; + short revents; +}; + +#endif /* __BFIN_POLL_H */ diff --git a/include/asm-blackfin/posix_types.h b/include/asm-blackfin/posix_types.h new file mode 100644 index 000000000000..c3fa50fa50b8 --- /dev/null +++ b/include/asm-blackfin/posix_types.h @@ -0,0 +1,65 @@ +#ifndef __ARCH_BFIN_POSIX_TYPES_H +#define __ARCH_BFIN_POSIX_TYPES_H + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. + */ + +typedef unsigned long __kernel_ino_t; +typedef unsigned short __kernel_mode_t; +typedef unsigned short __kernel_nlink_t; +typedef long __kernel_off_t; +typedef int __kernel_pid_t; +typedef unsigned int __kernel_ipc_pid_t; +typedef unsigned int __kernel_uid_t; +typedef unsigned int __kernel_gid_t; +typedef unsigned long __kernel_size_t; +typedef long __kernel_ssize_t; +typedef int __kernel_ptrdiff_t; +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef int __kernel_timer_t; +typedef int __kernel_clockid_t; +typedef int __kernel_daddr_t; +typedef char *__kernel_caddr_t; +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +typedef unsigned int __kernel_uid32_t; +typedef unsigned int __kernel_gid32_t; + +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; +typedef unsigned short __kernel_old_dev_t; + +#ifdef __GNUC__ +typedef long long __kernel_loff_t; +#endif + +typedef struct { +#if defined(__KERNEL__) || defined(__USE_ALL) + int val[2]; +#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ + int __val[2]; +#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ +} __kernel_fsid_t; + +#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) + +#undef __FD_SET +#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d)) + +#undef __FD_CLR +#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d)) + +#undef __FD_ISSET +#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) + +#undef __FD_ZERO +#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp))) + +#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ + +#endif diff --git a/include/asm-blackfin/processor.h b/include/asm-blackfin/processor.h new file mode 100644 index 000000000000..997465c93e82 --- /dev/null +++ b/include/asm-blackfin/processor.h @@ -0,0 +1,130 @@ +#ifndef __ASM_BFIN_PROCESSOR_H +#define __ASM_BFIN_PROCESSOR_H + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). 
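+ * It uses the GCC local-label and label-as-value extensions: the
+ * statement expression declares a label at the point of expansion and
+ * evaluates to that label's address (&&_l).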
+ */ +#define current_text_addr() ({ __label__ _l; _l: &&_l;}) + +#include +#include +#include + +static inline unsigned long rdusp(void) +{ + unsigned long usp; + + __asm__ __volatile__("%0 = usp;\n\t":"=da"(usp)); + return usp; +} + +static inline void wrusp(unsigned long usp) +{ + __asm__ __volatile__("usp = %0;\n\t"::"da"(usp)); +} + +/* + * User space process size: 1st byte beyond user address space. + */ +extern unsigned long memory_end; +#define TASK_SIZE (memory_end) + +#define TASK_UNMAPPED_BASE 0 + +struct thread_struct { + unsigned long ksp; /* kernel stack pointer */ + unsigned long usp; /* user stack pointer */ + unsigned short seqstat; /* saved status register */ + unsigned long esp0; /* points to SR of stack frame pt_regs */ + unsigned long pc; /* instruction pointer */ + void * debuggerinfo; +}; + +#define INIT_THREAD { \ + sizeof(init_stack) + (unsigned long) init_stack, 0, \ + PS_S, 0, 0 \ +} + +/* + * Do necessary setup to start up a newly executed thread. + * + * pass the data segment into user programs if it exists, + * it can't hurt anything as far as I can tell + */ +#define start_thread(_regs, _pc, _usp) \ +do { \ + set_fs(USER_DS); \ + (_regs)->pc = (_pc); \ + if (current->mm) \ + (_regs)->p5 = current->mm->start_data; \ + current->thread_info->l1_task_info.stack_start \ + = (void *)current->mm->context.stack_start; \ + current->thread_info->l1_task_info.lowest_sp = (void *)(_usp); \ + memcpy(L1_SCRATCH_TASK_INFO, ¤t->thread_info->l1_task_info, \ + sizeof(*L1_SCRATCH_TASK_INFO)); \ + wrusp(_usp); \ +} while(0) + +/* Forward declaration, a strange C thing */ +struct task_struct; + +/* Free all resources held by a thread. */ +static inline void release_thread(struct task_struct *dead_task) +{ +} + +#define prepare_to_copy(tsk) do { } while (0) + +extern int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags); + +/* + * Free current thread data structures etc.. + */ +static inline void exit_thread(void) +{ +} + +/* + * Return saved PC of a blocked thread. + */ +#define thread_saved_pc(tsk) (tsk->thread.pc) + +unsigned long get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) \ + ({ \ + unsigned long eip = 0; \ + if ((tsk)->thread.esp0 > PAGE_SIZE && \ + MAP_NR((tsk)->thread.esp0) < max_mapnr) \ + eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \ + eip; }) +#define KSTK_ESP(tsk) ((tsk) == current ? 
rdusp() : (tsk)->thread.usp) + +#define cpu_relax() barrier() + +/* Get the Silicon Revision of the chip */ +static inline uint32_t bfin_revid(void) +{ + /* stored in the upper 4 bits */ + return bfin_read_CHIPID() >> 28; +} + +static inline uint32_t bfin_compiled_revid(void) +{ +#if defined(CONFIG_BF_REV_0_0) + return 0; +#elif defined(CONFIG_BF_REV_0_1) + return 1; +#elif defined(CONFIG_BF_REV_0_2) + return 2; +#elif defined(CONFIG_BF_REV_0_3) + return 3; +#elif defined(CONFIG_BF_REV_0_4) + return 4; +#elif defined(CONFIG_BF_REV_0_5) + return 5; +#endif +} + +#endif diff --git a/include/asm-blackfin/ptrace.h b/include/asm-blackfin/ptrace.h new file mode 100644 index 000000000000..b8346cd3a6f6 --- /dev/null +++ b/include/asm-blackfin/ptrace.h @@ -0,0 +1,166 @@ +#ifndef _BFIN_PTRACE_H +#define _BFIN_PTRACE_H + +/* + * GCC defines register number like this: + * ----------------------------- + * 0 - 7 are data registers R0-R7 + * 8 - 15 are address registers P0-P7 + * 16 - 31 dsp registers I/B/L0 -- I/B/L3 & M0--M3 + * 32 - 33 A registers A0 & A1 + * 34 - status register + * ----------------------------- + * + * We follows above, except: + * 32-33 --- Low 32-bit of A0&1 + * 34-35 --- High 8-bit of A0&1 + */ + +#ifndef __ASSEMBLY__ + +/* this struct defines the way the registers are stored on the + stack during a system call. */ + +struct pt_regs { + long orig_pc; + long ipend; + long seqstat; + long rete; + long retn; + long retx; + long pc; /* PC == RETI */ + long rets; + long reserved; /* Used as scratch during system calls */ + long astat; + long lb1; + long lb0; + long lt1; + long lt0; + long lc1; + long lc0; + long a1w; + long a1x; + long a0w; + long a0x; + long b3; + long b2; + long b1; + long b0; + long l3; + long l2; + long l1; + long l0; + long m3; + long m2; + long m1; + long m0; + long i3; + long i2; + long i1; + long i0; + long usp; + long fp; + long p5; + long p4; + long p3; + long p2; + long p1; + long p0; + long r7; + long r6; + long r5; + long r4; + long r3; + long r2; + long r1; + long r0; + long orig_r0; + long orig_p0; + long syscfg; +}; + +/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 /* ptrace signal */ + +#ifdef CONFIG_BINFMT_ELF_FDPIC +#define PTRACE_GETFDPIC 31 +#define PTRACE_GETFDPIC_EXEC 0 +#define PTRACE_GETFDPIC_INTERP 1 +#endif + +#define PS_S (0x0002) + +/* user_mode returns true if only one bit is set in IPEND, other than the + master interrupt enable. */ +#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1))) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +extern void show_regs(struct pt_regs *); + +#endif /* __ASSEMBLY__ */ + +/* + * Offsets used by 'ptrace' system call interface. 
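+ * These are byte offsets into the register frame laid out by struct
+ * pt_regs above (e.g. PT_PC 24 matches the pc member and PT_R0 204
+ * the r0 member), typically used as the addr argument of
+ * PTRACE_PEEKUSR/PTRACE_POKEUSR requests.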
+ */ + +#define PT_R0 204 +#define PT_R1 200 +#define PT_R2 196 +#define PT_R3 192 +#define PT_R4 188 +#define PT_R5 184 +#define PT_R6 180 +#define PT_R7 176 +#define PT_P0 172 +#define PT_P1 168 +#define PT_P2 164 +#define PT_P3 160 +#define PT_P4 156 +#define PT_P5 152 +#define PT_FP 148 +#define PT_USP 144 +#define PT_I0 140 +#define PT_I1 136 +#define PT_I2 132 +#define PT_I3 128 +#define PT_M0 124 +#define PT_M1 120 +#define PT_M2 116 +#define PT_M3 112 +#define PT_L0 108 +#define PT_L1 104 +#define PT_L2 100 +#define PT_L3 96 +#define PT_B0 92 +#define PT_B1 88 +#define PT_B2 84 +#define PT_B3 80 +#define PT_A0X 76 +#define PT_A0W 72 +#define PT_A1X 68 +#define PT_A1W 64 +#define PT_LC0 60 +#define PT_LC1 56 +#define PT_LT0 52 +#define PT_LT1 48 +#define PT_LB0 44 +#define PT_LB1 40 +#define PT_ASTAT 36 +#define PT_RESERVED 32 +#define PT_RETS 28 +#define PT_PC 24 +#define PT_RETX 20 +#define PT_RETN 16 +#define PT_RETE 12 +#define PT_SEQSTAT 8 +#define PT_IPEND 4 + +#define PT_SYSCFG 216 +#define PT_TEXT_ADDR 220 +#define PT_TEXT_END_ADDR 224 +#define PT_DATA_ADDR 228 +#define PT_FDPIC_EXEC 232 +#define PT_FDPIC_INTERP 236 + +#endif /* _BFIN_PTRACE_H */ diff --git a/include/asm-blackfin/resource.h b/include/asm-blackfin/resource.h new file mode 100644 index 000000000000..091355ab3495 --- /dev/null +++ b/include/asm-blackfin/resource.h @@ -0,0 +1,6 @@ +#ifndef _BFIN_RESOURCE_H +#define _BFIN_RESOURCE_H + +#include + +#endif /* _BFIN_RESOURCE_H */ diff --git a/include/asm-blackfin/scatterlist.h b/include/asm-blackfin/scatterlist.h new file mode 100644 index 000000000000..60e07b92044c --- /dev/null +++ b/include/asm-blackfin/scatterlist.h @@ -0,0 +1,26 @@ +#ifndef _BLACKFIN_SCATTERLIST_H +#define _BLACKFIN_SCATTERLIST_H + +#include + +struct scatterlist { + struct page *page; + unsigned int offset; + dma_addr_t dma_address; + unsigned int length; +}; + +/* + * These macros should be used after a pci_map_sg call has been done + * to get bus addresses of each of the SG entries and their lengths. + * You should only work with the number of sg entries pci_map_sg + * returns, or alternatively stop on the first sg_dma_len(sg) which + * is 0. + */ +#define sg_address(sg) (page_address((sg)->page) + (sg)->offset) +#define sg_dma_address(sg) ((sg)->dma_address) +#define sg_dma_len(sg) ((sg)->length) + +#define ISA_DMA_THRESHOLD (0xffffffff) + +#endif /* !(_BLACKFIN_SCATTERLIST_H) */ diff --git a/include/asm-blackfin/sections.h b/include/asm-blackfin/sections.h new file mode 100644 index 000000000000..1443c3353a8c --- /dev/null +++ b/include/asm-blackfin/sections.h @@ -0,0 +1,7 @@ +#ifndef _BLACKFIN_SECTIONS_H +#define _BLACKFIN_SECTIONS_H + +/* nothing to see, move along */ +#include + +#endif diff --git a/include/asm-blackfin/segment.h b/include/asm-blackfin/segment.h new file mode 100644 index 000000000000..02cfd09b5a99 --- /dev/null +++ b/include/asm-blackfin/segment.h @@ -0,0 +1,7 @@ +#ifndef _BFIN_SEGMENT_H +#define _BFIN_SEGMENT_H + +#define KERNEL_DS (0x5) +#define USER_DS (0x1) + +#endif /* _BFIN_SEGMENT_H */ diff --git a/include/asm-blackfin/semaphore-helper.h b/include/asm-blackfin/semaphore-helper.h new file mode 100644 index 000000000000..9082b0dc3eb5 --- /dev/null +++ b/include/asm-blackfin/semaphore-helper.h @@ -0,0 +1,82 @@ +/* Based on M68K version, Lineo Inc. May 2001 */ + +#ifndef _BFIN_SEMAPHORE_HELPER_H +#define _BFIN_SEMAPHORE_HELPER_H + +/* + * SMP- and interrupt-safe semaphores helper functions. 
+ * + * (C) Copyright 1996 Linus Torvalds + * + */ + +#include + +/* + * These two _must_ execute atomically wrt each other. + */ +static inline void wake_one_more(struct semaphore *sem) +{ + atomic_inc(&sem->waking); +} + +static inline int waking_non_zero(struct semaphore *sem) +{ + int ret; + unsigned long flags = 0; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + ret = 0; + if (atomic_read(&sem->waking) > 0) { + atomic_dec(&sem->waking); + ret = 1; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +/* + * waking_non_zero_interruptible: + * 1 got the lock + * 0 go to sleep + * -EINTR interrupted + */ +static inline int waking_non_zero_interruptible(struct semaphore *sem, + struct task_struct *tsk) +{ + int ret = 0; + unsigned long flags = 0; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (atomic_read(&sem->waking) > 0) { + atomic_dec(&sem->waking); + ret = 1; + } else if (signal_pending(tsk)) { + atomic_inc(&sem->count); + ret = -EINTR; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +/* + * waking_non_zero_trylock: + * 1 failed to lock + * 0 got the lock + */ +static inline int waking_non_zero_trylock(struct semaphore *sem) +{ + int ret = 1; + unsigned long flags = 0; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (atomic_read(&sem->waking) > 0) { + atomic_dec(&sem->waking); + ret = 0; + } else + atomic_inc(&sem->count); + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +#endif /* _BFIN_SEMAPHORE_HELPER_H */ diff --git a/include/asm-blackfin/semaphore.h b/include/asm-blackfin/semaphore.h new file mode 100644 index 000000000000..94c04d7ab23e --- /dev/null +++ b/include/asm-blackfin/semaphore.h @@ -0,0 +1,106 @@ +#ifndef _BFIN_SEMAPHORE_H +#define _BFIN_SEMAPHORE_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +/* + * Interrupt-safe semaphores.. + * + * (C) Copyright 1996 Linus Torvalds + * + * BFIN version by akbar hussain Lineo Inc April 2001 + * + */ + +struct semaphore { + atomic_t count; + int sleepers; + wait_queue_head_t wait; +}; + +#define __SEMAPHORE_INITIALIZER(name, n) \ +{ \ + .count = ATOMIC_INIT(n), \ + .sleepers = 0, \ + .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ +} + +#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) + +#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) +#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) + +static inline void sema_init(struct semaphore *sem, int val) +{ + *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val); +} + +static inline void init_MUTEX(struct semaphore *sem) +{ + sema_init(sem, 1); +} + +static inline void init_MUTEX_LOCKED(struct semaphore *sem) +{ + sema_init(sem, 0); +} + +asmlinkage void __down(struct semaphore *sem); +asmlinkage int __down_interruptible(struct semaphore *sem); +asmlinkage int __down_trylock(struct semaphore *sem); +asmlinkage void __up(struct semaphore *sem); + +extern spinlock_t semaphore_wake_lock; + +/* + * This is ugly, but we want the default case to fall through. + * "down_failed" is a special asm handler that calls the C + * routine that actually waits. 
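+ *
+ * The fast path below is a single atomic_dec_return(); only when the
+ * count goes negative (i.e. the semaphore was already held) do we fall
+ * into the out-of-line __down*() routines declared above.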
+ */ +static inline void down(struct semaphore *sem) +{ + might_sleep(); + if (atomic_dec_return(&sem->count) < 0) + __down(sem); +} + +static inline int down_interruptible(struct semaphore *sem) +{ + int ret = 0; + + might_sleep(); + if (atomic_dec_return(&sem->count) < 0) + ret = __down_interruptible(sem); + return (ret); +} + +static inline int down_trylock(struct semaphore *sem) +{ + int ret = 0; + + if (atomic_dec_return(&sem->count) < 0) + ret = __down_trylock(sem); + return ret; +} + +/* + * Note! This is subtle. We jump to wake people up only if + * the semaphore was negative (== somebody was waiting on it). + * The default case (no contention) will result in NO + * jumps for both down() and up(). + */ +static inline void up(struct semaphore *sem) +{ + if (atomic_inc_return(&sem->count) <= 0) + __up(sem); +} + +#endif /* __ASSEMBLY__ */ +#endif /* _BFIN_SEMAPHORE_H */ diff --git a/include/asm-blackfin/sembuf.h b/include/asm-blackfin/sembuf.h new file mode 100644 index 000000000000..18deb5c7fa5d --- /dev/null +++ b/include/asm-blackfin/sembuf.h @@ -0,0 +1,25 @@ +#ifndef _BFIN_SEMBUF_H +#define _BFIN_SEMBUF_H + +/* + * The semid64_ds structure for bfin architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + unsigned long __unused1; + __kernel_time_t sem_ctime; /* last change time */ + unsigned long __unused2; + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _BFIN_SEMBUF_H */ diff --git a/include/asm-blackfin/setup.h b/include/asm-blackfin/setup.h new file mode 100644 index 000000000000..01c8c6cbe6fc --- /dev/null +++ b/include/asm-blackfin/setup.h @@ -0,0 +1,17 @@ +/* +** asm/setup.h -- Definition of the Linux/bfin setup information +** +** This file is subject to the terms and conditions of the GNU General Public +** License. See the file COPYING in the main directory of this archive +** for more details. +** +** Copyright Lineo, Inc 2001 Tony Kou +** +*/ + +#ifndef _BFIN_SETUP_H +#define _BFIN_SETUP_H + +#define COMMAND_LINE_SIZE 512 + +#endif /* _BFIN_SETUP_H */ diff --git a/include/asm-blackfin/shmbuf.h b/include/asm-blackfin/shmbuf.h new file mode 100644 index 000000000000..612436303e89 --- /dev/null +++ b/include/asm-blackfin/shmbuf.h @@ -0,0 +1,42 @@ +#ifndef _BFIN_SHMBUF_H +#define _BFIN_SHMBUF_H + +/* + * The shmid64_ds structure for bfin architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + unsigned long __unused1; + __kernel_time_t shm_dtime; /* last detach time */ + unsigned long __unused2; + __kernel_time_t shm_ctime; /* last change time */ + unsigned long __unused3; + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. 
of current attaches */ + unsigned long __unused4; + unsigned long __unused5; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _BFIN_SHMBUF_H */ diff --git a/include/asm-blackfin/shmparam.h b/include/asm-blackfin/shmparam.h new file mode 100644 index 000000000000..3c03906b7664 --- /dev/null +++ b/include/asm-blackfin/shmparam.h @@ -0,0 +1,6 @@ +#ifndef _BFIN_SHMPARAM_H +#define _BFIN_SHMPARAM_H + +#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ + +#endif /* _BFIN_SHMPARAM_H */ diff --git a/include/asm-blackfin/sigcontext.h b/include/asm-blackfin/sigcontext.h new file mode 100644 index 000000000000..ce00b03c2775 --- /dev/null +++ b/include/asm-blackfin/sigcontext.h @@ -0,0 +1,55 @@ +#ifndef _ASM_BLACKFIN_SIGCONTEXT_H +#define _ASM_BLACKFIN_SIGCONTEXT_H + +/* Add new entries at the end of the structure only. */ +struct sigcontext { + unsigned long sc_r0; + unsigned long sc_r1; + unsigned long sc_r2; + unsigned long sc_r3; + unsigned long sc_r4; + unsigned long sc_r5; + unsigned long sc_r6; + unsigned long sc_r7; + unsigned long sc_p0; + unsigned long sc_p1; + unsigned long sc_p2; + unsigned long sc_p3; + unsigned long sc_p4; + unsigned long sc_p5; + unsigned long sc_usp; + unsigned long sc_a0w; + unsigned long sc_a1w; + unsigned long sc_a0x; + unsigned long sc_a1x; + unsigned long sc_astat; + unsigned long sc_rets; + unsigned long sc_pc; + unsigned long sc_retx; + unsigned long sc_fp; + unsigned long sc_i0; + unsigned long sc_i1; + unsigned long sc_i2; + unsigned long sc_i3; + unsigned long sc_m0; + unsigned long sc_m1; + unsigned long sc_m2; + unsigned long sc_m3; + unsigned long sc_l0; + unsigned long sc_l1; + unsigned long sc_l2; + unsigned long sc_l3; + unsigned long sc_b0; + unsigned long sc_b1; + unsigned long sc_b2; + unsigned long sc_b3; + unsigned long sc_lc0; + unsigned long sc_lc1; + unsigned long sc_lt0; + unsigned long sc_lt1; + unsigned long sc_lb0; + unsigned long sc_lb1; + unsigned long sc_seqstat; +}; + +#endif diff --git a/include/asm-blackfin/siginfo.h b/include/asm-blackfin/siginfo.h new file mode 100644 index 000000000000..eca4565cea37 --- /dev/null +++ b/include/asm-blackfin/siginfo.h @@ -0,0 +1,35 @@ +#ifndef _BFIN_SIGINFO_H +#define _BFIN_SIGINFO_H + +#include +#include + +#define UID16_SIGINFO_COMPAT_NEEDED + +#define si_uid16 _sifields._kill._uid + +#define ILL_ILLPARAOP (__SI_FAULT|2) /* illegal opcode combine ********** */ +#define ILL_ILLEXCPT (__SI_FAULT|4) /* unrecoverable exception ********** */ +#define ILL_CPLB_VI (__SI_FAULT|9) /* D/I CPLB protect violation ******** */ +#define ILL_CPLB_MISS (__SI_FAULT|10) /* D/I CPLB miss ******** */ +#define ILL_CPLB_MULHIT (__SI_FAULT|11) /* D/I CPLB multiple hit ******** */ + +/* + * SIGBUS si_codes + */ +#define BUS_OPFETCH (__SI_FAULT|4) /* error from instruction fetch ******** */ + +/* + * SIGTRAP si_codes + */ +#define TRAP_STEP (__SI_FAULT|1) /* single-step breakpoint************* */ +#define TRAP_TRACEFLOW (__SI_FAULT|2) /* trace buffer overflow ************* */ +#define TRAP_WATCHPT (__SI_FAULT|3) /* watchpoint match ************* */ +#define TRAP_ILLTRAP (__SI_FAULT|4) /* illegal trap ************* */ + +/* + * SIGSEGV si_codes + */ +#define SEGV_STACKFLOW (__SI_FAULT|3) /* stack overflow */ + +#endif diff --git a/include/asm-blackfin/signal.h b/include/asm-blackfin/signal.h new 
file mode 100644 index 000000000000..0250429b736a --- /dev/null +++ b/include/asm-blackfin/signal.h @@ -0,0 +1,160 @@ +#ifndef _BLACKFIN_SIGNAL_H +#define _BLACKFIN_SIGNAL_H + +#include + +/* Avoid too many header ordering problems. */ +struct siginfo; + +#ifdef __KERNEL__ +/* Most things should be clean enough to redefine this at will, if care + is taken to make libc match. */ + +#define _NSIG 64 +#define _NSIG_BPW 32 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +#else +/* Here we must cater to libcs that poke about in kernel headers. */ + +#define NSIG 32 +typedef unsigned long sigset_t; + +#endif /* __KERNEL__ */ + +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGIOT 6 +#define SIGBUS 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGUSR1 10 +#define SIGSEGV 11 +#define SIGUSR2 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGSTKFLT 16 +#define SIGCHLD 17 +#define SIGCONT 18 +#define SIGSTOP 19 +#define SIGTSTP 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGURG 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGIO 29 +#define SIGPOLL SIGIO +/* +#define SIGLOST 29 +*/ +#define SIGPWR 30 +#define SIGSYS 31 +#define SIGUNUSED 31 + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX _NSIG + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. + */ +#define SA_NOCLDSTOP 0x00000001 +#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ +#define SA_SIGINFO 0x00000004 +#define SA_ONSTACK 0x08000000 +#define SA_RESTART 0x10000000 +#define SA_NODEFER 0x40000000 +#define SA_RESETHAND 0x80000000 + +#define SA_NOMASK SA_NODEFER +#define SA_ONESHOT SA_RESETHAND + +/* + * sigaltstack controls + */ +#define SS_ONSTACK 1 +#define SS_DISABLE 2 + +#define MINSIGSTKSZ 2048 +#define SIGSTKSZ 8192 + +#include + +#ifdef __KERNEL__ +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer) (void); +}; + +struct sigaction { + __sighandler_t sa_handler; + unsigned long sa_flags; + void (*sa_restorer) (void); + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +}; +#else +/* Here we must cater to libcs that poke about in kernel headers. 
*/ + +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction) (int, struct siginfo *, void *); + } _u; + sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer) (void); +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#ifdef __KERNEL__ + +#include +#undef __HAVE_ARCH_SIG_BITOPS + +#define ptrace_signal_deliver(regs, cookie) do { } while (0) + +#endif /* __KERNEL__ */ + +#endif /* _BLACKFIN_SIGNAL_H */ diff --git a/include/asm-blackfin/socket.h b/include/asm-blackfin/socket.h new file mode 100644 index 000000000000..5213c9652186 --- /dev/null +++ b/include/asm-blackfin/socket.h @@ -0,0 +1,53 @@ +#ifndef _ASM_SOCKET_H +#define _ASM_SOCKET_H + +#include + +/* For setsockoptions(2) */ +#define SOL_SOCKET 1 + +#define SO_DEBUG 1 +#define SO_REUSEADDR 2 +#define SO_TYPE 3 +#define SO_ERROR 4 +#define SO_DONTROUTE 5 +#define SO_BROADCAST 6 +#define SO_SNDBUF 7 +#define SO_RCVBUF 8 +#define SO_SNDBUFFORCE 32 +#define SO_RCVBUFFORCE 33 +#define SO_KEEPALIVE 9 +#define SO_OOBINLINE 10 +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_LINGER 13 +#define SO_BSDCOMPAT 14 +/* To add :#define SO_REUSEPORT 15 */ +#define SO_PASSCRED 16 +#define SO_PEERCRED 17 +#define SO_RCVLOWAT 18 +#define SO_SNDLOWAT 19 +#define SO_RCVTIMEO 20 +#define SO_SNDTIMEO 21 + +/* Security levels - as per NRL IPv6 - don't actually do anything */ +#define SO_SECURITY_AUTHENTICATION 22 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 +#define SO_SECURITY_ENCRYPTION_NETWORK 24 + +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 + +#define SO_PEERNAME 28 +#define SO_TIMESTAMP 29 +#define SCM_TIMESTAMP SO_TIMESTAMP + +#define SO_ACCEPTCONN 30 +#define SO_PEERSEC 31 +#define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#endif /* _ASM_SOCKET_H */ diff --git a/include/asm-blackfin/sockios.h b/include/asm-blackfin/sockios.h new file mode 100644 index 000000000000..426b89bfaa8b --- /dev/null +++ b/include/asm-blackfin/sockios.h @@ -0,0 +1,13 @@ +#ifndef __ARCH_BFIN_SOCKIOS__ +#define __ARCH_BFIN_SOCKIOS__ + +/* Socket-level I/O control calls. 
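+ * These use the generic ioctl numbers shared by most architectures:
+ * FIOSETOWN/FIOGETOWN and SIOCSPGRP/SIOCGPGRP manage the owner that
+ * receives SIGIO/SIGURG, SIOCATMARK tests for the urgent-data mark,
+ * and SIOCGSTAMP/SIOCGSTAMPNS return the last packet's timestamp.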
*/ +#define FIOSETOWN 0x8901 +#define SIOCSPGRP 0x8902 +#define FIOGETOWN 0x8903 +#define SIOCGPGRP 0x8904 +#define SIOCATMARK 0x8905 +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ + +#endif /* __ARCH_BFIN_SOCKIOS__ */ diff --git a/include/asm-blackfin/spinlock.h b/include/asm-blackfin/spinlock.h new file mode 100644 index 000000000000..64e908a50646 --- /dev/null +++ b/include/asm-blackfin/spinlock.h @@ -0,0 +1,6 @@ +#ifndef __BFIN_SPINLOCK_H +#define __BFIN_SPINLOCK_H + +#error blackfin architecture does not support SMP spin lock yet + +#endif diff --git a/include/asm-blackfin/stat.h b/include/asm-blackfin/stat.h new file mode 100644 index 000000000000..d2b6f11ec231 --- /dev/null +++ b/include/asm-blackfin/stat.h @@ -0,0 +1,63 @@ +#ifndef _BFIN_STAT_H +#define _BFIN_STAT_H + +struct stat { + unsigned short st_dev; + unsigned short __pad1; + unsigned long st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned short __pad2; + unsigned long st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long __unused1; + unsigned long st_mtime; + unsigned long __unused2; + unsigned long st_ctime; + unsigned long __unused3; + unsigned long __unused4; + unsigned long __unused5; +}; + +/* This matches struct stat64 in glibc2.1, hence the absolutely + * insane amounts of padding around dev_t's. + */ +struct stat64 { + unsigned long long st_dev; + unsigned char __pad1[4]; + +#define STAT64_HAS_BROKEN_ST_INO 1 + unsigned long __st_ino; + + unsigned int st_mode; + unsigned int st_nlink; + + unsigned long st_uid; + unsigned long st_gid; + + unsigned long long st_rdev; + unsigned char __pad2[4]; + + long long st_size; + unsigned long st_blksize; + + long long st_blocks; /* Number 512-byte blocks allocated. */ + + unsigned long st_atime; + unsigned long st_atime_nsec; + + unsigned long st_mtime; + unsigned long st_mtime_nsec; + + unsigned long st_ctime; + unsigned long st_ctime_nsec; + + unsigned long long st_ino; +}; + +#endif /* _BFIN_STAT_H */ diff --git a/include/asm-blackfin/statfs.h b/include/asm-blackfin/statfs.h new file mode 100644 index 000000000000..350672091ba3 --- /dev/null +++ b/include/asm-blackfin/statfs.h @@ -0,0 +1,6 @@ +#ifndef _BFIN_STATFS_H +#define _BFIN_STATFS_H + +#include + +#endif /* _BFIN_STATFS_H */ diff --git a/include/asm-blackfin/string.h b/include/asm-blackfin/string.h new file mode 100644 index 000000000000..6f1eb7d6d3cb --- /dev/null +++ b/include/asm-blackfin/string.h @@ -0,0 +1,104 @@ +#ifndef _BLACKFIN_STRING_H_ +#define _BLACKFIN_STRING_H_ + +#ifdef __KERNEL__ /* only set these up for kernel code */ + +#define __HAVE_ARCH_STRCPY +extern inline char *strcpy(char *dest, const char *src) +{ + char *xdest = dest; + char temp = 0; + + __asm__ __volatile__ + ("1:\t%2 = B [%1++] (Z);\n\t" + "B [%0++] = %2;\n\t" + "CC = %2;\n\t" + "if cc jump 1b (bp);\n" + : "+&a" (dest), "+&a" (src), "=&d" (temp) + ::"memory", "CC"); + return xdest; +} + +#define __HAVE_ARCH_STRNCPY +extern inline char *strncpy(char *dest, const char *src, size_t n) +{ + char *xdest = dest; + char temp = 0; + + if (n == 0) + return xdest; + + __asm__ __volatile__ + ("1:\t%3 = B [%1++] (Z);\n\t" + "B [%0++] = %3;\n\t" + "CC = %3;\n\t" + "if ! cc jump 2f;\n\t" + "%2 += -1;\n\t" + "CC = %2 == 0;\n\t" + "if ! 
cc jump 1b (bp);\n" + "2:\n" + : "+&a" (dest), "+&a" (src), "+&da" (n), "=&d" (temp) + ::"memory", "CC"); + return xdest; +} + +#define __HAVE_ARCH_STRCMP +extern inline int strcmp(const char *cs, const char *ct) +{ + char __res1, __res2; + + __asm__ + ("1:\t%2 = B[%0++] (Z);\n\t" /* get *cs */ + "%3 = B[%1++] (Z);\n\t" /* get *ct */ + "CC = %2 == %3;\n\t" /* compare a byte */ + "if ! cc jump 2f;\n\t" /* not equal, break out */ + "CC = %2;\n\t" /* at end of cs? */ + "if cc jump 1b (bp);\n\t" /* no, keep going */ + "jump.s 3f;\n" /* strings are equal */ + "2:\t%2 = %2 - %3;\n" /* *cs - *ct */ + "3:\n" + : "+&a" (cs), "+&a" (ct), "=&d" (__res1), "=&d" (__res2) + : : "CC"); + + return __res1; +} + +#define __HAVE_ARCH_STRNCMP +extern inline int strncmp(const char *cs, const char *ct, size_t count) +{ + char __res1, __res2; + + if (!count) + return 0; + __asm__ + ("1:\t%3 = B[%0++] (Z);\n\t" /* get *cs */ + "%4 = B[%1++] (Z);\n\t" /* get *ct */ + "CC = %3 == %4;\n\t" /* compare a byte */ + "if ! cc jump 3f;\n\t" /* not equal, break out */ + "CC = %3;\n\t" /* at end of cs? */ + "if ! cc jump 4f;\n\t" /* yes, all done */ + "%2 += -1;\n\t" /* no, adjust count */ + "CC = %2 == 0;\n\t" + "if ! cc jump 1b;\n" /* more to do, keep going */ + "2:\t%3 = 0;\n\t" /* strings are equal */ + "jump.s 4f;\n" + "3:\t%3 = %3 - %4;\n" /* *cs - *ct */ + "4:" + : "+&a" (cs), "+&a" (ct), "+&da" (count), "=&d" (__res1), "=&d" (__res2) + : : "CC"); + return __res1; +} + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *s, int c, size_t count); +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *d, const void *s, size_t count); +#define __HAVE_ARCH_MEMCMP +extern int memcmp(const void *, const void *, __kernel_size_t); +#define __HAVE_ARCH_MEMCHR +extern void *memchr(const void *s, int c, size_t n); +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *dest, const void *src, size_t count); + +#endif /*__KERNEL__*/ +#endif /* _BLACKFIN_STRING_H_ */ diff --git a/include/asm-blackfin/system.h b/include/asm-blackfin/system.h new file mode 100644 index 000000000000..758bac7c1e74 --- /dev/null +++ b/include/asm-blackfin/system.h @@ -0,0 +1,250 @@ +/* + * File: include/asm/system.h + * Based on: + * Author: Tony Kou (tonyko@lineo.ca) + * Copyright (c) 2002 Arcturus Networks Inc. + * (www.arcturusnetworks.com) + * Copyright (c) 2003 Metrowerks (www.metrowerks.com) + * Copyright (c) 2004 Analog Device Inc. + * Created: 25Jan2001 - Tony Kou + * Description: system.h include file + * + * Modified: 22Sep2006 - Robin Getz + * - move include blackfin.h down, so I can get access to + * irq functions in other include files. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _BLACKFIN_SYSTEM_H +#define _BLACKFIN_SYSTEM_H + +#include +#include + +/* + * Interrupt configuring macros. 
+ */ + +extern unsigned long irq_flags; + +#define local_irq_enable() do { \ + __asm__ __volatile__ ( \ + "sti %0;" \ + ::"d"(irq_flags)); \ +} while (0) + +#define local_irq_disable() do { \ + int _tmp_dummy; \ + __asm__ __volatile__ ( \ + "cli %0;" \ + :"=d" (_tmp_dummy):); \ +} while (0) + +#if defined(ANOMALY_05000244) && defined (CONFIG_BLKFIN_CACHE) +#define idle_with_irq_disabled() do { \ + __asm__ __volatile__ ( \ + "nop; nop;\n" \ + ".align 8;\n" \ + "sti %0; idle;\n" \ + ::"d" (irq_flags)); \ +} while (0) +#else +#define idle_with_irq_disabled() do { \ + __asm__ __volatile__ ( \ + ".align 8;\n" \ + "sti %0; idle;\n" \ + ::"d" (irq_flags)); \ +} while (0) +#endif + +#ifdef CONFIG_DEBUG_HWERR +#define __save_and_cli(x) do { \ + __asm__ __volatile__ ( \ + "cli %0;\n\tsti %1;" \ + :"=&d"(x): "d" (0x3F)); \ +} while (0) +#else +#define __save_and_cli(x) do { \ + __asm__ __volatile__ ( \ + "cli %0;" \ + :"=&d"(x):); \ +} while (0) +#endif + +#define local_save_flags(x) asm volatile ("cli %0;" \ + "sti %0;" \ + :"=d"(x):); + +#ifdef CONFIG_DEBUG_HWERR +#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0) +#else +#define irqs_enabled_from_flags(x) ((x) != 0x1f) +#endif + +#define local_irq_restore(x) do { \ + if (irqs_enabled_from_flags(x)) \ + local_irq_enable (); \ +} while (0) + +/* For spinlocks etc */ +#define local_irq_save(x) __save_and_cli(x) + +#define irqs_disabled() \ +({ \ + unsigned long flags; \ + local_save_flags(flags); \ + !irqs_enabled_from_flags(flags); \ +}) + +/* + * Force strict CPU ordering. + */ +#define nop() asm volatile ("nop;\n\t"::) +#define mb() asm volatile ("" : : :"memory") +#define rmb() asm volatile ("" : : :"memory") +#define wmb() asm volatile ("" : : :"memory") +#define set_rmb(var, value) do { (void) xchg(&var, value); } while (0) +#define set_mb(var, value) set_rmb(var, value) +#define set_wmb(var, value) do { var = value; wmb(); } while (0) + +#define read_barrier_depends() do { } while(0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) +#endif + +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define tas(ptr) ((void)xchg((ptr),1)) + +struct __xchg_dummy { + unsigned long a[100]; +}; +#define __xg(x) ((volatile struct __xchg_dummy *)(x)) + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, + int size) +{ + unsigned long tmp = 0; + unsigned long flags = 0; + + local_irq_save(flags); + + switch (size) { + case 1: + __asm__ __volatile__ + ("%0 = b%2 (z);\n\t" + "b%2 = %1;\n\t" + : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); + break; + case 2: + __asm__ __volatile__ + ("%0 = w%2 (z);\n\t" + "w%2 = %1;\n\t" + : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); + break; + case 4: + __asm__ __volatile__ + ("%0 = %2;\n\t" + "%2 = %1;\n\t" + : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); + break; + } + local_irq_restore(flags); + return tmp; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. 
+ */ +static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long tmp = 0; + unsigned long flags = 0; + + local_irq_save(flags); + + switch (size) { + case 1: + __asm__ __volatile__ + ("%0 = b%3 (z);\n\t" + "CC = %1 == %0;\n\t" + "IF !CC JUMP 1f;\n\t" + "b%3 = %2;\n\t" + "1:\n\t" + : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory"); + break; + case 2: + __asm__ __volatile__ + ("%0 = w%3 (z);\n\t" + "CC = %1 == %0;\n\t" + "IF !CC JUMP 1f;\n\t" + "w%3 = %2;\n\t" + "1:\n\t" + : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory"); + break; + case 4: + __asm__ __volatile__ + ("%0 = %3;\n\t" + "CC = %1 == %0;\n\t" + "IF !CC JUMP 1f;\n\t" + "%3 = %2;\n\t" + "1:\n\t" + : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory"); + break; + } + local_irq_restore(flags); + return tmp; +} + +#define cmpxchg(ptr,o,n)\ + ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ + (unsigned long)(n),sizeof(*(ptr)))) + +#define prepare_to_switch() do { } while(0) + +/* + * switch_to(n) should switch tasks to task ptr, first checking that + * ptr isn't the current task, in which case it does nothing. + */ + +#include + +asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next); + +#define switch_to(prev,next,last) \ +do { \ + memcpy (&prev->thread_info->l1_task_info, L1_SCRATCH_TASK_INFO, \ + sizeof *L1_SCRATCH_TASK_INFO); \ + memcpy (L1_SCRATCH_TASK_INFO, &next->thread_info->l1_task_info, \ + sizeof *L1_SCRATCH_TASK_INFO); \ + (last) = resume (prev, next); \ +} while (0) + +#endif /* _BLACKFIN_SYSTEM_H */ diff --git a/include/asm-blackfin/termbits.h b/include/asm-blackfin/termbits.h new file mode 100644 index 000000000000..2fd9dabdba77 --- /dev/null +++ b/include/asm-blackfin/termbits.h @@ -0,0 +1,184 @@ +#ifndef __ARCH_BFIN_TERMBITS_H__ +#define __ARCH_BFIN_TERMBITS_H__ + +#include + +typedef unsigned char cc_t; +typedef unsigned int speed_t; +typedef unsigned int tcflag_t; + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ +}; + +struct ktermios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define VINTR 0 +#define VQUIT 1 +#define VERASE 2 +#define VKILL 3 +#define VEOF 4 +#define VTIME 5 +#define VMIN 6 +#define VSWTC 7 +#define VSTART 8 +#define VSTOP 9 +#define VSUSP 10 +#define VEOL 11 +#define VREPRINT 12 +#define VDISCARD 13 +#define VWERASE 14 +#define VLNEXT 15 +#define VEOL2 16 + +/* c_iflag bits */ +#define IGNBRK 0000001 +#define BRKINT 0000002 +#define IGNPAR 0000004 +#define PARMRK 0000010 +#define INPCK 0000020 +#define ISTRIP 0000040 +#define INLCR 0000100 +#define IGNCR 0000200 +#define ICRNL 0000400 +#define IUCLC 0001000 +#define IXON 0002000 +#define IXANY 0004000 +#define IXOFF 0010000 +#define IMAXBEL 0020000 +#define IUTF8 0040000 + +/* c_oflag bits */ +#define OPOST 0000001 +#define OLCUC 0000002 +#define ONLCR 0000004 +#define OCRNL 0000010 +#define ONOCR 0000020 +#define ONLRET 0000040 +#define 
OFILL 0000100 +#define OFDEL 0000200 +#define NLDLY 0000400 +#define NL0 0000000 +#define NL1 0000400 +#define CRDLY 0003000 +#define CR0 0000000 +#define CR1 0001000 +#define CR2 0002000 +#define CR3 0003000 +#define TABDLY 0014000 +#define TAB0 0000000 +#define TAB1 0004000 +#define TAB2 0010000 +#define TAB3 0014000 +#define XTABS 0014000 +#define BSDLY 0020000 +#define BS0 0000000 +#define BS1 0020000 +#define VTDLY 0040000 +#define VT0 0000000 +#define VT1 0040000 +#define FFDLY 0100000 +#define FF0 0000000 +#define FF1 0100000 + +/* c_cflag bit meaning */ +#define CBAUD 0010017 +#define B0 0000000 /* hang up */ +#define B50 0000001 +#define B75 0000002 +#define B110 0000003 +#define B134 0000004 +#define B150 0000005 +#define B200 0000006 +#define B300 0000007 +#define B600 0000010 +#define B1200 0000011 +#define B1800 0000012 +#define B2400 0000013 +#define B4800 0000014 +#define B9600 0000015 +#define B19200 0000016 +#define B38400 0000017 +#define EXTA B19200 +#define EXTB B38400 +#define CSIZE 0000060 +#define CS5 0000000 +#define CS6 0000020 +#define CS7 0000040 +#define CS8 0000060 +#define CSTOPB 0000100 +#define CREAD 0000200 +#define PARENB 0000400 +#define PARODD 0001000 +#define HUPCL 0002000 +#define CLOCAL 0004000 +#define CBAUDEX 0010000 +#define B57600 0010001 +#define B115200 0010002 +#define B230400 0010003 +#define B460800 0010004 +#define B500000 0010005 +#define B576000 0010006 +#define B921600 0010007 +#define B1000000 0010010 +#define B1152000 0010011 +#define B1500000 0010012 +#define B2000000 0010013 +#define B2500000 0010014 +#define B3000000 0010015 +#define B3500000 0010016 +#define B4000000 0010017 +#define CIBAUD 002003600000 /* input baud rate (not used) */ +#define CMSPAR 010000000000 /* mark or space (stick) parity */ +#define CRTSCTS 020000000000 /* flow control */ + +/* c_lflag bits */ +#define ISIG 0000001 +#define ICANON 0000002 +#define XCASE 0000004 +#define ECHO 0000010 +#define ECHOE 0000020 +#define ECHOK 0000040 +#define ECHONL 0000100 +#define NOFLSH 0000200 +#define TOSTOP 0000400 +#define ECHOCTL 0001000 +#define ECHOPRT 0002000 +#define ECHOKE 0004000 +#define FLUSHO 0010000 +#define PENDIN 0040000 +#define IEXTEN 0100000 + +/* tcflow() and TCXONC use these */ +#define TCOOFF 0 +#define TCOON 1 +#define TCIOFF 2 +#define TCION 3 + +/* tcflush() and TCFLSH use these */ +#define TCIFLUSH 0 +#define TCOFLUSH 1 +#define TCIOFLUSH 2 + +/* tcsetattr uses these */ +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* __ARCH_BFIN_TERMBITS_H__ */ diff --git a/include/asm-blackfin/termios.h b/include/asm-blackfin/termios.h new file mode 100644 index 000000000000..5c41478a51c6 --- /dev/null +++ b/include/asm-blackfin/termios.h @@ -0,0 +1,106 @@ +#ifndef __BFIN_TERMIOS_H__ +#define __BFIN_TERMIOS_H__ + +#include +#include + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +/* modem lines */ +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define 
TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ + +/* line disciplines */ +#define N_TTY 0 +#define N_SLIP 1 +#define N_MOUSE 2 +#define N_PPP 3 +#define N_STRIP 4 +#define N_AX25 5 +#define N_X25 6 /* X.25 async */ +#define N_6PACK 7 +#define N_MASC 8 /* Reserved for Mobitex module */ +#define N_R3964 9 /* Reserved for Simatic R3964 module */ +#define N_PROFIBUS_FDL 10 /* Reserved for Profibus */ +#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */ +#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */ +#define N_HDLC 13 /* synchronous HDLC */ +#define N_SYNC_PPP 14 /* synchronous PPP */ +#define N_HCI 15 /* Bluetooth HCI UART */ + +#ifdef __KERNEL__ + +/* intr=^C quit=^\ erase=del kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ + unsigned short __tmp; \ + get_user(__tmp,&(termio)->x); \ + *(unsigned short *) &(termios)->x = __tmp; \ +} + +#define user_termio_to_kernel_termios(termios, termio) \ +({ \ + SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ + copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ +}) + +/* + * Translate a "termios" structure into a "termio". Ugh. + */ +#define kernel_termios_to_user_termio(termio, termios) \ +({ \ + put_user((termios)->c_iflag, &(termio)->c_iflag); \ + put_user((termios)->c_oflag, &(termio)->c_oflag); \ + put_user((termios)->c_cflag, &(termio)->c_cflag); \ + put_user((termios)->c_lflag, &(termio)->c_lflag); \ + put_user((termios)->c_line, &(termio)->c_line); \ + copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ +}) + +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) + +#endif /* __KERNEL__ */ + +#endif /* __BFIN_TERMIOS_H__ */ diff --git a/include/asm-blackfin/thread_info.h b/include/asm-blackfin/thread_info.h new file mode 100644 index 000000000000..fa8f08cf283e --- /dev/null +++ b/include/asm-blackfin/thread_info.h @@ -0,0 +1,143 @@ +/* + * File: include/asm-blackfin/thread_info.h + * Based on: include/asm-m68knommu/thread_info.h + * Author: LG Soft India + * Copyright (C) 2004-2005 Analog Devices Inc. + * Created: Tue Sep 21 2004 + * Description: Blackfin low-level thread information + * Modified: + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. 
+ * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +#include +#include +#include +#include + +#ifdef __KERNEL__ + +/* Thread Align Mask to reach to the top of the stack + * for any process + */ +#define ALIGN_PAGE_MASK 0xffffe000 + +#ifndef __ASSEMBLY__ + +typedef unsigned long mm_segment_t; + +/* + * low level task data. + * If you change this, change the TI_* offsets below to match. + */ + +struct thread_info { + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + unsigned long flags; /* low level flags */ + int cpu; /* cpu we're on */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ + mm_segment_t addr_limit; /* address limit */ + struct restart_block restart_block; + struct l1_scratch_task_info l1_task_info; +}; + +/* + * macros/functions for gaining access to the thread information structure + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = 1, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* + * Size of kernel stack for each process. This must be a power of 2... + */ +#define THREAD_SIZE 8192 /* 2 pages */ + +/* How to get the thread information struct from C */ + +static inline struct thread_info *current_thread_info(void) + __attribute__ ((__const__)); + +/* Given a task stack pointer, you can find it's task structure + * just by masking it to the 8K boundary. + */ +static inline struct thread_info *current_thread_info(void) +{ + struct thread_info *ti; + __asm__("%0 = sp;": "=&d"(ti): + ); + return (struct thread_info *)((long)ti & ~8191UL); +} + +/* thread information allocation */ +#define alloc_thread_info(tsk) ((struct thread_info *) \ + __get_free_pages(GFP_KERNEL, 1)) +#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) +#endif /* __ASSEMBLY__ */ + +/* + * Offsets in thread_info structure, used in assembly code + */ +#define TI_TASK 0 +#define TI_EXECDOMAIN 4 +#define TI_FLAGS 8 +#define TI_CPU 12 +#define TI_PREEMPT 16 + +#define PREEMPT_ACTIVE 0x4000000 + +/* + * thread information flag bit numbers + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling + TIF_NEED_RESCHED */ +#define TIF_MEMDIE 5 +#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */ +#define TIF_FREEZE 7 /* is freezing for suspend */ + +/* as above, but as bit values */ +#define _TIF_SYSCALL_TRACE (1<mm) + +#include + +#endif /* _BLACKFIN_TLB_H */ diff --git a/include/asm-blackfin/tlbflush.h b/include/asm-blackfin/tlbflush.h new file mode 100644 index 000000000000..10a07ba1e011 --- /dev/null +++ b/include/asm-blackfin/tlbflush.h @@ -0,0 +1,62 @@ +#ifndef _BLACKFIN_TLBFLUSH_H +#define _BLACKFIN_TLBFLUSH_H + +/* + * Copyright (C) 2000 Lineo, David McCullough + * Copyright (C) 2000-2002, Greg Ungerer + */ + +#include + +/* + * flush all user-space atc entries. 
+ */ +static inline void __flush_tlb(void) +{ + BUG(); +} + +static inline void __flush_tlb_one(unsigned long addr) +{ + BUG(); +} + +#define flush_tlb() __flush_tlb() + +/* + * flush all atc entries (both kernel and user-space entries). + */ +static inline void flush_tlb_all(void) +{ + BUG(); +} + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + BUG(); +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long addr) +{ + BUG(); +} + +static inline void flush_tlb_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + BUG(); +} + +static inline void flush_tlb_kernel_page(unsigned long addr) +{ + BUG(); +} + +static inline void flush_tlb_pgtables(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + BUG(); +} + +#endif diff --git a/include/asm-blackfin/topology.h b/include/asm-blackfin/topology.h new file mode 100644 index 000000000000..acee23987897 --- /dev/null +++ b/include/asm-blackfin/topology.h @@ -0,0 +1,6 @@ +#ifndef _ASM_BLACKFIN_TOPOLOGY_H +#define _ASM_BLACKFIN_TOPOLOGY_H + +#include + +#endif /* _ASM_BLACKFIN_TOPOLOGY_H */ diff --git a/include/asm-blackfin/traps.h b/include/asm-blackfin/traps.h new file mode 100644 index 000000000000..fe365b1b7ca8 --- /dev/null +++ b/include/asm-blackfin/traps.h @@ -0,0 +1,75 @@ +/* + * linux/include/asm/traps.h + * + * Copyright (C) 1993 Hamish Macdonald + * + * Lineo, Inc Jul 2001 Tony Kou + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#ifndef _BFIN_TRAPS_H +#define _BFIN_TRAPS_H + +#define VEC_SYS (0) +#define VEC_EXCPT01 (1) +#define VEC_EXCPT02 (2) +#define VEC_EXCPT03 (3) +#define VEC_EXCPT04 (4) +#define VEC_EXCPT05 (5) +#define VEC_EXCPT06 (6) +#define VEC_EXCPT07 (7) +#define VEC_EXCPT08 (8) +#define VEC_EXCPT09 (9) +#define VEC_EXCPT10 (10) +#define VEC_EXCPT11 (11) +#define VEC_EXCPT12 (12) +#define VEC_EXCPT13 (13) +#define VEC_EXCPT14 (14) +#define VEC_EXCPT15 (15) +#define VEC_STEP (16) +#define VEC_OVFLOW (17) +#define VEC_UNDEF_I (33) +#define VEC_ILGAL_I (34) +#define VEC_CPLB_VL (35) +#define VEC_MISALI_D (36) +#define VEC_UNCOV (37) +#define VEC_CPLB_M (38) +#define VEC_CPLB_MHIT (39) +#define VEC_WATCH (40) +#define VEC_ISTRU_VL (41) /*ADSP-BF535 only (MH) */ +#define VEC_MISALI_I (42) +#define VEC_CPLB_I_VL (43) +#define VEC_CPLB_I_M (44) +#define VEC_CPLB_I_MHIT (45) +#define VEC_ILL_RES (46) /* including unvalid supervisor mode insn */ + +#ifndef __ASSEMBLY__ + +#define HWC_x2 "System MMR Error\nAn error occurred due to an invalid access to an System MMR location\nPossible reason: a 32-bit register is accessed with a 16-bit instruction,\nor a 16-bit register is accessed with a 32-bit instruction.\n" +#define HWC_x3 "External Memory Addressing Error\n" +#define HWC_x12 "Performance Monitor Overflow\n" +#define HWC_x18 "RAISE 5 instruction\n Software issued a RAISE 5 instruction to invoke the Hardware\n" +#define HWC_default "Reserved\n" + +#define EXC_0x03 "Application stack overflow\n - Please increase the stack size of the application using elf2flt -s option,\n and/or reduce the stack use of the application.\n" +#define EXC_0x10 "Single step\n - When the processor is in single step mode, every instruction\n generates an exception. 
Primarily used for debugging.\n" +#define EXC_0x11 "Exception caused by a trace buffer full condition\n - The processor takes this exception when the trace\n buffer overflows (only when enabled by the Trace Unit Control register).\n" +#define EXC_0x21 "Undefined instruction\n - May be used to emulate instructions that are not defined for\n a particular processor implementation.\n" +#define EXC_0x22 "Illegal instruction combination\n - See section for multi-issue rules in the ADSP-BF53x Blackfin\n Processor Instruction Set Reference.\n" +#define EXC_0x23 "Data access CPLB protection violation\n - Attempted read or write to Supervisor resource,\n or illegal data memory access. \n" +#define EXC_0x24 "Data access misaligned address violation\n - Attempted misaligned data memory or data cache access.\n" +#define EXC_0x25 "Unrecoverable event\n - For example, an exception generated while processing a previous exception.\n" +#define EXC_0x26 "Data access CPLB miss\n - Used by the MMU to signal a CPLB miss on a data access.\n" +#define EXC_0x27 "Data access multiple CPLB hits\n - More than one CPLB entry matches data fetch address.\n" +#define EXC_0x28 "Program Sequencer Exception caused by an emulation watchpoint match\n - There is a watchpoint match, and one of the EMUSW\n bits in the Watchpoint Instruction Address Control register (WPIACTL) is set.\n" +#define EXC_0x2A "Instruction fetch misaligned address violation\n - Attempted misaligned instruction cache fetch. On a misaligned instruction fetch exception,\n the return address provided in RETX is the destination address which is misaligned, rather than the address of the offending instruction.\n" +#define EXC_0x2B "CPLB protection violation\n - Illegal instruction fetch access (memory protection violation).\n" +#define EXC_0x2C "Instruction fetch CPLB miss\n - CPLB miss on an instruction fetch.\n" +#define EXC_0x2D "Instruction fetch multiple CPLB hits\n - More than one CPLB entry matches instruction fetch address.\n" +#define EXC_0x2E "Illegal use of supervisor resource\n - Attempted to use a Supervisor register or instruction from User mode.\n Supervisor resources are registers and instructions that are reserved\n for Supervisor use: Supervisor only registers, all MMRs, and Supervisor\n only instructions.\n" + +#endif /* __ASSEMBLY__ */ +#endif /* _BFIN_TRAPS_H */ diff --git a/include/asm-blackfin/types.h b/include/asm-blackfin/types.h new file mode 100644 index 000000000000..36f8dc8c52ba --- /dev/null +++ b/include/asm-blackfin/types.h @@ -0,0 +1,66 @@ +#ifndef _BFIN_TYPES_H +#define _BFIN_TYPES_H + +/* + * This file is never included by application software unless + * explicitly requested (e.g., via linux/types.h) in which case the + * application is Linux specific so (user-) name space pollution is + * not a major issue. However, for interoperability, libraries still + * need to be careful to avoid a name clashes. + */ +#ifndef __ASSEMBLY__ + +typedef unsigned short umode_t; + +/* + * __xx is ok: it doesn't pollute the POSIX namespace. 
Use these in the + * header files exported to user space + */ + +typedef __signed__ char __s8; +typedef unsigned char __u8; + +typedef __signed__ short __s16; +typedef unsigned short __u16; + +typedef __signed__ int __s32; +typedef unsigned int __u32; + +/* HK0617 -- Changes to unsigned long temporarily */ +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +typedef __signed__ long long __s64; +typedef unsigned long long __u64; +#endif + +#endif /* __ASSEMBLY__ */ +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +#define BITS_PER_LONG 32 + +#ifndef __ASSEMBLY__ + +typedef signed char s8; +typedef unsigned char u8; + +typedef signed short s16; +typedef unsigned short u16; + +typedef signed int s32; +typedef unsigned int u32; + +typedef signed long long s64; +typedef unsigned long long u64; + +/* Dma addresses are 32-bits wide. */ + +typedef u32 dma_addr_t; +typedef u64 dma64_addr_t; + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _BFIN_TYPES_H */ diff --git a/include/asm-blackfin/uaccess.h b/include/asm-blackfin/uaccess.h new file mode 100644 index 000000000000..bfcb6794c672 --- /dev/null +++ b/include/asm-blackfin/uaccess.h @@ -0,0 +1,271 @@ +/* Changes made by Lineo Inc. May 2001 + * + * Based on: include/asm-m68knommu/uaccess.h + */ + +#ifndef __BLACKFIN_UACCESS_H +#define __BLACKFIN_UACCESS_H + +/* + * User space memory access functions + */ +#include +#include +#include + +#include +#ifndef CONFIG_NO_ACCESS_CHECK +# include +#endif + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) + +static inline void set_fs(mm_segment_t fs) +{ + current_thread_info()->addr_limit = fs; +} + +#define segment_eq(a,b) ((a) == (b)) + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +#define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size)) + +static inline int is_in_rom(unsigned long addr) +{ + /* + * What we are really trying to do is determine if addr is + * in an allocated kernel memory region. If not then assume + * we cannot free it or otherwise de-allocate it. Ideally + * we could restrict this to really being in a ROM or flash, + * but that would need to be done on a board by board basis, + * not globally. + */ + if ((addr < _ramstart) || (addr >= _ramend)) + return (1); + + /* Default case, not in ROM */ + return (0); +} + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + */ + +#ifdef CONFIG_NO_ACCESS_CHECK +static inline int _access_ok(unsigned long addr, unsigned long size) { return 1; } +#else +#ifdef CONFIG_ACCESS_OK_L1 +extern int _access_ok(unsigned long addr, unsigned long size)__attribute__((l1_text)); +#else +extern int _access_ok(unsigned long addr, unsigned long size); +#endif +#endif + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. 
+ */ + +struct exception_table_entry { + unsigned long insn, fixup; +}; + +/* Returns 0 if exception not found and fixup otherwise. */ +extern unsigned long search_exception_table(unsigned long); + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + */ + +#define put_user(x,p) \ + ({ \ + int _err = 0; \ + typeof(*(p)) _x = (x); \ + typeof(*(p)) *_p = (p); \ + if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\ + _err = -EFAULT; \ + } \ + else { \ + switch (sizeof (*(_p))) { \ + case 1: \ + __put_user_asm(_x, _p, B); \ + break; \ + case 2: \ + __put_user_asm(_x, _p, W); \ + break; \ + case 4: \ + __put_user_asm(_x, _p, ); \ + break; \ + case 8: { \ + long _xl, _xh; \ + _xl = ((long *)&_x)[0]; \ + _xh = ((long *)&_x)[1]; \ + __put_user_asm(_xl, ((long *)_p)+0, ); \ + __put_user_asm(_xh, ((long *)_p)+1, ); \ + } break; \ + default: \ + _err = __put_user_bad(); \ + break; \ + } \ + } \ + _err; \ + }) + +#define __put_user(x,p) put_user(x,p) +static inline int bad_user_access_length(void) +{ + panic("bad_user_access_length"); + return -1; +} + +#define __put_user_bad() (printk(KERN_INFO "put_user_bad %s:%d %s\n",\ + __FILE__, __LINE__, __FUNCTION__),\ + bad_user_access_length(), (-EFAULT)) + +/* + * Tell gcc we read from memory instead of writing: this is because + * we do not write to any memory gcc knows about, so there are no + * aliasing issues. + */ + +#define __ptr(x) ((unsigned long *)(x)) + +#define __put_user_asm(x,p,bhw) \ + __asm__ (#bhw"[%1] = %0;\n\t" \ + : /* no outputs */ \ + :"d" (x),"a" (__ptr(p)) : "memory") + +#define get_user(x,p) \ + ({ \ + int _err = 0; \ + typeof(*(p)) *_p = (p); \ + if (!access_ok(VERIFY_READ, _p, sizeof(*(_p)))) { \ + _err = -EFAULT; \ + } \ + else { \ + switch (sizeof(*(_p))) { \ + case 1: \ + __get_user_asm(x, _p, B,(Z)); \ + break; \ + case 2: \ + __get_user_asm(x, _p, W,(Z)); \ + break; \ + case 4: \ + __get_user_asm(x, _p, , ); \ + break; \ + case 8: { \ + unsigned long _xl, _xh; \ + __get_user_asm(_xl, ((unsigned long *)_p)+0, , ); \ + __get_user_asm(_xh, ((unsigned long *)_p)+1, , ); \ + ((unsigned long *)&x)[0] = _xl; \ + ((unsigned long *)&x)[1] = _xh; \ + } break; \ + default: \ + x = 0; \ + printk(KERN_INFO "get_user_bad: %s:%d %s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + _err = __get_user_bad(); \ + break; \ + } \ + } \ + _err; \ + }) + +#define __get_user(x,p) get_user(x,p) + +#define __get_user_bad() (bad_user_access_length(), (-EFAULT)) + +#define __get_user_asm(x,p,bhw,option) \ + { \ + unsigned long _tmp; \ + __asm__ ("%0 =" #bhw "[%1]"#option";\n\t" \ + : "=d" (_tmp) \ + : "a" (__ptr(p))); \ + (x) = (__typeof__(*(p))) _tmp; \ + } + +#define __copy_from_user(to, from, n) copy_from_user(to, from, n) +#define __copy_to_user(to, from, n) copy_to_user(to, from, n) +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + +#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n))\ + return retval; }) + +#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\ + return retval; }) + +static inline long copy_from_user(void *to, + const void __user * from, unsigned long n) +{ + if (access_ok(VERIFY_READ, from, n)) + memcpy(to, from, n); + else + return n; + return 0; +} + +static inline long copy_to_user(void *to, + const void __user * from, unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + memcpy(to, from, n); + else + return n; + return 0; +} + +/* + * Copy 
a null terminated string from userspace. + */ + +static inline long strncpy_from_user(char *dst, + const char *src, long count) +{ + char *tmp; + if (!access_ok(VERIFY_READ, src, 1)) + return -EFAULT; + strncpy(dst, src, count); + for (tmp = dst; *tmp && count > 0; tmp++, count--) ; + return (tmp - dst); +} + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 on exception, a value greater than N if too long + */ +static inline long strnlen_user(const char *src, long n) +{ + return (strlen(src) + 1); +} + +#define strlen_user(str) strnlen_user(str, 32767) + +/* + * Zero Userspace + */ + +static inline unsigned long __clear_user(void *to, unsigned long n) +{ + memset(to, 0, n); + return 0; +} + +#define clear_user(to, n) __clear_user(to, n) + +#endif /* _BLACKFIN_UACCESS_H */ diff --git a/include/asm-blackfin/ucontext.h b/include/asm-blackfin/ucontext.h new file mode 100644 index 000000000000..4a4e3856beba --- /dev/null +++ b/include/asm-blackfin/ucontext.h @@ -0,0 +1,17 @@ +/** Changes made by Tony Kou Lineo Inc. May 2001 + * + * Based on: include/m68knommu/ucontext.h + */ + +#ifndef _BLACKFIN_UCONTEXT_H +#define _BLACKFIN_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; /* the others are necessary */ + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* _BLACKFIN_UCONTEXT_H */ diff --git a/include/asm-blackfin/unaligned.h b/include/asm-blackfin/unaligned.h new file mode 100644 index 000000000000..10081dc241ef --- /dev/null +++ b/include/asm-blackfin/unaligned.h @@ -0,0 +1,6 @@ +#ifndef __BFIN_UNALIGNED_H +#define __BFIN_UNALIGNED_H + +#include + +#endif /* __BFIN_UNALIGNED_H */ diff --git a/include/asm-blackfin/unistd.h b/include/asm-blackfin/unistd.h new file mode 100644 index 000000000000..4df8790a67d5 --- /dev/null +++ b/include/asm-blackfin/unistd.h @@ -0,0 +1,382 @@ +#ifndef __ASM_BFIN_UNISTD_H +#define __ASM_BFIN_UNISTD_H +/* + * This file contains the system call numbers. 
+ */ +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_open 5 +#define __NR_close 6 + /* 7 __NR_waitpid obsolete */ +#define __NR_creat 8 +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_execve 11 +#define __NR_chdir 12 +#define __NR_time 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_chown 16 + /* 17 __NR_break obsolete */ + /* 18 __NR_oldstat obsolete */ +#define __NR_lseek 19 +#define __NR_getpid 20 +#define __NR_mount 21 + /* 22 __NR_umount obsolete */ +#define __NR_setuid 23 +#define __NR_getuid 24 +#define __NR_stime 25 +#define __NR_ptrace 26 +#define __NR_alarm 27 + /* 28 __NR_oldfstat obsolete */ +#define __NR_pause 29 + /* 30 __NR_utime obsolete */ + /* 31 __NR_stty obsolete */ + /* 32 __NR_gtty obsolete */ +#define __NR_access 33 +#define __NR_nice 34 + /* 35 __NR_ftime obsolete */ +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_rename 38 +#define __NR_mkdir 39 +#define __NR_rmdir 40 +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_times 43 + /* 44 __NR_prof obsolete */ +#define __NR_brk 45 +#define __NR_setgid 46 +#define __NR_getgid 47 + /* 48 __NR_signal obsolete */ +#define __NR_geteuid 49 +#define __NR_getegid 50 +#define __NR_acct 51 +#define __NR_umount2 52 + /* 53 __NR_lock obsolete */ +#define __NR_ioctl 54 +#define __NR_fcntl 55 + /* 56 __NR_mpx obsolete */ +#define __NR_setpgid 57 + /* 58 __NR_ulimit obsolete */ + /* 59 __NR_oldolduname obsolete */ +#define __NR_umask 60 +#define __NR_chroot 61 +#define __NR_ustat 62 +#define __NR_dup2 63 +#define __NR_getppid 64 +#define __NR_getpgrp 65 +#define __NR_setsid 66 + /* 67 __NR_sigaction obsolete */ +#define __NR_sgetmask 68 +#define __NR_ssetmask 69 +#define __NR_setreuid 70 +#define __NR_setregid 71 + /* 72 __NR_sigsuspend obsolete */ + /* 73 __NR_sigpending obsolete */ +#define __NR_sethostname 74 +#define __NR_setrlimit 75 + /* 76 __NR_old_getrlimit obsolete */ +#define __NR_getrusage 77 +#define __NR_gettimeofday 78 +#define __NR_settimeofday 79 +#define __NR_getgroups 80 +#define __NR_setgroups 81 + /* 82 __NR_select obsolete */ +#define __NR_symlink 83 + /* 84 __NR_oldlstat obsolete */ +#define __NR_readlink 85 + /* 86 __NR_uselib obsolete */ + /* 87 __NR_swapon obsolete */ +#define __NR_reboot 88 + /* 89 __NR_readdir obsolete */ + /* 90 __NR_mmap obsolete */ +#define __NR_munmap 91 +#define __NR_truncate 92 +#define __NR_ftruncate 93 +#define __NR_fchmod 94 +#define __NR_fchown 95 +#define __NR_getpriority 96 +#define __NR_setpriority 97 + /* 98 __NR_profil obsolete */ +#define __NR_statfs 99 +#define __NR_fstatfs 100 + /* 101 __NR_ioperm */ + /* 102 __NR_socketcall obsolete */ +#define __NR_syslog 103 +#define __NR_setitimer 104 +#define __NR_getitimer 105 +#define __NR_stat 106 +#define __NR_lstat 107 +#define __NR_fstat 108 + /* 109 __NR_olduname obsolete */ + /* 110 __NR_iopl obsolete */ +#define __NR_vhangup 111 + /* 112 __NR_idle obsolete */ + /* 113 __NR_vm86old */ +#define __NR_wait4 114 + /* 115 __NR_swapoff obsolete */ +#define __NR_sysinfo 116 + /* 117 __NR_ipc oboslete */ +#define __NR_fsync 118 + /* 119 __NR_sigreturn obsolete */ +#define __NR_clone 120 +#define __NR_setdomainname 121 +#define __NR_uname 122 + /* 123 __NR_modify_ldt obsolete */ +#define __NR_adjtimex 124 +#define __NR_mprotect 125 + /* 126 __NR_sigprocmask obsolete */ + /* 127 __NR_create_module obsolete */ +#define __NR_init_module 128 +#define __NR_delete_module 129 + /* 130 __NR_get_kernel_syms obsolete */ +#define __NR_quotactl 131 
+#define __NR_getpgid 132 +#define __NR_fchdir 133 +#define __NR_bdflush 134 + /* 135 was sysfs */ +#define __NR_personality 136 + /* 137 __NR_afs_syscall */ +#define __NR_setfsuid 138 +#define __NR_setfsgid 139 +#define __NR__llseek 140 +#define __NR_getdents 141 + /* 142 __NR__newselect obsolete */ +#define __NR_flock 143 + /* 144 __NR_msync obsolete */ +#define __NR_readv 145 +#define __NR_writev 146 +#define __NR_getsid 147 +#define __NR_fdatasync 148 +#define __NR__sysctl 149 + /* 150 __NR_mlock */ + /* 151 __NR_munlock */ + /* 152 __NR_mlockall */ + /* 153 __NR_munlockall */ +#define __NR_sched_setparam 154 +#define __NR_sched_getparam 155 +#define __NR_sched_setscheduler 156 +#define __NR_sched_getscheduler 157 +#define __NR_sched_yield 158 +#define __NR_sched_get_priority_max 159 +#define __NR_sched_get_priority_min 160 +#define __NR_sched_rr_get_interval 161 +#define __NR_nanosleep 162 + /* 163 __NR_mremap */ +#define __NR_setresuid 164 +#define __NR_getresuid 165 + /* 166 __NR_vm86 */ + /* 167 __NR_query_module */ + /* 168 __NR_poll */ + /* 169 __NR_nfsservctl */ +#define __NR_setresgid 170 +#define __NR_getresgid 171 +#define __NR_prctl 172 +#define __NR_rt_sigreturn 173 +#define __NR_rt_sigaction 174 +#define __NR_rt_sigprocmask 175 +#define __NR_rt_sigpending 176 +#define __NR_rt_sigtimedwait 177 +#define __NR_rt_sigqueueinfo 178 +#define __NR_rt_sigsuspend 179 +#define __NR_pread 180 +#define __NR_pwrite 181 +#define __NR_lchown 182 +#define __NR_getcwd 183 +#define __NR_capget 184 +#define __NR_capset 185 +#define __NR_sigaltstack 186 +#define __NR_sendfile 187 + /* 188 __NR_getpmsg */ + /* 189 __NR_putpmsg */ +#define __NR_vfork 190 +#define __NR_getrlimit 191 +#define __NR_mmap2 192 +#define __NR_truncate64 193 +#define __NR_ftruncate64 194 +#define __NR_stat64 195 +#define __NR_lstat64 196 +#define __NR_fstat64 197 +#define __NR_chown32 198 +#define __NR_getuid32 199 +#define __NR_getgid32 200 +#define __NR_geteuid32 201 +#define __NR_getegid32 202 +#define __NR_setreuid32 203 +#define __NR_setregid32 204 +#define __NR_getgroups32 205 +#define __NR_setgroups32 206 +#define __NR_fchown32 207 +#define __NR_setresuid32 208 +#define __NR_getresuid32 209 +#define __NR_setresgid32 210 +#define __NR_getresgid32 211 +#define __NR_lchown32 212 +#define __NR_setuid32 213 +#define __NR_setgid32 214 +#define __NR_setfsuid32 215 +#define __NR_setfsgid32 216 +#define __NR_pivot_root 217 + /* 218 __NR_mincore */ + /* 219 __NR_madvise */ +#define __NR_getdents64 220 +#define __NR_fcntl64 221 + /* 222 reserved for TUX */ + /* 223 reserved for TUX */ +#define __NR_gettid 224 + /* 225 __NR_readahead */ +#define __NR_setxattr 226 +#define __NR_lsetxattr 227 +#define __NR_fsetxattr 228 +#define __NR_getxattr 229 +#define __NR_lgetxattr 230 +#define __NR_fgetxattr 231 +#define __NR_listxattr 232 +#define __NR_llistxattr 233 +#define __NR_flistxattr 234 +#define __NR_removexattr 235 +#define __NR_lremovexattr 236 +#define __NR_fremovexattr 237 +#define __NR_tkill 238 +#define __NR_sendfile64 239 +#define __NR_futex 240 +#define __NR_sched_setaffinity 241 +#define __NR_sched_getaffinity 242 + /* 243 __NR_set_thread_area */ + /* 244 __NR_get_thread_area */ +#define __NR_io_setup 245 +#define __NR_io_destroy 246 +#define __NR_io_getevents 247 +#define __NR_io_submit 248 +#define __NR_io_cancel 249 + /* 250 __NR_alloc_hugepages */ + /* 251 __NR_free_hugepages */ +#define __NR_exit_group 252 +#define __NR_lookup_dcookie 253 +#define __NR_bfin_spinlock 254 + +#define __NR_epoll_create 255 +#define 
__NR_epoll_ctl 256 +#define __NR_epoll_wait 257 + /* 258 __NR_remap_file_pages */ +#define __NR_set_tid_address 259 +#define __NR_timer_create 260 +#define __NR_timer_settime (__NR_timer_create+1) +#define __NR_timer_gettime (__NR_timer_create+2) +#define __NR_timer_getoverrun (__NR_timer_create+3) +#define __NR_timer_delete (__NR_timer_create+4) +#define __NR_clock_settime (__NR_timer_create+5) +#define __NR_clock_gettime (__NR_timer_create+6) +#define __NR_clock_getres (__NR_timer_create+7) +#define __NR_clock_nanosleep (__NR_timer_create+8) +#define __NR_statfs64 269 +#define __NR_fstatfs64 270 +#define __NR_tgkill 271 +#define __NR_utimes 272 +#define __NR_fadvise64_64 273 + /* 274 __NR_vserver */ + /* 275 __NR_mbind */ + /* 276 __NR_get_mempolicy */ + /* 277 __NR_set_mempolicy */ +#define __NR_mq_open 278 +#define __NR_mq_unlink (__NR_mq_open+1) +#define __NR_mq_timedsend (__NR_mq_open+2) +#define __NR_mq_timedreceive (__NR_mq_open+3) +#define __NR_mq_notify (__NR_mq_open+4) +#define __NR_mq_getsetattr (__NR_mq_open+5) + /* 284 __NR_sys_kexec_load */ +#define __NR_waitid 285 +#define __NR_add_key 286 +#define __NR_request_key 287 +#define __NR_keyctl 288 +#define __NR_ioprio_set 289 +#define __NR_ioprio_get 290 +#define __NR_inotify_init 291 +#define __NR_inotify_add_watch 292 +#define __NR_inotify_rm_watch 293 + /* 294 __NR_migrate_pages */ +#define __NR_openat 295 +#define __NR_mkdirat 296 +#define __NR_mknodat 297 +#define __NR_fchownat 298 +#define __NR_futimesat 299 +#define __NR_fstatat64 300 +#define __NR_unlinkat 301 +#define __NR_renameat 302 +#define __NR_linkat 303 +#define __NR_symlinkat 304 +#define __NR_readlinkat 305 +#define __NR_fchmodat 306 +#define __NR_faccessat 307 +#define __NR_pselect6 308 +#define __NR_ppoll 309 +#define __NR_unshare 310 + +/* Blackfin private syscalls */ +#define __NR_sram_alloc 311 +#define __NR_sram_free 312 +#define __NR_dma_memcpy 313 + +/* socket syscalls */ +#define __NR_accept 314 +#define __NR_bind 315 +#define __NR_connect 316 +#define __NR_getpeername 317 +#define __NR_getsockname 318 +#define __NR_getsockopt 319 +#define __NR_listen 320 +#define __NR_recv 321 +#define __NR_recvfrom 322 +#define __NR_recvmsg 323 +#define __NR_send 324 +#define __NR_sendmsg 325 +#define __NR_sendto 326 +#define __NR_setsockopt 327 +#define __NR_shutdown 328 +#define __NR_socket 329 +#define __NR_socketpair 330 + +/* sysv ipc syscalls */ +#define __NR_semctl 331 +#define __NR_semget 332 +#define __NR_semop 333 +#define __NR_msgctl 334 +#define __NR_msgget 335 +#define __NR_msgrcv 336 +#define __NR_msgsnd 337 +#define __NR_shmat 338 +#define __NR_shmctl 339 +#define __NR_shmdt 340 +#define __NR_shmget 341 + +#define __NR_syscall 342 +#define NR_syscalls __NR_syscall + +#ifdef __KERNEL__ +#define __ARCH_WANT_IPC_PARSE_VERSION +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SGETMASK +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_RT_SIGACTION +#define __ARCH_WANT_SYS_RT_SIGSUSPEND +#endif + +/* + * "Conditional" syscalls + * + * What we want is __attribute__((weak,alias("sys_ni_syscall"))), + * but it doesn't work on all toolchains, so we just do it by hand + */ +#define cond_syscall(x) asm(".weak\t_" #x "\n\t.set\t_" #x ",_sys_ni_syscall"); + +#endif /* __ASM_BFIN_UNISTD_H */ diff --git a/include/asm-blackfin/user.h 
b/include/asm-blackfin/user.h new file mode 100644 index 000000000000..abc34629bd59 --- /dev/null +++ b/include/asm-blackfin/user.h @@ -0,0 +1,89 @@ +#ifndef _BFIN_USER_H +#define _BFIN_USER_H + +/* Changes by Tony Kou Lineo, Inc. July, 2001 + * + * Based include/asm-m68knommu/user.h + * + */ + +/* Core file format: The core file is written in such a way that gdb + can understand it and provide useful information to the user (under + linux we use the 'trad-core' bfd). There are quite a number of + obstacles to being able to view the contents of the floating point + registers, and until these are solved you will not be able to view the + contents of them. Actually, you can read in the core file and look at + the contents of the user struct to find out what the floating point + registers contain. + The actual file contents are as follows: + UPAGE: 1 page consisting of a user struct that tells gdb what is present + in the file. Directly after this is a copy of the task_struct, which + is currently not used by gdb, but it may come in useful at some point. + All of the registers are stored as part of the upage. The upage should + always be only one page. + DATA: The data area is stored. We use current->end_text to + current->brk to pick up all of the user variables, plus any memory + that may have been malloced. No attempt is made to determine if a page + is demand-zero or if a page is totally unused, we just cover the entire + range. All of the addresses are rounded in such a way that an integral + number of pages is written. + STACK: We need the stack information in order to get a meaningful + backtrace. We need to write the data from (esp) to + current->start_stack, so we round each of these off in order to be able + to write an integer number of pages. + The minimum core file size is 3 pages, or 12288 bytes. +*/ +struct user_bfinfp_struct { +}; + +/* This is the old layout of "struct pt_regs" as of Linux 1.x, and + is still the layout used by user (the new pt_regs doesn't have + all registers). */ +struct user_regs_struct { + long r0, r1, r2, r3, r4, r5, r6, r7; + long p0, p1, p2, p3, p4, p5, usp, fp; + long i0, i1, i2, i3; + long l0, l1, l2, l3; + long b0, b1, b2, b3; + long m0, m1, m2, m3; + long a0w, a1w; + long a0x, a1x; + unsigned long rets; + unsigned long astat; + unsigned long pc; + unsigned long orig_p0; +}; + +/* When the kernel dumps core, it starts by dumping the user struct - + this will be used by gdb to figure out where the data and stack segments + are within the file, and what virtual addresses to use. */ + +struct user { +/* We start with the registers, to mimic the way that "memory" is returned + from the ptrace(3,...) function. */ + + struct user_regs_struct regs; /* Where the registers are actually stored */ + +/* The rest of this junk is to help gdb figure out what goes where */ + unsigned long int u_tsize; /* Text segment size (pages). */ + unsigned long int u_dsize; /* Data segment size (pages). */ + unsigned long int u_ssize; /* Stack segment size (pages). */ + unsigned long start_code; /* Starting virtual address of text. */ + unsigned long start_stack; /* Starting virtual address of stack area. + This is actually the bottom of the stack, + the top of the stack is always found in the + esp register. */ + long int signal; /* Signal that caused the core dump. */ + int reserved; /* No longer used */ + struct user_regs_struct *u_ar0; + /* Used by gdb to help find the values for */ + /* the registers. 
*/ + unsigned long magic; /* To uniquely identify a core file */ + char u_comm[32]; /* User command that was responsible */ +}; +#define NBPG PAGE_SIZE +#define UPAGES 1 +#define HOST_TEXT_START_ADDR (u.start_code) +#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) + +#endif diff --git a/include/linux/elf-em.h b/include/linux/elf-em.h index 666e0a5f00fc..0311bad838b1 100644 --- a/include/linux/elf-em.h +++ b/include/linux/elf-em.h @@ -30,6 +30,7 @@ #define EM_V850 87 /* NEC v850 */ #define EM_M32R 88 /* Renesas M32R */ #define EM_H8_300 46 /* Renesas H8/300,300H,H8S */ +#define EM_BLACKFIN 106 /* ADI Blackfin Processor */ #define EM_FRV 0x5441 /* Fujitsu FR-V */ #define EM_AVR32 0x18ad /* Atmel AVR32 */ diff --git a/include/linux/spi/ad7877.h b/include/linux/spi/ad7877.h new file mode 100644 index 000000000000..cdbed816f25e --- /dev/null +++ b/include/linux/spi/ad7877.h @@ -0,0 +1,24 @@ +/* linux/spi/ad7877.h */ + +/* Touchscreen characteristics vary between boards and models. The + * platform_data for the device's "struct device" holds this information. + * + * It's OK if the min/max values are zero. + */ +struct ad7877_platform_data { + u16 model; /* 7877 */ + u16 vref_delay_usecs; /* 0 for external vref; etc */ + u16 x_plate_ohms; + u16 y_plate_ohms; + + u16 x_min, x_max; + u16 y_min, y_max; + u16 pressure_min, pressure_max; + + u8 stopacq_polarity; /* 1 = Active HIGH, 0 = Active LOW */ + u8 first_conversion_delay; /* 0 = 0.5us, 1 = 128us, 2 = 1ms, 3 = 8ms */ + u8 acquisition_time; /* 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */ + u8 averaging; /* 0 = 1, 1 = 4, 2 = 8, 3 = 16 */ + u8 pen_down_acc_interval; /* 0 = covert once, 1 = every 0.5 ms, + 2 = ever 1 ms, 3 = every 8 ms,*/ +}; diff --git a/include/linux/usb_sl811.h b/include/linux/usb_sl811.h new file mode 100644 index 000000000000..4f2d012d7309 --- /dev/null +++ b/include/linux/usb_sl811.h @@ -0,0 +1,26 @@ + +/* + * board initialization should put one of these into dev->platform_data + * and place the sl811hs onto platform_bus named "sl811-hcd". + */ + +struct sl811_platform_data { + unsigned can_wakeup:1; + + /* given port_power, msec/2 after power on till power good */ + u8 potpg; + + /* mA/2 power supplied on this port (max = default = 250) */ + u8 power; + + /* sl811 relies on an external source of VBUS current */ + void (*port_power)(struct device *dev, int is_on); + + /* pulse sl811 nRST (probably with a GPIO) */ + void (*reset)(struct device *dev); + + // some boards need something like these: + // int (*check_overcurrent)(struct device *dev); + // void (*clock_enable)(struct device *dev, int is_on); +}; + diff --git a/init/Kconfig b/init/Kconfig index 7ce952052947..ebe04f56d834 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -356,7 +356,7 @@ menuconfig EMBEDDED config UID16 bool "Enable 16-bit UID system calls" if EMBEDDED - depends on ARM || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && SPARC32_COMPAT) || UML || (X86_64 && IA32_EMULATION) + depends on ARM || BFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && SPARC32_COMPAT) || UML || (X86_64 && IA32_EMULATION) default y help This enables the legacy 16-bit UID syscall wrappers. 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index bdbf3fe14de6..9a287796da8e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -320,7 +320,7 @@ config DEBUG_HIGHMEM config DEBUG_BUGVERBOSE bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED depends on BUG - depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || FRV || SUPERH || GENERIC_BUG + depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || FRV || SUPERH || GENERIC_BUG || BFIN default !EMBEDDED help Say Y here to make BUG() panics output the file name and line number @@ -360,7 +360,7 @@ config DEBUG_LIST config FRAME_POINTER bool "Compile the kernel with frame pointers" - depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH) + depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH || BFIN) default y if DEBUG_INFO && UML help If you say Y here the resulting kernel image will be slightly larger diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c index b0381823e404..511023b430a8 100644 --- a/scripts/genksyms/genksyms.c +++ b/scripts/genksyms/genksyms.c @@ -516,7 +516,8 @@ int main(int argc, char **argv) genksyms_usage(); return 1; } - if ((strcmp(arch, "v850") == 0) || (strcmp(arch, "h8300") == 0)) + if ((strcmp(arch, "v850") == 0) || (strcmp(arch, "h8300") == 0) + || (strcmp(arch, "blackfin") == 0)) mod_prefix = "_"; { extern int yydebug; diff --git a/scripts/mod/mk_elfconfig.c b/scripts/mod/mk_elfconfig.c index 725d61c0fb43..db3881f14c2d 100644 --- a/scripts/mod/mk_elfconfig.c +++ b/scripts/mod/mk_elfconfig.c @@ -55,7 +55,8 @@ main(int argc, char **argv) else exit(1); - if ((strcmp(argv[1], "v850") == 0) || (strcmp(argv[1], "h8300") == 0)) + if ((strcmp(argv[1], "v850") == 0) || (strcmp(argv[1], "h8300") == 0) + || (strcmp(argv[1], "blackfin") == 0)) printf("#define MODULE_SYMBOL_PREFIX \"_\"\n"); else printf("#define MODULE_SYMBOL_PREFIX \"\"\n"); -- cgit v1.2.3 From 194de5612777a9ff4f96dae1932f77a5a89e5f0a Mon Sep 17 00:00:00 2001 From: Bryan Wu Date: Sun, 6 May 2007 14:50:30 -0700 Subject: blackfin: serial driver This patch implements the driver necessary use the Analog Devices Blackfin processor's Serial Port. Signed-off-by: Bryan Wu Cc: Alan Cox Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/serial/Kconfig | 94 ++++ drivers/serial/Makefile | 1 + drivers/serial/bfin_5xx.c | 1012 +++++++++++++++++++++++++++++++++++++++++++ include/linux/serial_core.h | 3 + 4 files changed, 1110 insertions(+) create mode 100644 drivers/serial/bfin_5xx.c (limited to 'include/linux') diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index e9d927e3b8e6..924e9bd757f0 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -508,6 +508,100 @@ config SERIAL_SA1100_CONSOLE your boot loader (lilo or loadlin) about how to pass options to the kernel at boot time.) +config SERIAL_BFIN + tristate "Blackfin serial port support" + depends on BFIN + select SERIAL_CORE + select SERIAL_BFIN_UART0 if (BF531 || BF532 || BF533 || BF561) + help + Add support for the built-in UARTs on the Blackfin. + + To compile this driver as a module, choose M here: the + module will be called bfin_5xx. 
+ +config SERIAL_BFIN_CONSOLE + bool "Console on Blackfin serial port" + depends on SERIAL_BFIN + select SERIAL_CORE_CONSOLE + +choice + prompt "UART Mode" + depends on SERIAL_BFIN + default SERIAL_BFIN_DMA + help + This driver supports the built-in serial ports of the Blackfin family + of CPUs + +config SERIAL_BFIN_DMA + bool "DMA mode" + depends on DMA_UNCACHED_1M + help + This driver works under DMA mode. If this option is selected, the + blackfin simple dma driver is also enabled. + +config SERIAL_BFIN_PIO + bool "PIO mode" + help + This driver works under PIO mode. + +endchoice + +config SERIAL_BFIN_UART0 + bool "Enable UART0" + depends on SERIAL_BFIN + help + Enable UART0 + +config BFIN_UART0_CTSRTS + bool "Enable UART0 hardware flow control" + depends on SERIAL_BFIN_UART0 + help + Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS + signal. + +config UART0_CTS_PIN + int "UART0 CTS pin" + depends on BFIN_UART0_CTSRTS + default 23 + help + The default pin is GPIO_GP7. + Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. + +config UART0_RTS_PIN + int "UART0 RTS pin" + depends on BFIN_UART0_CTSRTS + default 22 + help + The default pin is GPIO_GP6. + Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. + +config SERIAL_BFIN_UART1 + bool "Enable UART1" + depends on SERIAL_BFIN && (BF534 || BF536 || BF537) + help + Enable UART1 + +config BFIN_UART1_CTSRTS + bool "Enable UART1 hardware flow control" + depends on SERIAL_BFIN_UART1 + help + Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS + signal. + +config UART1_CTS_PIN + int "UART1 CTS pin" + depends on BFIN_UART1_CTSRTS + default -1 + help + Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. + +config UART1_RTS_PIN + int "UART1 RTS pin" + depends on BFIN_UART1_CTSRTS + default -1 + help + Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. + config SERIAL_IMX bool "IMX serial port support" depends on ARM && ARCH_IMX diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile index 6b3560c5749a..4959bcb8d1ef 100644 --- a/drivers/serial/Makefile +++ b/drivers/serial/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o obj-$(CONFIG_SERIAL_PXA) += pxa.o obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o obj-$(CONFIG_SERIAL_SA1100) += sa1100.o +obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c new file mode 100644 index 000000000000..408390f93db9 --- /dev/null +++ b/drivers/serial/bfin_5xx.c @@ -0,0 +1,1012 @@ +/* + * File: drivers/serial/bfin_5xx.c + * Based on: Based on drivers/serial/sa1100.c + * Author: Aubrey Li + * + * Created: + * Description: Driver for blackfin 5xx serial ports + * + * Rev: $Id: bfin_5xx.c,v 1.19 2006/09/24 02:33:53 aubrey Exp $ + * + * Modified: + * Copyright 2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +#define SUPPORT_SYSRQ +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_SERIAL_BFIN_DMA +#include +#include +#include +#include +#endif + +/* UART name and device definitions */ +#define BFIN_SERIAL_NAME "ttyBF" +#define BFIN_SERIAL_MAJOR 204 +#define BFIN_SERIAL_MINOR 64 + +/* + * Setup for console. Argument comes from the menuconfig + */ +#define DMA_RX_XCOUNT 512 +#define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) + +#define DMA_RX_FLUSH_JIFFIES 5 + +#ifdef CONFIG_SERIAL_BFIN_DMA +static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); +#else +static void bfin_serial_do_work(struct work_struct *work); +static void bfin_serial_tx_chars(struct bfin_serial_port *uart); +static void local_put_char(struct bfin_serial_port *uart, char ch); +#endif + +static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); + +/* + * interrupts are disabled on entry + */ +static void bfin_serial_stop_tx(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + +#ifdef CONFIG_SERIAL_BFIN_DMA + disable_dma(uart->tx_dma_channel); +#else + unsigned short ier; + + ier = UART_GET_IER(uart); + ier &= ~ETBEI; + UART_PUT_IER(uart, ier); +#endif +} + +/* + * port is locked and interrupts are disabled + */ +static void bfin_serial_start_tx(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + +#ifdef CONFIG_SERIAL_BFIN_DMA + bfin_serial_dma_tx_chars(uart); +#else + unsigned short ier; + ier = UART_GET_IER(uart); + ier |= ETBEI; + UART_PUT_IER(uart, ier); + bfin_serial_tx_chars(uart); +#endif +} + +/* + * Interrupts are enabled + */ +static void bfin_serial_stop_rx(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + unsigned short ier; + + ier = UART_GET_IER(uart); + ier &= ~ERBFI; + UART_PUT_IER(uart, ier); +} + +/* + * Set the modem control timer to fire immediately. + */ +static void bfin_serial_enable_ms(struct uart_port *port) +{ +} + +#ifdef CONFIG_SERIAL_BFIN_PIO +static void local_put_char(struct bfin_serial_port *uart, char ch) +{ + unsigned short status; + int flags = 0; + + spin_lock_irqsave(&uart->port.lock, flags); + + do { + status = UART_GET_LSR(uart); + } while (!(status & THRE)); + + UART_PUT_CHAR(uart, ch); + SSYNC(); + + spin_unlock_irqrestore(&uart->port.lock, flags); +} + +static void bfin_serial_rx_chars(struct bfin_serial_port *uart) +{ + struct tty_struct *tty = uart->port.info?uart->port.info->tty:0; + unsigned int status, ch, flg; +#ifdef BF533_FAMILY + static int in_break = 0; +#endif + + status = UART_GET_LSR(uart); + ch = UART_GET_CHAR(uart); + uart->port.icount.rx++; + +#ifdef BF533_FAMILY + /* The BF533 family of processors have a nice misbehavior where + * they continuously generate characters for a "single" break. + * We have to basically ignore this flood until the "next" valid + * character comes across. All other Blackfin families operate + * properly though. 
+ */ + if (in_break) { + if (ch != 0) { + in_break = 0; + ch = UART_GET_CHAR(uart); + } + return; + } +#endif + + if (status & BI) { +#ifdef BF533_FAMILY + in_break = 1; +#endif + uart->port.icount.brk++; + if (uart_handle_break(&uart->port)) + goto ignore_char; + flg = TTY_BREAK; + } else if (status & PE) { + flg = TTY_PARITY; + uart->port.icount.parity++; + } else if (status & OE) { + flg = TTY_OVERRUN; + uart->port.icount.overrun++; + } else if (status & FE) { + flg = TTY_FRAME; + uart->port.icount.frame++; + } else + flg = TTY_NORMAL; + + if (uart_handle_sysrq_char(&uart->port, ch)) + goto ignore_char; + if (tty) + uart_insert_char(&uart->port, status, 2, ch, flg); + +ignore_char: + if (tty) + tty_flip_buffer_push(tty); +} + +static void bfin_serial_tx_chars(struct bfin_serial_port *uart) +{ + struct circ_buf *xmit = &uart->port.info->xmit; + + if (uart->port.x_char) { + UART_PUT_CHAR(uart, uart->port.x_char); + uart->port.icount.tx++; + uart->port.x_char = 0; + return; + } + /* + * Check the modem control lines before + * transmitting anything. + */ + bfin_serial_mctrl_check(uart); + + if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { + bfin_serial_stop_tx(&uart->port); + return; + } + + local_put_char(uart, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + uart->port.icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&uart->port); + + if (uart_circ_empty(xmit)) + bfin_serial_stop_tx(&uart->port); +} + +static irqreturn_t bfin_serial_int(int irq, void *dev_id) +{ + struct bfin_serial_port *uart = dev_id; + unsigned short status; + + spin_lock(&uart->port.lock); + status = UART_GET_IIR(uart); + do { + if ((status & IIR_STATUS) == IIR_TX_READY) + bfin_serial_tx_chars(uart); + if ((status & IIR_STATUS) == IIR_RX_READY) + bfin_serial_rx_chars(uart); + status = UART_GET_IIR(uart); + } while (status & (IIR_TX_READY | IIR_RX_READY)); + spin_unlock(&uart->port.lock); + return IRQ_HANDLED; +} + +static void bfin_serial_do_work(struct work_struct *work) +{ + struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); + + bfin_serial_mctrl_check(uart); +} + +#endif + +#ifdef CONFIG_SERIAL_BFIN_DMA +static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) +{ + struct circ_buf *xmit = &uart->port.info->xmit; + unsigned short ier; + int flags = 0; + + if (!uart->tx_done) + return; + + uart->tx_done = 0; + + if (uart->port.x_char) { + UART_PUT_CHAR(uart, uart->port.x_char); + uart->port.icount.tx++; + uart->port.x_char = 0; + uart->tx_done = 1; + return; + } + /* + * Check the modem control lines before + * transmitting anything. 
+ */ + bfin_serial_mctrl_check(uart); + + if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { + bfin_serial_stop_tx(&uart->port); + uart->tx_done = 1; + return; + } + + spin_lock_irqsave(&uart->port.lock, flags); + uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); + if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) + uart->tx_count = UART_XMIT_SIZE - xmit->tail; + blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail), + (unsigned long)(xmit->buf+xmit->tail+uart->tx_count)); + set_dma_config(uart->tx_dma_channel, + set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP, + INTR_ON_BUF, + DIMENSION_LINEAR, + DATA_SIZE_8)); + set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail)); + set_dma_x_count(uart->tx_dma_channel, uart->tx_count); + set_dma_x_modify(uart->tx_dma_channel, 1); + enable_dma(uart->tx_dma_channel); + ier = UART_GET_IER(uart); + ier |= ETBEI; + UART_PUT_IER(uart, ier); + spin_unlock_irqrestore(&uart->port.lock, flags); +} + +static void bfin_serial_dma_rx_chars(struct bfin_serial_port * uart) +{ + struct tty_struct *tty = uart->port.info->tty; + int i, flg, status; + + status = UART_GET_LSR(uart); + uart->port.icount.rx += CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, UART_XMIT_SIZE);; + + if (status & BI) { + uart->port.icount.brk++; + if (uart_handle_break(&uart->port)) + goto dma_ignore_char; + flg = TTY_BREAK; + } else if (status & PE) { + flg = TTY_PARITY; + uart->port.icount.parity++; + } else if (status & OE) { + flg = TTY_OVERRUN; + uart->port.icount.overrun++; + } else if (status & FE) { + flg = TTY_FRAME; + uart->port.icount.frame++; + } else + flg = TTY_NORMAL; + + for (i = uart->rx_dma_buf.head; i < uart->rx_dma_buf.tail; i++) { + if (uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) + goto dma_ignore_char; + uart_insert_char(&uart->port, status, 2, uart->rx_dma_buf.buf[i], flg); + } +dma_ignore_char: + tty_flip_buffer_push(tty); +} + +void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) +{ + int x_pos, pos; + int flags = 0; + + bfin_serial_dma_tx_chars(uart); + + spin_lock_irqsave(&uart->port.lock, flags); + x_pos = DMA_RX_XCOUNT - get_dma_curr_xcount(uart->rx_dma_channel); + if (x_pos == DMA_RX_XCOUNT) + x_pos = 0; + + pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; + + if (pos>uart->rx_dma_buf.tail) { + uart->rx_dma_buf.tail = pos; + bfin_serial_dma_rx_chars(uart); + uart->rx_dma_buf.head = uart->rx_dma_buf.tail; + } + spin_unlock_irqrestore(&uart->port.lock, flags); + uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; + add_timer(&(uart->rx_dma_timer)); +} + +static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) +{ + struct bfin_serial_port *uart = dev_id; + struct circ_buf *xmit = &uart->port.info->xmit; + unsigned short ier; + + spin_lock(&uart->port.lock); + if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { + clear_dma_irqstat(uart->tx_dma_channel); + disable_dma(uart->tx_dma_channel); + ier = UART_GET_IER(uart); + ier &= ~ETBEI; + UART_PUT_IER(uart, ier); + xmit->tail = (xmit->tail+uart->tx_count) &(UART_XMIT_SIZE -1); + uart->port.icount.tx+=uart->tx_count; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&uart->port); + + if (uart_circ_empty(xmit)) + bfin_serial_stop_tx(&uart->port); + uart->tx_done = 1; + } + + spin_unlock(&uart->port.lock); + return IRQ_HANDLED; +} + +static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) +{ + struct bfin_serial_port *uart = dev_id; + unsigned short irqstat; + + 
uart->rx_dma_nrows++; + if (uart->rx_dma_nrows == DMA_RX_YCOUNT) { + uart->rx_dma_nrows = 0; + uart->rx_dma_buf.tail = DMA_RX_XCOUNT*DMA_RX_YCOUNT; + bfin_serial_dma_rx_chars(uart); + uart->rx_dma_buf.head = uart->rx_dma_buf.tail = 0; + } + spin_lock(&uart->port.lock); + irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); + clear_dma_irqstat(uart->rx_dma_channel); + + spin_unlock(&uart->port.lock); + return IRQ_HANDLED; +} +#endif + +/* + * Return TIOCSER_TEMT when transmitter is not busy. + */ +static unsigned int bfin_serial_tx_empty(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + unsigned short lsr; + + lsr = UART_GET_LSR(uart); + if (lsr & TEMT) + return TIOCSER_TEMT; + else + return 0; +} + +static unsigned int bfin_serial_get_mctrl(struct uart_port *port) +{ +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + if (uart->cts_pin < 0) + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; + + if (gpio_get_value(uart->cts_pin)) + return TIOCM_DSR | TIOCM_CAR; + else +#endif + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; +} + +static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + if (uart->rts_pin < 0) + return; + + if (mctrl & TIOCM_RTS) + gpio_set_value(uart->rts_pin, 0); + else + gpio_set_value(uart->rts_pin, 1); +#endif +} + +/* + * Handle any change of modem status signal since we were last called. + */ +static void bfin_serial_mctrl_check(struct bfin_serial_port *uart) +{ +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + unsigned int status; +# ifdef CONFIG_SERIAL_BFIN_DMA + struct uart_info *info = uart->port.info; + struct tty_struct *tty = info->tty; + + status = bfin_serial_get_mctrl(&uart->port); + if (!(status & TIOCM_CTS)) { + tty->hw_stopped = 1; + } else { + tty->hw_stopped = 0; + } +# else + status = bfin_serial_get_mctrl(&uart->port); + uart_handle_cts_change(&uart->port, status & TIOCM_CTS); + if (!(status & TIOCM_CTS)) + schedule_work(&uart->cts_workqueue); +# endif +#endif +} + +/* + * Interrupts are always disabled. 
+ */ +static void bfin_serial_break_ctl(struct uart_port *port, int break_state) +{ +} + +static int bfin_serial_startup(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + +#ifdef CONFIG_SERIAL_BFIN_DMA + dma_addr_t dma_handle; + + if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) { + printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n"); + return -EBUSY; + } + + if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) { + printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n"); + free_dma(uart->rx_dma_channel); + return -EBUSY; + } + + set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart); + set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart); + + uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA); + uart->rx_dma_buf.head = 0; + uart->rx_dma_buf.tail = 0; + uart->rx_dma_nrows = 0; + + set_dma_config(uart->rx_dma_channel, + set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO, + INTR_ON_ROW, DIMENSION_2D, + DATA_SIZE_8)); + set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT); + set_dma_x_modify(uart->rx_dma_channel, 1); + set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT); + set_dma_y_modify(uart->rx_dma_channel, 1); + set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf); + enable_dma(uart->rx_dma_channel); + + uart->rx_dma_timer.data = (unsigned long)(uart); + uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout; + uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; + add_timer(&(uart->rx_dma_timer)); +#else + if (request_irq + (uart->port.irq, bfin_serial_int, IRQF_DISABLED, + "BFIN_UART_RX", uart)) { + printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n"); + return -EBUSY; + } + + if (request_irq + (uart->port.irq+1, bfin_serial_int, IRQF_DISABLED, + "BFIN_UART_TX", uart)) { + printk(KERN_NOTICE "Unable to attach BlackFin UART TX interrupt\n"); + free_irq(uart->port.irq, uart); + return -EBUSY; + } +#endif + UART_PUT_IER(uart, UART_GET_IER(uart) | ERBFI); + return 0; +} + +static void bfin_serial_shutdown(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + +#ifdef CONFIG_SERIAL_BFIN_DMA + disable_dma(uart->tx_dma_channel); + free_dma(uart->tx_dma_channel); + disable_dma(uart->rx_dma_channel); + free_dma(uart->rx_dma_channel); + del_timer(&(uart->rx_dma_timer)); +#else + free_irq(uart->port.irq, uart); + free_irq(uart->port.irq+1, uart); +#endif +} + +static void +bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, + struct ktermios *old) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + unsigned long flags; + unsigned int baud, quot; + unsigned short val, ier, lsr, lcr = 0; + + switch (termios->c_cflag & CSIZE) { + case CS8: + lcr = WLS(8); + break; + case CS7: + lcr = WLS(7); + break; + case CS6: + lcr = WLS(6); + break; + case CS5: + lcr = WLS(5); + break; + default: + printk(KERN_ERR "%s: word lengh not supported\n", + __FUNCTION__); + } + + if (termios->c_cflag & CSTOPB) + lcr |= STB; + if (termios->c_cflag & PARENB) { + lcr |= PEN; + if (!(termios->c_cflag & PARODD)) + lcr |= EPS; + } + + /* These controls are not implemented for this port */ + termios->c_iflag |= INPCK | BRKINT | PARMRK; + termios->c_iflag &= ~(IGNPAR | IGNBRK); + + /* These controls are not implemented for this port */ + termios->c_iflag |= INPCK | BRKINT | PARMRK; + termios->c_iflag &= ~(IGNPAR | IGNBRK); + + 
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + quot = uart_get_divisor(port, baud); + spin_lock_irqsave(&uart->port.lock, flags); + + do { + lsr = UART_GET_LSR(uart); + } while (!(lsr & TEMT)); + + /* Disable UART */ + ier = UART_GET_IER(uart); + UART_PUT_IER(uart, 0); + + /* Set DLAB in LCR to Access DLL and DLH */ + val = UART_GET_LCR(uart); + val |= DLAB; + UART_PUT_LCR(uart, val); + SSYNC(); + + UART_PUT_DLL(uart, quot & 0xFF); + SSYNC(); + UART_PUT_DLH(uart, (quot >> 8) & 0xFF); + SSYNC(); + + /* Clear DLAB in LCR to Access THR RBR IER */ + val = UART_GET_LCR(uart); + val &= ~DLAB; + UART_PUT_LCR(uart, val); + SSYNC(); + + UART_PUT_LCR(uart, lcr); + + /* Enable UART */ + UART_PUT_IER(uart, ier); + + val = UART_GET_GCTL(uart); + val |= UCEN; + UART_PUT_GCTL(uart, val); + + spin_unlock_irqrestore(&uart->port.lock, flags); +} + +static const char *bfin_serial_type(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + + return uart->port.type == PORT_BFIN ? "BFIN-UART" : NULL; +} + +/* + * Release the memory region(s) being used by 'port'. + */ +static void bfin_serial_release_port(struct uart_port *port) +{ +} + +/* + * Request the memory region(s) being used by 'port'. + */ +static int bfin_serial_request_port(struct uart_port *port) +{ + return 0; +} + +/* + * Configure/autoconfigure the port. + */ +static void bfin_serial_config_port(struct uart_port *port, int flags) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + + if (flags & UART_CONFIG_TYPE && + bfin_serial_request_port(&uart->port) == 0) + uart->port.type = PORT_BFIN; +} + +/* + * Verify the new serial_struct (for TIOCSSERIAL). + * The only change we allow are to the flags and type, and + * even then only between PORT_BFIN and PORT_UNKNOWN + */ +static int +bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + return 0; +} + +static struct uart_ops bfin_serial_pops = { + .tx_empty = bfin_serial_tx_empty, + .set_mctrl = bfin_serial_set_mctrl, + .get_mctrl = bfin_serial_get_mctrl, + .stop_tx = bfin_serial_stop_tx, + .start_tx = bfin_serial_start_tx, + .stop_rx = bfin_serial_stop_rx, + .enable_ms = bfin_serial_enable_ms, + .break_ctl = bfin_serial_break_ctl, + .startup = bfin_serial_startup, + .shutdown = bfin_serial_shutdown, + .set_termios = bfin_serial_set_termios, + .type = bfin_serial_type, + .release_port = bfin_serial_release_port, + .request_port = bfin_serial_request_port, + .config_port = bfin_serial_config_port, + .verify_port = bfin_serial_verify_port, +}; + +static void __init bfin_serial_init_ports(void) +{ + static int first = 1; + int i; + + if (!first) + return; + first = 0; + + for (i = 0; i < nr_ports; i++) { + bfin_serial_ports[i].port.uartclk = get_sclk(); + bfin_serial_ports[i].port.ops = &bfin_serial_pops; + bfin_serial_ports[i].port.line = i; + bfin_serial_ports[i].port.iotype = UPIO_MEM; + bfin_serial_ports[i].port.membase = + (void __iomem *)bfin_serial_resource[i].uart_base_addr; + bfin_serial_ports[i].port.mapbase = + bfin_serial_resource[i].uart_base_addr; + bfin_serial_ports[i].port.irq = + bfin_serial_resource[i].uart_irq; + bfin_serial_ports[i].port.flags = UPF_BOOT_AUTOCONF; +#ifdef CONFIG_SERIAL_BFIN_DMA + bfin_serial_ports[i].tx_done = 1; + bfin_serial_ports[i].tx_count = 0; + bfin_serial_ports[i].tx_dma_channel = + bfin_serial_resource[i].uart_tx_dma_channel; + bfin_serial_ports[i].rx_dma_channel = + bfin_serial_resource[i].uart_rx_dma_channel; + 
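		/* Illustrative aside: the timer initialised just below is
		 * armed in bfin_serial_startup() to expire after
		 * DMA_RX_FLUSH_JIFFIES (5 jiffies); it calls
		 * bfin_serial_rx_dma_timeout(), which re-arms itself and
		 * pushes any partially filled DMA row to the tty layer even
		 * when no row-complete interrupt has arrived.
		 */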
init_timer(&(bfin_serial_ports[i].rx_dma_timer)); +#else + INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); +#endif +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + bfin_serial_ports[i].cts_pin = + bfin_serial_resource[i].uart_cts_pin; + bfin_serial_ports[i].rts_pin = + bfin_serial_resource[i].uart_rts_pin; +#endif + bfin_serial_hw_init(&bfin_serial_ports[i]); + + } +} + +#ifdef CONFIG_SERIAL_BFIN_CONSOLE +static void bfin_serial_console_putchar(struct uart_port *port, int ch) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + while (!(UART_GET_LSR(uart))) + barrier(); + UART_PUT_CHAR(uart, ch); + SSYNC(); +} + +/* + * Interrupts are disabled on entering + */ +static void +bfin_serial_console_write(struct console *co, const char *s, unsigned int count) +{ + struct bfin_serial_port *uart = &bfin_serial_ports[co->index]; + int flags = 0; + + spin_lock_irqsave(&uart->port.lock, flags); + uart_console_write(&uart->port, s, count, bfin_serial_console_putchar); + spin_unlock_irqrestore(&uart->port.lock, flags); + +} + +/* + * If the port was already initialised (eg, by a boot loader), + * try to determine the current setup. + */ +static void __init +bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud, + int *parity, int *bits) +{ + unsigned short status; + + status = UART_GET_IER(uart) & (ERBFI | ETBEI); + if (status == (ERBFI | ETBEI)) { + /* ok, the port was enabled */ + unsigned short lcr, val; + unsigned short dlh, dll; + + lcr = UART_GET_LCR(uart); + + *parity = 'n'; + if (lcr & PEN) { + if (lcr & EPS) + *parity = 'e'; + else + *parity = 'o'; + } + switch (lcr & 0x03) { + case 0: *bits = 5; break; + case 1: *bits = 6; break; + case 2: *bits = 7; break; + case 3: *bits = 8; break; + } + /* Set DLAB in LCR to Access DLL and DLH */ + val = UART_GET_LCR(uart); + val |= DLAB; + UART_PUT_LCR(uart, val); + + dll = UART_GET_DLL(uart); + dlh = UART_GET_DLH(uart); + + /* Clear DLAB in LCR to Access THR RBR IER */ + val = UART_GET_LCR(uart); + val &= ~DLAB; + UART_PUT_LCR(uart, val); + + *baud = get_sclk() / (16*(dll | dlh << 8)); + } + pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __FUNCTION__, *baud, *parity, *bits); +} + +static int __init +bfin_serial_console_setup(struct console *co, char *options) +{ + struct bfin_serial_port *uart; + int baud = 57600; + int bits = 8; + int parity = 'n'; +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + int flow = 'r'; +#else + int flow = 'n'; +#endif + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. 
+ */ + if (co->index == -1 || co->index >= nr_ports) + co->index = 0; + uart = &bfin_serial_ports[co->index]; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + bfin_serial_console_get_options(uart, &baud, &parity, &bits); + + return uart_set_options(&uart->port, co, baud, parity, bits, flow); +} + +static struct uart_driver bfin_serial_reg; +static struct console bfin_serial_console = { + .name = BFIN_SERIAL_NAME, + .write = bfin_serial_console_write, + .device = uart_console_device, + .setup = bfin_serial_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &bfin_serial_reg, +}; + +static int __init bfin_serial_rs_console_init(void) +{ + bfin_serial_init_ports(); + register_console(&bfin_serial_console); + return 0; +} +console_initcall(bfin_serial_rs_console_init); + +#define BFIN_SERIAL_CONSOLE &bfin_serial_console +#else +#define BFIN_SERIAL_CONSOLE NULL +#endif + +static struct uart_driver bfin_serial_reg = { + .owner = THIS_MODULE, + .driver_name = "bfin-uart", + .dev_name = BFIN_SERIAL_NAME, + .major = BFIN_SERIAL_MAJOR, + .minor = BFIN_SERIAL_MINOR, + .nr = NR_PORTS, + .cons = BFIN_SERIAL_CONSOLE, +}; + +static int bfin_serial_suspend(struct platform_device *dev, pm_message_t state) +{ + struct bfin_serial_port *uart = platform_get_drvdata(dev); + + if (uart) + uart_suspend_port(&bfin_serial_reg, &uart->port); + + return 0; +} + +static int bfin_serial_resume(struct platform_device *dev) +{ + struct bfin_serial_port *uart = platform_get_drvdata(dev); + + if (uart) + uart_resume_port(&bfin_serial_reg, &uart->port); + + return 0; +} + +static int bfin_serial_probe(struct platform_device *dev) +{ + struct resource *res = dev->resource; + int i; + + for (i = 0; i < dev->num_resources; i++, res++) + if (res->flags & IORESOURCE_MEM) + break; + + if (i < dev->num_resources) { + for (i = 0; i < nr_ports; i++, res++) { + if (bfin_serial_ports[i].port.mapbase != res->start) + continue; + bfin_serial_ports[i].port.dev = &dev->dev; + uart_add_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port); + platform_set_drvdata(dev, &bfin_serial_ports[i]); + } + } + + return 0; +} + +static int bfin_serial_remove(struct platform_device *pdev) +{ + struct bfin_serial_port *uart = platform_get_drvdata(pdev); + + +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + gpio_free(uart->cts_pin); + gpio_free(uart->rts_pin); +#endif + + platform_set_drvdata(pdev, NULL); + + if (uart) + uart_remove_one_port(&bfin_serial_reg, &uart->port); + + return 0; +} + +static struct platform_driver bfin_serial_driver = { + .probe = bfin_serial_probe, + .remove = bfin_serial_remove, + .suspend = bfin_serial_suspend, + .resume = bfin_serial_resume, + .driver = { + .name = "bfin-uart", + }, +}; + +static int __init bfin_serial_init(void) +{ + int ret; + + pr_info("Serial: Blackfin serial driver\n"); + + bfin_serial_init_ports(); + + ret = uart_register_driver(&bfin_serial_reg); + if (ret == 0) { + ret = platform_driver_register(&bfin_serial_driver); + if (ret) { + pr_debug("uart register failed\n"); + uart_unregister_driver(&bfin_serial_reg); + } + } + return ret; +} + +static void __exit bfin_serial_exit(void) +{ + platform_driver_unregister(&bfin_serial_driver); + uart_unregister_driver(&bfin_serial_reg); +} + +module_init(bfin_serial_init); +module_exit(bfin_serial_exit); + +MODULE_AUTHOR("Aubrey.Li "); +MODULE_DESCRIPTION("Blackfin generic serial port driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR); diff --git a/include/linux/serial_core.h 
b/include/linux/serial_core.h index aadbfd30763f..aa2653a159f4 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -136,6 +136,9 @@ /* Xilinx uartlite */ #define PORT_UARTLITE 74 +/* Blackfin bf5xx */ +#define PORT_BFIN 75 + #ifdef __KERNEL__ #include -- cgit v1.2.3 From 7be9823491ecbaf9700d7d3502cb4b4dd0ed868a Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 6 May 2007 14:50:42 -0700 Subject: swsusp: use inline functions for changing page flags Replace direct invocations of SetPageNosave(), SetPageNosaveFree() etc. with calls to inline functions that can be changed in subsequent patches without modifying the code calling them. Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/suspend.h | 33 +++++++++++++++++++++++++++++++++ kernel/power/snapshot.c | 48 +++++++++++++++++++++++++----------------------- mm/page_alloc.c | 6 +++--- 3 files changed, 61 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index bf99bd49f8ef..a45b9f514492 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -8,6 +8,7 @@ #include #include #include +#include /* struct pbe is used for creating lists of pages that should be restored * atomically during the resume from disk, because the page frames they have @@ -49,6 +50,38 @@ void __save_processor_state(struct saved_context *ctxt); void __restore_processor_state(struct saved_context *ctxt); unsigned long get_safe_page(gfp_t gfp_mask); +/* Page management functions for the software suspend (swsusp) */ + +static inline void swsusp_set_page_forbidden(struct page *page) +{ + SetPageNosave(page); +} + +static inline int swsusp_page_is_forbidden(struct page *page) +{ + return PageNosave(page); +} + +static inline void swsusp_unset_page_forbidden(struct page *page) +{ + ClearPageNosave(page); +} + +static inline void swsusp_set_page_free(struct page *page) +{ + SetPageNosaveFree(page); +} + +static inline int swsusp_page_is_free(struct page *page) +{ + return PageNosaveFree(page); +} + +static inline void swsusp_unset_page_free(struct page *page) +{ + ClearPageNosaveFree(page); +} + /* * XXX: We try to keep some more pages free so that I/O operations succeed * without paging. Might this be more? 
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 704c25a3ffec..48fc7a35571b 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -67,15 +67,15 @@ static void *get_image_page(gfp_t gfp_mask, int safe_needed) res = (void *)get_zeroed_page(gfp_mask); if (safe_needed) - while (res && PageNosaveFree(virt_to_page(res))) { + while (res && swsusp_page_is_free(virt_to_page(res))) { /* The page is unsafe, mark it for swsusp_free() */ - SetPageNosave(virt_to_page(res)); + swsusp_set_page_forbidden(virt_to_page(res)); allocated_unsafe_pages++; res = (void *)get_zeroed_page(gfp_mask); } if (res) { - SetPageNosave(virt_to_page(res)); - SetPageNosaveFree(virt_to_page(res)); + swsusp_set_page_forbidden(virt_to_page(res)); + swsusp_set_page_free(virt_to_page(res)); } return res; } @@ -91,8 +91,8 @@ static struct page *alloc_image_page(gfp_t gfp_mask) page = alloc_page(gfp_mask); if (page) { - SetPageNosave(page); - SetPageNosaveFree(page); + swsusp_set_page_forbidden(page); + swsusp_set_page_free(page); } return page; } @@ -110,9 +110,9 @@ static inline void free_image_page(void *addr, int clear_nosave_free) page = virt_to_page(addr); - ClearPageNosave(page); + swsusp_unset_page_forbidden(page); if (clear_nosave_free) - ClearPageNosaveFree(page); + swsusp_unset_page_free(page); __free_page(page); } @@ -615,7 +615,8 @@ static struct page *saveable_highmem_page(unsigned long pfn) BUG_ON(!PageHighMem(page)); - if (PageNosave(page) || PageReserved(page) || PageNosaveFree(page)) + if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) || + PageReserved(page)) return NULL; return page; @@ -670,7 +671,7 @@ static struct page *saveable_page(unsigned long pfn) BUG_ON(PageHighMem(page)); - if (PageNosave(page) || PageNosaveFree(page)) + if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page)) return NULL; if (PageReserved(page) && pfn_is_nosave(pfn)) @@ -810,9 +811,10 @@ void swsusp_free(void) if (pfn_valid(pfn)) { struct page *page = pfn_to_page(pfn); - if (PageNosave(page) && PageNosaveFree(page)) { - ClearPageNosave(page); - ClearPageNosaveFree(page); + if (swsusp_page_is_forbidden(page) && + swsusp_page_is_free(page)) { + swsusp_unset_page_forbidden(page); + swsusp_unset_page_free(page); __free_page(page); } } @@ -1135,7 +1137,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm) max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) if (pfn_valid(pfn)) - ClearPageNosaveFree(pfn_to_page(pfn)); + swsusp_unset_page_free(pfn_to_page(pfn)); } /* Mark pages that correspond to the "original" pfns as "unsafe" */ @@ -1144,7 +1146,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm) pfn = memory_bm_next_pfn(bm); if (likely(pfn != BM_END_OF_MAP)) { if (likely(pfn_valid(pfn))) - SetPageNosaveFree(pfn_to_page(pfn)); + swsusp_set_page_free(pfn_to_page(pfn)); else return -EFAULT; } @@ -1310,14 +1312,14 @@ prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p) struct page *page; page = alloc_page(__GFP_HIGHMEM); - if (!PageNosaveFree(page)) { + if (!swsusp_page_is_free(page)) { /* The page is "safe", set its bit the bitmap */ memory_bm_set_bit(bm, page_to_pfn(page)); safe_highmem_pages++; } /* Mark the page as allocated */ - SetPageNosave(page); - SetPageNosaveFree(page); + swsusp_set_page_forbidden(page); + swsusp_set_page_free(page); } memory_bm_position_reset(bm); safe_highmem_bm = bm; @@ -1349,7 +1351,7 @@ get_highmem_page_buffer(struct page *page, struct chain_allocator 
*ca) struct highmem_pbe *pbe; void *kaddr; - if (PageNosave(page) && PageNosaveFree(page)) { + if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) { /* We have allocated the "original" page frame and we can * use it directly to store the loaded page. */ @@ -1511,14 +1513,14 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) error = -ENOMEM; goto Free; } - if (!PageNosaveFree(virt_to_page(lp))) { + if (!swsusp_page_is_free(virt_to_page(lp))) { /* The page is "safe", add it to the list */ lp->next = safe_pages_list; safe_pages_list = lp; } /* Mark the page as allocated */ - SetPageNosave(virt_to_page(lp)); - SetPageNosaveFree(virt_to_page(lp)); + swsusp_set_page_forbidden(virt_to_page(lp)); + swsusp_set_page_free(virt_to_page(lp)); nr_pages--; } /* Free the reserved safe pages so that chain_alloc() can use them */ @@ -1547,7 +1549,7 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) if (PageHighMem(page)) return get_highmem_page_buffer(page, ca); - if (PageNosave(page) && PageNosaveFree(page)) + if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) /* We have allocated the "original" page frame and we can * use it directly to store the loaded page. */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 36d713e216e8..59164313167f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -775,8 +775,8 @@ void mark_free_pages(struct zone *zone) if (pfn_valid(pfn)) { struct page *page = pfn_to_page(pfn); - if (!PageNosave(page)) - ClearPageNosaveFree(page); + if (!swsusp_page_is_forbidden(page)) + swsusp_unset_page_free(page); } for (order = MAX_ORDER - 1; order >= 0; --order) @@ -785,7 +785,7 @@ void mark_free_pages(struct zone *zone) pfn = page_to_pfn(list_entry(curr, struct page, lru)); for (i = 0; i < (1UL << order); i++) - SetPageNosaveFree(pfn_to_page(pfn + i)); + swsusp_set_page_free(pfn_to_page(pfn + i)); } spin_unlock_irqrestore(&zone->lock, flags); -- cgit v1.2.3 From 74dfd666de861c97d47bdbd892f6d21b801d0247 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 6 May 2007 14:50:43 -0700 Subject: swsusp: do not use page flags Make swsusp use memory bitmaps instead of page flags for marking 'nosave' and free pages. This allows us to 'recycle' two page flags that can be used for other purposes. Also, the memory needed to store the bitmaps is allocated when necessary (ie. before the suspend) and freed after the resume which is more reasonable. The patch is designed to minimize the amount of changes and there are some nice simplifications and optimizations possible on top of it. I am going to implement them separately in the future. Signed-off-by: Rafael J. 
Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/e820.c | 26 ++--- include/linux/suspend.h | 58 ++++------- kernel/power/disk.c | 23 +++-- kernel/power/main.c | 2 + kernel/power/power.h | 2 + kernel/power/snapshot.c | 250 +++++++++++++++++++++++++++++++++++++++++++--- kernel/power/user.c | 4 + 7 files changed, 283 insertions(+), 82 deletions(-) (limited to 'include/linux') diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c index be8965427a93..13c6c37610e0 100644 --- a/arch/x86_64/kernel/e820.c +++ b/arch/x86_64/kernel/e820.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include @@ -256,22 +258,6 @@ void __init e820_reserve_resources(void) } } -/* Mark pages corresponding to given address range as nosave */ -static void __init -e820_mark_nosave_range(unsigned long start, unsigned long end) -{ - unsigned long pfn, max_pfn; - - if (start >= end) - return; - - printk("Nosave address range: %016lx - %016lx\n", start, end); - max_pfn = end >> PAGE_SHIFT; - for (pfn = start >> PAGE_SHIFT; pfn < max_pfn; pfn++) - if (pfn_valid(pfn)) - SetPageNosave(pfn_to_page(pfn)); -} - /* * Find the ranges of physical addresses that do not correspond to * e820 RAM areas and mark the corresponding pages as nosave for software @@ -290,13 +276,13 @@ void __init e820_mark_nosave_regions(void) struct e820entry *ei = &e820.map[i]; if (paddr < ei->addr) - e820_mark_nosave_range(paddr, - round_up(ei->addr, PAGE_SIZE)); + register_nosave_region(PFN_DOWN(paddr), + PFN_UP(ei->addr)); paddr = round_down(ei->addr + ei->size, PAGE_SIZE); if (ei->type != E820_RAM) - e820_mark_nosave_range(round_up(ei->addr, PAGE_SIZE), - paddr); + register_nosave_region(PFN_UP(ei->addr), + PFN_DOWN(paddr)); if (paddr >= (end_pfn << PAGE_SHIFT)) break; diff --git a/include/linux/suspend.h b/include/linux/suspend.h index a45b9f514492..3cc4d6394c07 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -24,63 +24,41 @@ struct pbe { extern void drain_local_pages(void); extern void mark_free_pages(struct zone *zone); -#ifdef CONFIG_PM -/* kernel/power/swsusp.c */ -extern int software_suspend(void); - -#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE) +#if defined(CONFIG_PM) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE) extern int pm_prepare_console(void); extern void pm_restore_console(void); #else static inline int pm_prepare_console(void) { return 0; } static inline void pm_restore_console(void) {} -#endif /* defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE) */ +#endif + +#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) +/* kernel/power/swsusp.c */ +extern int software_suspend(void); +/* kernel/power/snapshot.c */ +extern void __init register_nosave_region(unsigned long, unsigned long); +extern int swsusp_page_is_forbidden(struct page *); +extern void swsusp_set_page_free(struct page *); +extern void swsusp_unset_page_free(struct page *); +extern unsigned long get_safe_page(gfp_t gfp_mask); #else static inline int software_suspend(void) { printk("Warning: fake suspend called\n"); return -ENOSYS; } -#endif /* CONFIG_PM */ + +static inline void register_nosave_region(unsigned long b, unsigned long e) {} +static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } +static inline void swsusp_set_page_free(struct page *p) {} +static inline void swsusp_unset_page_free(struct page *p) {} +#endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */ void save_processor_state(void); void 
restore_processor_state(void); struct saved_context; void __save_processor_state(struct saved_context *ctxt); void __restore_processor_state(struct saved_context *ctxt); -unsigned long get_safe_page(gfp_t gfp_mask); - -/* Page management functions for the software suspend (swsusp) */ - -static inline void swsusp_set_page_forbidden(struct page *page) -{ - SetPageNosave(page); -} - -static inline int swsusp_page_is_forbidden(struct page *page) -{ - return PageNosave(page); -} - -static inline void swsusp_unset_page_forbidden(struct page *page) -{ - ClearPageNosave(page); -} - -static inline void swsusp_set_page_free(struct page *page) -{ - SetPageNosaveFree(page); -} - -static inline int swsusp_page_is_free(struct page *page) -{ - return PageNosaveFree(page); -} - -static inline void swsusp_unset_page_free(struct page *page) -{ - ClearPageNosaveFree(page); -} /* * XXX: We try to keep some more pages free so that I/O operations succeed diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 8df51c23bba4..403bc3722fee 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -139,14 +139,19 @@ int pm_suspend_disk(void) mdelay(5000); goto Thaw; } + /* Allocate memory management structures */ + error = create_basic_memory_bitmaps(); + if (error) + goto Thaw; + /* Free memory before shutting down devices. */ error = swsusp_shrink_memory(); if (error) - goto Thaw; + goto Finish; error = platform_prepare(); if (error) - goto Thaw; + goto Finish; suspend_console(); error = device_suspend(PMSG_FREEZE); @@ -181,7 +186,7 @@ int pm_suspend_disk(void) power_down(); else { swsusp_free(); - goto Thaw; + goto Finish; } } else { pr_debug("PM: Image restored successfully.\n"); @@ -194,6 +199,8 @@ int pm_suspend_disk(void) platform_finish(); device_resume(); resume_console(); + Finish: + free_basic_memory_bitmaps(); Thaw: unprepare_processes(); return error; @@ -239,13 +246,15 @@ static int software_resume(void) } pr_debug("PM: Checking swsusp image.\n"); - error = swsusp_check(); if (error) - goto Done; + goto Unlock; - pr_debug("PM: Preparing processes for restore.\n"); + error = create_basic_memory_bitmaps(); + if (error) + goto Unlock; + pr_debug("PM: Preparing processes for restore.\n"); error = prepare_processes(); if (error) { swsusp_close(); @@ -280,7 +289,9 @@ static int software_resume(void) printk(KERN_ERR "PM: Restore failed, recovering.\n"); unprepare_processes(); Done: + free_basic_memory_bitmaps(); /* For success case, the suspend path will release the lock */ + Unlock: mutex_unlock(&pm_mutex); pr_debug("PM: Resume from disk failed.\n"); return 0; diff --git a/kernel/power/main.c b/kernel/power/main.c index b21c2a56f960..5a779270cdbc 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -244,6 +244,7 @@ static int enter_state(suspend_state_t state) return error; } +#ifdef CONFIG_SOFTWARE_SUSPEND /* * This is main interface to the outside world. It needs to be * called from process context. 
@@ -252,6 +253,7 @@ int software_suspend(void) { return enter_state(PM_SUSPEND_DISK); } +#endif /** diff --git a/kernel/power/power.h b/kernel/power/power.h index 33bd94ceba32..1f8052bda0f7 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -48,6 +48,8 @@ extern sector_t swsusp_resume_block; extern asmlinkage int swsusp_arch_suspend(void); extern asmlinkage int swsusp_arch_resume(void); +extern int create_basic_memory_bitmaps(void); +extern void free_basic_memory_bitmaps(void); extern unsigned int count_data_pages(void); /** diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 48fc7a35571b..f66e4411795b 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,10 @@ #include "power.h" +static int swsusp_page_is_free(struct page *); +static void swsusp_set_page_forbidden(struct page *); +static void swsusp_unset_page_forbidden(struct page *); + /* List of PBEs needed for restoring the pages that were allocated before * the suspend and included in the suspend image, but have also been * allocated by the "resume" kernel, so their contents cannot be written @@ -224,11 +229,6 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave) * of type unsigned long each). It also contains the pfns that * correspond to the start and end of the represented memory area and * the number of bit chunks in the block. - * - * NOTE: Memory bitmaps are used for two types of operations only: - * "set a bit" and "find the next bit set". Moreover, the searching - * is always carried out after all of the "set a bit" operations - * on given bitmap. */ #define BM_END_OF_MAP (~0UL) @@ -443,15 +443,13 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) } /** - * memory_bm_set_bit - set the bit in the bitmap @bm that corresponds + * memory_bm_find_bit - find the bit in the bitmap @bm that corresponds * to given pfn. The cur_zone_bm member of @bm and the cur_block member * of @bm->cur_zone_bm are updated. - * - * If the bit cannot be set, the function returns -EINVAL . 
*/ -static int -memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) +static void memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, + void **addr, unsigned int *bit_nr) { struct zone_bitmap *zone_bm; struct bm_block *bb; @@ -463,8 +461,8 @@ memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) /* We don't assume that the zones are sorted by pfns */ while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { zone_bm = zone_bm->next; - if (unlikely(!zone_bm)) - return -EINVAL; + + BUG_ON(!zone_bm); } bm->cur.zone_bm = zone_bm; } @@ -475,13 +473,40 @@ memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) while (pfn >= bb->end_pfn) { bb = bb->next; - if (unlikely(!bb)) - return -EINVAL; + + BUG_ON(!bb); } zone_bm->cur_block = bb; pfn -= bb->start_pfn; - set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK); - return 0; + *bit_nr = pfn % BM_BITS_PER_CHUNK; + *addr = bb->data + pfn / BM_BITS_PER_CHUNK; +} + +static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) +{ + void *addr; + unsigned int bit; + + memory_bm_find_bit(bm, pfn, &addr, &bit); + set_bit(bit, addr); +} + +static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn) +{ + void *addr; + unsigned int bit; + + memory_bm_find_bit(bm, pfn, &addr, &bit); + clear_bit(bit, addr); +} + +static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) +{ + void *addr; + unsigned int bit; + + memory_bm_find_bit(bm, pfn, &addr, &bit); + return test_bit(bit, addr); } /* Two auxiliary functions for memory_bm_next_pfn */ @@ -563,6 +588,199 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit; } +/** + * This structure represents a range of page frames the contents of which + * should not be saved during the suspend. + */ + +struct nosave_region { + struct list_head list; + unsigned long start_pfn; + unsigned long end_pfn; +}; + +static LIST_HEAD(nosave_regions); + +/** + * register_nosave_region - register a range of page frames the contents + * of which should not be saved during the suspend (to be used in the early + * initialization code) + */ + +void __init +register_nosave_region(unsigned long start_pfn, unsigned long end_pfn) +{ + struct nosave_region *region; + + if (start_pfn >= end_pfn) + return; + + if (!list_empty(&nosave_regions)) { + /* Try to extend the previous region (they should be sorted) */ + region = list_entry(nosave_regions.prev, + struct nosave_region, list); + if (region->end_pfn == start_pfn) { + region->end_pfn = end_pfn; + goto Report; + } + } + /* This allocation cannot fail */ + region = alloc_bootmem_low(sizeof(struct nosave_region)); + region->start_pfn = start_pfn; + region->end_pfn = end_pfn; + list_add_tail(®ion->list, &nosave_regions); + Report: + printk("swsusp: Registered nosave memory region: %016lx - %016lx\n", + start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT); +} + +/* + * Set bits in this map correspond to the page frames the contents of which + * should not be saved during the suspend. + */ +static struct memory_bitmap *forbidden_pages_map; + +/* Set bits in this map correspond to free page frames. 
*/ +static struct memory_bitmap *free_pages_map; + +/* + * Each page frame allocated for creating the image is marked by setting the + * corresponding bits in forbidden_pages_map and free_pages_map simultaneously + */ + +void swsusp_set_page_free(struct page *page) +{ + if (free_pages_map) + memory_bm_set_bit(free_pages_map, page_to_pfn(page)); +} + +static int swsusp_page_is_free(struct page *page) +{ + return free_pages_map ? + memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0; +} + +void swsusp_unset_page_free(struct page *page) +{ + if (free_pages_map) + memory_bm_clear_bit(free_pages_map, page_to_pfn(page)); +} + +static void swsusp_set_page_forbidden(struct page *page) +{ + if (forbidden_pages_map) + memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page)); +} + +int swsusp_page_is_forbidden(struct page *page) +{ + return forbidden_pages_map ? + memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0; +} + +static void swsusp_unset_page_forbidden(struct page *page) +{ + if (forbidden_pages_map) + memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page)); +} + +/** + * mark_nosave_pages - set bits corresponding to the page frames the + * contents of which should not be saved in a given bitmap. + */ + +static void mark_nosave_pages(struct memory_bitmap *bm) +{ + struct nosave_region *region; + + if (list_empty(&nosave_regions)) + return; + + list_for_each_entry(region, &nosave_regions, list) { + unsigned long pfn; + + printk("swsusp: Marking nosave pages: %016lx - %016lx\n", + region->start_pfn << PAGE_SHIFT, + region->end_pfn << PAGE_SHIFT); + + for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) + memory_bm_set_bit(bm, pfn); + } +} + +/** + * create_basic_memory_bitmaps - create bitmaps needed for marking page + * frames that should not be saved and free page frames. The pointers + * forbidden_pages_map and free_pages_map are only modified if everything + * goes well, because we don't want the bits to be used before both bitmaps + * are set up. + */ + +int create_basic_memory_bitmaps(void) +{ + struct memory_bitmap *bm1, *bm2; + int error = 0; + + BUG_ON(forbidden_pages_map || free_pages_map); + + bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_ATOMIC); + if (!bm1) + return -ENOMEM; + + error = memory_bm_create(bm1, GFP_ATOMIC | __GFP_COLD, PG_ANY); + if (error) + goto Free_first_object; + + bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_ATOMIC); + if (!bm2) + goto Free_first_bitmap; + + error = memory_bm_create(bm2, GFP_ATOMIC | __GFP_COLD, PG_ANY); + if (error) + goto Free_second_object; + + forbidden_pages_map = bm1; + free_pages_map = bm2; + mark_nosave_pages(forbidden_pages_map); + + printk("swsusp: Basic memory bitmaps created\n"); + + return 0; + + Free_second_object: + kfree(bm2); + Free_first_bitmap: + memory_bm_free(bm1, PG_UNSAFE_CLEAR); + Free_first_object: + kfree(bm1); + return -ENOMEM; +} + +/** + * free_basic_memory_bitmaps - free memory bitmaps allocated by + * create_basic_memory_bitmaps(). The auxiliary pointers are necessary + * so that the bitmaps themselves are not referred to while they are being + * freed. 
+ */ + +void free_basic_memory_bitmaps(void) +{ + struct memory_bitmap *bm1, *bm2; + + BUG_ON(!(forbidden_pages_map && free_pages_map)); + + bm1 = forbidden_pages_map; + bm2 = free_pages_map; + forbidden_pages_map = NULL; + free_pages_map = NULL; + memory_bm_free(bm1, PG_UNSAFE_CLEAR); + kfree(bm1); + memory_bm_free(bm2, PG_UNSAFE_CLEAR); + kfree(bm2); + + printk("swsusp: Basic memory bitmaps freed\n"); +} + /** * snapshot_additional_pages - estimate the number of additional pages * be needed for setting up the suspend image data structures for given diff --git a/kernel/power/user.c b/kernel/power/user.c index 7cf6713b2325..845acd84cb23 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -52,6 +52,9 @@ static int snapshot_open(struct inode *inode, struct file *filp) if ((filp->f_flags & O_ACCMODE) == O_RDWR) return -ENOSYS; + if(create_basic_memory_bitmaps()) + return -ENOMEM; + nonseekable_open(inode, filp); data = &snapshot_state; filp->private_data = data; @@ -77,6 +80,7 @@ static int snapshot_release(struct inode *inode, struct file *filp) struct snapshot_data *data; swsusp_free(); + free_basic_memory_bitmaps(); data = filp->private_data; free_all_swap_pages(data->swap, data->bitmap); free_bitmap(data->bitmap); -- cgit v1.2.3 From 04293355ac9dbe81bd01b89ca2adb58be34c2c60 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 6 May 2007 14:50:43 -0700 Subject: mm: remove unused page flags Remove the two page flags that were previously used by swsusp and are no longer needed. Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index a3c8b60a9c3a..ae2d79f2107e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -83,13 +83,11 @@ #define PG_private 11 /* If pagecache, has fs-private data */ #define PG_writeback 12 /* Page is under writeback */ -#define PG_nosave 13 /* Used for system suspend/resume */ #define PG_compound 14 /* Part of a compound page */ #define PG_swapcache 15 /* Swap page: swp_entry_t in private */ #define PG_mappedtodisk 16 /* Has blocks allocated on-disk */ #define PG_reclaim 17 /* To be reclaimed asap */ -#define PG_nosave_free 18 /* Used for system suspend/resume */ #define PG_buddy 19 /* Page is free, on buddy lists */ /* PG_owner_priv_1 users should have descriptive aliases */ @@ -215,16 +213,6 @@ static inline void SetPageUptodate(struct page *page) ret; \ }) -#define PageNosave(page) test_bit(PG_nosave, &(page)->flags) -#define SetPageNosave(page) set_bit(PG_nosave, &(page)->flags) -#define TestSetPageNosave(page) test_and_set_bit(PG_nosave, &(page)->flags) -#define ClearPageNosave(page) clear_bit(PG_nosave, &(page)->flags) -#define TestClearPageNosave(page) test_and_clear_bit(PG_nosave, &(page)->flags) - -#define PageNosaveFree(page) test_bit(PG_nosave_free, &(page)->flags) -#define SetPageNosaveFree(page) set_bit(PG_nosave_free, &(page)->flags) -#define ClearPageNosaveFree(page) clear_bit(PG_nosave_free, &(page)->flags) - #define PageBuddy(page) test_bit(PG_buddy, &(page)->flags) #define __SetPageBuddy(page) __set_bit(PG_buddy, &(page)->flags) #define __ClearPageBuddy(page) __clear_bit(PG_buddy, &(page)->flags) -- cgit v1.2.3 From ab3bfca7abf3fd0fe41d26d839610a787aa7e587 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 6 May 2007 14:50:49 -0700 Subject: remove software_suspend() 
Remove software_suspend() and all its users since pm_suspend(PM_SUSPEND_DISK) should be equivalent and there's no point in having two interfaces for the same thing. The patch also changes the valid_state function to return 0 (false) for PM_SUSPEND_DISK when SOFTWARE_SUSPEND is not configured instead of accepting it and having the whole thing fail later. Signed-off-by: Johannes Berg Acked-by: "Rafael J. Wysocki" Cc: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/acpi/sleep/proc.c | 2 +- drivers/i2c/chips/tps65010.c | 4 +++- include/linux/init.h | 2 +- include/linux/suspend.h | 8 -------- kernel/power/main.c | 21 +++++++-------------- kernel/sys.c | 2 +- 6 files changed, 13 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index 2d912b71e543..dcde9ddd105a 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c @@ -60,7 +60,7 @@ acpi_system_write_sleep(struct file *file, state = simple_strtoul(str, NULL, 0); #ifdef CONFIG_SOFTWARE_SUSPEND if (state == 4) { - error = software_suspend(); + error = pm_suspend(PM_SUSPEND_DISK); goto Done; } #endif diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c index 214fbb1423c5..7ed92dc3d833 100644 --- a/drivers/i2c/chips/tps65010.c +++ b/drivers/i2c/chips/tps65010.c @@ -351,8 +351,10 @@ static void tps65010_interrupt(struct tps65010 *tps) #if 0 /* REVISIT: this might need its own workqueue * plus tweaks including deadlock avoidance ... + * also needs to get error handling and probably + * an #ifdef CONFIG_SOFTWARE_SUSPEND */ - software_suspend(); + pm_suspend(PM_SUSPEND_DISK); #endif poll = 1; } diff --git a/include/linux/init.h b/include/linux/init.h index 9abf120ec9f8..dbbdbd1bec77 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -233,7 +233,7 @@ void __init parse_early_param(void); #define __obsolete_setup(str) /* nothing */ #endif -/* Data marked not to be saved by software_suspend() */ +/* Data marked not to be saved by software suspend */ #define __nosavedata __attribute__ ((__section__ (".data.nosave"))) /* This means "can be init if no module support, otherwise module load diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3cc4d6394c07..3aecc96acc76 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -33,8 +33,6 @@ static inline void pm_restore_console(void) {} #endif #if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) -/* kernel/power/swsusp.c */ -extern int software_suspend(void); /* kernel/power/snapshot.c */ extern void __init register_nosave_region(unsigned long, unsigned long); extern int swsusp_page_is_forbidden(struct page *); @@ -42,12 +40,6 @@ extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); extern unsigned long get_safe_page(gfp_t gfp_mask); #else -static inline int software_suspend(void) -{ - printk("Warning: fake suspend called\n"); - return -ENOSYS; -} - static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} diff --git a/kernel/power/main.c b/kernel/power/main.c index 5a779270cdbc..f6dda685e7e2 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -184,17 +184,21 @@ static void suspend_finish(suspend_state_t state) static const char * const pm_states[PM_SUSPEND_MAX] = { [PM_SUSPEND_STANDBY] = "standby", [PM_SUSPEND_MEM] 
= "mem", -#ifdef CONFIG_SOFTWARE_SUSPEND [PM_SUSPEND_DISK] = "disk", -#endif }; static inline int valid_state(suspend_state_t state) { /* Suspend-to-disk does not really need low-level support. - * It can work with reboot if needed. */ + * It can work with shutdown/reboot if needed. If it isn't + * configured, then it cannot be supported. + */ if (state == PM_SUSPEND_DISK) +#ifdef CONFIG_SOFTWARE_SUSPEND return 1; +#else + return 0; +#endif /* all other states need lowlevel support and need to be * valid to the lowlevel implementation, no valid callback @@ -244,17 +248,6 @@ static int enter_state(suspend_state_t state) return error; } -#ifdef CONFIG_SOFTWARE_SUSPEND -/* - * This is main interface to the outside world. It needs to be - * called from process context. - */ -int software_suspend(void) -{ - return enter_state(PM_SUSPEND_DISK); -} -#endif - /** * pm_suspend - Externally visible function for suspending system. diff --git a/kernel/sys.c b/kernel/sys.c index 123b165080e6..fe1f3ab20477 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -881,7 +881,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user #ifdef CONFIG_SOFTWARE_SUSPEND case LINUX_REBOOT_CMD_SW_SUSPEND: { - int ret = software_suspend(); + int ret = pm_suspend(PM_SUSPEND_DISK); unlock_kernel(); return ret; } -- cgit v1.2.3 From 56f99bcb52d64d70078b41cc176dd8b6f5763108 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 6 May 2007 14:50:52 -0700 Subject: swsusp: free more memory Move the definition of PAGES_FOR_IO to kernel/power/power.h and introduce SPARE_PAGES representing the number of pages that should be freed by the swsusp's memory shrinker in addition to PAGES_FOR_IO so that device drivers can allocate some memory (up to 1 MB total) in their .suspend() routines without causing the suspend to fail. Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Cc: Nigel Cunningham Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/suspend.h | 6 ------ kernel/power/power.h | 12 +++++++++++- kernel/power/swsusp.c | 2 +- 3 files changed, 12 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3aecc96acc76..96868be9c211 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -52,10 +52,4 @@ struct saved_context; void __save_processor_state(struct saved_context *ctxt); void __restore_processor_state(struct saved_context *ctxt); -/* - * XXX: We try to keep some more pages free so that I/O operations succeed - * without paging. Might this be more? - */ -#define PAGES_FOR_IO 1024 - #endif /* _LINUX_SWSUSP_H */ diff --git a/kernel/power/power.h b/kernel/power/power.h index a3e47cbdaf31..34b43542785a 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -14,8 +14,18 @@ struct swsusp_info { #ifdef CONFIG_SOFTWARE_SUSPEND -extern int pm_suspend_disk(void); +/* + * Keep some memory free so that I/O operations can succeed without paging + * [Might this be more than 4 MB?] + */ +#define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT) +/* + * Keep 1 MB of memory free so that device drivers can allocate some pages in + * their .suspend() routines without breaking the suspend to disk. 
+ */ +#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) +extern int pm_suspend_disk(void); #else static inline int pm_suspend_disk(void) { diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 1109023d8358..5da304c8f1f6 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -233,7 +233,7 @@ int swsusp_shrink_memory(void) long size, highmem_size; highmem_size = count_highmem_pages(); - size = count_data_pages() + PAGES_FOR_IO; + size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES; tmp = size; size += highmem_size; for_each_zone (zone) -- cgit v1.2.3 From c5e631cf65f4d6875efcd571275436f2964a8b48 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sun, 6 May 2007 14:51:05 -0700 Subject: ARRAY_SIZE: check for type We can use a gcc extension to ensure that ARRAY_SIZE() is handed an array, not a pointer. This is especially important when code is changed from a fixed array to a pointer. I assume the Intel compiler doesn't support __builtin_types_compatible_p. [jdike@addtoit.com: uml: update UML definition of ARRAY_SIZE] Signed-off-by: Rusty Russell Signed-off-by: Jeff Dike Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/um/include/user_util.h | 15 +++++++++++++-- include/linux/compiler-gcc.h | 3 +++ include/linux/compiler-intel.h | 3 +++ include/linux/kernel.h | 3 ++- 4 files changed, 21 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h index 73ffea1829fc..b52bc4084830 100644 --- a/arch/um/include/user_util.h +++ b/arch/um/include/user_util.h @@ -8,8 +8,19 @@ #include "sysdep/ptrace.h" -/* Copied from kernel.h */ -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +/* Copied from kernel.h and compiler-gcc.h */ + +/* Force a compilation error if condition is true, but also produce a + result (of value 0 and type size_t), so the expression can be used + e.g. in a structure initializer (or where-ever else comma expressions + aren't permitted). 
*/ +#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) + +/* &a[0] degrades to a pointer: a different type from an array */ +#define __must_be_array(a) \ + BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0]))) + +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) #define CATCH_EINTR(expr) while ((errno = 0, ((expr) < 0)) && (errno == EINTR)) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 9008eabb9c3d..a9f794716a81 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -22,6 +22,9 @@ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ (typeof(ptr)) (__ptr + (off)); }) +/* &a[0] degrades to a pointer: a different type from an array */ +#define __must_be_array(a) \ + BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0]))) #define inline inline __attribute__((always_inline)) #define __inline__ __inline__ __attribute__((always_inline)) diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index a436eea43912..b769961e6f23 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -21,6 +21,9 @@ __ptr = (unsigned long) (ptr); \ (typeof(ptr)) (__ptr + (off)); }) +/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */ +#define __must_be_array(a) 0 + #endif #define uninitialized_var(x) x diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e2f41b051b12..144b615f3a89 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -35,7 +35,8 @@ extern const char linux_proc_banner[]; #define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) + #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) -- cgit v1.2.3 From e024715f5f6250179a31716a898800a48cf23b39 Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Sun, 6 May 2007 14:51:13 -0700 Subject: uml: improve checking and diagnostics of ethernet MACs Improve checking and diagnostics for broadcast and multicast Ethernet MAC addresses, and distinguish between those cases in output; also make sure the device is assigned a MAC address valid only locally to avoid collisions. 
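For reference, a minimal sketch (assumed names, not the kernel's etherdevice.h helpers verbatim) of the two address-class tests this message distinguishes: the multicast/group test looks at the I/G bit of the first octet, and the locally-administered test looks at the U/L bit.

    #include <stdbool.h>

    /* Sketch only: the I/G bit (0x01) marks group (multicast/broadcast)
     * addresses, the U/L bit (0x02) marks locally administered addresses. */
    static bool addr_is_multicast(const unsigned char *addr)
    {
            return addr[0] & 0x01;
    }

    static bool addr_is_locally_administered(const unsigned char *addr)
    {
            return addr[0] & 0x02;
    }

Setting the U/L bit (addr[0] | 0x02, as the warning text in the patch suggests) keeps a made-up MAC out of the globally unique, OUI-assigned address space.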
Signed-off-by: Paolo 'Blaisorblade' Giarrusso Signed-off-by: Jeff Dike Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/um/drivers/net_kern.c | 38 +++++++++++++++++++++++++++++--------- include/linux/etherdevice.h | 12 ++++++++++++ 2 files changed, 41 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 20963f106c34..cd466e30af67 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -283,7 +283,7 @@ void uml_net_user_timer_expire(unsigned long _conn) #endif } -static void setup_etheraddr(char *str, unsigned char *addr) +static void setup_etheraddr(char *str, unsigned char *addr, char *name) { char *end; int i; @@ -302,15 +302,32 @@ static void setup_etheraddr(char *str, unsigned char *addr) } str = end + 1; } - if(addr[0] & 1){ + if (is_multicast_ether_addr(addr)) { printk(KERN_ERR - "Attempt to assign a broadcast ethernet address to a " + "Attempt to assign a multicast ethernet address to a " "device disallowed\n"); goto random; } + if (!is_valid_ether_addr(addr)) { + printk(KERN_ERR + "Attempt to assign an invalid ethernet address to a " + "device disallowed\n"); + goto random; + } + if (!is_local_ether_addr(addr)) { + printk(KERN_WARNING + "Warning: attempt to assign a globally valid ethernet address to a " + "device\n"); + printk(KERN_WARNING "You should better enable the 2nd rightmost bit " + "in the first byte of the MAC, i.e. " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4], addr[5]); + } return; random: + printk(KERN_INFO + "Choosing a random ethernet address for device %s\n", name); random_ether_addr(addr); } @@ -331,6 +348,7 @@ static void eth_configure(int n, void *init, char *mac, struct net_device *dev; struct uml_net_private *lp; int save, err, size; + char name[sizeof(dev->name)]; size = transport->private_size + sizeof(struct uml_net_private) + sizeof(((struct uml_net_private *) 0)->user); @@ -344,7 +362,13 @@ static void eth_configure(int n, void *init, char *mac, INIT_LIST_HEAD(&device->list); device->index = n; - setup_etheraddr(mac, device->mac); + /* If this name ends up conflicting with an existing registered + * netdevice, that is OK, register_netdev{,ice}() will notice this + * and fail. + */ + snprintf(name, sizeof(name), "eth%d", n); + + setup_etheraddr(mac, device->mac, name); printk(KERN_INFO "Netdevice %d ", n); printk("(%02x:%02x:%02x:%02x:%02x:%02x) ", @@ -375,11 +399,7 @@ static void eth_configure(int n, void *init, char *mac, goto out_free_netdev; SET_NETDEV_DEV(dev,&device->pdev.dev); - /* If this name ends up conflicting with an existing registered - * netdevice, that is OK, register_netdev{,ice}() will notice this - * and fail. - */ - snprintf(dev->name, sizeof(dev->name), "eth%d", n); + strcpy(dev->name, name); device->dev = dev; /* diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 745c988359c0..071c67abed86 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -70,6 +70,18 @@ static inline int is_multicast_ether_addr(const u8 *addr) return (0x01 & addr[0]); } +/** + * is_local_ether_addr - Determine if the Ethernet address is locally-assigned + * one (IEEE 802). + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is a local address. 
+ */ +static inline int is_local_ether_addr(const u8 *addr) +{ + return (0x02 & addr[0]); +} + /** * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast * @addr: Pointer to a six-byte array containing the Ethernet address -- cgit v1.2.3 From 972edcb79ec8c8512ed5b29ca6718065328d6992 Mon Sep 17 00:00:00 2001 From: Vitaly Wool Date: Sun, 6 May 2007 18:46:57 +0400 Subject: [MTD] [NAND] platform NAND driver: update header This patch extends nand.h in order to enable platform NAND driver. Signed-off-by: Vitaly Wool Signed-off-by: Thomas Gleixner Signed-off-by: David Woodhouse --- include/linux/mtd/nand.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index cf197ad62da6..d2365c8dcacc 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -560,6 +560,7 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, * @chip_delay: R/B delay value in us * @options: Option flags, e.g. 16bit buswidth * @ecclayout: ecc layout info structure + * @part_probe_types: NULL-terminated array of probe types * @priv: hardware controller specific settings */ struct platform_nand_chip { @@ -570,6 +571,7 @@ struct platform_nand_chip { struct nand_ecclayout *ecclayout; int chip_delay; unsigned int options; + const char **part_probe_types; void *priv; }; @@ -578,6 +580,8 @@ struct platform_nand_chip { * @hwcontrol: platform specific hardware control structure * @dev_ready: platform specific function to read ready/busy pin * @select_chip: platform specific chip select function + * @cmd_ctrl: platform specific function for controlling + * ALE/CLE/nCE. Also used to write command and address * @priv: private data to transport driver specific settings * * All fields are optional and depend on the hardware driver requirements @@ -586,9 +590,21 @@ struct platform_nand_ctrl { void (*hwcontrol)(struct mtd_info *mtd, int cmd); int (*dev_ready)(struct mtd_info *mtd); void (*select_chip)(struct mtd_info *mtd, int chip); + void (*cmd_ctrl)(struct mtd_info *mtd, int dat, + unsigned int ctrl); void *priv; }; +/** + * struct platform_nand_data - container structure for platform-specific data + * @chip: chip level chip structure + * @ctrl: controller level device structure + */ +struct platform_nand_data { + struct platform_nand_chip chip; + struct platform_nand_ctrl ctrl; +}; + /* Some helpers to access the data structures */ static inline struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd) -- cgit v1.2.3 From f596575e81999c0faf01a2fd340bc96dda058f80 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 8 May 2007 01:08:00 +1000 Subject: [POWERPC] via-pmu: remove LED sleep notifier The generic LED code now makes sure that suspended devices don't blink, so we no longer need to do it ourselves. For the suspend to disk case, however, we need to make sure that we don't blink if the PMU sysdev was suspended before the LED device. 
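As a rough sketch of the guard described above (names follow the patch below; locking and the actual PMU request are elided, so this is illustrative only):

    /* Flag exported by the PMU sysdev suspend path; while it is set the
     * LED code must not queue new blink requests. */
    extern int pmu_sys_suspended;

    static void pmu_led_blink(int on)
    {
            if (pmu_sys_suspended)
                    return;         /* PMU already suspended: leave the LED alone */
            /* ...queue the blink request to the PMU here... */
    }

The point of exporting pmu_sys_suspended (with a 0 fallback when CONFIG_PM is off) is that the LED driver no longer needs its own sleep notifier just to track this state.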
Signed-off-by: Johannes Berg Acked-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- drivers/macintosh/via-pmu-led.c | 35 +++-------------------------------- drivers/macintosh/via-pmu.c | 2 +- include/linux/pmu.h | 8 ++++++++ 3 files changed, 12 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c index fc89a7047cd0..55ad95671387 100644 --- a/drivers/macintosh/via-pmu-led.c +++ b/drivers/macintosh/via-pmu-led.c @@ -31,7 +31,6 @@ static spinlock_t pmu_blink_lock; static struct adb_request pmu_blink_req; /* -1: no change, 0: request off, 1: request on */ static int requested_change; -static int sleeping; static void pmu_req_done(struct adb_request * req) { @@ -41,7 +40,7 @@ static void pmu_req_done(struct adb_request * req) /* if someone requested a change in the meantime * (we only see the last one which is fine) * then apply it now */ - if (requested_change != -1 && !sleeping) + if (requested_change != -1 && !pmu_sys_suspended) pmu_request(&pmu_blink_req, NULL, 4, 0xee, 4, 0, requested_change); /* reset requested change */ requested_change = -1; @@ -66,7 +65,7 @@ static void pmu_led_set(struct led_classdev *led_cdev, break; } /* if request isn't done, then don't do anything */ - if (pmu_blink_req.complete && !sleeping) + if (pmu_blink_req.complete && !pmu_sys_suspended) pmu_request(&pmu_blink_req, NULL, 4, 0xee, 4, 0, requested_change); out: spin_unlock_irqrestore(&pmu_blink_lock, flags); @@ -80,32 +79,6 @@ static struct led_classdev pmu_led = { .brightness_set = pmu_led_set, }; -#ifdef CONFIG_PM -static void pmu_led_sleep_call(struct pmu_sleep_notifier *self, int when) -{ - unsigned long flags; - - spin_lock_irqsave(&pmu_blink_lock, flags); - - switch (when) { - case PBOOK_SLEEP_REQUEST: - sleeping = 1; - break; - case PBOOK_WAKE: - sleeping = 0; - break; - default: - /* do nothing */ - break; - } - spin_unlock_irqrestore(&pmu_blink_lock, flags); -} - -static struct pmu_sleep_notifier via_pmu_led_sleep_notif = { - .notifier_call = pmu_led_sleep_call, -}; -#endif - static int __init via_pmu_led_init(void) { struct device_node *dt; @@ -135,9 +108,7 @@ static int __init via_pmu_led_init(void) /* no outstanding req */ pmu_blink_req.complete = 1; pmu_blink_req.done = pmu_req_done; -#ifdef CONFIG_PM - pmu_register_sleep_notifier(&via_pmu_led_sleep_notif); -#endif + return led_classdev_register(NULL, &pmu_led); } diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 0707624dfad3..157080b3b468 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -2759,7 +2759,7 @@ pmu_polled_request(struct adb_request *req) #if defined(CONFIG_PM) && defined(CONFIG_PPC32) -static int pmu_sys_suspended; +int pmu_sys_suspended; static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state) { diff --git a/include/linux/pmu.h b/include/linux/pmu.h index b0952e532ed5..37ca57392add 100644 --- a/include/linux/pmu.h +++ b/include/linux/pmu.h @@ -225,4 +225,12 @@ extern unsigned int pmu_power_flags; /* Backlight */ extern void pmu_backlight_init(void); +/* some code needs to know if the PMU was suspended for hibernation */ +#ifdef CONFIG_PM +extern int pmu_sys_suspended; +#else +/* if power management is not configured it can't be suspended */ +#define pmu_sys_suspended 0 +#endif + #endif /* __KERNEL__ */ -- cgit v1.2.3 From b46b8f19c9cd435ecac4d9d12b39d78c137ecd66 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 8 May 2007 00:22:59 -0700 Subject: Increase slab 
redzone to 64bits There are two problems with the existing redzone implementation. Firstly, it's causing misalignment of structures which contain a 64-bit integer, such as netfilter's 'struct ipt_entry' -- causing netfilter modules to fail to load because of the misalignment. (In particular, the first check in net/ipv4/netfilter/ip_tables.c::check_entry_size_and_hooks()) On ppc32 and sparc32, amongst others, __alignof__(uint64_t) == 8. With slab debugging, we use 32-bit redzones. And allocated slab objects aren't sufficiently aligned to hold a structure containing a uint64_t. By _just_ setting ARCH_KMALLOC_MINALIGN to __alignof__(u64) we'd disable redzone checks on those architectures. By using 64-bit redzones we avoid that loss of debugging, and also fix the other problem while we're at it. When investigating this, I noticed that on 64-bit platforms we're using a 32-bit value of RED_ACTIVE/RED_INACTIVE in the 64-bit memory location set aside for the redzone. Which means that the four bytes immediately before or after the allocated object at 0x00,0x00,0x00,0x00 for LE and BE machines, respectively. Which is probably not the most useful choice of poison value. One way to fix both of those at once is just to switch to 64-bit redzones in all cases. Signed-off-by: David Woodhouse Acked-by: Pekka Enberg Cc: Christoph Lameter Acked-by: David S. Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/poison.h | 4 ++-- mm/slab.c | 42 +++++++++++++++++++++++------------------- 2 files changed, 25 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/poison.h b/include/linux/poison.h index 95f518b17684..d93c300a3449 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -15,8 +15,8 @@ * Magic nums for obj red zoning. * Placed in the first word before and the first word after an obj. */ -#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */ -#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */ +#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ +#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ #define SLUB_RED_INACTIVE 0xbb #define SLUB_RED_ACTIVE 0xcc diff --git a/mm/slab.c b/mm/slab.c index 5920a412b377..1115e2065bfc 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -148,10 +148,11 @@ * Usually, the kmalloc caches are cache_line_size() aligned, except when * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned. * Some archs want to perform DMA into kmalloc caches and need a guaranteed - * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that. - * Note that this flag disables some debug features. + * alignment larger than the alignment of a 64-bit integer. + * ARCH_KMALLOC_MINALIGN allows that. + * Note that increasing this value may disable some debug features. 
*/ -#define ARCH_KMALLOC_MINALIGN 0 +#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) #endif #ifndef ARCH_SLAB_MINALIGN @@ -536,19 +537,22 @@ static int obj_size(struct kmem_cache *cachep) return cachep->obj_size; } -static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp) +static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); - return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD); + return (unsigned long long*) (objp + obj_offset(cachep) - + sizeof(unsigned long long)); } -static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp) +static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); if (cachep->flags & SLAB_STORE_USER) - return (unsigned long *)(objp + cachep->buffer_size - - 2 * BYTES_PER_WORD); - return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD); + return (unsigned long long *)(objp + cachep->buffer_size - + sizeof(unsigned long long) - + BYTES_PER_WORD); + return (unsigned long long *) (objp + cachep->buffer_size - + sizeof(unsigned long long)); } static void **dbg_userword(struct kmem_cache *cachep, void *objp) @@ -561,8 +565,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp) #define obj_offset(x) 0 #define obj_size(cachep) (cachep->buffer_size) -#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;}) -#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;}) +#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) +#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) #endif @@ -1776,7 +1780,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) char *realobj; if (cachep->flags & SLAB_RED_ZONE) { - printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", + printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n", *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); } @@ -2239,7 +2243,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, * is greater than BYTES_PER_WORD. */ if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER) - ralign = BYTES_PER_WORD; + ralign = __alignof__(unsigned long long); /* 2) arch mandated alignment */ if (ralign < ARCH_SLAB_MINALIGN) { @@ -2250,7 +2254,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, ralign = align; } /* disable debug if necessary */ - if (ralign > BYTES_PER_WORD) + if (ralign > __alignof__(unsigned long long)) flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); /* * 4) Store it. 
@@ -2271,8 +2275,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, */ if (flags & SLAB_RED_ZONE) { /* add space for red zone words */ - cachep->obj_offset += BYTES_PER_WORD; - size += 2 * BYTES_PER_WORD; + cachep->obj_offset += sizeof(unsigned long long); + size += 2 * sizeof(unsigned long long); } if (flags & SLAB_STORE_USER) { /* user store requires one word storage behind the end of @@ -2833,7 +2837,7 @@ static void kfree_debugcheck(const void *objp) static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) { - unsigned long redzone1, redzone2; + unsigned long long redzone1, redzone2; redzone1 = *dbg_redzone1(cache, obj); redzone2 = *dbg_redzone2(cache, obj); @@ -2849,7 +2853,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) else slab_error(cache, "memory outside object was overwritten"); - printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n", + printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", obj, redzone1, redzone2); } @@ -3065,7 +3069,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, slab_error(cachep, "double free, or memory outside" " object was overwritten"); printk(KERN_ERR - "%p: redzone 1:0x%lx, redzone 2:0x%lx\n", + "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); } -- cgit v1.2.3 From 0ceb331433e8aad9c5f441a965d7c681f8b9046f Mon Sep 17 00:00:00 2001 From: Dmitriy Monakhov Date: Tue, 8 May 2007 00:23:02 -0700 Subject: mm: move common segment checks to separate helper function [akpm@linux-foundation.org: cleanup] Signed-off-by: Monakhov Dmitriy Cc: Christoph Hellwig Acked-by: Anton Altaparmakov Acked-by: David Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ntfs/file.c | 21 ++---------- fs/xfs/linux-2.6/xfs_lrw.c | 22 ++----------- include/linux/fs.h | 2 ++ mm/filemap.c | 82 +++++++++++++++++++++++++--------------------- 4 files changed, 53 insertions(+), 74 deletions(-) (limited to 'include/linux') diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index dbbac5593106..621de369e6f8 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -2129,28 +2129,13 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb, struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; loff_t pos; - unsigned long seg; size_t count; /* after file limit checks */ ssize_t written, err; count = 0; - for (seg = 0; seg < nr_segs; seg++) { - const struct iovec *iv = &iov[seg]; - /* - * If any segment has a negative length, or the cumulative - * length ever wraps negative then return -EINVAL. - */ - count += iv->iov_len; - if (unlikely((ssize_t)(count|iv->iov_len) < 0)) - return -EINVAL; - if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len)) - continue; - if (!seg) - return -EFAULT; - nr_segs = seg; - count -= iv->iov_len; /* This segment is no good */ - break; - } + err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ); + if (err) + return err; pos = *ppos; vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); /* We can write back this queue in page reclaim. 
*/ diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index ff8d64eba9f8..558076dd0752 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -639,7 +639,6 @@ xfs_write( xfs_fsize_t isize, new_size; xfs_iocore_t *io; bhv_vnode_t *vp; - unsigned long seg; int iolock; int eventsent = 0; bhv_vrwlock_t locktype; @@ -652,24 +651,9 @@ xfs_write( vp = BHV_TO_VNODE(bdp); xip = XFS_BHVTOI(bdp); - for (seg = 0; seg < segs; seg++) { - const struct iovec *iv = &iovp[seg]; - - /* - * If any segment has a negative length, or the cumulative - * length ever wraps negative then return -EINVAL. - */ - ocount += iv->iov_len; - if (unlikely((ssize_t)(ocount|iv->iov_len) < 0)) - return -EINVAL; - if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len)) - continue; - if (seg == 0) - return -EFAULT; - segs = seg; - ocount -= iv->iov_len; /* This segment is no good */ - break; - } + error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ); + if (error) + return error; count = ocount; pos = *offset; diff --git a/include/linux/fs.h b/include/linux/fs.h index bc6d27cecaac..020a8426e10a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1735,6 +1735,8 @@ extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor extern void do_generic_mapping_read(struct address_space *mapping, struct file_ra_state *, struct file *, loff_t *, read_descriptor_t *, read_actor_t); +extern int generic_segment_checks(const struct iovec *iov, + unsigned long *nr_segs, size_t *count, int access_flags); /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, diff --git a/mm/filemap.c b/mm/filemap.c index 5631d6b2a62d..3e49fe13d6ac 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1110,6 +1110,45 @@ success: return size; } +/* + * Performs necessary checks before doing a write + * @iov: io vector request + * @nr_segs: number of segments in the iovec + * @count: number of bytes to write + * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE + * + * Adjust number of segments and amount of bytes to write (nr_segs should be + * properly initialized first). Returns appropriate error code that caller + * should return or zero in case that write should be allowed. + */ +int generic_segment_checks(const struct iovec *iov, + unsigned long *nr_segs, size_t *count, int access_flags) +{ + unsigned long seg; + size_t cnt = 0; + for (seg = 0; seg < *nr_segs; seg++) { + const struct iovec *iv = &iov[seg]; + + /* + * If any segment has a negative length, or the cumulative + * length ever wraps negative then return -EINVAL. + */ + cnt += iv->iov_len; + if (unlikely((ssize_t)(cnt|iv->iov_len) < 0)) + return -EINVAL; + if (access_ok(access_flags, iv->iov_base, iv->iov_len)) + continue; + if (seg == 0) + return -EFAULT; + *nr_segs = seg; + cnt -= iv->iov_len; /* This segment is no good */ + break; + } + *count = cnt; + return 0; +} +EXPORT_SYMBOL(generic_segment_checks); + /** * generic_file_aio_read - generic filesystem read routine * @iocb: kernel I/O control block @@ -1131,24 +1170,9 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, loff_t *ppos = &iocb->ki_pos; count = 0; - for (seg = 0; seg < nr_segs; seg++) { - const struct iovec *iv = &iov[seg]; - - /* - * If any segment has a negative length, or the cumulative - * length ever wraps negative then return -EINVAL. 
- */ - count += iv->iov_len; - if (unlikely((ssize_t)(count|iv->iov_len) < 0)) - return -EINVAL; - if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len)) - continue; - if (seg == 0) - return -EFAULT; - nr_segs = seg; - count -= iv->iov_len; /* This segment is no good */ - break; - } + retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); + if (retval) + return retval; /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ if (filp->f_flags & O_DIRECT) { @@ -2218,30 +2242,14 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, size_t ocount; /* original count */ size_t count; /* after file limit checks */ struct inode *inode = mapping->host; - unsigned long seg; loff_t pos; ssize_t written; ssize_t err; ocount = 0; - for (seg = 0; seg < nr_segs; seg++) { - const struct iovec *iv = &iov[seg]; - - /* - * If any segment has a negative length, or the cumulative - * length ever wraps negative then return -EINVAL. - */ - ocount += iv->iov_len; - if (unlikely((ssize_t)(ocount|iv->iov_len) < 0)) - return -EINVAL; - if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len)) - continue; - if (seg == 0) - return -EFAULT; - nr_segs = seg; - ocount -= iv->iov_len; /* This segment is no good */ - break; - } + err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); + if (err) + return err; count = ocount; pos = *ppos; -- cgit v1.2.3 From 3e9f45bd18191bbd05468b19b7064b8da8262aba Mon Sep 17 00:00:00 2001 From: Guillaume Chazarain Date: Tue, 8 May 2007 00:23:25 -0700 Subject: Factor outstanding I/O error handling Cleanup: setting an outstanding error on a mapping was open coded too many times. Factor it out in mapping_set_error(). Signed-off-by: Guillaume Chazarain Cc: Steven Whitehouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/gfs2/glops.c | 5 +---- fs/mpage.c | 16 ++-------------- include/linux/pagemap.h | 11 +++++++++++ mm/page-writeback.c | 7 +------ mm/vmscan.c | 8 ++------ 5 files changed, 17 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 39c8ae23bd9c..7b82657a9910 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -163,10 +163,7 @@ static void inode_go_sync(struct gfs2_glock *gl) if (ip) { struct address_space *mapping = ip->i_inode.i_mapping; int error = filemap_fdatawait(mapping); - if (error == -ENOSPC) - set_bit(AS_ENOSPC, &mapping->flags); - else if (error) - set_bit(AS_EIO, &mapping->flags); + mapping_set_error(mapping, error); } clear_bit(GLF_DIRTY, &gl->gl_flags); gfs2_ail_empty_gl(gl); diff --git a/fs/mpage.c b/fs/mpage.c index 692a3e578fc8..fa2441f57b41 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -663,12 +663,7 @@ confused: /* * The caller has a ref on the inode, so *mapping is stable */ - if (*ret) { - if (*ret == -ENOSPC) - set_bit(AS_ENOSPC, &mapping->flags); - else - set_bit(AS_EIO, &mapping->flags); - } + mapping_set_error(mapping, *ret); out: return bio; } @@ -776,14 +771,7 @@ retry: if (writepage) { ret = (*writepage)(page, wbc); - if (ret) { - if (ret == -ENOSPC) - set_bit(AS_ENOSPC, - &mapping->flags); - else - set_bit(AS_EIO, - &mapping->flags); - } + mapping_set_error(mapping, ret); } else { bio = __mpage_writepage(bio, page, get_block, &last_block_in_bio, &ret, wbc, diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index b4def5e083ed..8a83537d6978 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -11,6 +11,7 @@ #include #include #include +#include /* * Bits in mapping->flags. 
The lower __GFP_BITS_SHIFT bits are the page @@ -19,6 +20,16 @@ #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ +static inline void mapping_set_error(struct address_space *mapping, int error) +{ + if (error) { + if (error == -ENOSPC) + set_bit(AS_ENOSPC, &mapping->flags); + else + set_bit(AS_EIO, &mapping->flags); + } +} + static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 029dfad5a235..63cd88840eb2 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -683,12 +683,7 @@ retry: } ret = (*writepage)(page, wbc); - if (ret) { - if (ret == -ENOSPC) - set_bit(AS_ENOSPC, &mapping->flags); - else - set_bit(AS_EIO, &mapping->flags); - } + mapping_set_error(mapping, ret); if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) unlock_page(page); diff --git a/mm/vmscan.c b/mm/vmscan.c index 56651a10c366..1c8e75a1cfcd 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -284,12 +284,8 @@ static void handle_write_error(struct address_space *mapping, struct page *page, int error) { lock_page(page); - if (page_mapping(page) == mapping) { - if (error == -ENOSPC) - set_bit(AS_ENOSPC, &mapping->flags); - else - set_bit(AS_EIO, &mapping->flags); - } + if (page_mapping(page) == mapping) + mapping_set_error(mapping, error); unlock_page(page); } -- cgit v1.2.3 From ab1b6f03a10ba1f5638188ab06bf46e33ac3a160 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 8 May 2007 00:23:29 -0700 Subject: simplify the stacktrace code Simplify the stacktrace code: - remove the unused task argument to save_stack_trace, it's always current - remove the all_contexts flag, it's alwasy 0 Signed-off-by: Christoph Hellwig Cc: Paul Mundt Cc: Ralf Baechle Cc: Martin Schwidefsky Cc: "David S. 
Miller" Cc: Andi Kleen Cc: Akinobu Mita Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/mips/kernel/stacktrace.c | 22 ++++++---------------- arch/s390/kernel/stacktrace.c | 18 +++++++----------- arch/sh/kernel/stacktrace.c | 9 +-------- arch/sparc64/kernel/stacktrace.c | 20 +++++++------------- arch/x86_64/kernel/stacktrace.c | 8 +++----- include/linux/stacktrace.h | 6 ++---- kernel/lockdep.c | 3 +-- lib/fault-inject.c | 3 +-- 8 files changed, 28 insertions(+), 61 deletions(-) (limited to 'include/linux') diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c index a586aba337a7..ebd9db8d1ece 100644 --- a/arch/mips/kernel/stacktrace.c +++ b/arch/mips/kernel/stacktrace.c @@ -31,8 +31,7 @@ static void save_raw_context_stack(struct stack_trace *trace, } } -static void save_context_stack(struct stack_trace *trace, - struct task_struct *task, struct pt_regs *regs) +static void save_context_stack(struct stack_trace *trace, struct pt_regs *regs) { unsigned long sp = regs->regs[29]; #ifdef CONFIG_KALLSYMS @@ -41,7 +40,7 @@ static void save_context_stack(struct stack_trace *trace, if (raw_show_trace || !__kernel_text_address(pc)) { unsigned long stack_page = - (unsigned long)task_stack_page(task); + (unsigned long)task_stack_page(current); if (stack_page && sp >= stack_page && sp <= stack_page + THREAD_SIZE - 32) save_raw_context_stack(trace, sp); @@ -54,7 +53,7 @@ static void save_context_stack(struct stack_trace *trace, trace->entries[trace->nr_entries++] = pc; if (trace->nr_entries >= trace->max_entries) break; - pc = unwind_stack(task, &sp, pc, &ra); + pc = unwind_stack(current, &sp, pc, &ra); } while (pc); #else save_raw_context_stack(trace, sp); @@ -64,22 +63,13 @@ static void save_context_stack(struct stack_trace *trace, /* * Save stack-backtrace addresses into a stack_trace buffer. 
*/ -void save_stack_trace(struct stack_trace *trace, struct task_struct *task) +void save_stack_trace(struct stack_trace *trace) { struct pt_regs dummyregs; struct pt_regs *regs = &dummyregs; WARN_ON(trace->nr_entries || !trace->max_entries); - if (task && task != current) { - regs->regs[29] = task->thread.reg29; - regs->regs[31] = 0; - regs->cp0_epc = task->thread.reg31; - } else { - if (!task) - task = current; - prepare_frametrace(regs); - } - - save_context_stack(trace, task, regs); + prepare_frametrace(regs); + save_context_stack(trace, regs); } diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 2e5c65a1863e..515ff9011dd7 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -59,7 +59,7 @@ static unsigned long save_context_stack(struct stack_trace *trace, } } -void save_stack_trace(struct stack_trace *trace, struct task_struct *task) +void save_stack_trace(struct stack_trace *trace) { register unsigned long sp asm ("15"); unsigned long orig_sp, new_sp; @@ -69,20 +69,16 @@ void save_stack_trace(struct stack_trace *trace, struct task_struct *task) new_sp = save_context_stack(trace, &trace->skip, orig_sp, S390_lowcore.panic_stack - PAGE_SIZE, S390_lowcore.panic_stack); - if ((new_sp != orig_sp) && !trace->all_contexts) + if (new_sp != orig_sp) return; new_sp = save_context_stack(trace, &trace->skip, new_sp, S390_lowcore.async_stack - ASYNC_SIZE, S390_lowcore.async_stack); - if ((new_sp != orig_sp) && !trace->all_contexts) + if (new_sp != orig_sp) return; - if (task) - save_context_stack(trace, &trace->skip, new_sp, - (unsigned long) task_stack_page(task), - (unsigned long) task_stack_page(task) + THREAD_SIZE); - else - save_context_stack(trace, &trace->skip, new_sp, - S390_lowcore.thread_info, - S390_lowcore.thread_info + THREAD_SIZE); + + save_context_stack(trace, &trace->skip, new_sp, + S390_lowcore.thread_info, + S390_lowcore.thread_info + THREAD_SIZE); return; } diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index 0d5268afe80f..4bdd2f83535d 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -19,14 +19,7 @@ */ void save_stack_trace(struct stack_trace *trace, struct task_struct *task) { - unsigned long *sp; - - if (!task) - task = current; - if (task == current) - sp = (unsigned long *)current_stack_pointer; - else - sp = (unsigned long *)task->thread.sp; + unsigned long *sp = (unsigned long *)current_stack_pointer; while (!kstack_end(sp)) { unsigned long addr = *sp++; diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c index c4d15f2762b9..47f92a59be18 100644 --- a/arch/sparc64/kernel/stacktrace.c +++ b/arch/sparc64/kernel/stacktrace.c @@ -3,22 +3,16 @@ #include #include -void save_stack_trace(struct stack_trace *trace, struct task_struct *task) +void save_stack_trace(struct stack_trace *trace) { unsigned long ksp, fp, thread_base; - struct thread_info *tp; + struct thread_info *tp = task_thread_info(current); - if (!task) - task = current; - tp = task_thread_info(task); - if (task == current) { - flushw_all(); - __asm__ __volatile__( - "mov %%fp, %0" - : "=r" (ksp) - ); - } else - ksp = tp->ksp; + flushw_all(); + __asm__ __volatile__( + "mov %%fp, %0" + : "=r" (ksp) + ); fp = ksp + STACK_BIAS; thread_base = (unsigned long) tp; diff --git a/arch/x86_64/kernel/stacktrace.c b/arch/x86_64/kernel/stacktrace.c index 65ac2c6b34a6..cb9109113584 100644 --- a/arch/x86_64/kernel/stacktrace.c +++ b/arch/x86_64/kernel/stacktrace.c @@ -21,8 +21,7 @@ 
save_stack_warning_symbol(void *data, char *msg, unsigned long symbol) static int save_stack_stack(void *data, char *name) { - struct stack_trace *trace = (struct stack_trace *)data; - return trace->all_contexts ? 0 : -1; + return -1; } static void save_stack_address(void *data, unsigned long addr) @@ -46,11 +45,10 @@ static struct stacktrace_ops save_stack_ops = { /* * Save stack-backtrace addresses into a stack_trace buffer. */ -void save_stack_trace(struct stack_trace *trace, struct task_struct *task) +void save_stack_trace(struct stack_trace *trace) { - dump_trace(task, NULL, NULL, &save_stack_ops, trace); + dump_trace(current, NULL, NULL, &save_stack_ops, trace); if (trace->nr_entries < trace->max_entries) trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL(save_stack_trace); - diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 50e2b01e517c..1d2b084c0185 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -6,15 +6,13 @@ struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; int skip; /* input argument: How many entries to skip */ - int all_contexts; /* input argument: if true do than one stack */ }; -extern void save_stack_trace(struct stack_trace *trace, - struct task_struct *task); +extern void save_stack_trace(struct stack_trace *trace); extern void print_stack_trace(struct stack_trace *trace, int spaces); #else -# define save_stack_trace(trace, task) do { } while (0) +# define save_stack_trace(trace) do { } while (0) # define print_stack_trace(trace) do { } while (0) #endif diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 7065a687ac54..c1e308a080b8 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -257,9 +257,8 @@ static int save_trace(struct stack_trace *trace) trace->entries = stack_trace + nr_stack_trace_entries; trace->skip = 3; - trace->all_contexts = 0; - save_stack_trace(trace, NULL); + save_stack_trace(trace); trace->max_entries = trace->nr_entries; diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 0fabd12c39d7..b18fc2ff9ffe 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -72,9 +72,8 @@ static bool fail_stacktrace(struct fault_attr *attr) trace.entries = entries; trace.max_entries = depth; trace.skip = 1; - trace.all_contexts = 0; - save_stack_trace(&trace, NULL); + save_stack_trace(&trace); for (n = 0; n < trace.nr_entries; n++) { if (attr->reject_start <= entries[n] && entries[n] < attr->reject_end) -- cgit v1.2.3 From 97dc32cdb1b53832801159d5f634b41aad9d0a23 Mon Sep 17 00:00:00 2001 From: William Cohen Date: Tue, 8 May 2007 00:23:41 -0700 Subject: reduce size of task_struct on 64-bit machines This past week I was playing around with that pahole tool (http://oops.ghostprotocols.net:81/acme/dwarves/) and looking at the size of various struct in the kernel. I was surprised by the size of the task_struct on x86_64, approaching 4K. I looked through the fields in task_struct and found that a number of them were declared as "unsigned long" rather than "unsigned int" despite them appearing okay as 32-bit sized fields. On x86_64 "unsigned long" ends up being 8 bytes in size and forces 8 byte alignment. Is there a reason there a reason they are "unsigned long"? The patch below drops the size of the struct from 3808 bytes (60 64-byte cachelines) to 3760 bytes (59 64-byte cachelines). 
A couple other fields in the task struct take a signficant amount of space: struct thread_struct thread; 688 struct held_lock held_locks[30]; 1680 CONFIG_LOCKDEP is turned on in the .config [akpm@linux-foundation.org: fix printk warnings] Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/array.c | 4 ++-- include/linux/sched.h | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/array.c b/fs/proc/array.c index 07c9cdbcdcac..74f30e0c0381 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -410,9 +410,9 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) /* convert nsec -> ticks */ start_time = nsec_to_clock_t(start_time); - res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ + res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %u %lu \ %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ -%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu %llu\n", +%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu\n", task->pid, tcomm, state, diff --git a/include/linux/sched.h b/include/linux/sched.h index a1707583de49..d9acbbb39f96 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -801,8 +801,8 @@ struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ struct thread_info *thread_info; atomic_t usage; - unsigned long flags; /* per process flags, defined below */ - unsigned long ptrace; + unsigned int flags; /* per process flags, defined below */ + unsigned int ptrace; int lock_depth; /* BKL lock depth */ @@ -825,7 +825,7 @@ struct task_struct { unsigned long long sched_time; /* sched_clock time spent running */ enum sleep_type sleep_type; - unsigned long policy; + unsigned int policy; cpumask_t cpus_allowed; unsigned int time_slice, first_time_slice; @@ -845,11 +845,11 @@ struct task_struct { /* task state */ struct linux_binfmt *binfmt; - long exit_state; + int exit_state; int exit_code, exit_signal; int pdeath_signal; /* The signal sent when the parent dies */ /* ??? */ - unsigned long personality; + unsigned int personality; unsigned did_exec:1; pid_t pid; pid_t tgid; @@ -881,7 +881,7 @@ struct task_struct { int __user *set_child_tid; /* CLONE_CHILD_SETTID */ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ - unsigned long rt_priority; + unsigned int rt_priority; cputime_t utime, stime; unsigned long nvcsw, nivcsw; /* context switch counts */ struct timespec start_time; -- cgit v1.2.3 From 8e2c20023f34b652605a5fb7c68bb843d2b100a8 Mon Sep 17 00:00:00 2001 From: Trent Piepho Date: Tue, 8 May 2007 00:24:05 -0700 Subject: Fix constant folding and poor optimization in byte swapping code Constant folding does not work for the swabXX() byte swapping functions, and the C versions optimize poorly. Attempting to initialize a global variable to swab16(0x1234) or put something like "case swab32(42):" in a switch statement will not compile. It can work, swab.h just isn't doing it correctly. This patch fixes that. Contrary to the comment in asm-i386/byteorder.h, gcc does not recognize the "C" version of swab16 and turn it into efficient code. gcc can do this, just not with the current code. The simple function: u16 foo(u16 x) { return swab16(x); } Would compile to: movzwl %ax, %eax movl %eax, %edx shrl $8, %eax sall $8, %edx orl %eax, %edx With this patch, it will compile to: rolw $8, %ax I also attempted to document the maze different macros/inline functions that are used to create the final product. 
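To illustrate the dispatch being described (a simplified sketch, not the exact kernel macros), the trick is to route compile-time constants to a pure macro the compiler can fold, and runtime values to an inline function it can optimize into a rotate:

    #include <stdint.h>

    /* Pure-constant form: usable in case labels and static initializers. */
    #define CONST_SWAB16(x) ((uint16_t)((((uint16_t)(x) & 0x00ffU) << 8) | \
                                        (((uint16_t)(x) & 0xff00U) >> 8)))

    /* Runtime form: gcc recognizes this and emits a single rotate (rolw $8). */
    static inline uint16_t runtime_swab16(uint16_t x)
    {
            return (uint16_t)(x << 8 | x >> 8);
    }

    #define SWAB16(x) (__builtin_constant_p((uint16_t)(x)) ? \
                       CONST_SWAB16(x) : runtime_swab16(x))

With gcc and optimization enabled, SWAB16(0x1234) folds at compile time, while SWAB16(v) for a variable v compiles down to the rotate shown in the message.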
Signed-off-by: Trent Piepho Cc: Francois-Rene Rideau Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/byteorder/swab.h | 108 ++++++++++++++++++++++++++--------------- 1 file changed, 69 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/include/linux/byteorder/swab.h b/include/linux/byteorder/swab.h index 25f7f32883ec..142134ff1645 100644 --- a/include/linux/byteorder/swab.h +++ b/include/linux/byteorder/swab.h @@ -10,6 +10,10 @@ * separated swab functions from cpu_to_XX, * to clean up support for bizarre-endian architectures. * + * Trent Piepho 2007114 + * make constant-folding work, provide C versions that + * gcc can optimize better, explain different versions + * * See asm-i386/byteorder.h and suches for examples of how to provide * architecture-dependent optimized versions * @@ -17,40 +21,66 @@ #include +/* Functions/macros defined, there are a lot: + * + * ___swabXX + * Generic C versions of the swab functions. + * + * ___constant_swabXX + * C versions that gcc can fold into a compile-time constant when + * the argument is a compile-time constant. + * + * __arch__swabXX[sp]? + * Architecture optimized versions of all the swab functions + * (including the s and p versions). These can be defined in + * asm-arch/byteorder.h. Any which are not, are defined here. + * __arch__swabXXs() is defined in terms of __arch__swabXXp(), which + * is defined in terms of __arch__swabXX(), which is in turn defined + * in terms of ___swabXX(x). + * These must be macros. They may be unsafe for arguments with + * side-effects. + * + * __fswabXX + * Inline function versions of the __arch__ macros. These _are_ safe + * if the arguments have side-effects. Note there are no s and p + * versions of these. + * + * __swabXX[sb] + * There are the ones you should actually use. The __swabXX versions + * will be a constant given a constant argument and use the arch + * specific code (if any) for non-constant arguments. The s and p + * versions always use the arch specific code (constant folding + * doesn't apply). They are safe to use with arguments with + * side-effects. + * + * swabXX[sb] + * Nicknames for __swabXX[sb] to use in the kernel. + */ + /* casts are necessary for constants, because we never know how for sure * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
*/ -#define ___swab16(x) \ -({ \ - __u16 __x = (x); \ - ((__u16)( \ - (((__u16)(__x) & (__u16)0x00ffU) << 8) | \ - (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \ -}) -#define ___swab32(x) \ -({ \ - __u32 __x = (x); \ - ((__u32)( \ - (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \ - (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \ - (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \ - (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \ -}) - -#define ___swab64(x) \ -({ \ - __u64 __x = (x); \ - ((__u64)( \ - (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \ - (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \ - (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \ - (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \ - (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \ - (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ - (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \ - (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \ -}) +static __inline__ __attribute_const__ __u16 ___swab16(__u16 x) +{ + return x<<8 | x>>8; +} +static __inline__ __attribute_const__ __u32 ___swab32(__u32 x) +{ + return x<<24 | x>>24 | + (x & (__u32)0x0000ff00UL)<<8 | + (x & (__u32)0x00ff0000UL)>>8; +} +static __inline__ __attribute_const__ __u64 ___swab64(__u64 x) +{ + return x<<56 | x>>56 | + (x & (__u64)0x000000000000ff00ULL)<<40 | + (x & (__u64)0x0000000000ff0000ULL)<<24 | + (x & (__u64)0x00000000ff000000ULL)<< 8 | + (x & (__u64)0x000000ff00000000ULL)>> 8 | + (x & (__u64)0x0000ff0000000000ULL)>>24 | + (x & (__u64)0x00ff000000000000ULL)>>40; +} #define ___constant_swab16(x) \ ((__u16)( \ @@ -77,13 +107,13 @@ * provide defaults when no architecture-specific optimization is detected */ #ifndef __arch__swab16 -# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); }) +# define __arch__swab16(x) ___swab16(x) #endif #ifndef __arch__swab32 -# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); }) +# define __arch__swab32(x) ___swab32(x) #endif #ifndef __arch__swab64 -# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); }) +# define __arch__swab64(x) ___swab64(x) #endif #ifndef __arch__swab16p @@ -97,13 +127,13 @@ #endif #ifndef __arch__swab16s -# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0) +# define __arch__swab16s(x) ((void)(*(x) = __arch__swab16p(x))) #endif #ifndef __arch__swab32s -# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0) +# define __arch__swab32s(x) ((void)(*(x) = __arch__swab32p(x))) #endif #ifndef __arch__swab64s -# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0) +# define __arch__swab64s(x) ((void)(*(x) = __arch__swab64p(x))) #endif @@ -113,15 +143,15 @@ #if defined(__GNUC__) && defined(__OPTIMIZE__) # define __swab16(x) \ (__builtin_constant_p((__u16)(x)) ? \ - ___swab16((x)) : \ + ___constant_swab16((x)) : \ __fswab16((x))) # define __swab32(x) \ (__builtin_constant_p((__u32)(x)) ? \ - ___swab32((x)) : \ + ___constant_swab32((x)) : \ __fswab32((x))) # define __swab64(x) \ (__builtin_constant_p((__u64)(x)) ? \ - ___swab64((x)) : \ + ___constant_swab64((x)) : \ __fswab64((x))) #else # define __swab16(x) __fswab16(x) -- cgit v1.2.3 From 1ae7075bcd805c3aa5e8f53effc63a4562d6110e Mon Sep 17 00:00:00 2001 From: Chris Snook Date: Tue, 8 May 2007 00:24:15 -0700 Subject: use use SEEK_MAX to validate user lseek arguments Add SEEK_MAX and use it to validate lseek arguments from userspace. 
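A tiny sketch of the pattern (illustrative values; the real constants live in include/linux/fs.h): keeping a SEEK_MAX alias next to the whence definitions lets every validity check stay correct if a new whence value is ever added.

    #define SEEK_SET 0      /* seek relative to beginning of file */
    #define SEEK_CUR 1      /* seek relative to current file position */
    #define SEEK_END 2      /* seek relative to end of file */
    #define SEEK_MAX SEEK_END

    static int whence_is_valid(unsigned int whence)
    {
            return whence <= SEEK_MAX;      /* replaces open-coded "<= 2" checks */
    }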
Signed-off-by: Chris Snook Acked-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/read_write.c | 4 ++-- include/linux/fs.h | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/read_write.c b/fs/read_write.c index 0642a1691d15..4d03008f015b 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -139,7 +139,7 @@ asmlinkage off_t sys_lseek(unsigned int fd, off_t offset, unsigned int origin) goto bad; retval = -EINVAL; - if (origin <= 2) { + if (origin <= SEEK_MAX) { loff_t res = vfs_llseek(file, offset, origin); retval = res; if (res != (loff_t)retval) @@ -166,7 +166,7 @@ asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high, goto bad; retval = -EINVAL; - if (origin > 2) + if (origin > SEEK_MAX) goto out_putf; offset = vfs_llseek(file, ((loff_t) offset_high << 32) | offset_low, diff --git a/include/linux/fs.h b/include/linux/fs.h index 020a8426e10a..986b5d5d369f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -30,6 +30,7 @@ #define SEEK_SET 0 /* seek relative to beginning of file */ #define SEEK_CUR 1 /* seek relative to current file position */ #define SEEK_END 2 /* seek relative to end of file */ +#define SEEK_MAX SEEK_END /* And dynamically-tunable limits and defaults: */ struct files_stat_struct { -- cgit v1.2.3 From 46595390e97b3ab2741a36f5ff69e8f6033fa9c0 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Tue, 8 May 2007 00:24:47 -0700 Subject: init/do_mounts.c: proper prepare_namespace() prototype Add a proper protype for prepare_namespace() in include/linux/init.h. Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/init.h | 3 ++- init/do_mounts.c | 1 + init/main.c | 1 - 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/init.h b/include/linux/init.h index dbbdbd1bec77..8bc32bb2fce2 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -77,7 +77,8 @@ extern char *saved_command_line; extern unsigned int reset_devices; /* used by init/main.c */ -extern void setup_arch(char **); +void setup_arch(char **); +void prepare_namespace(void); #endif diff --git a/init/do_mounts.c b/init/do_mounts.c index dc1ec0803ef9..3f57ed4599d6 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/init/main.c b/init/main.c index 0e22f40487bb..949c27b59d12 100644 --- a/init/main.c +++ b/init/main.c @@ -94,7 +94,6 @@ extern void pidmap_init(void); extern void prio_tree_init(void); extern void radix_tree_init(void); extern void free_initmem(void); -extern void prepare_namespace(void); #ifdef CONFIG_ACPI extern void acpi_early_init(void); #else -- cgit v1.2.3 From ee7b9e3706b9c5f90113eb16a1a84a1c01e09f95 Mon Sep 17 00:00:00 2001 From: Michal Schmidt Date: Tue, 8 May 2007 00:24:49 -0700 Subject: Fix compilation of drivers with -O0 It is sometimes useful to compile individual drivers with optimization disabled for easier debugging. Currently drivers which use htonl() and similar functions don't compile with -O0. This patch fixes it. It also removes obsolete and misleading comments. This header is not for userspace, so we don't have to care about strange programs these comments mention. 
(akpm: -O0 probably isn't a good idea, but this code looks pretty crufty and unuseful) Signed-off-by: Michal Schmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/byteorder/generic.h | 25 ------------------------- 1 file changed, 25 deletions(-) (limited to 'include/linux') diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h index e86e4a938373..3dc715b02500 100644 --- a/include/linux/byteorder/generic.h +++ b/include/linux/byteorder/generic.h @@ -124,19 +124,8 @@ #define be32_to_cpus __be32_to_cpus #define cpu_to_be16s __cpu_to_be16s #define be16_to_cpus __be16_to_cpus -#endif - -#if defined(__KERNEL__) /* - * Handle ntohl and suches. These have various compatibility - * issues - like we want to give the prototype even though we - * also have a macro for them in case some strange program - * wants to take the address of the thing or something.. - * - * Note that these used to return a "long" in libc5, even though - * long is often 64-bit these days.. Thus the casts. - * * They have to be macros in order to do the constant folding * correctly - if the argument passed into a inline function * it is no longer constant according to gcc.. @@ -147,17 +136,6 @@ #undef htonl #undef htons -/* - * Do the prototypes. Somebody might want to take the - * address or some such sick thing.. - */ -extern __u32 ntohl(__be32); -extern __be32 htonl(__u32); -extern __u16 ntohs(__be16); -extern __be16 htons(__u16); - -#if defined(__GNUC__) && defined(__OPTIMIZE__) - #define ___htonl(x) __cpu_to_be32(x) #define ___htons(x) __cpu_to_be16(x) #define ___ntohl(x) __be32_to_cpu(x) @@ -168,9 +146,6 @@ extern __be16 htons(__u16); #define htons(x) ___htons(x) #define ntohs(x) ___ntohs(x) -#endif /* OPTIMIZE */ - #endif /* KERNEL */ - #endif /* _LINUX_BYTEORDER_GENERIC_H */ -- cgit v1.2.3 From fe08a9d4982d9618ec25760ea715c46fe051e508 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 8 May 2007 00:24:55 -0700 Subject: reiserfs: shrink superblock if no xattrs This makes in-core superblock fit into one cacheline here. 
Before: struct dentry * xattr_root; /* 124 4 */ /* --- cacheline 1 boundary (128 bytes) --- */ struct rw_semaphore xattr_dir_sem; /* 128 12 */ int j_errno; /* 140 4 */ }; /* size: 144, cachelines: 2 */ /* sum members: 142, holes: 1, sum holes: 2 */ /* last cacheline: 16 bytes */ After: int j_errno; /* 124 4 */ /* --- cacheline 1 boundary (128 bytes) --- */ }; /* size: 128, cachelines: 1 */ /* sum members: 126, holes: 1, sum holes: 2 */ Signed-off-by: Alexey Dobriyan Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/reiserfs/super.c | 6 ++++-- include/linux/reiserfs_fs_sb.h | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 7054aaef0493..5c58b419f4ee 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -433,12 +433,13 @@ int remove_save_link(struct inode *inode, int truncate) static void reiserfs_kill_sb(struct super_block *s) { if (REISERFS_SB(s)) { +#ifdef CONFIG_REISERFS_FS_XATTR if (REISERFS_SB(s)->xattr_root) { d_invalidate(REISERFS_SB(s)->xattr_root); dput(REISERFS_SB(s)->xattr_root); REISERFS_SB(s)->xattr_root = NULL; } - +#endif if (REISERFS_SB(s)->priv_root) { d_invalidate(REISERFS_SB(s)->priv_root); dput(REISERFS_SB(s)->priv_root); @@ -1562,9 +1563,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) REISERFS_SB(s)->s_alloc_options.preallocmin = 0; /* Preallocate by 16 blocks (17-1) at once */ REISERFS_SB(s)->s_alloc_options.preallocsize = 17; +#ifdef CONFIG_REISERFS_FS_XATTR /* Initialize the rwsem for xattr dir */ init_rwsem(&REISERFS_SB(s)->xattr_dir_sem); - +#endif /* setup default block allocator options */ reiserfs_init_alloc_options(s); diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 3a28742d86f9..1e5488ede037 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h @@ -401,9 +401,10 @@ struct reiserfs_sb_info { int reserved_blocks; /* amount of blocks reserved for further allocations */ spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */ struct dentry *priv_root; /* root of /.reiserfs_priv */ +#ifdef CONFIG_REISERFS_FS_XATTR struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */ struct rw_semaphore xattr_dir_sem; - +#endif int j_errno; #ifdef CONFIG_QUOTA char *s_qf_names[MAXQUOTAS]; -- cgit v1.2.3 From 616883df78bd4b3fcdb6ddc39bd3d4cb902bfa32 Mon Sep 17 00:00:00 2001 From: Monakhov Dmitriy Date: Tue, 8 May 2007 00:25:07 -0700 Subject: IRQ: add __must_check to request_irq This could help to find buggy drivers where request_irq return value wasn't checked. There's just no reason to ignore errors which can and do occur. Anyone who got warning during compilation have to realise what it is't realy safe code. 
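For illustration, a minimal sketch of the pattern the annotation pushes drivers toward (the foo_* names and the IRQF_SHARED flag choice are placeholders, not from this patch): the request_irq() return value is checked and propagated instead of being silently dropped.

    #include <linux/interrupt.h>
    #include <linux/device.h>

    static irqreturn_t foo_interrupt(int irq, void *dev_id)
    {
            /* acknowledge and handle the hypothetical device here */
            return IRQ_HANDLED;
    }

    static int foo_request_irq(struct device *dev, unsigned int irq, void *priv)
    {
            int err;

            /* dropping this return value now triggers a __must_check warning */
            err = request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", priv);
            if (err)
                    dev_err(dev, "could not claim IRQ %u, error %d\n", irq, err);

            return err;
    }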
Signed-off-by: Monakhov Dmitriy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/interrupt.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 0319f665dd3f..2067a7ef6e62 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -83,11 +83,11 @@ struct irqaction { }; extern irqreturn_t no_action(int cpl, void *dev_id); -extern int request_irq(unsigned int, irq_handler_t handler, +extern int __must_check request_irq(unsigned int, irq_handler_t handler, unsigned long, const char *, void *); extern void free_irq(unsigned int, void *); -extern int devm_request_irq(struct device *dev, unsigned int irq, +extern int __must_check devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); -- cgit v1.2.3 From e3222c4ecc649c4ae568e61dda9349482401b501 Mon Sep 17 00:00:00 2001 From: Badari Pulavarty Date: Tue, 8 May 2007 00:25:21 -0700 Subject: Merge sys_clone()/sys_unshare() nsproxy and namespace handling sys_clone() and sys_unshare() both makes copies of nsproxy and its associated namespaces. But they have different code paths. This patch merges all the nsproxy and its associated namespace copy/clone handling (as much as possible). Posted on container list earlier for feedback. - Create a new nsproxy and its associated namespaces and pass it back to caller to attach it to right process. - Changed all copy_*_ns() routines to return a new copy of namespace instead of attaching it to task->nsproxy. - Moved the CAP_SYS_ADMIN checks out of copy_*_ns() routines. - Removed unnessary !ns checks from copy_*_ns() and added BUG_ON() just incase. - Get rid of all individual unshare_*_ns() routines and make use of copy_*_ns() instead. [akpm@osdl.org: cleanups, warning fix] [clg@fr.ibm.com: remove dup_namespaces() declaration] [serue@us.ibm.com: fix CONFIG_IPC_NS=n, clone(CLONE_NEWIPC) retval] [akpm@linux-foundation.org: fix build with CONFIG_SYSVIPC=n] Signed-off-by: Badari Pulavarty Signed-off-by: Serge Hallyn Cc: Cedric Le Goater Cc: "Eric W. Biederman" Cc: Signed-off-by: Cedric Le Goater Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/namespace.c | 30 +++------ include/linux/ipc.h | 11 ++-- include/linux/mnt_namespace.h | 5 +- include/linux/nsproxy.h | 3 +- include/linux/pid_namespace.h | 2 +- include/linux/utsname.h | 19 ++---- ipc/util.c | 53 +++------------- kernel/fork.c | 85 ++------------------------ kernel/nsproxy.c | 139 ++++++++++++++++++++++++++---------------- kernel/pid.c | 11 +--- kernel/utsname.c | 41 ++----------- 11 files changed, 131 insertions(+), 268 deletions(-) (limited to 'include/linux') diff --git a/fs/namespace.c b/fs/namespace.c index fd999cab7b57..be5e56bfb73e 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1441,10 +1441,9 @@ dput_out: * Allocate a new namespace structure and populate it with contents * copied from the namespace of the passed in task structure. 
*/ -struct mnt_namespace *dup_mnt_ns(struct task_struct *tsk, +static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, struct fs_struct *fs) { - struct mnt_namespace *mnt_ns = tsk->nsproxy->mnt_ns; struct mnt_namespace *new_ns; struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL; struct vfsmount *p, *q; @@ -1509,36 +1508,21 @@ struct mnt_namespace *dup_mnt_ns(struct task_struct *tsk, return new_ns; } -int copy_mnt_ns(int flags, struct task_struct *tsk) +struct mnt_namespace *copy_mnt_ns(int flags, struct mnt_namespace *ns, + struct fs_struct *new_fs) { - struct mnt_namespace *ns = tsk->nsproxy->mnt_ns; struct mnt_namespace *new_ns; - int err = 0; - - if (!ns) - return 0; + BUG_ON(!ns); get_mnt_ns(ns); if (!(flags & CLONE_NEWNS)) - return 0; - - if (!capable(CAP_SYS_ADMIN)) { - err = -EPERM; - goto out; - } - - new_ns = dup_mnt_ns(tsk, tsk->fs); - if (!new_ns) { - err = -ENOMEM; - goto out; - } + return ns; - tsk->nsproxy->mnt_ns = new_ns; + new_ns = dup_mnt_ns(ns, new_fs); -out: put_mnt_ns(ns); - return err; + return new_ns; } asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name, diff --git a/include/linux/ipc.h b/include/linux/ipc.h index 6da6772c19ff..1980867a64a4 100644 --- a/include/linux/ipc.h +++ b/include/linux/ipc.h @@ -92,16 +92,19 @@ extern struct ipc_namespace init_ipc_ns; #ifdef CONFIG_SYSVIPC #define INIT_IPC_NS(ns) .ns = &init_ipc_ns, -extern int copy_ipcs(unsigned long flags, struct task_struct *tsk); +extern struct ipc_namespace *copy_ipcs(unsigned long flags, + struct ipc_namespace *ns); #else #define INIT_IPC_NS(ns) -static inline int copy_ipcs(unsigned long flags, struct task_struct *tsk) -{ return 0; } +static inline struct ipc_namespace *copy_ipcs(unsigned long flags, + struct ipc_namespace *ns) +{ + return ns; +} #endif #ifdef CONFIG_IPC_NS extern void free_ipc_ns(struct kref *kref); -extern int unshare_ipcs(unsigned long flags, struct ipc_namespace **ns); #endif static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 4af0b1fc282a..1fa4d9813b31 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h @@ -14,10 +14,9 @@ struct mnt_namespace { int event; }; -extern int copy_mnt_ns(int, struct task_struct *); -extern void __put_mnt_ns(struct mnt_namespace *ns); -extern struct mnt_namespace *dup_mnt_ns(struct task_struct *, +extern struct mnt_namespace *copy_mnt_ns(int, struct mnt_namespace *, struct fs_struct *); +extern void __put_mnt_ns(struct mnt_namespace *ns); static inline void put_mnt_ns(struct mnt_namespace *ns) { diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 0b9f0dc30d61..189e0dc993ab 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -31,10 +31,11 @@ struct nsproxy { }; extern struct nsproxy init_nsproxy; -struct nsproxy *dup_namespaces(struct nsproxy *orig); int copy_namespaces(int flags, struct task_struct *tsk); void get_task_namespaces(struct task_struct *tsk); void free_nsproxy(struct nsproxy *ns); +int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **, + struct fs_struct *); static inline void put_nsproxy(struct nsproxy *ns) { diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 2833806d42c6..169c6c24209b 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -29,7 +29,7 @@ static inline void get_pid_ns(struct pid_namespace *ns) kref_get(&ns->kref); } -extern int copy_pid_ns(int 
flags, struct task_struct *tsk); +extern struct pid_namespace *copy_pid_ns(int flags, struct pid_namespace *ns); extern void free_pid_ns(struct kref *kref); static inline void put_pid_ns(struct pid_namespace *ns) diff --git a/include/linux/utsname.h b/include/linux/utsname.h index e10267d402c5..f8d3b326e93a 100644 --- a/include/linux/utsname.h +++ b/include/linux/utsname.h @@ -49,9 +49,7 @@ static inline void get_uts_ns(struct uts_namespace *ns) } #ifdef CONFIG_UTS_NS -extern int unshare_utsname(unsigned long unshare_flags, - struct uts_namespace **new_uts); -extern int copy_utsname(int flags, struct task_struct *tsk); +extern struct uts_namespace *copy_utsname(int flags, struct uts_namespace *ns); extern void free_uts_ns(struct kref *kref); static inline void put_uts_ns(struct uts_namespace *ns) @@ -59,21 +57,12 @@ static inline void put_uts_ns(struct uts_namespace *ns) kref_put(&ns->kref, free_uts_ns); } #else -static inline int unshare_utsname(unsigned long unshare_flags, - struct uts_namespace **new_uts) +static inline struct uts_namespace *copy_utsname(int flags, + struct uts_namespace *ns) { - if (unshare_flags & CLONE_NEWUTS) - return -EINVAL; - - return 0; + return ns; } -static inline int copy_utsname(int flags, struct task_struct *tsk) -{ - if (flags & CLONE_NEWUTS) - return -EINVAL; - return 0; -} static inline void put_uts_ns(struct uts_namespace *ns) { } diff --git a/ipc/util.c b/ipc/util.c index 0b652387d169..0ad5b1c3ca01 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -85,53 +85,20 @@ err_mem: return ERR_PTR(err); } -int unshare_ipcs(unsigned long unshare_flags, struct ipc_namespace **new_ipc) +struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns) { - struct ipc_namespace *new; - - if (unshare_flags & CLONE_NEWIPC) { - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - new = clone_ipc_ns(current->nsproxy->ipc_ns); - if (IS_ERR(new)) - return PTR_ERR(new); - - *new_ipc = new; - } - - return 0; -} - -int copy_ipcs(unsigned long flags, struct task_struct *tsk) -{ - struct ipc_namespace *old_ns = tsk->nsproxy->ipc_ns; struct ipc_namespace *new_ns; - int err = 0; - if (!old_ns) - return 0; - - get_ipc_ns(old_ns); + BUG_ON(!ns); + get_ipc_ns(ns); if (!(flags & CLONE_NEWIPC)) - return 0; + return ns; - if (!capable(CAP_SYS_ADMIN)) { - err = -EPERM; - goto out; - } + new_ns = clone_ipc_ns(ns); - new_ns = clone_ipc_ns(old_ns); - if (!new_ns) { - err = -ENOMEM; - goto out; - } - - tsk->nsproxy->ipc_ns = new_ns; -out: - put_ipc_ns(old_ns); - return err; + put_ipc_ns(ns); + return new_ns; } void free_ipc_ns(struct kref *kref) @@ -145,11 +112,11 @@ void free_ipc_ns(struct kref *kref) kfree(ns); } #else -int copy_ipcs(unsigned long flags, struct task_struct *tsk) +struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns) { if (flags & CLONE_NEWIPC) - return -EINVAL; - return 0; + return ERR_PTR(-EINVAL); + return ns; } #endif diff --git a/kernel/fork.c b/kernel/fork.c index b7d169def942..fd211b9dddd4 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1515,26 +1515,6 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) return 0; } -/* - * Unshare the mnt_namespace structure if it is being shared - */ -static int unshare_mnt_namespace(unsigned long unshare_flags, - struct mnt_namespace **new_nsp, struct fs_struct *new_fs) -{ - struct mnt_namespace *ns = current->nsproxy->mnt_ns; - - if ((unshare_flags & CLONE_NEWNS) && ns) { - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - *new_nsp = dup_mnt_ns(current, new_fs ? 
new_fs : current->fs); - if (!*new_nsp) - return -ENOMEM; - } - - return 0; -} - /* * Unsharing of sighand is not supported yet */ @@ -1593,16 +1573,6 @@ static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **n return 0; } -#ifndef CONFIG_IPC_NS -static inline int unshare_ipcs(unsigned long flags, struct ipc_namespace **ns) -{ - if (flags & CLONE_NEWIPC) - return -EINVAL; - - return 0; -} -#endif - /* * unshare allows a process to 'unshare' part of the process * context which was originally shared using clone. copy_* @@ -1615,14 +1585,11 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) { int err = 0; struct fs_struct *fs, *new_fs = NULL; - struct mnt_namespace *ns, *new_ns = NULL; struct sighand_struct *new_sigh = NULL; struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL; struct files_struct *fd, *new_fd = NULL; struct sem_undo_list *new_ulist = NULL; struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL; - struct uts_namespace *uts, *new_uts = NULL; - struct ipc_namespace *ipc, *new_ipc = NULL; check_unshare_flags(&unshare_flags); @@ -1637,36 +1604,24 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) goto bad_unshare_out; if ((err = unshare_fs(unshare_flags, &new_fs))) goto bad_unshare_cleanup_thread; - if ((err = unshare_mnt_namespace(unshare_flags, &new_ns, new_fs))) - goto bad_unshare_cleanup_fs; if ((err = unshare_sighand(unshare_flags, &new_sigh))) - goto bad_unshare_cleanup_ns; + goto bad_unshare_cleanup_fs; if ((err = unshare_vm(unshare_flags, &new_mm))) goto bad_unshare_cleanup_sigh; if ((err = unshare_fd(unshare_flags, &new_fd))) goto bad_unshare_cleanup_vm; if ((err = unshare_semundo(unshare_flags, &new_ulist))) goto bad_unshare_cleanup_fd; - if ((err = unshare_utsname(unshare_flags, &new_uts))) + if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, + new_fs))) goto bad_unshare_cleanup_semundo; - if ((err = unshare_ipcs(unshare_flags, &new_ipc))) - goto bad_unshare_cleanup_uts; - - if (new_ns || new_uts || new_ipc) { - old_nsproxy = current->nsproxy; - new_nsproxy = dup_namespaces(old_nsproxy); - if (!new_nsproxy) { - err = -ENOMEM; - goto bad_unshare_cleanup_ipc; - } - } - if (new_fs || new_ns || new_mm || new_fd || new_ulist || - new_uts || new_ipc) { + if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) { task_lock(current); if (new_nsproxy) { + old_nsproxy = current->nsproxy; current->nsproxy = new_nsproxy; new_nsproxy = old_nsproxy; } @@ -1677,12 +1632,6 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) new_fs = fs; } - if (new_ns) { - ns = current->nsproxy->mnt_ns; - current->nsproxy->mnt_ns = new_ns; - new_ns = ns; - } - if (new_mm) { mm = current->mm; active_mm = current->active_mm; @@ -1698,32 +1647,12 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) new_fd = fd; } - if (new_uts) { - uts = current->nsproxy->uts_ns; - current->nsproxy->uts_ns = new_uts; - new_uts = uts; - } - - if (new_ipc) { - ipc = current->nsproxy->ipc_ns; - current->nsproxy->ipc_ns = new_ipc; - new_ipc = ipc; - } - task_unlock(current); } if (new_nsproxy) put_nsproxy(new_nsproxy); -bad_unshare_cleanup_ipc: - if (new_ipc) - put_ipc_ns(new_ipc); - -bad_unshare_cleanup_uts: - if (new_uts) - put_uts_ns(new_uts); - bad_unshare_cleanup_semundo: bad_unshare_cleanup_fd: if (new_fd) @@ -1738,10 +1667,6 @@ bad_unshare_cleanup_sigh: if (atomic_dec_and_test(&new_sigh->count)) kmem_cache_free(sighand_cachep, new_sigh); -bad_unshare_cleanup_ns: - if (new_ns) - put_mnt_ns(new_ns); - bad_unshare_cleanup_fs: if (new_fs) 
put_fs_struct(new_fs); diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index f5b9ee6f6bbb..1bc4b55241a8 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -38,10 +38,8 @@ void get_task_namespaces(struct task_struct *tsk) /* * creates a copy of "orig" with refcount 1. - * This does not grab references to the contained namespaces, - * so that needs to be done by dup_namespaces. */ -static inline struct nsproxy *clone_namespaces(struct nsproxy *orig) +static inline struct nsproxy *clone_nsproxy(struct nsproxy *orig) { struct nsproxy *ns; @@ -52,26 +50,49 @@ static inline struct nsproxy *clone_namespaces(struct nsproxy *orig) } /* - * copies the nsproxy, setting refcount to 1, and grabbing a - * reference to all contained namespaces. Called from - * sys_unshare() + * Create new nsproxy and all of its the associated namespaces. + * Return the newly created nsproxy. Do not attach this to the task, + * leave it to the caller to do proper locking and attach it to task. */ -struct nsproxy *dup_namespaces(struct nsproxy *orig) +static struct nsproxy *create_new_namespaces(int flags, struct task_struct *tsk, + struct fs_struct *new_fs) { - struct nsproxy *ns = clone_namespaces(orig); + struct nsproxy *new_nsp; - if (ns) { - if (ns->mnt_ns) - get_mnt_ns(ns->mnt_ns); - if (ns->uts_ns) - get_uts_ns(ns->uts_ns); - if (ns->ipc_ns) - get_ipc_ns(ns->ipc_ns); - if (ns->pid_ns) - get_pid_ns(ns->pid_ns); - } + new_nsp = clone_nsproxy(tsk->nsproxy); + if (!new_nsp) + return ERR_PTR(-ENOMEM); - return ns; + new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, new_fs); + if (IS_ERR(new_nsp->mnt_ns)) + goto out_ns; + + new_nsp->uts_ns = copy_utsname(flags, tsk->nsproxy->uts_ns); + if (IS_ERR(new_nsp->uts_ns)) + goto out_uts; + + new_nsp->ipc_ns = copy_ipcs(flags, tsk->nsproxy->ipc_ns); + if (IS_ERR(new_nsp->ipc_ns)) + goto out_ipc; + + new_nsp->pid_ns = copy_pid_ns(flags, tsk->nsproxy->pid_ns); + if (IS_ERR(new_nsp->pid_ns)) + goto out_pid; + + return new_nsp; + +out_pid: + if (new_nsp->ipc_ns) + put_ipc_ns(new_nsp->ipc_ns); +out_ipc: + if (new_nsp->uts_ns) + put_uts_ns(new_nsp->uts_ns); +out_uts: + if (new_nsp->mnt_ns) + put_mnt_ns(new_nsp->mnt_ns); +out_ns: + kfree(new_nsp); + return ERR_PTR(-ENOMEM); } /* @@ -92,47 +113,21 @@ int copy_namespaces(int flags, struct task_struct *tsk) if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC))) return 0; - new_ns = clone_namespaces(old_ns); - if (!new_ns) { - err = -ENOMEM; + if (!capable(CAP_SYS_ADMIN)) { + err = -EPERM; goto out; } - tsk->nsproxy = new_ns; - - err = copy_mnt_ns(flags, tsk); - if (err) - goto out_ns; - - err = copy_utsname(flags, tsk); - if (err) - goto out_uts; - - err = copy_ipcs(flags, tsk); - if (err) - goto out_ipc; - - err = copy_pid_ns(flags, tsk); - if (err) - goto out_pid; + new_ns = create_new_namespaces(flags, tsk, tsk->fs); + if (IS_ERR(new_ns)) { + err = PTR_ERR(new_ns); + goto out; + } + tsk->nsproxy = new_ns; out: put_nsproxy(old_ns); return err; - -out_pid: - if (new_ns->ipc_ns) - put_ipc_ns(new_ns->ipc_ns); -out_ipc: - if (new_ns->uts_ns) - put_uts_ns(new_ns->uts_ns); -out_uts: - if (new_ns->mnt_ns) - put_mnt_ns(new_ns->mnt_ns); -out_ns: - tsk->nsproxy = old_ns; - kfree(new_ns); - goto out; } void free_nsproxy(struct nsproxy *ns) @@ -147,3 +142,41 @@ void free_nsproxy(struct nsproxy *ns) put_pid_ns(ns->pid_ns); kfree(ns); } + +/* + * Called from unshare. Unshare all the namespaces part of nsproxy. + * On sucess, returns the new nsproxy and a reference to old nsproxy + * to make sure it stays around. 
+ */ +int unshare_nsproxy_namespaces(unsigned long unshare_flags, + struct nsproxy **new_nsp, struct fs_struct *new_fs) +{ + struct nsproxy *old_ns = current->nsproxy; + int err = 0; + + if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC))) + return 0; + +#ifndef CONFIG_IPC_NS + if (unshare_flags & CLONE_NEWIPC) + return -EINVAL; +#endif + +#ifndef CONFIG_UTS_NS + if (unshare_flags & CLONE_NEWUTS) + return -EINVAL; +#endif + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + get_nsproxy(old_ns); + + *new_nsp = create_new_namespaces(unshare_flags, current, + new_fs ? new_fs : current->fs); + if (IS_ERR(*new_nsp)) { + err = PTR_ERR(*new_nsp); + put_nsproxy(old_ns); + } + return err; +} diff --git a/kernel/pid.c b/kernel/pid.c index 9c80bc23d6b8..d3ad724afa83 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -360,16 +360,11 @@ struct pid *find_ge_pid(int nr) } EXPORT_SYMBOL_GPL(find_get_pid); -int copy_pid_ns(int flags, struct task_struct *tsk) +struct pid_namespace *copy_pid_ns(int flags, struct pid_namespace *old_ns) { - struct pid_namespace *old_ns = tsk->nsproxy->pid_ns; - int err = 0; - - if (!old_ns) - return 0; - + BUG_ON(!old_ns); get_pid_ns(old_ns); - return err; + return old_ns; } void free_pid_ns(struct kref *kref) diff --git a/kernel/utsname.c b/kernel/utsname.c index c859164a6993..160c8c5136bd 100644 --- a/kernel/utsname.c +++ b/kernel/utsname.c @@ -31,59 +31,26 @@ static struct uts_namespace *clone_uts_ns(struct uts_namespace *old_ns) return ns; } -/* - * unshare the current process' utsname namespace. - * called only in sys_unshare() - */ -int unshare_utsname(unsigned long unshare_flags, struct uts_namespace **new_uts) -{ - if (unshare_flags & CLONE_NEWUTS) { - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - *new_uts = clone_uts_ns(current->nsproxy->uts_ns); - if (!*new_uts) - return -ENOMEM; - } - - return 0; -} - /* * Copy task tsk's utsname namespace, or clone it if flags * specifies CLONE_NEWUTS. In latter case, changes to the * utsname of this process won't be seen by parent, and vice * versa. */ -int copy_utsname(int flags, struct task_struct *tsk) +struct uts_namespace *copy_utsname(int flags, struct uts_namespace *old_ns) { - struct uts_namespace *old_ns = tsk->nsproxy->uts_ns; struct uts_namespace *new_ns; - int err = 0; - - if (!old_ns) - return 0; + BUG_ON(!old_ns); get_uts_ns(old_ns); if (!(flags & CLONE_NEWUTS)) - return 0; - - if (!capable(CAP_SYS_ADMIN)) { - err = -EPERM; - goto out; - } + return old_ns; new_ns = clone_uts_ns(old_ns); - if (!new_ns) { - err = -ENOMEM; - goto out; - } - tsk->nsproxy->uts_ns = new_ns; -out: put_uts_ns(old_ns); - return err; + return new_ns; } void free_uts_ns(struct kref *kref) -- cgit v1.2.3 From 2e17c5508fa015f2c7690e29041f437e9308c64f Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 8 May 2007 00:25:29 -0700 Subject: init dma masks in pnp_dev PNP now initializes device dma masks, which prevents oopses when generic dma calls are made using pnp device nodes. This assumes PNP only uses ISA DMA, with 24 bit addresses; and that it's safe to init those masks for all devices (rather than finding out which devices have been assigned DMA channels, and handling only those). 
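As an illustration of what the initialised masks enable (not code from this patch; the foo_map_buffer name is a placeholder): a driver can now hand the PNP device's embedded struct device straight to the generic DMA API.

    #include <linux/types.h>
    #include <linux/pnp.h>
    #include <linux/dma-mapping.h>

    static dma_addr_t foo_map_buffer(struct pnp_dev *pdev, void *buf, size_t len)
    {
            /*
             * Works because pdev->dev.dma_mask now points at pdev->dma_mask
             * and both masks default to DMA_24BIT_MASK (ISA-style DMA).
             */
            return dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
    }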
Signed-off-by: David Brownell Cc: Adam Belay Cc: Jaroslav Kysela Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/pnp/core.c | 3 +++ include/linux/pnp.h | 1 + 2 files changed, 4 insertions(+) (limited to 'include/linux') diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c index aec83ec5ea23..d8d75541552c 100644 --- a/drivers/pnp/core.c +++ b/drivers/pnp/core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "base.h" @@ -114,6 +115,8 @@ int __pnp_add_device(struct pnp_dev *dev) int ret; pnp_fixup_device(dev); dev->dev.bus = &pnp_bus_type; + dev->dev.dma_mask = &dev->dma_mask; + dev->dma_mask = dev->dev.coherent_dma_mask = DMA_24BIT_MASK; dev->dev.release = &pnp_release_device; dev->status = PNP_READY; spin_lock(&pnp_lock); diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 9a5226f0f169..00dae5ba128a 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -177,6 +177,7 @@ static inline void pnp_set_card_drvdata (struct pnp_card_link *pcard, void *data struct pnp_dev { struct device dev; /* Driver Model device interface */ + u64 dma_mask; unsigned char number; /* used as an index, must be unique */ int status; -- cgit v1.2.3 From 79c0b2df79eb56fc71e54c75cd7fb3acf84370f9 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 8 May 2007 00:25:43 -0700 Subject: add filesystem subtype support There's a slight problem with filesystem type representation in fuse based filesystems. From the kernel's view, there are just two filesystem types: fuse and fuseblk. From the user's view there are lots of different filesystem types. The user is not even much concerned if the filesystem is fuse based or not. So there's a conflict of interest in how this should be represented in fstab, mtab and /proc/mounts. The current scheme is to encode the real filesystem type in the mount source. So an sshfs mount looks like this: sshfs#user@server:/ /mnt/server fuse rw,nosuid,nodev,... This url-ish syntax works OK for sshfs and similar filesystems. However for block device based filesystems (ntfs-3g, zfs) it doesn't work, since the kernel expects the mount source to be a real device name. A possibly better scheme would be to encode the real type in the type field as "type.subtype". So fuse mounts would look like this: /dev/hda1 /mnt/windows fuseblk.ntfs-3g rw,... user@server:/ /mnt/server fuse.sshfs rw,nosuid,nodev,... This patch adds the necessary code to the kernel so that this can be correctly displayed in /proc/mounts. 
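A small user-space sketch of the lookup rule the patch implements (illustrative only): just the part before the first '.' selects the kernel file_system_type, and the remainder becomes the subtype recorded in the superblock and shown in /proc/mounts.

    #include <stdio.h>
    #include <string.h>

    static void split_fstype(const char *fstype)
    {
            const char *dot = strchr(fstype, '.');
            size_t len = dot ? (size_t)(dot - fstype) : strlen(fstype);

            printf("kernel type: %.*s  subtype: %s\n",
                   (int)len, fstype, dot ? dot + 1 : "(none)");
    }

    int main(void)
    {
            split_fstype("fuse.sshfs");      /* kernel type: fuse     subtype: sshfs   */
            split_fstype("fuseblk.ntfs-3g"); /* kernel type: fuseblk  subtype: ntfs-3g */
            split_fstype("ext3");            /* kernel type: ext3     subtype: (none)  */
            return 0;
    }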
Signed-off-by: Miklos Szeredi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/filesystems.c | 21 +++++++++++++++------ fs/fuse/inode.c | 2 ++ fs/namespace.c | 4 ++++ fs/super.c | 27 +++++++++++++++++++++++++++ include/linux/fs.h | 7 +++++++ 5 files changed, 55 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/fs/filesystems.c b/fs/filesystems.c index 7a4f61aa05f8..f37f87262837 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c @@ -41,11 +41,12 @@ void put_filesystem(struct file_system_type *fs) module_put(fs->owner); } -static struct file_system_type **find_filesystem(const char *name) +static struct file_system_type **find_filesystem(const char *name, unsigned len) { struct file_system_type **p; for (p=&file_systems; *p; p=&(*p)->next) - if (strcmp((*p)->name,name) == 0) + if (strlen((*p)->name) == len && + strncmp((*p)->name, name, len) == 0) break; return p; } @@ -68,11 +69,12 @@ int register_filesystem(struct file_system_type * fs) int res = 0; struct file_system_type ** p; + BUG_ON(strchr(fs->name, '.')); if (fs->next) return -EBUSY; INIT_LIST_HEAD(&fs->fs_supers); write_lock(&file_systems_lock); - p = find_filesystem(fs->name); + p = find_filesystem(fs->name, strlen(fs->name)); if (*p) res = -EBUSY; else @@ -215,19 +217,26 @@ int get_filesystem_list(char * buf) struct file_system_type *get_fs_type(const char *name) { struct file_system_type *fs; + const char *dot = strchr(name, '.'); + unsigned len = dot ? dot - name : strlen(name); read_lock(&file_systems_lock); - fs = *(find_filesystem(name)); + fs = *(find_filesystem(name, len)); if (fs && !try_module_get(fs->owner)) fs = NULL; read_unlock(&file_systems_lock); - if (!fs && (request_module("%s", name) == 0)) { + if (!fs && (request_module("%.*s", len, name) == 0)) { read_lock(&file_systems_lock); - fs = *(find_filesystem(name)); + fs = *(find_filesystem(name, len)); if (fs && !try_module_get(fs->owner)) fs = NULL; read_unlock(&file_systems_lock); } + + if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { + put_filesystem(fs); + fs = NULL; + } return fs; } diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index d8003be56e05..1397018ff476 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -636,6 +636,7 @@ static int fuse_get_sb(struct file_system_type *fs_type, static struct file_system_type fuse_fs_type = { .owner = THIS_MODULE, .name = "fuse", + .fs_flags = FS_HAS_SUBTYPE, .get_sb = fuse_get_sb, .kill_sb = kill_anon_super, }; @@ -652,6 +653,7 @@ static int fuse_get_sb_blk(struct file_system_type *fs_type, static struct file_system_type fuseblk_fs_type = { .owner = THIS_MODULE, .name = "fuseblk", + .fs_flags = FS_HAS_SUBTYPE, .get_sb = fuse_get_sb_blk, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, diff --git a/fs/namespace.c b/fs/namespace.c index be5e56bfb73e..c5b88100d914 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -377,6 +377,10 @@ static int show_vfsmnt(struct seq_file *m, void *v) seq_path(m, mnt, mnt->mnt_root, " \t\n\\"); seq_putc(m, ' '); mangle(m, mnt->mnt_sb->s_type->name); + if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) { + seq_putc(m, '.'); + mangle(m, mnt->mnt_sb->s_subtype); + } seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? 
" ro" : " rw"); for (fs_infop = fs_info; fs_infop->flag; fs_infop++) { if (mnt->mnt_sb->s_flags & fs_infop->flag) diff --git a/fs/super.c b/fs/super.c index 8341e4e1d738..5260d620c555 100644 --- a/fs/super.c +++ b/fs/super.c @@ -107,6 +107,7 @@ out: static inline void destroy_super(struct super_block *s) { security_sb_free(s); + kfree(s->s_subtype); kfree(s); } @@ -907,6 +908,29 @@ out: EXPORT_SYMBOL_GPL(vfs_kern_mount); +static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype) +{ + int err; + const char *subtype = strchr(fstype, '.'); + if (subtype) { + subtype++; + err = -EINVAL; + if (!subtype[0]) + goto err; + } else + subtype = ""; + + mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL); + err = -ENOMEM; + if (!mnt->mnt_sb->s_subtype) + goto err; + return mnt; + + err: + mntput(mnt); + return ERR_PTR(err); +} + struct vfsmount * do_kern_mount(const char *fstype, int flags, const char *name, void *data) { @@ -915,6 +939,9 @@ do_kern_mount(const char *fstype, int flags, const char *name, void *data) if (!type) return ERR_PTR(-ENODEV); mnt = vfs_kern_mount(type, flags, name, data); + if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) && + !mnt->mnt_sb->s_subtype) + mnt = fs_set_subtype(mnt, fstype); put_filesystem(type); return mnt; } diff --git a/include/linux/fs.h b/include/linux/fs.h index 986b5d5d369f..527a09a82297 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -92,6 +92,7 @@ extern int dir_notify_enable; /* public flags for file_system_type */ #define FS_REQUIRES_DEV 1 #define FS_BINARY_MOUNTDATA 2 +#define FS_HAS_SUBTYPE 4 #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() * during rename() internally. @@ -961,6 +962,12 @@ struct super_block { /* Granularity of c/m/atime in ns. Cannot be worse than a second */ u32 s_time_gran; + + /* + * Filesystem subtype. If non-empty the filesystem type field + * in /proc/mounts will be "type.subtype" + */ + char *s_subtype; }; extern struct timespec current_fs_time(struct super_block *sb); -- cgit v1.2.3 From 7695650a924a6859910c8c19dfa43b4d08224d66 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 8 May 2007 00:25:45 -0700 Subject: Fix race between proc_get_inode() and remove_proc_entry() proc_lookup remove_proc_entry =========== ================= lock_kernel(); spin_lock(&proc_subdir_lock); [find PDE with refcount 0] spin_unlock(&proc_subdir_lock); spin_lock(&proc_subdir_lock); [find PDE with refcount 0] [check refcount and free PDE] spin_unlock(&proc_subdir_lock); proc_get_inode: de_get(de); /* boom */ Signed-off-by: Alexey Dobriyan Cc: "Eric W. 
Biederman" Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/generic.c | 2 ++ fs/proc/inode.c | 12 ++++-------- include/linux/proc_fs.h | 3 +++ 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 775fb21294d8..22a08ff3475d 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -398,6 +398,7 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { unsigned int ino = de->low_ino; + de_get(de); spin_unlock(&proc_subdir_lock); error = -EINVAL; inode = proc_get_inode(dir->i_sb, ino, de); @@ -414,6 +415,7 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam d_add(dentry, inode); return NULL; } + de_put(de); return ERR_PTR(error); } diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 22b1158389ae..d1de6378930c 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -21,7 +21,7 @@ #include "internal.h" -static inline struct proc_dir_entry * de_get(struct proc_dir_entry *de) +struct proc_dir_entry *de_get(struct proc_dir_entry *de) { if (de) atomic_inc(&de->count); @@ -31,7 +31,7 @@ static inline struct proc_dir_entry * de_get(struct proc_dir_entry *de) /* * Decrements the use count and checks for deferred deletion. */ -static void de_put(struct proc_dir_entry *de) +void de_put(struct proc_dir_entry *de) { if (de) { lock_kernel(); @@ -146,11 +146,6 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, { struct inode * inode; - /* - * Increment the use count so the dir entry can't disappear. - */ - de_get(de); - WARN_ON(de && de->deleted); if (de != NULL && !try_module_get(de->owner)) @@ -184,7 +179,6 @@ out_ino: if (de != NULL) module_put(de->owner); out_mod: - de_put(de); return NULL; } @@ -199,6 +193,7 @@ int proc_fill_super(struct super_block *s, void *data, int silent) s->s_op = &proc_sops; s->s_time_gran = 1; + de_get(&proc_root); root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root); if (!root_inode) goto out_no_root; @@ -212,6 +207,7 @@ int proc_fill_super(struct super_block *s, void *data, int silent) out_no_root: printk("proc_read_super: get root inode failed\n"); iput(root_inode); + de_put(&proc_root); return -ENOMEM; } MODULE_LICENSE("GPL"); diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index f4f7a63cae1f..3469f96bc8b2 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -106,6 +106,9 @@ int task_statm(struct mm_struct *, int *, int *, int *, int *); char *task_mem(struct mm_struct *, char *); void clear_refs_smap(struct mm_struct *mm); +struct proc_dir_entry *de_get(struct proc_dir_entry *de); +void de_put(struct proc_dir_entry *de); + extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent); extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); -- cgit v1.2.3 From c23fbb6bcb3eb9cdf39a103edadf57bde8ce309c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 8 May 2007 00:26:18 -0700 Subject: VFS: delay the dentry name generation on sockets and pipes 1) Introduces a new method in 'struct dentry_operations'. This method called d_dname() might be called from d_path() to build a pathname for special filesystems. It is called without locks. 
Future patches (if we succeed in having one common dentry for all pipes/sockets) may need to change prototype of this method, but we now use : char *d_dname(struct dentry *dentry, char *buffer, int buflen); 2) Adds a dynamic_dname() helper function that eases d_dname() implementations 3) Defines d_dname method for sockets : No more sprintf() at socket creation. This is delayed up to the moment someone does an access to /proc/pid/fd/... 4) Defines d_dname method for pipes : No more sprintf() at pipe creation. This is delayed up to the moment someone does an access to /proc/pid/fd/... A benchmark consisting of 1.000.000 calls to pipe()/close()/close() gives a *nice* speedup on my Pentium(M) 1.6 Ghz : 3.090 s instead of 3.450 s Signed-off-by: Eric Dumazet Acked-by: Christoph Hellwig Acked-by: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/Locking | 2 ++ Documentation/filesystems/vfs.txt | 23 ++++++++++++++++++++++- fs/dcache.c | 31 +++++++++++++++++++++++++++++++ fs/pipe.c | 18 ++++++++++++------ include/linux/dcache.h | 6 ++++++ net/socket.c | 20 +++++++++++++------- 6 files changed, 86 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 28bfea75bcf2..59c14159cc47 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -15,6 +15,7 @@ prototypes: int (*d_delete)(struct dentry *); void (*d_release)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); + char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen); locking rules: none have BKL @@ -25,6 +26,7 @@ d_compare: no yes no no d_delete: yes no yes no d_release: no no no yes d_iput: no no no yes +d_dname: no no no no --------------------------- inode_operations --------------------------- prototypes: diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index ea271f2d3954..a47cc819f37b 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -827,7 +827,7 @@ This describes how a filesystem can overload the standard dentry operations. Dentries and the dcache are the domain of the VFS and the individual filesystem implementations. Device drivers have no business here. These methods may be set to NULL, as they are either optional or -the VFS uses a default. As of kernel 2.6.13, the following members are +the VFS uses a default. As of kernel 2.6.22, the following members are defined: struct dentry_operations { @@ -837,6 +837,7 @@ struct dentry_operations { int (*d_delete)(struct dentry *); void (*d_release)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); + char *(*d_dname)(struct dentry *, char *, int); }; d_revalidate: called when the VFS needs to revalidate a dentry. This @@ -859,6 +860,26 @@ struct dentry_operations { VFS calls iput(). If you define this method, you must call iput() yourself + d_dname: called when the pathname of a dentry should be generated. + Usefull for some pseudo filesystems (sockfs, pipefs, ...) to delay + pathname generation. (Instead of doing it when dentry is created, + its done only when the path is needed.). Real filesystems probably + dont want to use it, because their dentries are present in global + dcache hash, so their hash should be an invariant. As no lock is + held, d_dname() should not try to modify the dentry itself, unless + appropriate SMP safety is used. CAUTION : d_path() logic is quite + tricky. 
The correct way to return for example "Hello" is to put it + at the end of the buffer, and returns a pointer to the first char. + dynamic_dname() helper function is provided to take care of this. + +Example : + +static char *pipefs_dname(struct dentry *dent, char *buffer, int buflen) +{ + return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]", + dentry->d_inode->i_ino); +} + Each dentry has a pointer to its parent dentry, as well as a hash list of child dentries. Child dentries are basically like files in a directory. diff --git a/fs/dcache.c b/fs/dcache.c index 268da2e2bc09..2135ab8bb103 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1853,6 +1853,16 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt, struct vfsmount *rootmnt; struct dentry *root; + /* + * We have various synthetic filesystems that never get mounted. On + * these filesystems dentries are never used for lookup purposes, and + * thus don't need to be hashed. They also don't need a name until a + * user wants to identify the object in /proc/pid/fd/. The little hack + * below allows us to generate a name for these objects on demand: + */ + if (dentry->d_op && dentry->d_op->d_dname) + return dentry->d_op->d_dname(dentry, buf, buflen); + read_lock(¤t->fs->lock); rootmnt = mntget(current->fs->rootmnt); root = dget(current->fs->root); @@ -1865,6 +1875,27 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt, return res; } +/* + * Helper function for dentry_operations.d_dname() members + */ +char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, + const char *fmt, ...) +{ + va_list args; + char temp[64]; + int sz; + + va_start(args, fmt); + sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; + va_end(args); + + if (sz > sizeof(temp) || sz > buflen) + return ERR_PTR(-ENAMETOOLONG); + + buffer += buflen - sz; + return memcpy(buffer, temp, sz); +} + /* * NOTE! The user-level library version returns a * character pointer. The kernel system call just diff --git a/fs/pipe.c b/fs/pipe.c index ebafde7d6aba..3a89592bdf57 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -841,8 +841,18 @@ static int pipefs_delete_dentry(struct dentry *dentry) return 0; } +/* + * pipefs_dname() is called from d_path(). 
+ */ +static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen) +{ + return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]", + dentry->d_inode->i_ino); +} + static struct dentry_operations pipefs_dentry_operations = { .d_delete = pipefs_delete_dentry, + .d_dname = pipefs_dname, }; static struct inode * get_pipe_inode(void) @@ -888,8 +898,7 @@ struct file *create_write_pipe(void) struct inode *inode; struct file *f; struct dentry *dentry; - char name[32]; - struct qstr this; + struct qstr name = { .name = "" }; f = get_empty_filp(); if (!f) @@ -899,11 +908,8 @@ struct file *create_write_pipe(void) if (!inode) goto err_file; - this.len = sprintf(name, "[%lu]", inode->i_ino); - this.name = name; - this.hash = 0; err = -ENOMEM; - dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this); + dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &name); if (!dentry) goto err_inode; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 63f64a9a5bf7..aab53df4fafa 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -133,6 +133,7 @@ struct dentry_operations { int (*d_delete)(struct dentry *); void (*d_release)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); + char *(*d_dname)(struct dentry *, char *, int); }; /* the dentry parameter passed to d_hash and d_compare is the parent @@ -293,6 +294,11 @@ extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *); /* validate "insecure" dentry pointer */ extern int d_validate(struct dentry *, struct dentry *); +/* + * helper function for dentry_operations.d_dname() members + */ +extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); + extern char * d_path(struct dentry *, struct vfsmount *, char *, int); /* Allocation counts.. */ diff --git a/net/socket.c b/net/socket.c index 759825b7ca26..98a8f67abbfc 100644 --- a/net/socket.c +++ b/net/socket.c @@ -313,8 +313,19 @@ static int sockfs_delete_dentry(struct dentry *dentry) dentry->d_flags |= DCACHE_UNHASHED; return 0; } + +/* + * sockfs_dname() is called from d_path(). + */ +static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) +{ + return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]", + dentry->d_inode->i_ino); +} + static struct dentry_operations sockfs_dentry_operations = { .d_delete = sockfs_delete_dentry, + .d_dname = sockfs_dname, }; /* @@ -354,14 +365,9 @@ static int sock_alloc_fd(struct file **filep) static int sock_attach_fd(struct socket *sock, struct file *file) { - struct qstr this; - char name[32]; - - this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino); - this.name = name; - this.hash = 0; + struct qstr name = { .name = "" }; - file->f_path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this); + file->f_path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name); if (unlikely(!file->f_path.dentry)) return -ENOMEM; -- cgit v1.2.3 From 9b3af29bf33bfe08c604769632799d27d56ae103 Mon Sep 17 00:00:00 2001 From: Ananth N Mavinakayanahalli Date: Tue, 8 May 2007 00:26:23 -0700 Subject: Kprobes: Make kprobe.symbol_name const Kprobes doesn't scribble the kprobe.symbol_name field. Its only set by the module when registering the probe. Modules that exercise good hygiene using the "const" qualifier will see warnings... 
warning: assignment discards qualifiers from pointer target type Make struct kprobe.symbol_name const char * Signed-off-by: Ananth N Mavinakayanahalli Signed-off-by: Jim Keniston Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kprobes.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 769be39b9681..f26460700379 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -78,7 +78,7 @@ struct kprobe { kprobe_opcode_t *addr; /* Allow user to indicate symbol name of the probe point */ - char *symbol_name; + const char *symbol_name; /* Offset into the symbol */ unsigned int offset; -- cgit v1.2.3 From 7c4e95bf483231d55bc0d491bc585bb9b7e852b8 Mon Sep 17 00:00:00 2001 From: Klaus Kudielka Date: Tue, 8 May 2007 00:26:25 -0700 Subject: fix cyclades.h for x86_64 (and probably others) At least on x86_64 the present cyclades.h is broken due to the wrong size of uclong. This affects, of course, both the kernel and the user-level utilities. The symptom is that cyzload refuses to load the firmware. I also managed to freeze the machine when unloading the module. The patch below fixes this in an architecture-independent way. I have tested it with 2.6.19 and the driver works fine again with a Cyclades-Z on an Athlon 64 X2. [akpm@linux-foundation.org: fix warnings] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/cyclades.c | 2 +- include/linux/cyclades.h | 14 ++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 16dc5d1d3cb4..7f73bff0c81c 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -1533,7 +1533,7 @@ cyz_issue_cmd(struct cyclades_card *cinfo, struct FIRM_ID __iomem *firm_id; struct ZFW_CTRL __iomem *zfw_ctrl; struct BOARD_CTRL __iomem *board_ctrl; - unsigned long __iomem *pci_doorbell; + uclong __iomem *pci_doorbell; int index; firm_id = cinfo->base_addr + ID_ADDRESS; diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index 46d8254c1a79..a6865f0479f7 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -149,14 +149,12 @@ struct CYZ_BOOT_CTRL { * architectures and compilers. */ -#if defined(__alpha__) -typedef unsigned long ucdouble; /* 64 bits, unsigned */ -typedef unsigned int uclong; /* 32 bits, unsigned */ -#else -typedef unsigned long uclong; /* 32 bits, unsigned */ -#endif -typedef unsigned short ucshort; /* 16 bits, unsigned */ -typedef unsigned char ucchar; /* 8 bits, unsigned */ +#include + +typedef __u64 ucdouble; /* 64 bits, unsigned */ +typedef __u32 uclong; /* 32 bits, unsigned */ +typedef __u16 ucshort; /* 16 bits, unsigned */ +typedef __u8 ucchar; /* 8 bits, unsigned */ /* * Memory Window Sizes -- cgit v1.2.3 From 1a86b5e34e4d09e3246a983c53929ce38af52275 Mon Sep 17 00:00:00 2001 From: Klaus Kudielka Date: Tue, 8 May 2007 00:26:26 -0700 Subject: cyclades: remove custom types Switch from private uclong, etc over to standard types. 
Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/cyclades.c | 49 +++++++------ include/linux/cyclades.h | 180 ++++++++++++++++++++++++----------------------- 2 files changed, 115 insertions(+), 114 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 7f73bff0c81c..29b5375feeba 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -669,7 +669,6 @@ spin_unlock_irqrestore(&cy_card[info->card].card_lock, flags); \ } while (0) -#include #include #include @@ -845,7 +844,7 @@ MODULE_DEVICE_TABLE(pci, cy_pci_dev_id); static void cy_start(struct tty_struct *); static void set_line_char(struct cyclades_port *); -static int cyz_issue_cmd(struct cyclades_card *, uclong, ucchar, uclong); +static int cyz_issue_cmd(struct cyclades_card *, __u32, __u8, __u32); #ifdef CONFIG_ISA static unsigned detect_isa_irq(void __iomem *); #endif /* CONFIG_ISA */ @@ -1498,7 +1497,7 @@ static irqreturn_t cyy_interrupt(int irq, void *dev_id) static int cyz_fetch_msg(struct cyclades_card *cinfo, - uclong * channel, ucchar * cmd, uclong * param) + __u32 * channel, __u8 * cmd, __u32 * param) { struct FIRM_ID __iomem *firm_id; struct ZFW_CTRL __iomem *zfw_ctrl; @@ -1518,7 +1517,7 @@ cyz_fetch_msg(struct cyclades_card *cinfo, if (loc_doorbell) { *cmd = (char)(0xff & loc_doorbell); *channel = cy_readl(&board_ctrl->fwcmd_channel); - *param = (uclong) cy_readl(&board_ctrl->fwcmd_param); + *param = (__u32) cy_readl(&board_ctrl->fwcmd_param); cy_writel(&((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))-> loc_doorbell, 0xffffffff); return 1; @@ -1528,12 +1527,12 @@ cyz_fetch_msg(struct cyclades_card *cinfo, static int cyz_issue_cmd(struct cyclades_card *cinfo, - uclong channel, ucchar cmd, uclong param) + __u32 channel, __u8 cmd, __u32 param) { struct FIRM_ID __iomem *firm_id; struct ZFW_CTRL __iomem *zfw_ctrl; struct BOARD_CTRL __iomem *board_ctrl; - uclong __iomem *pci_doorbell; + __u32 __iomem *pci_doorbell; int index; firm_id = cinfo->base_addr + ID_ADDRESS; @@ -1574,7 +1573,7 @@ cyz_handle_rx(struct cyclades_port *info, #else char data; #endif - volatile uclong rx_put, rx_get, new_rx_get, rx_bufsize, rx_bufaddr; + volatile __u32 rx_put, rx_get, new_rx_get, rx_bufsize, rx_bufaddr; rx_get = new_rx_get = cy_readl(&buf_ctrl->rx_get); rx_put = cy_readl(&buf_ctrl->rx_put); @@ -1670,7 +1669,7 @@ cyz_handle_tx(struct cyclades_port *info, #ifdef BLOCKMOVE int small_count; #endif - volatile uclong tx_put, tx_get, tx_bufsize, tx_bufaddr; + volatile __u32 tx_put, tx_get, tx_bufsize, tx_bufaddr; if (info->xmit_cnt <= 0) /* Nothing to transmit */ return; @@ -1755,10 +1754,10 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) static volatile struct BOARD_CTRL __iomem *board_ctrl; static volatile struct CH_CTRL __iomem *ch_ctrl; static volatile struct BUF_CTRL __iomem *buf_ctrl; - uclong channel; - ucchar cmd; - uclong param; - uclong hw_ver, fw_ver; + __u32 channel; + __u8 cmd; + __u32 param; + __u32 hw_ver, fw_ver; int special_count; int delta_count; @@ -1892,7 +1891,7 @@ static void cyz_rx_restart(unsigned long arg) struct cyclades_port *info = (struct cyclades_port *)arg; int retval; int card = info->card; - uclong channel = (info->line) - (cy_card[card].first_line); + __u32 channel = (info->line) - (cy_card[card].first_line); unsigned long flags; CY_LOCK(info, flags); @@ -2289,7 +2288,7 @@ static void shutdown(struct cyclades_port *info) if (!info->tty || (info->tty->termios->c_cflag & HUPCL)) { 
cy_writel(&ch_ctrl[channel].rs_control, - (uclong)(cy_readl(&ch_ctrl[channel].rs_control)& + (__u32)(cy_readl(&ch_ctrl[channel].rs_control)& ~(C_RS_RTS | C_RS_DTR))); retval = cyz_issue_cmd(&cy_card[info->card], channel, C_CM_IOCTLM, 0L); @@ -3026,7 +3025,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty) static volatile struct CH_CTRL *ch_ctrl; static volatile struct BUF_CTRL *buf_ctrl; int char_count; - volatile uclong tx_put, tx_get, tx_bufsize; + volatile __u32 tx_put, tx_get, tx_bufsize; firm_id = cy_card[card].base_addr + ID_ADDRESS; zfw_ctrl = cy_card[card].base_addr + @@ -3055,10 +3054,10 @@ static int cy_chars_in_buffer(struct tty_struct *tty) * ------------------------------------------------------------ */ -static void cyy_baud_calc(struct cyclades_port *info, uclong baud) +static void cyy_baud_calc(struct cyclades_port *info, __u32 baud) { int co, co_val, bpr; - uclong cy_clock = ((info->chip_rev >= CD1400_REV_J) ? 60000000 : + __u32 cy_clock = ((info->chip_rev >= CD1400_REV_J) ? 60000000 : 25000000); if (baud == 0) { @@ -3348,7 +3347,7 @@ static void set_line_char(struct cyclades_port *info) struct BOARD_CTRL __iomem *board_ctrl; struct CH_CTRL __iomem *ch_ctrl; struct BUF_CTRL __iomem *buf_ctrl; - uclong sw_flow; + __u32 sw_flow; int retval; firm_id = cy_card[card].base_addr + ID_ADDRESS; @@ -4721,7 +4720,7 @@ static int __init cy_detect_isa(void) #endif /* CONFIG_ISA */ } /* cy_detect_isa */ -static void plx_init(void __iomem * addr, uclong initctl) +static void plx_init(void __iomem * addr, __u32 initctl) { /* Reset PLX */ cy_writel(addr + initctl, cy_readl(addr + initctl) | 0x40000000); @@ -4747,14 +4746,14 @@ static int __init cy_detect_pci(void) struct pci_dev *pdev = NULL; unsigned char cyy_rev_id; unsigned char cy_pci_irq = 0; - uclong cy_pci_phys0, cy_pci_phys2; + __u32 cy_pci_phys0, cy_pci_phys2; void __iomem *cy_pci_addr0, *cy_pci_addr2; unsigned short i, j, cy_pci_nchan, plx_ver; unsigned short device_id, dev_index = 0; - uclong mailbox; - uclong ZeIndex = 0; + __u32 mailbox; + __u32 ZeIndex = 0; void __iomem *Ze_addr0[NR_CARDS], *Ze_addr2[NR_CARDS]; - uclong Ze_phys0[NR_CARDS], Ze_phys2[NR_CARDS]; + __u32 Ze_phys0[NR_CARDS], Ze_phys2[NR_CARDS]; unsigned char Ze_irq[NR_CARDS]; struct pci_dev *Ze_pdev[NR_CARDS]; @@ -4959,7 +4958,7 @@ static int __init cy_detect_pci(void) cy_pci_irq); mailbox = - (uclong)cy_readl(&((struct RUNTIME_9060 __iomem *) + (__u32)cy_readl(&((struct RUNTIME_9060 __iomem *) cy_pci_addr0)->mail_box_0); if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) { @@ -5122,7 +5121,7 @@ static int __init cy_detect_pci(void) Ze_pdev[j] = Ze_pdev[j + 1]; } ZeIndex--; - mailbox = (uclong)cy_readl(&((struct RUNTIME_9060 __iomem *) + mailbox = (__u32)cy_readl(&((struct RUNTIME_9060 __iomem *) cy_pci_addr0)->mail_box_0); #ifdef CY_PCI_DEBUG printk("Cyclades-Z/PCI: relocate winaddr=0x%lx ctladdr=0x%lx\n", diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index a6865f0479f7..4c5b4763f5b8 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -67,6 +67,8 @@ #ifndef _LINUX_CYCLADES_H #define _LINUX_CYCLADES_H +#include + struct cyclades_monitor { unsigned long int_count; unsigned long char_count; @@ -172,24 +174,24 @@ typedef __u8 ucchar; /* 8 bits, unsigned */ */ struct CUSTOM_REG { - uclong fpga_id; /* FPGA Identification Register */ - uclong fpga_version; /* FPGA Version Number Register */ - uclong cpu_start; /* CPU start Register (write) */ - uclong cpu_stop; /* CPU stop Register (write) */ - uclong misc_reg; /* 
Miscelaneous Register */ - uclong idt_mode; /* IDT mode Register */ - uclong uart_irq_status; /* UART IRQ status Register */ - uclong clear_timer0_irq; /* Clear timer interrupt Register */ - uclong clear_timer1_irq; /* Clear timer interrupt Register */ - uclong clear_timer2_irq; /* Clear timer interrupt Register */ - uclong test_register; /* Test Register */ - uclong test_count; /* Test Count Register */ - uclong timer_select; /* Timer select register */ - uclong pr_uart_irq_status; /* Prioritized UART IRQ stat Reg */ - uclong ram_wait_state; /* RAM wait-state Register */ - uclong uart_wait_state; /* UART wait-state Register */ - uclong timer_wait_state; /* timer wait-state Register */ - uclong ack_wait_state; /* ACK wait State Register */ + __u32 fpga_id; /* FPGA Identification Register */ + __u32 fpga_version; /* FPGA Version Number Register */ + __u32 cpu_start; /* CPU start Register (write) */ + __u32 cpu_stop; /* CPU stop Register (write) */ + __u32 misc_reg; /* Miscelaneous Register */ + __u32 idt_mode; /* IDT mode Register */ + __u32 uart_irq_status; /* UART IRQ status Register */ + __u32 clear_timer0_irq; /* Clear timer interrupt Register */ + __u32 clear_timer1_irq; /* Clear timer interrupt Register */ + __u32 clear_timer2_irq; /* Clear timer interrupt Register */ + __u32 test_register; /* Test Register */ + __u32 test_count; /* Test Count Register */ + __u32 timer_select; /* Timer select register */ + __u32 pr_uart_irq_status; /* Prioritized UART IRQ stat Reg */ + __u32 ram_wait_state; /* RAM wait-state Register */ + __u32 uart_wait_state; /* UART wait-state Register */ + __u32 timer_wait_state; /* timer wait-state Register */ + __u32 ack_wait_state; /* ACK wait State Register */ }; /* @@ -199,34 +201,34 @@ struct CUSTOM_REG { */ struct RUNTIME_9060 { - uclong loc_addr_range; /* 00h - Local Address Range */ - uclong loc_addr_base; /* 04h - Local Address Base */ - uclong loc_arbitr; /* 08h - Local Arbitration */ - uclong endian_descr; /* 0Ch - Big/Little Endian Descriptor */ - uclong loc_rom_range; /* 10h - Local ROM Range */ - uclong loc_rom_base; /* 14h - Local ROM Base */ - uclong loc_bus_descr; /* 18h - Local Bus descriptor */ - uclong loc_range_mst; /* 1Ch - Local Range for Master to PCI */ - uclong loc_base_mst; /* 20h - Local Base for Master PCI */ - uclong loc_range_io; /* 24h - Local Range for Master IO */ - uclong pci_base_mst; /* 28h - PCI Base for Master PCI */ - uclong pci_conf_io; /* 2Ch - PCI configuration for Master IO */ - uclong filler1; /* 30h */ - uclong filler2; /* 34h */ - uclong filler3; /* 38h */ - uclong filler4; /* 3Ch */ - uclong mail_box_0; /* 40h - Mail Box 0 */ - uclong mail_box_1; /* 44h - Mail Box 1 */ - uclong mail_box_2; /* 48h - Mail Box 2 */ - uclong mail_box_3; /* 4Ch - Mail Box 3 */ - uclong filler5; /* 50h */ - uclong filler6; /* 54h */ - uclong filler7; /* 58h */ - uclong filler8; /* 5Ch */ - uclong pci_doorbell; /* 60h - PCI to Local Doorbell */ - uclong loc_doorbell; /* 64h - Local to PCI Doorbell */ - uclong intr_ctrl_stat; /* 68h - Interrupt Control/Status */ - uclong init_ctrl; /* 6Ch - EEPROM control, Init Control, etc */ + __u32 loc_addr_range; /* 00h - Local Address Range */ + __u32 loc_addr_base; /* 04h - Local Address Base */ + __u32 loc_arbitr; /* 08h - Local Arbitration */ + __u32 endian_descr; /* 0Ch - Big/Little Endian Descriptor */ + __u32 loc_rom_range; /* 10h - Local ROM Range */ + __u32 loc_rom_base; /* 14h - Local ROM Base */ + __u32 loc_bus_descr; /* 18h - Local Bus descriptor */ + __u32 loc_range_mst; /* 1Ch - Local 
Range for Master to PCI */ + __u32 loc_base_mst; /* 20h - Local Base for Master PCI */ + __u32 loc_range_io; /* 24h - Local Range for Master IO */ + __u32 pci_base_mst; /* 28h - PCI Base for Master PCI */ + __u32 pci_conf_io; /* 2Ch - PCI configuration for Master IO */ + __u32 filler1; /* 30h */ + __u32 filler2; /* 34h */ + __u32 filler3; /* 38h */ + __u32 filler4; /* 3Ch */ + __u32 mail_box_0; /* 40h - Mail Box 0 */ + __u32 mail_box_1; /* 44h - Mail Box 1 */ + __u32 mail_box_2; /* 48h - Mail Box 2 */ + __u32 mail_box_3; /* 4Ch - Mail Box 3 */ + __u32 filler5; /* 50h */ + __u32 filler6; /* 54h */ + __u32 filler7; /* 58h */ + __u32 filler8; /* 5Ch */ + __u32 pci_doorbell; /* 60h - PCI to Local Doorbell */ + __u32 loc_doorbell; /* 64h - Local to PCI Doorbell */ + __u32 intr_ctrl_stat; /* 68h - Interrupt Control/Status */ + __u32 init_ctrl; /* 6Ch - EEPROM control, Init Control, etc */ }; /* Values for the Local Base Address re-map register */ @@ -268,8 +270,8 @@ struct RUNTIME_9060 { #define ZF_TINACT ZF_TINACT_DEF struct FIRM_ID { - uclong signature; /* ZFIRM/U signature */ - uclong zfwctrl_addr; /* pointer to ZFW_CTRL structure */ + __u32 signature; /* ZFIRM/U signature */ + __u32 zfwctrl_addr; /* pointer to ZFW_CTRL structure */ }; /* Op. System id */ @@ -406,24 +408,24 @@ struct FIRM_ID { */ struct CH_CTRL { - uclong op_mode; /* operation mode */ - uclong intr_enable; /* interrupt masking */ - uclong sw_flow; /* SW flow control */ - uclong flow_status; /* output flow status */ - uclong comm_baud; /* baud rate - numerically specified */ - uclong comm_parity; /* parity */ - uclong comm_data_l; /* data length/stop */ - uclong comm_flags; /* other flags */ - uclong hw_flow; /* HW flow control */ - uclong rs_control; /* RS-232 outputs */ - uclong rs_status; /* RS-232 inputs */ - uclong flow_xon; /* xon char */ - uclong flow_xoff; /* xoff char */ - uclong hw_overflow; /* hw overflow counter */ - uclong sw_overflow; /* sw overflow counter */ - uclong comm_error; /* frame/parity error counter */ - uclong ichar; - uclong filler[7]; + __u32 op_mode; /* operation mode */ + __u32 intr_enable; /* interrupt masking */ + __u32 sw_flow; /* SW flow control */ + __u32 flow_status; /* output flow status */ + __u32 comm_baud; /* baud rate - numerically specified */ + __u32 comm_parity; /* parity */ + __u32 comm_data_l; /* data length/stop */ + __u32 comm_flags; /* other flags */ + __u32 hw_flow; /* HW flow control */ + __u32 rs_control; /* RS-232 outputs */ + __u32 rs_status; /* RS-232 inputs */ + __u32 flow_xon; /* xon char */ + __u32 flow_xoff; /* xoff char */ + __u32 hw_overflow; /* hw overflow counter */ + __u32 sw_overflow; /* sw overflow counter */ + __u32 comm_error; /* frame/parity error counter */ + __u32 ichar; + __u32 filler[7]; }; @@ -433,18 +435,18 @@ struct CH_CTRL { */ struct BUF_CTRL { - uclong flag_dma; /* buffers are in Host memory */ - uclong tx_bufaddr; /* address of the tx buffer */ - uclong tx_bufsize; /* tx buffer size */ - uclong tx_threshold; /* tx low water mark */ - uclong tx_get; /* tail index tx buf */ - uclong tx_put; /* head index tx buf */ - uclong rx_bufaddr; /* address of the rx buffer */ - uclong rx_bufsize; /* rx buffer size */ - uclong rx_threshold; /* rx high water mark */ - uclong rx_get; /* tail index rx buf */ - uclong rx_put; /* head index rx buf */ - uclong filler[5]; /* filler to align structures */ + __u32 flag_dma; /* buffers are in Host memory */ + __u32 tx_bufaddr; /* address of the tx buffer */ + __u32 tx_bufsize; /* tx buffer size */ + __u32 tx_threshold; /* 
tx low water mark */ + __u32 tx_get; /* tail index tx buf */ + __u32 tx_put; /* head index tx buf */ + __u32 rx_bufaddr; /* address of the rx buffer */ + __u32 rx_bufsize; /* rx buffer size */ + __u32 rx_threshold; /* rx high water mark */ + __u32 rx_get; /* tail index rx buf */ + __u32 rx_put; /* head index rx buf */ + __u32 filler[5]; /* filler to align structures */ }; /* @@ -455,27 +457,27 @@ struct BUF_CTRL { struct BOARD_CTRL { /* static info provided by the on-board CPU */ - uclong n_channel; /* number of channels */ - uclong fw_version; /* firmware version */ + __u32 n_channel; /* number of channels */ + __u32 fw_version; /* firmware version */ /* static info provided by the driver */ - uclong op_system; /* op_system id */ - uclong dr_version; /* driver version */ + __u32 op_system; /* op_system id */ + __u32 dr_version; /* driver version */ /* board control area */ - uclong inactivity; /* inactivity control */ + __u32 inactivity; /* inactivity control */ /* host to FW commands */ - uclong hcmd_channel; /* channel number */ - uclong hcmd_param; /* pointer to parameters */ + __u32 hcmd_channel; /* channel number */ + __u32 hcmd_param; /* pointer to parameters */ /* FW to Host commands */ - uclong fwcmd_channel; /* channel number */ - uclong fwcmd_param; /* pointer to parameters */ - uclong zf_int_queue_addr; /* offset for INT_QUEUE structure */ + __u32 fwcmd_channel; /* channel number */ + __u32 fwcmd_param; /* pointer to parameters */ + __u32 zf_int_queue_addr; /* offset for INT_QUEUE structure */ /* filler so the structures are aligned */ - uclong filler[6]; + __u32 filler[6]; }; /* Host Interrupt Queue */ -- cgit v1.2.3 From 9adef58b1d4fbb58d7daed931b6790c5a3b7543a Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 8 May 2007 00:26:42 -0700 Subject: futex: get_futex_key, get_key_refs and drop_key_refs lguest uses the convenient futex infrastructure for inter-domain I/O, so expose get_futex_key, get_key_refs (renamed get_futex_key_refs) and drop_key_refs (renamed drop_futex_key_refs). Also means we need to expose the union that these use. No code changes. Signed-off-by: Rusty Russell Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/futex.h | 29 +++++++++++++++++++++++++++++ kernel/futex.c | 50 ++++++++++++++------------------------------------ 2 files changed, 43 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/include/linux/futex.h b/include/linux/futex.h index 3f153b4e156c..820125c628c1 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -100,6 +100,35 @@ long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); +/* + * Futexes are matched on equal values of this key. + * The key type depends on whether it's a shared or private mapping. + * Don't rearrange members without looking at hash_futex(). + * + * offset is aligned to a multiple of sizeof(u32) (== 4) by definition. + * We set bit 0 to indicate if it's an inode-based key. 
+ */ +union futex_key { + struct { + unsigned long pgoff; + struct inode *inode; + int offset; + } shared; + struct { + unsigned long address; + struct mm_struct *mm; + int offset; + } private; + struct { + unsigned long word; + void *ptr; + int offset; + } both; +}; +int get_futex_key(u32 __user *uaddr, union futex_key *key); +void get_futex_key_refs(union futex_key *key); +void drop_futex_key_refs(union futex_key *key); + #ifdef CONFIG_FUTEX extern void exit_robust_list(struct task_struct *curr); extern void exit_pi_state_list(struct task_struct *curr); diff --git a/kernel/futex.c b/kernel/futex.c index 5a270b5e3f95..7ae2f50641ed 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -48,38 +48,13 @@ #include #include #include +#include #include #include "rtmutex_common.h" #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) -/* - * Futexes are matched on equal values of this key. - * The key type depends on whether it's a shared or private mapping. - * Don't rearrange members without looking at hash_futex(). - * - * offset is aligned to a multiple of sizeof(u32) (== 4) by definition. - * We set bit 0 to indicate if it's an inode-based key. - */ -union futex_key { - struct { - unsigned long pgoff; - struct inode *inode; - int offset; - } shared; - struct { - unsigned long address; - struct mm_struct *mm; - int offset; - } private; - struct { - unsigned long word; - void *ptr; - int offset; - } both; -}; - /* * Priority Inheritance state: */ @@ -175,7 +150,7 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2) * * Should be called with ¤t->mm->mmap_sem but NOT any spinlocks. */ -static int get_futex_key(u32 __user *uaddr, union futex_key *key) +int get_futex_key(u32 __user *uaddr, union futex_key *key) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; @@ -246,6 +221,7 @@ static int get_futex_key(u32 __user *uaddr, union futex_key *key) } return err; } +EXPORT_SYMBOL_GPL(get_futex_key); /* * Take a reference to the resource addressed by a key. @@ -254,7 +230,7 @@ static int get_futex_key(u32 __user *uaddr, union futex_key *key) * NOTE: mmap_sem MUST be held between get_futex_key() and calling this * function, if it is called at all. mmap_sem keeps key->shared.inode valid. */ -static inline void get_key_refs(union futex_key *key) +inline void get_futex_key_refs(union futex_key *key) { if (key->both.ptr != 0) { if (key->both.offset & 1) @@ -263,12 +239,13 @@ static inline void get_key_refs(union futex_key *key) atomic_inc(&key->private.mm->mm_count); } } +EXPORT_SYMBOL_GPL(get_futex_key_refs); /* * Drop a reference to the resource addressed by a key. * The hash bucket spinlock must not be held. */ -static void drop_key_refs(union futex_key *key) +void drop_futex_key_refs(union futex_key *key) { if (key->both.ptr != 0) { if (key->both.offset & 1) @@ -277,6 +254,7 @@ static void drop_key_refs(union futex_key *key) mmdrop(key->private.mm); } } +EXPORT_SYMBOL_GPL(drop_futex_key_refs); static inline int get_futex_value_locked(u32 *dest, u32 __user *from) { @@ -873,7 +851,7 @@ static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2, this->lock_ptr = &hb2->lock; } this->key = key2; - get_key_refs(&key2); + get_futex_key_refs(&key2); drop_count++; if (ret - nr_wake >= nr_requeue) @@ -886,9 +864,9 @@ out_unlock: if (hb1 != hb2) spin_unlock(&hb2->lock); - /* drop_key_refs() must be called outside the spinlocks. */ + /* drop_futex_key_refs() must be called outside the spinlocks. 
*/ while (--drop_count >= 0) - drop_key_refs(&key1); + drop_futex_key_refs(&key1); out: up_read(¤t->mm->mmap_sem); @@ -906,7 +884,7 @@ queue_lock(struct futex_q *q, int fd, struct file *filp) init_waitqueue_head(&q->waiters); - get_key_refs(&q->key); + get_futex_key_refs(&q->key); hb = hash_futex(&q->key); q->lock_ptr = &hb->lock; @@ -925,7 +903,7 @@ static inline void queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) { spin_unlock(&hb->lock); - drop_key_refs(&q->key); + drop_futex_key_refs(&q->key); } /* @@ -980,7 +958,7 @@ static int unqueue_me(struct futex_q *q) ret = 1; } - drop_key_refs(&q->key); + drop_futex_key_refs(&q->key); return ret; } @@ -999,7 +977,7 @@ static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb) spin_unlock(&hb->lock); - drop_key_refs(&q->key); + drop_futex_key_refs(&q->key); } static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time) -- cgit v1.2.3 From 6ae9200f2cab7b328e505fc9a7021db64e0590cf Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 8 May 2007 00:26:47 -0700 Subject: enlarge console.name console.name[] is eight chars, but so is "earlyvga". So when we try to print console->name when using earlyvga it runs off the end of the string. Make it bigger. Diagnosed-by: Gerd Hoffmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/console.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/console.h b/include/linux/console.h index de25ee3b7919..fcc18e012e45 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -92,9 +92,8 @@ void give_up_console(const struct consw *sw); #define CON_BOOT (8) #define CON_ANYTIME (16) /* Safe to call when cpu is offline */ -struct console -{ - char name[8]; +struct console { + char name[16]; void (*write)(struct console *, const char *, unsigned); int (*read)(struct console *, char *, unsigned); struct tty_driver *(*device)(struct console *, int *); -- cgit v1.2.3 From 98a27ba485c7508ef9d9527fe06e4686f3a163dc Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 8 May 2007 00:26:56 -0700 Subject: tty: introduce no_tty and use it in selinux While researching the tty layer pid leaks I found a weird case in selinux when we drop a controlling tty because of inadequate permissions we don't do the normal hangup processing. Which is a problem if it happens the session leader has exec'd something that can no longer access the tty. We already have code in the kernel to handle this case in the form of the TIOCNOTTY ioctl. So this patch factors out a helper function that is the essence of that ioctl and calls it from the selinux code. This removes the inconsistency in handling dropping of a controlling tty and who knows it might even make some part of user space happy because it received a SIGHUP it was expecting. In addition since this removes the last user of proc_set_tty outside of tty_io.c proc_set_tty is made static and removed from tty.h Signed-off-by: Eric W. 
Biederman Acked-by: Alan Cox Cc: James Morris Cc: Stephen Smalley Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/tty_io.c | 19 +++++++++++++++---- include/linux/tty.h | 2 +- security/selinux/hooks.c | 7 +++---- 3 files changed, 19 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 39db186d5c5b..5d405a1bfbe3 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -154,6 +154,7 @@ int tty_ioctl(struct inode * inode, struct file * file, static int tty_fasync(int fd, struct file * filp, int on); static void release_tty(struct tty_struct *tty, int idx); static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); +static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); /** * alloc_tty_struct - allocate a tty object @@ -1558,6 +1559,18 @@ void disassociate_ctty(int on_exit) unlock_kernel(); } +/** + * + * no_tty - Ensure the current process does not have a controlling tty + */ +void no_tty(void) +{ + struct task_struct *tsk = current; + if (tsk->signal->leader) + disassociate_ctty(0); + proc_clear_tty(tsk); +} + /** * stop_tty - propogate flow control @@ -3280,9 +3293,7 @@ int tty_ioctl(struct inode * inode, struct file * file, case TIOCNOTTY: if (current->signal->tty != tty) return -ENOTTY; - if (current->signal->leader) - disassociate_ctty(0); - proc_clear_tty(current); + no_tty(); return 0; case TIOCSCTTY: return tiocsctty(tty, arg); @@ -3844,7 +3855,7 @@ static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty) tsk->signal->tty_old_pgrp = NULL; } -void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty) +static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty) { spin_lock_irq(&tsk->sighand->siglock); __proc_set_tty(tsk, tty); diff --git a/include/linux/tty.h b/include/linux/tty.h index dee72b9a20fb..bb4576085203 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -313,6 +313,7 @@ extern int tty_hung_up_p(struct file * filp); extern void do_SAK(struct tty_struct *tty); extern void __do_SAK(struct tty_struct *tty); extern void disassociate_ctty(int priv); +extern void no_tty(void); extern void tty_flip_buffer_push(struct tty_struct *tty); extern speed_t tty_get_baud_rate(struct tty_struct *tty); extern speed_t tty_termios_baud_rate(struct ktermios *termios); @@ -333,7 +334,6 @@ extern int tty_ioctl(struct inode *inode, struct file *file, unsigned int cmd, extern dev_t tty_devnum(struct tty_struct *tty); extern void proc_clear_tty(struct task_struct *p); -extern void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); extern struct tty_struct *get_current_tty(void); extern struct mutex tty_mutex; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 885a9a958b8d..fa9dbb6b110d 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1758,12 +1758,11 @@ static inline void flush_unauthorized_files(struct files_struct * files) } } file_list_unlock(); - - /* Reset controlling tty. */ - if (drop_tty) - proc_set_tty(current, NULL); } mutex_unlock(&tty_mutex); + /* Reset controlling tty. */ + if (drop_tty) + no_tty(); /* Revalidate access to inherited open files. */ -- cgit v1.2.3 From 1eeb66a1bb973534dc3d064920a5ca683823372e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 8 May 2007 00:27:03 -0700 Subject: move die notifier handling to common code This patch moves the die notifier handling to common code. 
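As an illustration of the consolidated interface (this sketch is not part of the patch: the module boilerplate, handler body and printk message are hypothetical, and only struct die_args, register_die_notifier() and unregister_die_notifier() come from the new <linux/kdebug.h> and kernel/die_notifier.c added below), a die-chain consumer now looks the same on every architecture:

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <linux/kdebug.h>

    /* Log every die event and let the rest of the chain run. */
    static int example_die_handler(struct notifier_block *nb,
                                   unsigned long val, void *data)
    {
            struct die_args *args = data;

            printk(KERN_INFO "die event %lu: %s (trap %d, signal %d)\n",
                   val, args->str ? args->str : "<null>",
                   args->trapnr, args->signr);
            return NOTIFY_DONE;
    }

    static struct notifier_block example_die_nb = {
            .notifier_call = example_die_handler,
    };

    static int __init example_init(void)
    {
            return register_die_notifier(&example_die_nb);
    }

    static void __exit example_exit(void)
    {
            unregister_die_notifier(&example_die_nb);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
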
Previous various architectures had exactly the same code for it. Note that the new code is compiled unconditionally, this should be understood as an appel to the other architecture maintainer to implement support for it aswell (aka sprinkling a notify_die or two in the proper place) arm had a notifiy_die that did something totally different, I renamed it to arm_notify_die as part of the patch and made it static to the file it's declared and used at. avr32 used to pass slightly less information through this interface and I brought it into line with the other architectures. [akpm@linux-foundation.org: build fix] [akpm@linux-foundation.org: fix vmalloc_sync_all bustage] [bryan.wu@analog.com: fix vmalloc_sync_all in nommu] Signed-off-by: Christoph Hellwig Cc: Cc: Russell King Signed-off-by: Bryan Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/kernel/traps.c | 14 +++++++------- arch/arm/mm/fault.c | 2 +- arch/avr32/kernel/kprobes.c | 2 +- arch/avr32/kernel/ptrace.c | 4 ++-- arch/avr32/kernel/traps.c | 14 -------------- arch/avr32/mm/fault.c | 2 +- arch/i386/kernel/crash.c | 2 +- arch/i386/kernel/kprobes.c | 2 +- arch/i386/kernel/nmi.c | 2 +- arch/i386/kernel/traps.c | 16 +--------------- arch/i386/mm/fault.c | 3 ++- arch/i386/oprofile/nmi_int.c | 2 +- arch/i386/oprofile/nmi_timer_int.c | 3 +-- arch/ia64/kernel/crash.c | 2 +- arch/ia64/kernel/kprobes.c | 2 +- arch/ia64/kernel/mca.c | 2 +- arch/ia64/kernel/process.c | 2 +- arch/ia64/kernel/traps.c | 18 +----------------- arch/ia64/mm/fault.c | 2 +- arch/ia64/sn/kernel/xpc_main.c | 4 ++-- arch/powerpc/kernel/kprobes.c | 2 +- arch/powerpc/kernel/traps.c | 16 +--------------- arch/powerpc/mm/fault.c | 2 +- arch/s390/kernel/kprobes.c | 2 +- arch/s390/kernel/traps.c | 16 +--------------- arch/s390/mm/fault.c | 2 +- arch/sparc/kernel/head.S | 2 +- arch/sparc/kernel/setup.c | 2 +- arch/sparc/kernel/traps.c | 2 +- arch/sparc/mm/fault.c | 2 +- arch/sparc/mm/srmmu.c | 2 +- arch/sparc64/kernel/kprobes.c | 2 +- arch/sparc64/kernel/traps.c | 15 +-------------- arch/sparc64/mm/fault.c | 2 +- arch/x86_64/kernel/crash.c | 2 +- arch/x86_64/kernel/kprobes.c | 2 +- arch/x86_64/kernel/mce.c | 2 +- arch/x86_64/kernel/nmi.c | 2 +- arch/x86_64/kernel/pci-gart.c | 2 +- arch/x86_64/kernel/process.c | 2 +- arch/x86_64/kernel/reboot.c | 2 +- arch/x86_64/kernel/smpboot.c | 2 +- arch/x86_64/kernel/traps.c | 18 +----------------- arch/x86_64/mm/fault.c | 3 ++- drivers/char/ipmi/ipmi_watchdog.c | 1 + include/asm-alpha/kdebug.h | 1 + include/asm-arm/kdebug.h | 1 + include/asm-arm/system.h | 2 +- include/asm-arm26/kdebug.h | 1 + include/asm-avr32/kdebug.h | 25 ++----------------------- include/asm-cris/kdebug.h | 1 + include/asm-frv/kdebug.h | 1 + include/asm-generic/kdebug.h | 8 ++++++++ include/asm-h8300/kdebug.h | 1 + include/asm-i386/kdebug.h | 24 ------------------------ include/asm-i386/pgtable.h | 2 -- include/asm-ia64/kdebug.h | 27 --------------------------- include/asm-m32r/kdebug.h | 1 + include/asm-m68k/kdebug.h | 1 + include/asm-m68knommu/kdebug.h | 1 + include/asm-mips/kdebug.h | 1 + include/asm-parisc/kdebug.h | 1 + include/asm-powerpc/kdebug.h | 18 ------------------ include/asm-ppc/kdebug.h | 1 + include/asm-s390/kdebug.h | 30 ------------------------------ include/asm-sh/kdebug.h | 1 + include/asm-sh64/kdebug.h | 1 + include/asm-sparc/kdebug.h | 4 ++++ include/asm-sparc64/kdebug.h | 23 ----------------------- include/asm-um/kdebug.h | 1 + include/asm-v850/kdebug.h | 1 + include/asm-x86_64/kdebug.h | 24 ------------------------ 
include/asm-x86_64/pgtable.h | 1 - include/asm-xtensa/kdebug.h | 1 + include/linux/kdebug.h | 20 ++++++++++++++++++++ include/linux/vmalloc.h | 1 + kernel/Makefile | 2 +- kernel/die_notifier.c | 38 ++++++++++++++++++++++++++++++++++++++ kernel/kprobes.c | 2 +- mm/nommu.c | 8 ++++++++ mm/vmalloc.c | 7 +++++++ 81 files changed, 162 insertions(+), 328 deletions(-) create mode 100644 include/asm-alpha/kdebug.h create mode 100644 include/asm-arm/kdebug.h create mode 100644 include/asm-arm26/kdebug.h create mode 100644 include/asm-cris/kdebug.h create mode 100644 include/asm-frv/kdebug.h create mode 100644 include/asm-generic/kdebug.h create mode 100644 include/asm-h8300/kdebug.h create mode 100644 include/asm-m32r/kdebug.h create mode 100644 include/asm-m68k/kdebug.h create mode 100644 include/asm-m68knommu/kdebug.h create mode 100644 include/asm-mips/kdebug.h create mode 100644 include/asm-parisc/kdebug.h create mode 100644 include/asm-ppc/kdebug.h create mode 100644 include/asm-sh64/kdebug.h create mode 100644 include/asm-um/kdebug.h create mode 100644 include/asm-v850/kdebug.h create mode 100644 include/asm-xtensa/kdebug.h create mode 100644 include/linux/kdebug.h create mode 100644 kernel/die_notifier.c (limited to 'include/linux') diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index f05e66b0f868..10ff36e4e414 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -245,8 +245,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) do_exit(SIGSEGV); } -void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, - unsigned long err, unsigned long trap) +void arm_notify_die(const char *str, struct pt_regs *regs, + struct siginfo *info, unsigned long err, unsigned long trap) { if (user_mode(regs)) { current->thread.error_code = err; @@ -330,7 +330,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) info.si_code = ILL_ILLOPC; info.si_addr = pc; - notify_die("Oops - undefined instruction", regs, &info, 0, 6); + arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6); } asmlinkage void do_unexp_fiq (struct pt_regs *regs) @@ -384,7 +384,7 @@ static int bad_syscall(int n, struct pt_regs *regs) info.si_addr = (void __user *)instruction_pointer(regs) - (thumb_mode(regs) ? 2 : 4); - notify_die("Oops - bad syscall", regs, &info, n, 0); + arm_notify_die("Oops - bad syscall", regs, &info, n, 0); return regs->ARM_r0; } @@ -428,7 +428,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) info.si_code = SEGV_MAPERR; info.si_addr = NULL; - notify_die("branch through zero", regs, &info, 0, 0); + arm_notify_die("branch through zero", regs, &info, 0, 0); return 0; case NR(breakpoint): /* SWI BREAK_POINT */ @@ -564,7 +564,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) info.si_addr = (void __user *)instruction_pointer(regs) - (thumb_mode(regs) ? 
2 : 4); - notify_die("Oops - bad syscall(2)", regs, &info, no, 0); + arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0); return 0; } @@ -638,7 +638,7 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs) info.si_code = ILL_ILLOPC; info.si_addr = (void __user *)addr; - notify_die("unknown data abort code", regs, &info, instr, 0); + arm_notify_die("unknown data abort code", regs, &info, instr, 0); } void __attribute__((noreturn)) __bug(const char *file, int line) diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 5d9ce7deb4a7..75d491448e45 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -453,7 +453,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) info.si_errno = 0; info.si_code = inf->code; info.si_addr = (void __user *)addr; - notify_die("", regs, &info, fsr, 0); + arm_notify_die("", regs, &info, fsr, 0); } asmlinkage void __exception diff --git a/arch/avr32/kernel/kprobes.c b/arch/avr32/kernel/kprobes.c index d0abbcaf1c1e..004c94b6fc1d 100644 --- a/arch/avr32/kernel/kprobes.c +++ b/arch/avr32/kernel/kprobes.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include DEFINE_PER_CPU(struct kprobe *, current_kprobe); diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c index 6f4388f7c20b..e8a3d7f01bae 100644 --- a/arch/avr32/kernel/ptrace.c +++ b/arch/avr32/kernel/ptrace.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include static struct pt_regs *get_user_regs(struct task_struct *tsk) { @@ -300,7 +300,7 @@ asmlinkage void do_debug_priv(struct pt_regs *regs) else die_val = DIE_BREAKPOINT; - if (notify_die(die_val, regs, 0, SIGTRAP) == NOTIFY_STOP) + if (notify_die(die_val, "ptrace", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) return; if (likely(ds & DS_SSS)) { diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c index 4f0382d8483f..4de9edf96ed2 100644 --- a/arch/avr32/kernel/traps.c +++ b/arch/avr32/kernel/traps.c @@ -20,20 +20,6 @@ #include #include -ATOMIC_NOTIFIER_HEAD(avr32_die_chain); - -int register_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&avr32_die_chain, nb); -} -EXPORT_SYMBOL(register_die_notifier); - -int unregister_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&avr32_die_chain, nb); -} -EXPORT_SYMBOL(unregister_die_notifier); - static DEFINE_SPINLOCK(die_lock); void NORET_TYPE die(const char *str, struct pt_regs *regs, long err) diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index 146ebdbdc302..88b00b15970f 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c index a5e0e990ea95..53589d1b1a05 100644 --- a/arch/i386/kernel/crash.c +++ b/arch/i386/kernel/crash.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index b545bc746fce..e00f75ecf1a8 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c @@ -31,8 +31,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index 33cf2f3c444f..fba121f7973f 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c @@ -23,10 +23,10 @@ #include #include #include +#include #include #include -#include #include "mach_traps.h" diff --git a/arch/i386/kernel/traps.c 
b/arch/i386/kernel/traps.c index 58c8e015e77e..4bec0cbf407a 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -52,7 +52,7 @@ #include #include #include -#include +#include #include #include @@ -95,20 +95,6 @@ asmlinkage void machine_check(void); int kstack_depth_to_print = 24; static unsigned int code_bytes = 64; -ATOMIC_NOTIFIER_HEAD(i386die_chain); - -int register_die_notifier(struct notifier_block *nb) -{ - vmalloc_sync_all(); - return atomic_notifier_chain_register(&i386die_chain, nb); -} -EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */ - -int unregister_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&i386die_chain, nb); -} -EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) { diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c index f534c29e80b2..ca0aa0cedc35 100644 --- a/arch/i386/mm/fault.c +++ b/arch/i386/mm/fault.c @@ -21,13 +21,14 @@ #include /* For unblank_screen() */ #include #include /* for max_low_pfn */ +#include #include #include #include +#include #include #include -#include #include extern void die(const char *,struct pt_regs *,long); diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c index 695f737516ae..8e185208dfd4 100644 --- a/arch/i386/oprofile/nmi_int.c +++ b/arch/i386/oprofile/nmi_int.c @@ -14,10 +14,10 @@ #include #include #include +#include #include #include #include -#include #include "op_counter.h" #include "op_x86_model.h" diff --git a/arch/i386/oprofile/nmi_timer_int.c b/arch/i386/oprofile/nmi_timer_int.c index abf0ba52a635..1418e36ae7ab 100644 --- a/arch/i386/oprofile/nmi_timer_int.c +++ b/arch/i386/oprofile/nmi_timer_int.c @@ -12,12 +12,11 @@ #include #include #include - +#include #include #include #include -#include static int profile_timer_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index 80a94e707827..3d51a3f77017 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -16,8 +16,8 @@ #include #include #include +#include -#include #include int kdump_status[NR_CPUS]; diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 6cb56dd4056d..779fe00c9025 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -29,9 +29,9 @@ #include #include #include +#include #include -#include #include #include diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 491687f84fb5..65e3b81a442e 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -72,9 +72,9 @@ #include #include #include +#include #include -#include #include #include #include diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index ae96d4176995..7ab36c772e21 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -27,13 +27,13 @@ #include #include #include +#include #include #include #include #include #include -#include #include #include #include diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 765cbe5ba6ae..5bfb8be02b70 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -16,33 +16,17 @@ #include #include #include /* for ssleep() */ +#include #include #include #include #include #include -#include fpswa_interface_t *fpswa_interface; EXPORT_SYMBOL(fpswa_interface); -ATOMIC_NOTIFIER_HEAD(ia64die_chain); - -int -register_die_notifier(struct notifier_block *nb) -{ - return 
atomic_notifier_chain_register(&ia64die_chain, nb); -} -EXPORT_SYMBOL_GPL(register_die_notifier); - -int -unregister_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&ia64die_chain, nb); -} -EXPORT_SYMBOL_GPL(unregister_die_notifier); - void __init trap_init (void) { diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 59f3ab937615..cb4791d17cce 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -10,12 +10,12 @@ #include #include #include +#include #include #include #include #include -#include extern void die (char *, struct pt_regs *, long); diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c index 68355ef6f841..e336e1692a73 100644 --- a/arch/ia64/sn/kernel/xpc_main.c +++ b/arch/ia64/sn/kernel/xpc_main.c @@ -55,9 +55,9 @@ #include #include #include +#include #include #include -#include #include #include @@ -1332,7 +1332,7 @@ xpc_init(void) dev_warn(xpc_part, "can't register reboot notifier\n"); } - /* add ourselves to the die_notifier list (i.e., ia64die_chain) */ + /* add ourselves to the die_notifier list */ ret = register_die_notifier(&xpc_die_notifier); if (ret != 0) { dev_warn(xpc_part, "can't register die notifier\n"); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index ef647e7a9dc3..2a2c696dcc7a 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -30,8 +30,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f7862224fe85..bf6445ac9f1c 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -33,8 +33,8 @@ #include #include #include +#include -#include #include #include #include @@ -72,20 +72,6 @@ EXPORT_SYMBOL(__debugger_dabr_match); EXPORT_SYMBOL(__debugger_fault_handler); #endif -ATOMIC_NOTIFIER_HEAD(powerpc_die_chain); - -int register_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&powerpc_die_chain, nb); -} -EXPORT_SYMBOL(register_die_notifier); - -int unregister_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&powerpc_die_chain, nb); -} -EXPORT_SYMBOL(unregister_die_notifier); - /* * Trap & Exception support */ diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 03aeb3a46077..bec0cce79a78 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -36,7 +37,6 @@ #include #include #include -#include #include #ifdef CONFIG_KPROBES diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 23c61f6d965b..8516a94d8163 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -24,8 +24,8 @@ #include #include #include +#include #include -#include #include #include #include diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 49dec830373a..a750bcc69aaa 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -40,7 +41,6 @@ #include #include #include -#include /* Called from entry.S only */ extern void handle_per_exception(struct pt_regs *regs); @@ -70,20 +70,6 @@ static int kstack_depth_to_print = 12; static int kstack_depth_to_print = 20; #endif /* CONFIG_64BIT */ -ATOMIC_NOTIFIER_HEAD(s390die_chain); - -int register_die_notifier(struct notifier_block *nb) -{ - return 
atomic_notifier_chain_register(&s390die_chain, nb); -} -EXPORT_SYMBOL(register_die_notifier); - -int unregister_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&s390die_chain, nb); -} -EXPORT_SYMBOL(unregister_die_notifier); - /* * For show_trace we have tree different stack to consider: * - the panic stack which is used if the kernel stack has overflown diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 91f705adc3f9..8b924b359774 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -30,7 +31,6 @@ #include #include -#include #include #ifndef CONFIG_64BIT diff --git a/arch/sparc/kernel/head.S b/arch/sparc/kernel/head.S index 9a219e8b5ddb..97da13c52563 100644 --- a/arch/sparc/kernel/head.S +++ b/arch/sparc/kernel/head.S @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include /* TI_UWINMASK */ #include diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c index eccd8e87f529..64c0ed98820a 100644 --- a/arch/sparc/kernel/setup.c +++ b/arch/sparc/kernel/setup.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -40,7 +41,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c index 527687afc1c4..dc9ffea2a4f7 100644 --- a/arch/sparc/kernel/traps.c +++ b/arch/sparc/kernel/traps.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -22,7 +23,6 @@ #include #include #include -#include #include #include diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c index 9eeed3347df3..c133f270cf70 100644 --- a/arch/sparc/mm/fault.c +++ b/arch/sparc/mm/fault.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -30,7 +31,6 @@ #include #include #include -#include #include extern int prom_node_root; diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 0df7121cef07..e5eaa8072ae0 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -18,13 +18,13 @@ #include #include #include +#include #include #include #include #include #include -#include #include #include #include diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c index ae221f0d4a6f..a44fe47a3c2b 100644 --- a/arch/sparc64/kernel/kprobes.c +++ b/arch/sparc64/kernel/kprobes.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index ad67784292db..64057fdfc7a8 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -36,26 +37,12 @@ #include #include #include -#include #include #ifdef CONFIG_KMOD #include #endif #include -ATOMIC_NOTIFIER_HEAD(sparc64die_chain); - -int register_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&sparc64die_chain, nb); -} -EXPORT_SYMBOL(register_die_notifier); - -int unregister_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&sparc64die_chain, nb); -} -EXPORT_SYMBOL(unregister_die_notifier); /* When an irrecoverable trap occurs at tl > 0, the trap entry * code logs the trap state registers at every level in the trap diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index 55ae802dc0ad..7c36527b7454 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -20,6 
+20,7 @@ #include #include #include +#include #include #include @@ -29,7 +30,6 @@ #include #include #include -#include #include #ifdef CONFIG_KPROBES diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c index 95a7a2c13131..13432a1ae904 100644 --- a/arch/x86_64/kernel/crash.c +++ b/arch/x86_64/kernel/crash.c @@ -17,13 +17,13 @@ #include #include #include +#include #include #include #include #include #include -#include /* This keeps a track of which one is crashing cpu. */ static int crashing_cpu; diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 209c8c0bec71..90e7008cd899 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c @@ -37,10 +37,10 @@ #include #include #include +#include #include #include -#include #include void jprobe_return_end(void); diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c index fa2672682477..442169640e45 100644 --- a/arch/x86_64/kernel/mce.c +++ b/arch/x86_64/kernel/mce.c @@ -20,10 +20,10 @@ #include #include #include +#include #include #include #include -#include #include #include diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c index 6cd2b30e2ffc..931c64bad5e6 100644 --- a/arch/x86_64/kernel/nmi.c +++ b/arch/x86_64/kernel/nmi.c @@ -21,11 +21,11 @@ #include #include #include +#include #include #include #include -#include #include int unknown_nmi_panic; diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c index 0a762e10f2be..373ef66ca1dc 100644 --- a/arch/x86_64/kernel/pci-gart.c +++ b/arch/x86_64/kernel/pci-gart.c @@ -22,13 +22,13 @@ #include #include #include +#include #include #include #include #include #include #include -#include #include #include #include diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index 4f21765078b7..5909039f37aa 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -46,7 +47,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c index 2d6769847456..c116b54d422e 100644 --- a/arch/x86_64/kernel/reboot.c +++ b/arch/x86_64/kernel/reboot.c @@ -7,8 +7,8 @@ #include #include #include +#include #include -#include #include #include #include diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index 4d9dacfae575..193af9922c26 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c @@ -49,11 +49,11 @@ #include #include #include +#include #include #include #include -#include #include #include #include diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index 0484a2ceac87..8c2ac41187c1 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -39,7 +40,6 @@ #include #include #include -#include #include #include #include @@ -71,22 +71,6 @@ asmlinkage void alignment_check(void); asmlinkage void machine_check(void); asmlinkage void spurious_interrupt_bug(void); -ATOMIC_NOTIFIER_HEAD(die_chain); -EXPORT_SYMBOL(die_chain); - -int register_die_notifier(struct notifier_block *nb) -{ - vmalloc_sync_all(); - return atomic_notifier_chain_register(&die_chain, nb); -} -EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */ - -int unregister_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&die_chain, nb); -} -EXPORT_SYMBOL(unregister_die_notifier); 
/* used modular by kdb */ - static inline void conditional_sti(struct pt_regs *regs) { if (regs->eflags & X86_EFLAGS_IF) diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c index de99dba2c515..fd6ecc2cee38 100644 --- a/arch/x86_64/mm/fault.c +++ b/arch/x86_64/mm/fault.c @@ -21,16 +21,17 @@ #include #include /* For unblank_screen() */ #include +#include #include #include #include +#include #include #include #include #include #include -#include #include /* Page fault error code bits */ diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 9e9c5de2e549..b6a972ed5bb3 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include diff --git a/include/asm-alpha/kdebug.h b/include/asm-alpha/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-alpha/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-arm/kdebug.h b/include/asm-arm/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-arm/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h index 63b3080bdac4..25f84da4a97e 100644 --- a/include/asm-arm/system.h +++ b/include/asm-arm/system.h @@ -93,7 +93,7 @@ void die(const char *msg, struct pt_regs *regs, int err) __attribute__((noreturn)); struct siginfo; -void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, +void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, unsigned long err, unsigned long trap); void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, diff --git a/include/asm-arm26/kdebug.h b/include/asm-arm26/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-arm26/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-avr32/kdebug.h b/include/asm-avr32/kdebug.h index f583b643ffb2..de419278fc39 100644 --- a/include/asm-avr32/kdebug.h +++ b/include/asm-avr32/kdebug.h @@ -3,19 +3,6 @@ #include -struct pt_regs; - -struct die_args { - struct pt_regs *regs; - int trapnr; -}; - -int register_die_notifier(struct notifier_block *nb); -int unregister_die_notifier(struct notifier_block *nb); -int register_page_fault_notifier(struct notifier_block *nb); -int unregister_page_fault_notifier(struct notifier_block *nb); -extern struct atomic_notifier_head avr32_die_chain; - /* Grossly misnamed. 
*/ enum die_val { DIE_FAULT, @@ -24,15 +11,7 @@ enum die_val { DIE_PAGE_FAULT, }; -static inline int notify_die(enum die_val val, struct pt_regs *regs, - int trap, int sig) -{ - struct die_args args = { - .regs = regs, - .trapnr = trap, - }; - - return atomic_notifier_call_chain(&avr32_die_chain, val, &args); -} +int register_page_fault_notifier(struct notifier_block *nb); +int unregister_page_fault_notifier(struct notifier_block *nb); #endif /* __ASM_AVR32_KDEBUG_H */ diff --git a/include/asm-cris/kdebug.h b/include/asm-cris/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-cris/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-frv/kdebug.h b/include/asm-frv/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-frv/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h new file mode 100644 index 000000000000..2b799c90b2d4 --- /dev/null +++ b/include/asm-generic/kdebug.h @@ -0,0 +1,8 @@ +#ifndef _ASM_GENERIC_KDEBUG_H +#define _ASM_GENERIC_KDEBUG_H + +enum die_val { + DIE_UNUSED, +}; + +#endif /* _ASM_GENERIC_KDEBUG_H */ diff --git a/include/asm-h8300/kdebug.h b/include/asm-h8300/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-h8300/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h index 6e1c8e1b5e2b..05c3117788b9 100644 --- a/include/asm-i386/kdebug.h +++ b/include/asm-i386/kdebug.h @@ -9,19 +9,8 @@ struct pt_regs; -struct die_args { - struct pt_regs *regs; - const char *str; - long err; - int trapnr; - int signr; -}; - -extern int register_die_notifier(struct notifier_block *); -extern int unregister_die_notifier(struct notifier_block *); extern int register_page_fault_notifier(struct notifier_block *); extern int unregister_page_fault_notifier(struct notifier_block *); -extern struct atomic_notifier_head i386die_chain; /* Grossly misnamed. 
*/ @@ -42,17 +31,4 @@ enum die_val { DIE_PAGE_FAULT, }; -static inline int notify_die(enum die_val val, const char *str, - struct pt_regs *regs, long err, int trap, int sig) -{ - struct die_args args = { - .regs = regs, - .str = str, - .err = err, - .trapnr = trap, - .signr = sig - }; - return atomic_notifier_call_chain(&i386die_chain, val, &args); -} - #endif diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index e16359f81a40..61fbf8578ff1 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -243,8 +243,6 @@ static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; re static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } -extern void vmalloc_sync_all(void); - #ifdef CONFIG_X86_PAE # include #else diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h index aed7142f9e4a..ba211e011a1d 100644 --- a/include/asm-ia64/kdebug.h +++ b/include/asm-ia64/kdebug.h @@ -28,21 +28,8 @@ */ #include -struct pt_regs; - -struct die_args { - struct pt_regs *regs; - const char *str; - long err; - int trapnr; - int signr; -}; - -extern int register_die_notifier(struct notifier_block *); -extern int unregister_die_notifier(struct notifier_block *); extern int register_page_fault_notifier(struct notifier_block *); extern int unregister_page_fault_notifier(struct notifier_block *); -extern struct atomic_notifier_head ia64die_chain; enum die_val { DIE_BREAK = 1, @@ -74,18 +61,4 @@ enum die_val { DIE_KDUMP_LEAVE, }; -static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs, - long err, int trap, int sig) -{ - struct die_args args = { - .regs = regs, - .str = str, - .err = err, - .trapnr = trap, - .signr = sig - }; - - return atomic_notifier_call_chain(&ia64die_chain, val, &args); -} - #endif diff --git a/include/asm-m32r/kdebug.h b/include/asm-m32r/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-m32r/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-m68k/kdebug.h b/include/asm-m68k/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-m68k/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-m68knommu/kdebug.h b/include/asm-m68knommu/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-m68knommu/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-mips/kdebug.h b/include/asm-mips/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-mips/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-parisc/kdebug.h b/include/asm-parisc/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-parisc/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h index 532bfee934f4..1c73d2ec9b59 100644 --- a/include/asm-powerpc/kdebug.h +++ b/include/asm-powerpc/kdebug.h @@ -6,18 +6,6 @@ #include -struct pt_regs; - -struct die_args { - struct pt_regs *regs; - const char *str; - long err; - int trapnr; - int signr; -}; - -extern int register_die_notifier(struct notifier_block *); -extern int unregister_die_notifier(struct notifier_block *); extern int register_page_fault_notifier(struct notifier_block *); extern int unregister_page_fault_notifier(struct notifier_block *); extern struct atomic_notifier_head powerpc_die_chain; @@ -32,11 +20,5 @@ enum 
die_val { DIE_PAGE_FAULT, }; -static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig) -{ - struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig }; - return atomic_notifier_call_chain(&powerpc_die_chain, val, &args); -} - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_KDEBUG_H */ diff --git a/include/asm-ppc/kdebug.h b/include/asm-ppc/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-ppc/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-s390/kdebug.h b/include/asm-s390/kdebug.h index d2d7ad276148..04418af08f85 100644 --- a/include/asm-s390/kdebug.h +++ b/include/asm-s390/kdebug.h @@ -8,21 +8,6 @@ struct pt_regs; -struct die_args { - struct pt_regs *regs; - const char *str; - long err; - int trapnr; - int signr; -}; - -/* Note - you should never unregister because that can race with NMIs. - * If you really want to do it first unregister - then synchronize_sched - * - then free. - */ -extern int register_die_notifier(struct notifier_block *); -extern int unregister_die_notifier(struct notifier_block *); - /* * These are only here because kprobes.c wants them to implement a * blatant layering violation. Will hopefully go away soon once all @@ -37,8 +22,6 @@ static inline int unregister_page_fault_notifier(struct notifier_block *nb) return 0; } -extern struct atomic_notifier_head s390die_chain; - enum die_val { DIE_OOPS = 1, DIE_BPT, @@ -54,19 +37,6 @@ enum die_val { DIE_NMI_IPI, }; -static inline int notify_die(enum die_val val, const char *str, - struct pt_regs *regs, long err, int trap, int sig) -{ - struct die_args args = { - .regs = regs, - .str = str, - .err = err, - .trapnr = trap, - .signr = sig - }; - return atomic_notifier_call_chain(&s390die_chain, val, &args); -} - extern void die(const char *, struct pt_regs *, long); #endif diff --git a/include/asm-sh/kdebug.h b/include/asm-sh/kdebug.h index ef009baf5a11..493c20629747 100644 --- a/include/asm-sh/kdebug.h +++ b/include/asm-sh/kdebug.h @@ -2,6 +2,7 @@ #define __ASM_SH_KDEBUG_H #include +#include struct pt_regs; diff --git a/include/asm-sh64/kdebug.h b/include/asm-sh64/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-sh64/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-sparc/kdebug.h b/include/asm-sparc/kdebug.h index fba92485fdba..404d80767323 100644 --- a/include/asm-sparc/kdebug.h +++ b/include/asm-sparc/kdebug.h @@ -66,4 +66,8 @@ static inline void sp_enter_debugger(void) #define KDEBUG_DUNNO2_OFF 0x8 #define KDEBUG_TEACH_OFF 0xc +enum die_val { + DIE_UNUSED, +}; + #endif /* !(_SPARC_KDEBUG_H) */ diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h index 11251bdd00cb..f8032e73f384 100644 --- a/include/asm-sparc64/kdebug.h +++ b/include/asm-sparc64/kdebug.h @@ -7,19 +7,8 @@ struct pt_regs; -struct die_args { - struct pt_regs *regs; - const char *str; - long err; - int trapnr; - int signr; -}; - -extern int register_die_notifier(struct notifier_block *); -extern int unregister_die_notifier(struct notifier_block *); extern int register_page_fault_notifier(struct notifier_block *); extern int unregister_page_fault_notifier(struct notifier_block *); -extern struct atomic_notifier_head sparc64die_chain; extern void bad_trap(struct pt_regs *, long); @@ -36,16 +25,4 @@ enum die_val { DIE_PAGE_FAULT, }; -static inline int notify_die(enum die_val val,char *str, struct pt_regs *regs, - long err, int trap, int sig) -{ - struct die_args 
args = { .regs = regs, - .str = str, - .err = err, - .trapnr = trap, - .signr = sig }; - - return atomic_notifier_call_chain(&sparc64die_chain, val, &args); -} - #endif diff --git a/include/asm-um/kdebug.h b/include/asm-um/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-um/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-v850/kdebug.h b/include/asm-v850/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-v850/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h index e9ce163b1550..74feae945a26 100644 --- a/include/asm-x86_64/kdebug.h +++ b/include/asm-x86_64/kdebug.h @@ -5,19 +5,8 @@ struct pt_regs; -struct die_args { - struct pt_regs *regs; - const char *str; - long err; - int trapnr; - int signr; -}; - -extern int register_die_notifier(struct notifier_block *); -extern int unregister_die_notifier(struct notifier_block *); extern int register_page_fault_notifier(struct notifier_block *); extern int unregister_page_fault_notifier(struct notifier_block *); -extern struct atomic_notifier_head die_chain; /* Grossly misnamed. */ enum die_val { @@ -37,19 +26,6 @@ enum die_val { DIE_PAGE_FAULT, }; -static inline int notify_die(enum die_val val, const char *str, - struct pt_regs *regs, long err, int trap, int sig) -{ - struct die_args args = { - .regs = regs, - .str = str, - .err = err, - .trapnr = trap, - .signr = sig - }; - return atomic_notifier_call_chain(&die_chain, val, &args); -} - extern void printk_address(unsigned long address); extern void die(const char *,struct pt_regs *,long); extern void __die(const char *,struct pt_regs *,long); diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index da3390faaea6..5f75ee5379a7 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -411,7 +411,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) extern spinlock_t pgd_lock; extern struct list_head pgd_list; -void vmalloc_sync_all(void); extern int kern_addr_valid(unsigned long addr); diff --git a/include/asm-xtensa/kdebug.h b/include/asm-xtensa/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/include/asm-xtensa/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/kdebug.h b/include/linux/kdebug.h new file mode 100644 index 000000000000..5db38d6d8b92 --- /dev/null +++ b/include/linux/kdebug.h @@ -0,0 +1,20 @@ +#ifndef _LINUX_KDEBUG_H +#define _LINUX_KDEBUG_H + +#include + +struct die_args { + struct pt_regs *regs; + const char *str; + long err; + int trapnr; + int signr; +}; + +int register_die_notifier(struct notifier_block *nb); +int unregister_die_notifier(struct notifier_block *nb); + +int notify_die(enum die_val val, const char *str, + struct pt_regs *regs, long err, int trap, int sig); + +#endif /* _LINUX_KDEBUG_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 924e502905d4..4b7ee83787c1 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -53,6 +53,7 @@ extern void vunmap(void *addr); extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); +void vmalloc_sync_all(void); /* * Lowlevel-APIs (not for driver use!) 
diff --git a/kernel/Makefile b/kernel/Makefile index ac6b27abb1ad..642d4277c2ea 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -8,7 +8,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ signal.o sys.o kmod.o workqueue.o pid.o \ rcupdate.o extable.o params.o posix-timers.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ - hrtimer.o rwsem.o latency.o nsproxy.o srcu.o + hrtimer.o rwsem.o latency.o nsproxy.o srcu.o die_notifier.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ diff --git a/kernel/die_notifier.c b/kernel/die_notifier.c new file mode 100644 index 000000000000..0d98827887a7 --- /dev/null +++ b/kernel/die_notifier.c @@ -0,0 +1,38 @@ + +#include +#include +#include +#include + + +static ATOMIC_NOTIFIER_HEAD(die_chain); + +int notify_die(enum die_val val, const char *str, + struct pt_regs *regs, long err, int trap, int sig) +{ + struct die_args args = { + .regs = regs, + .str = str, + .err = err, + .trapnr = trap, + .signr = sig, + + }; + + return atomic_notifier_call_chain(&die_chain, val, &args); +} + +int register_die_notifier(struct notifier_block *nb) +{ + vmalloc_sync_all(); + return atomic_notifier_chain_register(&die_chain, nb); +} +EXPORT_SYMBOL_GPL(register_die_notifier); + +int unregister_die_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&die_chain, nb); +} +EXPORT_SYMBOL_GPL(unregister_die_notifier); + + diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 6c86d67ed1a7..0207045b4f6f 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -42,10 +42,10 @@ #include #include #include +#include #include #include #include -#include #define KPROBE_HASH_BITS 6 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) diff --git a/mm/nommu.c b/mm/nommu.c index 1f60194d9b9b..2b16b00a5b11 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -261,6 +261,14 @@ void vunmap(void *addr) BUG(); } +/* + * Implement a stub for vmalloc_sync_all() if the architecture chose not to + * have one. + */ +void __attribute__((weak)) vmalloc_sync_all(void) +{ +} + /* * sys_brk() for the most part doesn't need the global kernel * lock, except when an application is doing something nasty diff --git a/mm/vmalloc.c b/mm/vmalloc.c index cb5aabda7046..faa2a521dea3 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -755,3 +755,10 @@ out_einval_locked: } EXPORT_SYMBOL(remap_vmalloc_range); +/* + * Implement a stub for vmalloc_sync_all() if the architecture chose not to + * have one. + */ +void __attribute__((weak)) vmalloc_sync_all(void) +{ +} -- cgit v1.2.3 From ef51c97623b94f51e439ac91d2736aab3d1b6594 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Tue, 8 May 2007 00:27:10 -0700 Subject: Remove do_sync_file_range() Remove do_sync_file_range() and convert callers to just use do_sync_mapping_range(). 
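The conversion is mechanical: callers pass file->f_mapping instead of the struct file. A hedged sketch of the pattern (the variable names are illustrative; the flags and the f_mapping dereference match the hunks below):

    /* before */
    err = do_sync_file_range(file, pos, endbyte,
                             SYNC_FILE_RANGE_WAIT_BEFORE |
                             SYNC_FILE_RANGE_WRITE |
                             SYNC_FILE_RANGE_WAIT_AFTER);

    /* after: name the backing address_space explicitly */
    err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
                                SYNC_FILE_RANGE_WAIT_BEFORE |
                                SYNC_FILE_RANGE_WRITE |
                                SYNC_FILE_RANGE_WAIT_AFTER);
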
Signed-off-by: Mark Fasheh Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 8 ++++---- fs/sync.c | 2 +- include/linux/fs.h | 5 ----- mm/filemap.c | 8 ++++---- 4 files changed, 9 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e61e0efe9ec7..5a4a74c1097c 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1456,10 +1456,10 @@ int bitmap_create(mddev_t *mddev) bitmap->offset = mddev->bitmap_offset; if (file) { get_file(file); - do_sync_file_range(file, 0, LLONG_MAX, - SYNC_FILE_RANGE_WAIT_BEFORE | - SYNC_FILE_RANGE_WRITE | - SYNC_FILE_RANGE_WAIT_AFTER); + do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX, + SYNC_FILE_RANGE_WAIT_BEFORE | + SYNC_FILE_RANGE_WRITE | + SYNC_FILE_RANGE_WAIT_AFTER); } /* read superblock from bitmap file (this sets bitmap->chunksize) */ err = bitmap_read_sb(bitmap); diff --git a/fs/sync.c b/fs/sync.c index 5cb9e7e43383..2f97576355b8 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -229,7 +229,7 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, !S_ISLNK(i_mode)) goto out_put; - ret = do_sync_file_range(file, offset, endbyte, flags); + ret = do_sync_mapping_range(file->f_mapping, offset, endbyte, flags); out_put: fput_light(file, fput_needed); out: diff --git a/include/linux/fs.h b/include/linux/fs.h index 527a09a82297..7cf0c54a46a7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -849,11 +849,6 @@ extern int fcntl_getlease(struct file *filp); /* fs/sync.c */ extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset, loff_t endbyte, unsigned int flags); -static inline int do_sync_file_range(struct file *file, loff_t offset, - loff_t endbyte, unsigned int flags) -{ - return do_sync_mapping_range(file->f_mapping, offset, endbyte, flags); -} /* fs/locks.c */ extern void locks_init_lock(struct file_lock *); diff --git a/mm/filemap.c b/mm/filemap.c index 3e49fe13d6ac..9cbf4fea4a59 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2309,10 +2309,10 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, * semantics. */ endbyte = pos + written_buffered - written - 1; - err = do_sync_file_range(file, pos, endbyte, - SYNC_FILE_RANGE_WAIT_BEFORE| - SYNC_FILE_RANGE_WRITE| - SYNC_FILE_RANGE_WAIT_AFTER); + err = do_sync_mapping_range(file->f_mapping, pos, endbyte, + SYNC_FILE_RANGE_WAIT_BEFORE| + SYNC_FILE_RANGE_WRITE| + SYNC_FILE_RANGE_WAIT_AFTER); if (err == 0) { written = written_buffered; invalidate_mapping_pages(mapping, -- cgit v1.2.3 From e5f00f42f35e6f4699f105a3bd56874847cbf72f Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Tue, 8 May 2007 00:27:22 -0700 Subject: make remove_inode_dquot_ref() static remove_inode_dquot_ref() can now become static. Signed-off-by: Adrian Bunk Acked-by: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dquot.c | 3 ++- include/linux/quotaops.h | 3 --- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/dquot.c b/fs/dquot.c index 0a5febc159f2..ca40bfc5cccc 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -721,7 +721,8 @@ static inline int dqput_blocks(struct dquot *dquot) /* Remove references to dquots from inode - add dquot to list for freeing if needed */ /* We can't race with anybody because we hold dqptr_sem for writing... 
*/ -int remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head) +static int remove_inode_dquot_ref(struct inode *inode, int type, + struct list_head *tofree_head) { struct dquot *dquot = inode->i_dquot[type]; diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 90c23f690c0d..5110201a4159 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -37,9 +37,6 @@ extern int dquot_release(struct dquot *dquot); extern int dquot_commit_info(struct super_block *sb, int type); extern int dquot_mark_dquot_dirty(struct dquot *dquot); -int remove_inode_dquot_ref(struct inode *inode, int type, - struct list_head *tofree_head); - extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path); extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); -- cgit v1.2.3 From c467a388ae9f236c039d4d0f4c4be07c7deebe97 Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Tue, 8 May 2007 00:27:26 -0700 Subject: Delete unused header file linux/awe_voice.h Delete the unused header file include/linux/awe_voice.h, as well as its corresponding Kbuild entry. Signed-off-by: Robert P. J. Day Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/Kbuild | 1 - include/linux/awe_voice.h | 525 ---------------------------------------------- 2 files changed, 526 deletions(-) delete mode 100644 include/linux/awe_voice.h (limited to 'include/linux') diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 9f05279e7dd3..2c2b6fc91e30 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -33,7 +33,6 @@ header-y += atmsvc.h header-y += atm_zatm.h header-y += auto_fs4.h header-y += auxvec.h -header-y += awe_voice.h header-y += ax25.h header-y += b1lli.h header-y += baycom.h diff --git a/include/linux/awe_voice.h b/include/linux/awe_voice.h deleted file mode 100644 index bf33f17bea99..000000000000 --- a/include/linux/awe_voice.h +++ /dev/null @@ -1,525 +0,0 @@ -/* - * include/linux/awe_voice.h - * - * Voice information definitions for the low level driver for the - * AWE32/SB32/AWE64 wave table synth. - * version 0.4.4; Jan. 4, 2000 - * - * Copyright (C) 1996-2000 Takashi Iwai - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#ifndef AWE_VOICE_H -#define AWE_VOICE_H - -#ifndef SAMPLE_TYPE_AWE32 -#define SAMPLE_TYPE_AWE32 0x20 -#endif - -#define _LINUX_PATCHKEY_H_INDIRECT -#include -#undef _LINUX_PATCHKEY_H_INDIRECT - -/*---------------------------------------------------------------- - * patch information record - *----------------------------------------------------------------*/ - -/* patch interface header: 16 bytes */ -typedef struct awe_patch_info { - short key; /* use AWE_PATCH here */ -#define AWE_PATCH _PATCHKEY(0x07) - - short device_no; /* synthesizer number */ - unsigned short sf_id; /* file id (should be zero) */ - short optarg; /* optional argument */ - int len; /* data length (without this header) */ - - short type; /* patch operation type */ -#define AWE_LOAD_INFO 0 /* awe_voice_rec */ -#define AWE_LOAD_DATA 1 /* awe_sample_info */ -#define AWE_OPEN_PATCH 2 /* awe_open_parm */ -#define AWE_CLOSE_PATCH 3 /* none */ -#define AWE_UNLOAD_PATCH 4 /* none */ -#define AWE_REPLACE_DATA 5 /* awe_sample_info (optarg=#channels)*/ -#define AWE_MAP_PRESET 6 /* awe_voice_map */ -/*#define AWE_PROBE_INFO 7*/ /* awe_voice_map (pat only) */ -#define AWE_PROBE_DATA 8 /* optarg=sample */ -#define AWE_REMOVE_INFO 9 /* optarg=(bank<<8)|instr */ -#define AWE_LOAD_CHORUS_FX 0x10 /* awe_chorus_fx_rec (optarg=mode) */ -#define AWE_LOAD_REVERB_FX 0x11 /* awe_reverb_fx_rec (optarg=mode) */ - - short reserved; /* word alignment data */ - - /* the actual patch data begins after this */ -#if defined(AWE_COMPAT_030) && AWE_COMPAT_030 - char data[0]; -#endif -} awe_patch_info; - -/*#define AWE_PATCH_INFO_SIZE 16*/ -#define AWE_PATCH_INFO_SIZE sizeof(awe_patch_info) - - -/*---------------------------------------------------------------- - * open patch - *----------------------------------------------------------------*/ - -#define AWE_PATCH_NAME_LEN 32 - -typedef struct _awe_open_parm { - unsigned short type; /* sample type */ -#define AWE_PAT_TYPE_MISC 0 -#define AWE_PAT_TYPE_GM 1 -#define AWE_PAT_TYPE_GS 2 -#define AWE_PAT_TYPE_MT32 3 -#define AWE_PAT_TYPE_XG 4 -#define AWE_PAT_TYPE_SFX 5 -#define AWE_PAT_TYPE_GUS 6 -#define AWE_PAT_TYPE_MAP 7 - -#define AWE_PAT_LOCKED 0x100 /* lock the samples */ -#define AWE_PAT_SHARED 0x200 /* sample is shared */ - - short reserved; - char name[AWE_PATCH_NAME_LEN]; -} awe_open_parm; - -/*#define AWE_OPEN_PARM_SIZE 28*/ -#define AWE_OPEN_PARM_SIZE sizeof(awe_open_parm) - - -/*---------------------------------------------------------------- - * raw voice information record - *----------------------------------------------------------------*/ - -/* wave table envelope & effect parameters to control EMU8000 */ -typedef struct _awe_voice_parm { - unsigned short moddelay; /* modulation delay (0x8000) */ - unsigned short modatkhld; /* modulation attack & hold time (0x7f7f) */ - unsigned short moddcysus; /* modulation decay & sustain (0x7f7f) */ - unsigned short modrelease; /* modulation release time (0x807f) */ - short modkeyhold, modkeydecay; /* envelope change per key (not used) */ - unsigned short voldelay; /* volume delay (0x8000) */ - unsigned short volatkhld; /* volume attack & hold time (0x7f7f) */ - unsigned short voldcysus; /* volume decay & sustain (0x7f7f) */ - unsigned short volrelease; /* volume release time (0x807f) */ - short volkeyhold, volkeydecay; /* envelope change per key (not used) */ - unsigned short lfo1delay; /* LFO1 delay (0x8000) */ - unsigned short lfo2delay; /* LFO2 delay (0x8000) */ - unsigned short pefe; /* modulation pitch & cutoff (0x0000) */ - unsigned 
short fmmod; /* LFO1 pitch & cutoff (0x0000) */ - unsigned short tremfrq; /* LFO1 volume & freq (0x0000) */ - unsigned short fm2frq2; /* LFO2 pitch & freq (0x0000) */ - unsigned char cutoff; /* initial cutoff (0xff) */ - unsigned char filterQ; /* initial filter Q [0-15] (0x0) */ - unsigned char chorus; /* chorus send (0x00) */ - unsigned char reverb; /* reverb send (0x00) */ - unsigned short reserved[4]; /* not used */ -} awe_voice_parm; - -typedef struct _awe_voice_parm_block { - unsigned short moddelay; /* modulation delay (0x8000) */ - unsigned char modatk, modhld; - unsigned char moddcy, modsus; - unsigned char modrel, moddummy; - short modkeyhold, modkeydecay; /* envelope change per key (not used) */ - unsigned short voldelay; /* volume delay (0x8000) */ - unsigned char volatk, volhld; - unsigned char voldcy, volsus; - unsigned char volrel, voldummy; - short volkeyhold, volkeydecay; /* envelope change per key (not used) */ - unsigned short lfo1delay; /* LFO1 delay (0x8000) */ - unsigned short lfo2delay; /* LFO2 delay (0x8000) */ - unsigned char env1fc, env1pit; - unsigned char lfo1fc, lfo1pit; - unsigned char lfo1freq, lfo1vol; - unsigned char lfo2freq, lfo2pit; - unsigned char cutoff; /* initial cutoff (0xff) */ - unsigned char filterQ; /* initial filter Q [0-15] (0x0) */ - unsigned char chorus; /* chorus send (0x00) */ - unsigned char reverb; /* reverb send (0x00) */ - unsigned short reserved[4]; /* not used */ -} awe_voice_parm_block; - -#define AWE_VOICE_PARM_SIZE 48 - - -/* wave table parameters: 92 bytes */ -typedef struct _awe_voice_info { - unsigned short sf_id; /* file id (should be zero) */ - unsigned short sample; /* sample id */ - int start, end; /* sample offset correction */ - int loopstart, loopend; /* loop offset correction */ - short rate_offset; /* sample rate pitch offset */ - unsigned short mode; /* sample mode */ -#define AWE_MODE_ROMSOUND 0x8000 -#define AWE_MODE_STEREO 1 -#define AWE_MODE_LOOPING 2 -#define AWE_MODE_NORELEASE 4 /* obsolete */ -#define AWE_MODE_INIT_PARM 8 - - short root; /* midi root key */ - short tune; /* pitch tuning (in cents) */ - signed char low, high; /* key note range */ - signed char vellow, velhigh; /* velocity range */ - signed char fixkey, fixvel; /* fixed key, velocity */ - signed char pan, fixpan; /* panning, fixed panning */ - short exclusiveClass; /* exclusive class (0 = none) */ - unsigned char amplitude; /* sample volume (127 max) */ - unsigned char attenuation; /* attenuation (0.375dB) */ - short scaleTuning; /* pitch scale tuning(%), normally 100 */ - awe_voice_parm parm; /* voice envelope parameters */ - short index; /* internal index (set by driver) */ -} awe_voice_info; - -/*#define AWE_VOICE_INFO_SIZE 92*/ -#define AWE_VOICE_INFO_SIZE sizeof(awe_voice_info) - -/*----------------------------------------------------------------*/ - -/* The info entry of awe_voice_rec is changed from 0 to 1 - * for some compilers refusing zero size array. - * Due to this change, sizeof(awe_voice_rec) becomes different - * from older versions. - * Use AWE_VOICE_REC_SIZE instead. 
- */ - -/* instrument info header: 4 bytes */ -typedef struct _awe_voice_rec_hdr { - unsigned char bank; /* midi bank number */ - unsigned char instr; /* midi preset number */ - char nvoices; /* number of voices */ - char write_mode; /* write mode; normally 0 */ -#define AWE_WR_APPEND 0 /* append anyway */ -#define AWE_WR_EXCLUSIVE 1 /* skip if already exists */ -#define AWE_WR_REPLACE 2 /* replace if already exists */ -} awe_voice_rec_hdr; - -/*#define AWE_VOICE_REC_SIZE 4*/ -#define AWE_VOICE_REC_SIZE sizeof(awe_voice_rec_hdr) - -/* the standard patch structure for one sample */ -typedef struct _awe_voice_rec_patch { - awe_patch_info patch; - awe_voice_rec_hdr hdr; - awe_voice_info info; -} awe_voice_rec_patch; - - -/* obsolete data type */ -#if defined(AWE_COMPAT_030) && AWE_COMPAT_030 -#define AWE_INFOARRAY_SIZE 0 -#else -#define AWE_INFOARRAY_SIZE 1 -#endif - -typedef struct _awe_voice_rec { - unsigned char bank; /* midi bank number */ - unsigned char instr; /* midi preset number */ - short nvoices; /* number of voices */ - /* voice information follows here */ - awe_voice_info info[AWE_INFOARRAY_SIZE]; -} awe_voice_rec; - - -/*---------------------------------------------------------------- - * sample wave information - *----------------------------------------------------------------*/ - -/* wave table sample header: 32 bytes */ -typedef struct awe_sample_info { - unsigned short sf_id; /* file id (should be zero) */ - unsigned short sample; /* sample id */ - int start, end; /* start & end offset */ - int loopstart, loopend; /* loop start & end offset */ - int size; /* size (0 = ROM) */ - short checksum_flag; /* use check sum = 1 */ - unsigned short mode_flags; /* mode flags */ -#define AWE_SAMPLE_8BITS 1 /* wave data is 8bits */ -#define AWE_SAMPLE_UNSIGNED 2 /* wave data is unsigned */ -#define AWE_SAMPLE_NO_BLANK 4 /* no blank loop is attached */ -#define AWE_SAMPLE_SINGLESHOT 8 /* single-shot w/o loop */ -#define AWE_SAMPLE_BIDIR_LOOP 16 /* bidirectional looping */ -#define AWE_SAMPLE_STEREO_LEFT 32 /* stereo left sound */ -#define AWE_SAMPLE_STEREO_RIGHT 64 /* stereo right sound */ -#define AWE_SAMPLE_REVERSE_LOOP 128 /* reverse looping */ - unsigned int checksum; /* check sum */ -#if defined(AWE_COMPAT_030) && AWE_COMPAT_030 - unsigned short data[0]; /* sample data follows here */ -#endif -} awe_sample_info; - -/*#define AWE_SAMPLE_INFO_SIZE 32*/ -#define AWE_SAMPLE_INFO_SIZE sizeof(awe_sample_info) - - -/*---------------------------------------------------------------- - * voice preset mapping - *----------------------------------------------------------------*/ - -typedef struct awe_voice_map { - int map_bank, map_instr, map_key; /* key = -1 means all keys */ - int src_bank, src_instr, src_key; -} awe_voice_map; - -#define AWE_VOICE_MAP_SIZE sizeof(awe_voice_map) - - -/*---------------------------------------------------------------- - * awe hardware controls - *----------------------------------------------------------------*/ - -#define _AWE_DEBUG_MODE 0x00 -#define _AWE_REVERB_MODE 0x01 -#define _AWE_CHORUS_MODE 0x02 -#define _AWE_REMOVE_LAST_SAMPLES 0x03 -#define _AWE_INITIALIZE_CHIP 0x04 -#define _AWE_SEND_EFFECT 0x05 -#define _AWE_TERMINATE_CHANNEL 0x06 -#define _AWE_TERMINATE_ALL 0x07 -#define _AWE_INITIAL_VOLUME 0x08 -#define _AWE_INITIAL_ATTEN _AWE_INITIAL_VOLUME -#define _AWE_RESET_CHANNEL 0x09 -#define _AWE_CHANNEL_MODE 0x0a -#define _AWE_DRUM_CHANNELS 0x0b -#define _AWE_MISC_MODE 0x0c -#define _AWE_RELEASE_ALL 0x0d -#define _AWE_NOTEOFF_ALL 0x0e -#define 
_AWE_CHN_PRESSURE 0x0f -/*#define _AWE_GET_CURRENT_MODE 0x10*/ -#define _AWE_EQUALIZER 0x11 -/*#define _AWE_GET_MISC_MODE 0x12*/ -/*#define _AWE_GET_FONTINFO 0x13*/ - -#define _AWE_MODE_FLAG 0x80 -#define _AWE_COOKED_FLAG 0x40 /* not supported */ -#define _AWE_MODE_VALUE_MASK 0x3F - -/*----------------------------------------------------------------*/ - -#define _AWE_SET_CMD(p,dev,voice,cmd,p1,p2) \ -{((char*)(p))[0] = SEQ_PRIVATE;\ - ((char*)(p))[1] = dev;\ - ((char*)(p))[2] = _AWE_MODE_FLAG|(cmd);\ - ((char*)(p))[3] = voice;\ - ((unsigned short*)(p))[2] = p1;\ - ((unsigned short*)(p))[3] = p2;} - -/* buffered access */ -#define _AWE_CMD(dev, voice, cmd, p1, p2) \ -{_SEQ_NEEDBUF(8);\ - _AWE_SET_CMD(_seqbuf + _seqbufptr, dev, voice, cmd, p1, p2);\ - _SEQ_ADVBUF(8);} - -/* direct access */ -#define _AWE_CMD_NOW(seqfd,dev,voice,cmd,p1,p2) \ -{struct seq_event_rec tmp;\ - _AWE_SET_CMD(&tmp, dev, voice, cmd, p1, p2);\ - ioctl(seqfd, SNDCTL_SEQ_OUTOFBAND, &tmp);} - -/*----------------------------------------------------------------*/ - -/* set debugging mode */ -#define AWE_DEBUG_MODE(dev,p1) _AWE_CMD(dev, 0, _AWE_DEBUG_MODE, p1, 0) -/* set reverb mode; from 0 to 7 */ -#define AWE_REVERB_MODE(dev,p1) _AWE_CMD(dev, 0, _AWE_REVERB_MODE, p1, 0) -/* set chorus mode; from 0 to 7 */ -#define AWE_CHORUS_MODE(dev,p1) _AWE_CMD(dev, 0, _AWE_CHORUS_MODE, p1, 0) - -/* reset channel */ -#define AWE_RESET_CHANNEL(dev,ch) _AWE_CMD(dev, ch, _AWE_RESET_CHANNEL, 0, 0) -#define AWE_RESET_CONTROL(dev,ch) _AWE_CMD(dev, ch, _AWE_RESET_CHANNEL, 1, 0) - -/* send an effect to all layers */ -#define AWE_SEND_EFFECT(dev,voice,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,type,value) -#define AWE_ADD_EFFECT(dev,voice,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((type)|0x80),value) -#define AWE_UNSET_EFFECT(dev,voice,type) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((type)|0x40),0) -/* send an effect to a layer */ -#define AWE_SEND_LAYER_EFFECT(dev,voice,layer,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((layer+1)<<8|(type)),value) -#define AWE_ADD_LAYER_EFFECT(dev,voice,layer,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((layer+1)<<8|(type)|0x80),value) -#define AWE_UNSET_LAYER_EFFECT(dev,voice,layer,type) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((layer+1)<<8|(type)|0x40),0) - -/* terminate sound on the channel/voice */ -#define AWE_TERMINATE_CHANNEL(dev,voice) _AWE_CMD(dev,voice,_AWE_TERMINATE_CHANNEL,0,0) -/* terminate all sounds */ -#define AWE_TERMINATE_ALL(dev) _AWE_CMD(dev, 0, _AWE_TERMINATE_ALL, 0, 0) -/* release all sounds (w/o sustain effect) */ -#define AWE_RELEASE_ALL(dev) _AWE_CMD(dev, 0, _AWE_RELEASE_ALL, 0, 0) -/* note off all sounds (w sustain effect) */ -#define AWE_NOTEOFF_ALL(dev) _AWE_CMD(dev, 0, _AWE_NOTEOFF_ALL, 0, 0) - -/* set initial attenuation */ -#define AWE_INITIAL_VOLUME(dev,atten) _AWE_CMD(dev, 0, _AWE_INITIAL_VOLUME, atten, 0) -#define AWE_INITIAL_ATTEN AWE_INITIAL_VOLUME -/* relative attenuation */ -#define AWE_SET_ATTEN(dev,atten) _AWE_CMD(dev, 0, _AWE_INITIAL_VOLUME, atten, 1) - -/* set channel playing mode; mode=0/1/2 */ -#define AWE_SET_CHANNEL_MODE(dev,mode) _AWE_CMD(dev, 0, _AWE_CHANNEL_MODE, mode, 0) -#define AWE_PLAY_INDIRECT 0 /* indirect voice mode (default) */ -#define AWE_PLAY_MULTI 1 /* multi note voice mode */ -#define AWE_PLAY_DIRECT 2 /* direct single voice mode */ -#define AWE_PLAY_MULTI2 3 /* sequencer2 mode; used internally */ - -/* set drum channel mask; channels is 32bit long value */ -#define AWE_DRUM_CHANNELS(dev,channels) _AWE_CMD(dev, 0, _AWE_DRUM_CHANNELS, 
((channels) & 0xffff), ((channels) >> 16)) - -/* set bass and treble control; values are from 0 to 11 */ -#define AWE_EQUALIZER(dev,bass,treble) _AWE_CMD(dev, 0, _AWE_EQUALIZER, bass, treble) - -/* remove last loaded samples */ -#define AWE_REMOVE_LAST_SAMPLES(seqfd,dev) _AWE_CMD_NOW(seqfd, dev, 0, _AWE_REMOVE_LAST_SAMPLES, 0, 0) -/* initialize emu8000 chip */ -#define AWE_INITIALIZE_CHIP(seqfd,dev) _AWE_CMD_NOW(seqfd, dev, 0, _AWE_INITIALIZE_CHIP, 0, 0) - -/* set miscellaneous modes; meta command */ -#define AWE_MISC_MODE(dev,mode,value) _AWE_CMD(dev, 0, _AWE_MISC_MODE, mode, value) -/* exclusive sound off; 1=off */ -#define AWE_EXCLUSIVE_SOUND(dev,mode) AWE_MISC_MODE(dev,AWE_MD_EXCLUSIVE_SOUND,mode) -/* default GUS bank number */ -#define AWE_SET_GUS_BANK(dev,bank) AWE_MISC_MODE(dev,AWE_MD_GUS_BANK,bank) -/* change panning position in realtime; 0=don't 1=do */ -#define AWE_REALTIME_PAN(dev,mode) AWE_MISC_MODE(dev,AWE_MD_REALTIME_PAN,mode) - -/* extended pressure controls; not portable with other sound drivers */ -#define AWE_KEY_PRESSURE(dev,ch,note,vel) SEQ_START_NOTE(dev,ch,(note)+128,vel) -#define AWE_CHN_PRESSURE(dev,ch,vel) _AWE_CMD(dev,ch,_AWE_CHN_PRESSURE,vel,0) - -/*----------------------------------------------------------------*/ - -/* reverb mode parameters */ -#define AWE_REVERB_ROOM1 0 -#define AWE_REVERB_ROOM2 1 -#define AWE_REVERB_ROOM3 2 -#define AWE_REVERB_HALL1 3 -#define AWE_REVERB_HALL2 4 -#define AWE_REVERB_PLATE 5 -#define AWE_REVERB_DELAY 6 -#define AWE_REVERB_PANNINGDELAY 7 -#define AWE_REVERB_PREDEFINED 8 -/* user can define reverb modes up to 32 */ -#define AWE_REVERB_NUMBERS 32 - -typedef struct awe_reverb_fx_rec { - unsigned short parms[28]; -} awe_reverb_fx_rec; - -/*----------------------------------------------------------------*/ - -/* chorus mode parameters */ -#define AWE_CHORUS_1 0 -#define AWE_CHORUS_2 1 -#define AWE_CHORUS_3 2 -#define AWE_CHORUS_4 3 -#define AWE_CHORUS_FEEDBACK 4 -#define AWE_CHORUS_FLANGER 5 -#define AWE_CHORUS_SHORTDELAY 6 -#define AWE_CHORUS_SHORTDELAY2 7 -#define AWE_CHORUS_PREDEFINED 8 -/* user can define chorus modes up to 32 */ -#define AWE_CHORUS_NUMBERS 32 - -typedef struct awe_chorus_fx_rec { - unsigned short feedback; /* feedback level (0xE600-0xE6FF) */ - unsigned short delay_offset; /* delay (0-0x0DA3) [1/44100 sec] */ - unsigned short lfo_depth; /* LFO depth (0xBC00-0xBCFF) */ - unsigned int delay; /* right delay (0-0xFFFFFFFF) [1/256/44100 sec] */ - unsigned int lfo_freq; /* LFO freq LFO freq (0-0xFFFFFFFF) */ -} awe_chorus_fx_rec; - -/*----------------------------------------------------------------*/ - -/* misc mode types */ -enum { -/* 0*/ AWE_MD_EXCLUSIVE_OFF, /* obsolete */ -/* 1*/ AWE_MD_EXCLUSIVE_ON, /* obsolete */ -/* 2*/ AWE_MD_VERSION, /* read only */ -/* 3*/ AWE_MD_EXCLUSIVE_SOUND, /* 0/1: exclusive note on (default=1) */ -/* 4*/ AWE_MD_REALTIME_PAN, /* 0/1: do realtime pan change (default=1) */ -/* 5*/ AWE_MD_GUS_BANK, /* bank number for GUS patches (default=0) */ -/* 6*/ AWE_MD_KEEP_EFFECT, /* 0/1: keep effect values, (default=0) */ -/* 7*/ AWE_MD_ZERO_ATTEN, /* attenuation of max volume (default=32) */ -/* 8*/ AWE_MD_CHN_PRIOR, /* 0/1: set MIDI channel priority mode (default=1) */ -/* 9*/ AWE_MD_MOD_SENSE, /* integer: modwheel sensitivity (def=18) */ -/*10*/ AWE_MD_DEF_PRESET, /* integer: default preset number (def=0) */ -/*11*/ AWE_MD_DEF_BANK, /* integer: default bank number (def=0) */ -/*12*/ AWE_MD_DEF_DRUM, /* integer: default drumset number (def=0) */ -/*13*/ AWE_MD_TOGGLE_DRUM_BANK, /* 0/1: toggle 
drum flag with bank# (def=0) */ -/*14*/ AWE_MD_NEW_VOLUME_CALC, /* 0/1: volume calculation mode (def=1) */ -/*15*/ AWE_MD_CHORUS_MODE, /* integer: chorus mode (def=2) */ -/*16*/ AWE_MD_REVERB_MODE, /* integer: chorus mode (def=4) */ -/*17*/ AWE_MD_BASS_LEVEL, /* integer: bass level (def=5) */ -/*18*/ AWE_MD_TREBLE_LEVEL, /* integer: treble level (def=9) */ -/*19*/ AWE_MD_DEBUG_MODE, /* integer: debug level (def=0) */ -/*20*/ AWE_MD_PAN_EXCHANGE, /* 0/1: exchange panning direction (def=0) */ - AWE_MD_END, -}; - -/*----------------------------------------------------------------*/ - -/* effect parameters */ -enum { - -/* modulation envelope parameters */ -/* 0*/ AWE_FX_ENV1_DELAY, /* WORD: ENVVAL */ -/* 1*/ AWE_FX_ENV1_ATTACK, /* BYTE: up ATKHLD */ -/* 2*/ AWE_FX_ENV1_HOLD, /* BYTE: lw ATKHLD */ -/* 3*/ AWE_FX_ENV1_DECAY, /* BYTE: lw DCYSUS */ -/* 4*/ AWE_FX_ENV1_RELEASE, /* BYTE: lw DCYSUS */ -/* 5*/ AWE_FX_ENV1_SUSTAIN, /* BYTE: up DCYSUS */ -/* 6*/ AWE_FX_ENV1_PITCH, /* BYTE: up PEFE */ -/* 7*/ AWE_FX_ENV1_CUTOFF, /* BYTE: lw PEFE */ - -/* volume envelope parameters */ -/* 8*/ AWE_FX_ENV2_DELAY, /* WORD: ENVVOL */ -/* 9*/ AWE_FX_ENV2_ATTACK, /* BYTE: up ATKHLDV */ -/*10*/ AWE_FX_ENV2_HOLD, /* BYTE: lw ATKHLDV */ -/*11*/ AWE_FX_ENV2_DECAY, /* BYTE: lw DCYSUSV */ -/*12*/ AWE_FX_ENV2_RELEASE, /* BYTE: lw DCYSUSV */ -/*13*/ AWE_FX_ENV2_SUSTAIN, /* BYTE: up DCYSUSV */ - -/* LFO1 (tremolo & vibrato) parameters */ -/*14*/ AWE_FX_LFO1_DELAY, /* WORD: LFO1VAL */ -/*15*/ AWE_FX_LFO1_FREQ, /* BYTE: lo TREMFRQ */ -/*16*/ AWE_FX_LFO1_VOLUME, /* BYTE: up TREMFRQ */ -/*17*/ AWE_FX_LFO1_PITCH, /* BYTE: up FMMOD */ -/*18*/ AWE_FX_LFO1_CUTOFF, /* BYTE: lo FMMOD */ - -/* LFO2 (vibrato) parameters */ -/*19*/ AWE_FX_LFO2_DELAY, /* WORD: LFO2VAL */ -/*20*/ AWE_FX_LFO2_FREQ, /* BYTE: lo FM2FRQ2 */ -/*21*/ AWE_FX_LFO2_PITCH, /* BYTE: up FM2FRQ2 */ - -/* Other overall effect parameters */ -/*22*/ AWE_FX_INIT_PITCH, /* SHORT: pitch offset */ -/*23*/ AWE_FX_CHORUS, /* BYTE: chorus effects send (0-255) */ -/*24*/ AWE_FX_REVERB, /* BYTE: reverb effects send (0-255) */ -/*25*/ AWE_FX_CUTOFF, /* BYTE: up IFATN */ -/*26*/ AWE_FX_FILTERQ, /* BYTE: up CCCA */ - -/* Sample / loop offset changes */ -/*27*/ AWE_FX_SAMPLE_START, /* SHORT: offset */ -/*28*/ AWE_FX_LOOP_START, /* SHORT: offset */ -/*29*/ AWE_FX_LOOP_END, /* SHORT: offset */ -/*30*/ AWE_FX_COARSE_SAMPLE_START, /* SHORT: upper word offset */ -/*31*/ AWE_FX_COARSE_LOOP_START, /* SHORT: upper word offset */ -/*32*/ AWE_FX_COARSE_LOOP_END, /* SHORT: upper word offset */ -/*33*/ AWE_FX_ATTEN, /* BYTE: lo IFATN */ - - AWE_FX_END, -}; - -#endif /* AWE_VOICE_H */ -- cgit v1.2.3 From c15a3837d2aa30e3ea41aed49d80abed355ab6bd Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 8 May 2007 00:27:35 -0700 Subject: parport->dev driver model support Currently a parport_driver can't get a handle on the device node for the underlying parport (PNPACPI, PCI, etc). That prevents correct placement of sysfs child nodes, which can affect things like power management. This patch adds a field to "struct parport" pointing to that device node, and updates non-legacy port drivers to initialize that device pointer. That field replaces the analagous PCI-only support in parport_pc. 
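(Illustrative aside, not part of the patch: a sketch of how a PCI-based probe routine hands its device node to the parport core after this change. The example_probe() wrapper and its io_lo/io_hi/irq parameters are hypothetical and assumed to come from the device's resources; parport_pc_probe_port() is the real interface whose last argument changes from a pci_dev to a generic struct device below.)

#include <linux/pci.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>

static int example_probe(struct pci_dev *pdev, unsigned long io_lo,
			 unsigned long io_hi, int irq)
{
	struct parport *port;

	/* Pass &pdev->dev instead of NULL (or the old pci_dev pointer) so
	 * the port's sysfs node is parented under the PCI device and DMA
	 * mappings use the correct struct device. */
	port = parport_pc_probe_port(io_lo, io_hi, irq,
				     PARPORT_DMA_NONE, &pdev->dev);
	if (!port)
		return -ENODEV;

	pci_set_drvdata(pdev, port);
	return 0;
}

Legacy drivers that still pass a NULL device keep working, but now trigger the "fix this legacy no-device port driver!" warning added in share.c.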
[akpm@linux-foundation.org: fix powerpc build] Signed-off-by: David Brownell Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/parport/parport_cs.c | 2 +- drivers/parport/parport_mfc3.c | 1 + drivers/parport/parport_pc.c | 31 +++++++++++++++++-------------- drivers/parport/parport_serial.c | 2 +- drivers/parport/parport_sunbpp.c | 1 + drivers/parport/share.c | 5 +++++ include/asm-powerpc/parport.h | 5 ----- include/linux/parport.h | 8 ++++++-- include/linux/parport_pc.h | 3 +-- 9 files changed, 33 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c index 316c06f4423c..8b7d84eca05d 100644 --- a/drivers/parport/parport_cs.c +++ b/drivers/parport/parport_cs.c @@ -201,7 +201,7 @@ static int parport_config(struct pcmcia_device *link) p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2, link->irq.AssignedIRQ, PARPORT_DMA_NONE, - NULL); + &link->dev); if (p == NULL) { printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at " "0x%3x, irq %u failed\n", link->io.BasePort1, diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c index e5b0a544de40..77726fc49766 100644 --- a/drivers/parport/parport_mfc3.c +++ b/drivers/parport/parport_mfc3.c @@ -356,6 +356,7 @@ static int __init parport_mfc3_init(void) if (request_irq(IRQ_AMIGA_PORTS, mfc3_interrupt, IRQF_SHARED, p->name, &pp_mfc3_ops)) goto out_irq; } + p->dev = &z->dev; this_port[pias++] = p; printk(KERN_INFO "%s: Multiface III port using irq\n", p->name); diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 3de2623afa13..c3240a67ef85 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -620,6 +620,7 @@ static size_t parport_pc_fifo_write_block_dma (struct parport *port, unsigned long dmaflag; size_t left = length; const struct parport_pc_private *priv = port->physport->private_data; + struct device *dev = port->physport->dev; dma_addr_t dma_addr, dma_handle; size_t maxlen = 0x10000; /* max 64k per DMA transfer */ unsigned long start = (unsigned long) buf; @@ -631,8 +632,8 @@ dump_parport_state ("enter fifo_write_block_dma", port); if ((start ^ end) & ~0xffffUL) maxlen = 0x10000 - (start & 0xffff); - dma_addr = dma_handle = pci_map_single(priv->dev, (void *)buf, length, - PCI_DMA_TODEVICE); + dma_addr = dma_handle = dma_map_single(dev, (void *)buf, length, + DMA_TO_DEVICE); } else { /* above 16 MB we use a bounce buffer as ISA-DMA is not possible */ maxlen = PAGE_SIZE; /* sizeof(priv->dma_buf) */ @@ -728,9 +729,9 @@ dump_parport_state ("enter fifo_write_block_dma", port); /* Turn off DMA mode */ frob_econtrol (port, 1<<3, 0); - + if (dma_handle) - pci_unmap_single(priv->dev, dma_handle, length, PCI_DMA_TODEVICE); + dma_unmap_single(dev, dma_handle, length, DMA_TO_DEVICE); dump_parport_state ("leave fifo_write_block_dma", port); return length - left; @@ -2146,7 +2147,7 @@ static DEFINE_SPINLOCK(ports_lock); struct parport *parport_pc_probe_port (unsigned long int base, unsigned long int base_hi, int irq, int dma, - struct pci_dev *dev) + struct device *dev) { struct parport_pc_private *priv; struct parport_operations *ops; @@ -2180,9 +2181,10 @@ struct parport *parport_pc_probe_port (unsigned long int base, priv->fifo_depth = 0; priv->dma_buf = NULL; priv->dma_handle = 0; - priv->dev = dev; INIT_LIST_HEAD(&priv->list); priv->port = p; + + p->dev = dev; p->base_hi = base_hi; p->modes = PARPORT_MODE_PCSPP | 
PARPORT_MODE_SAFEININT; p->private_data = priv; @@ -2305,9 +2307,10 @@ struct parport *parport_pc_probe_port (unsigned long int base, p->dma = PARPORT_DMA_NONE; } else { priv->dma_buf = - pci_alloc_consistent(priv->dev, + dma_alloc_coherent(dev, PAGE_SIZE, - &priv->dma_handle); + &priv->dma_handle, + GFP_KERNEL); if (! priv->dma_buf) { printk (KERN_WARNING "%s: " "cannot get buffer for DMA, " @@ -2383,7 +2386,7 @@ void parport_pc_unregister_port (struct parport *p) release_region(p->base_hi, 3); #if defined(CONFIG_PARPORT_PC_FIFO) && defined(HAS_DMA) if (priv->dma_buf) - pci_free_consistent(priv->dev, PAGE_SIZE, + dma_free_coherent(p->physport->dev, PAGE_SIZE, priv->dma_buf, priv->dma_handle); #endif @@ -2489,7 +2492,7 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq, */ release_resource(base_res); if (parport_pc_probe_port (ite8872_lpt, ite8872_lpthi, - irq, PARPORT_DMA_NONE, NULL)) { + irq, PARPORT_DMA_NONE, &pdev->dev)) { printk (KERN_INFO "parport_pc: ITE 8872 parallel port: io=0x%X", ite8872_lpt); @@ -2672,7 +2675,7 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq, } /* finally, do the probe with values obtained */ - if (parport_pc_probe_port (port1, port2, irq, dma, NULL)) { + if (parport_pc_probe_port (port1, port2, irq, dma, &pdev->dev)) { printk (KERN_INFO "parport_pc: VIA parallel port: io=0x%X", port1); if (irq != PARPORT_IRQ_NONE) @@ -2970,7 +2973,7 @@ static int parport_pc_pci_probe (struct pci_dev *dev, parport_pc_pci_tbl[i + last_sio].device, io_lo, io_hi); data->ports[count] = parport_pc_probe_port (io_lo, io_hi, PARPORT_IRQ_NONE, - PARPORT_DMA_NONE, dev); + PARPORT_DMA_NONE, &dev->dev); if (data->ports[count]) count++; } @@ -3077,8 +3080,8 @@ static int parport_pc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id } else dma = PARPORT_DMA_NONE; - printk(KERN_INFO "parport: PnPBIOS parport detected.\n"); - if (!(pdata = parport_pc_probe_port (io_lo, io_hi, irq, dma, NULL))) + dev_info(&dev->dev, "reported by %s\n", dev->protocol->name); + if (!(pdata = parport_pc_probe_port (io_lo, io_hi, irq, dma, &dev->dev))) return -ENODEV; pnp_set_drvdata(dev,pdata); diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c index 78c0a269a2ba..e06223cf6c26 100644 --- a/drivers/parport/parport_serial.c +++ b/drivers/parport/parport_serial.c @@ -305,7 +305,7 @@ static int __devinit parport_register (struct pci_dev *dev, dev_dbg(&dev->dev, "PCI parallel port detected: I/O at " "%#lx(%#lx)\n", io_lo, io_hi); port = parport_pc_probe_port (io_lo, io_hi, PARPORT_IRQ_NONE, - PARPORT_DMA_NONE, dev); + PARPORT_DMA_NONE, &dev->dev); if (port) { priv->port[priv->num_par++] = port; success = 1; diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c index 400bb90084cf..d27019c2f860 100644 --- a/drivers/parport/parport_sunbpp.c +++ b/drivers/parport/parport_sunbpp.c @@ -322,6 +322,7 @@ static int __devinit init_one_port(struct sbus_dev *sdev) goto out_free_ops; p->size = size; + p->dev = &sdev->ofdev.dev; if ((err = request_irq(p->irq, parport_sunbpp_interrupt, IRQF_SHARED, p->name, p)) != 0) { diff --git a/drivers/parport/share.c b/drivers/parport/share.c index fd9129e424f9..cd66442acfee 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -365,6 +365,11 @@ void parport_announce_port (struct parport *port) parport_daisy_init(port); #endif + if (!port->dev) + printk(KERN_WARNING "%s: fix this legacy " + "no-device port driver!\n", + port->name); + parport_proc_register(port); 
mutex_lock(®istration_lock); spin_lock_irq(&parportlist_lock); diff --git a/include/asm-powerpc/parport.h b/include/asm-powerpc/parport.h index b37b81e37278..414c50e2e881 100644 --- a/include/asm-powerpc/parport.h +++ b/include/asm-powerpc/parport.h @@ -12,11 +12,6 @@ #include -extern struct parport *parport_pc_probe_port (unsigned long int base, - unsigned long int base_hi, - int irq, int dma, - struct pci_dev *dev); - static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) { struct device_node *np; diff --git a/include/linux/parport.h b/include/linux/parport.h index 80682aaa8f18..9cdd6943e01b 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -279,6 +279,10 @@ struct parport { int dma; int muxport; /* which muxport (if any) this is */ int portnum; /* which physical parallel port (not mux) */ + struct device *dev; /* Physical device associated with IO/DMA. + * This may unfortulately be null if the + * port has a legacy driver. + */ struct parport *physport; /* If this is a non-default mux @@ -289,7 +293,7 @@ struct parport { following structure members are meaningless: devices, cad, muxsel, waithead, waittail, flags, pdir, - ieee1284, *_lock. + dev, ieee1284, *_lock. It this is a default mux parport, or there is no mux involved, this points to @@ -302,7 +306,7 @@ struct parport { struct pardevice *waithead; struct pardevice *waittail; - + struct list_head list; unsigned int flags; diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h index 1cc0f6b1a49a..ea8c6d84996d 100644 --- a/include/linux/parport_pc.h +++ b/include/linux/parport_pc.h @@ -38,7 +38,6 @@ struct parport_pc_private { /* buffer suitable for DMA, if DMA enabled */ char *dma_buf; dma_addr_t dma_handle; - struct pci_dev *dev; struct list_head list; struct parport *port; }; @@ -232,7 +231,7 @@ extern int parport_pc_claim_resources(struct parport *p); extern struct parport *parport_pc_probe_port (unsigned long base, unsigned long base_hi, int irq, int dma, - struct pci_dev *dev); + struct device *dev); extern void parport_pc_unregister_port (struct parport *p); #endif -- cgit v1.2.3 From 6e453a67510a17f01b63835f18569e8c3939a38c Mon Sep 17 00:00:00 2001 From: Venki Pallipadi Date: Tue, 8 May 2007 00:27:44 -0700 Subject: Add support for deferrable timers Introduce a new flag for timers - deferrable: Timers that work normally when system is busy. But, will not cause CPU to come out of idle (just to service this timer), when CPU is idle. Instead, this timer will be serviced when CPU eventually wakes up with a subsequent non-deferrable timer. The main advantage of this is to avoid unnecessary timer interrupts when CPU is idle. If the routine currently called by a timer can wait until next event without any issues, this new timer can be used to setup timer event for that routine. This, with dynticks, allows CPUs to be lazy, allowing them to stay in idle for extended period of time by reducing unnecesary wakeup and thereby reducing the power consumption. This patch: Builds this new timer on top of existing timer infrastructure. It uses last bit in 'base' pointer of timer_list structure to store this deferrable timer flag. __next_timer_interrupt() function skips over these deferrable timers when CPU looks for next timer event for which it has to wake up. This is exported by a new interface init_timer_deferrable() that can be called in place of regular init_timer(). 
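(Illustrative aside, not part of the patch: the snippet below sketches how a driver would opt a low-priority periodic timer into the new behaviour. The housekeeping_* names are made up for the example; only the init call differs from a normal timer.)

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list housekeeping_timer;	/* hypothetical example timer */

static void housekeeping_fn(unsigned long data)
{
	/* ... low-priority periodic work ... */
	mod_timer(&housekeeping_timer, jiffies + 10 * HZ);
}

static void housekeeping_start(void)
{
	/* init_timer_deferrable() replaces init_timer(); function, data,
	 * expires and add_timer() are used exactly as before.  The timer
	 * still fires on time when the CPU is busy, but will not wake an
	 * idle CPU by itself. */
	init_timer_deferrable(&housekeeping_timer);
	housekeeping_timer.function = housekeeping_fn;
	housekeeping_timer.data = 0;
	housekeeping_timer.expires = jiffies + 10 * HZ;
	add_timer(&housekeeping_timer);
}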
[akpm@linux-foundation.org: Privatise a #define] Signed-off-by: Venkatesh Pallipadi Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Oleg Nesterov Cc: Dave Jones Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/timer.h | 1 + kernel/timer.c | 65 ++++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 58 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/timer.h b/include/linux/timer.h index 719113b652dd..e0c5c16c992f 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -37,6 +37,7 @@ extern struct tvec_t_base_s boot_tvec_bases; TIMER_INITIALIZER(_function, _expires, _data) void fastcall init_timer(struct timer_list * timer); +void fastcall init_timer_deferrable(struct timer_list *timer); static inline void setup_timer(struct timer_list * timer, void (*function)(unsigned long), diff --git a/kernel/timer.c b/kernel/timer.c index b22bd39740dd..dbe966feff2f 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -74,7 +74,7 @@ struct tvec_t_base_s { tvec_t tv3; tvec_t tv4; tvec_t tv5; -} ____cacheline_aligned_in_smp; +} ____cacheline_aligned; typedef struct tvec_t_base_s tvec_base_t; @@ -82,6 +82,37 @@ tvec_base_t boot_tvec_bases; EXPORT_SYMBOL(boot_tvec_bases); static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases; +/* + * Note that all tvec_bases is 2 byte aligned and lower bit of + * base in timer_list is guaranteed to be zero. Use the LSB for + * the new flag to indicate whether the timer is deferrable + */ +#define TBASE_DEFERRABLE_FLAG (0x1) + +/* Functions below help us manage 'deferrable' flag */ +static inline unsigned int tbase_get_deferrable(tvec_base_t *base) +{ + return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG); +} + +static inline tvec_base_t *tbase_get_base(tvec_base_t *base) +{ + return ((tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG)); +} + +static inline void timer_set_deferrable(struct timer_list *timer) +{ + timer->base = ((tvec_base_t *)((unsigned long)(timer->base) | + TBASE_DEFERRABLE_FLAG)); +} + +static inline void +timer_set_base(struct timer_list *timer, tvec_base_t *new_base) +{ + timer->base = (tvec_base_t *)((unsigned long)(new_base) | + tbase_get_deferrable(timer->base)); +} + /** * __round_jiffies - function to round jiffies to a full second * @j: the time in (absolute) jiffies that should be rounded @@ -295,6 +326,13 @@ void fastcall init_timer(struct timer_list *timer) } EXPORT_SYMBOL(init_timer); +void fastcall init_timer_deferrable(struct timer_list *timer) +{ + init_timer(timer); + timer_set_deferrable(timer); +} +EXPORT_SYMBOL(init_timer_deferrable); + static inline void detach_timer(struct timer_list *timer, int clear_pending) { @@ -325,10 +363,11 @@ static tvec_base_t *lock_timer_base(struct timer_list *timer, tvec_base_t *base; for (;;) { - base = timer->base; + tvec_base_t *prelock_base = timer->base; + base = tbase_get_base(prelock_base); if (likely(base != NULL)) { spin_lock_irqsave(&base->lock, *flags); - if (likely(base == timer->base)) + if (likely(prelock_base == timer->base)) return base; /* The timer has migrated to another CPU */ spin_unlock_irqrestore(&base->lock, *flags); @@ -365,11 +404,11 @@ int __mod_timer(struct timer_list *timer, unsigned long expires) */ if (likely(base->running_timer != timer)) { /* See the comment in lock_timer_base() */ - timer->base = NULL; + timer_set_base(timer, NULL); spin_unlock(&base->lock); base = new_base; spin_lock(&base->lock); - timer->base = base; + timer_set_base(timer, base); } } @@ 
-397,7 +436,7 @@ void add_timer_on(struct timer_list *timer, int cpu) timer_stats_timer_set_start_info(timer); BUG_ON(timer_pending(timer) || !timer->function); spin_lock_irqsave(&base->lock, flags); - timer->base = base; + timer_set_base(timer, base); internal_add_timer(base, timer); spin_unlock_irqrestore(&base->lock, flags); } @@ -550,7 +589,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index) * don't have to detach them individually. */ list_for_each_entry_safe(timer, tmp, &tv_list, entry) { - BUG_ON(timer->base != base); + BUG_ON(tbase_get_base(timer->base) != base); internal_add_timer(base, timer); } @@ -636,6 +675,9 @@ static unsigned long __next_timer_interrupt(tvec_base_t *base) index = slot = timer_jiffies & TVR_MASK; do { list_for_each_entry(nte, base->tv1.vec + slot, entry) { + if (tbase_get_deferrable(nte->base)) + continue; + found = 1; expires = nte->expires; /* Look at the cascade bucket(s)? */ @@ -1617,6 +1659,13 @@ static int __devinit init_timers_cpu(int cpu) cpu_to_node(cpu)); if (!base) return -ENOMEM; + + /* Make sure that tvec_base is 2 byte aligned */ + if (tbase_get_deferrable(base)) { + WARN_ON(1); + kfree(base); + return -ENOMEM; + } memset(base, 0, sizeof(*base)); per_cpu(tvec_bases, cpu) = base; } else { @@ -1658,7 +1707,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head) while (!list_empty(head)) { timer = list_entry(head->next, struct timer_list, entry); detach_timer(timer, 0); - timer->base = new_base; + timer_set_base(timer, new_base); internal_add_timer(new_base, timer); } } -- cgit v1.2.3 From 28287033e12463c8ff89f1ea8038783d0360391c Mon Sep 17 00:00:00 2001 From: Venki Pallipadi Date: Tue, 8 May 2007 00:27:47 -0700 Subject: Add a new deferrable delayed work init Add a new deferrable delayed work init. This can be used to schedule work that are 'unimportant' when CPU is idle and can be called later, when CPU eventually comes out of idle. Use this init in cpufreq ondemand governor. 
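(Illustrative aside, not part of the patch: a hypothetical polling work item set up with the new initializer, mirroring what the ondemand governor change below does with its sampling work. The poll_* names are made up for the example.)

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
	/* ... sample some state, then re-arm ... */
	schedule_delayed_work(&poll_work, HZ);
}

static void poll_start(void)
{
	/* Identical to INIT_DELAYED_WORK() except that the embedded timer
	 * is set up with init_timer_deferrable(), so an idle CPU is not
	 * woken just to run this work. */
	INIT_DELAYED_WORK_DEFERRABLE(&poll_work, poll_fn);
	schedule_delayed_work(&poll_work, HZ);
}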
Signed-off-by: Venkatesh Pallipadi Cc: Dave Jones Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/cpufreq/cpufreq_ondemand.c | 2 +- include/linux/workqueue.h | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 8d053f500fc2..8532bb79e5fc 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -470,7 +470,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) dbs_info->enable = 1; ondemand_powersave_bias_init(); dbs_info->sample_type = DBS_NORMAL_SAMPLE; - INIT_DELAYED_WORK(&dbs_info->work, do_dbs_timer); + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work, delay); } diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index b8abfc74d038..f16ba1e0687d 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -121,6 +121,12 @@ struct execute_work { init_timer(&(_work)->timer); \ } while (0) +#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ + do { \ + INIT_WORK(&(_work)->work, (_func)); \ + init_timer_deferrable(&(_work)->timer); \ + } while (0) + /** * work_pending - Find out whether a work item is currently pending * @work: The work item in question -- cgit v1.2.3 From 3367b994fe4f131ab1240600682a1981de7cad0c Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 8 May 2007 00:27:52 -0700 Subject: needs to include sysdev.h uses THIS_MODULE so should include . [akpm@linux-foundation.org: couple of fixes] Signed-off-by: Ralf Baechle Cc: Andi Kleen Cc: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/kernel/time.c | 1 + include/linux/cpu.h | 3 +++ include/linux/sched.h | 3 --- include/linux/sysdev.h | 1 + 4 files changed, 5 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 711dae8da7ad..9c2872a7cca7 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/cpu.h b/include/linux/cpu.h index c22b0dfcbcd2..3b2df2523f1d 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -41,6 +41,9 @@ extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr); extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs); extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs); +extern struct sysdev_attribute attr_sched_mc_power_savings; +extern struct sysdev_attribute attr_sched_smt_power_savings; +extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); #ifdef CONFIG_HOTPLUG_CPU extern void unregister_cpu(struct cpu *cpu); diff --git a/include/linux/sched.h b/include/linux/sched.h index d9acbbb39f96..d464bd0d6578 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1641,10 +1641,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) extern long sched_setaffinity(pid_t pid, cpumask_t new_mask); extern long sched_getaffinity(pid_t pid, cpumask_t *mask); -#include extern int sched_mc_power_savings, sched_smt_power_savings; -extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings; -extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); extern void normalize_rt_tasks(void); diff --git a/include/linux/sysdev.h 
b/include/linux/sysdev.h index 389ccf858d37..e699ab279c2c 100644 --- a/include/linux/sysdev.h +++ b/include/linux/sysdev.h @@ -22,6 +22,7 @@ #define _SYSDEV_H_ #include +#include #include -- cgit v1.2.3 From 329c8d84ca1946c037d9859dc251b56d8b1b4630 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 8 May 2007 00:27:57 -0700 Subject: time: SMP friendly alignment of struct clocksource struct clocksource is a critical data structure. Most of its fields are read only, some of them are heavily modified at each timer interrupt. It makes sense to separate those fields and make sure they all share one cache line, or at least the minimum for machines with small cache lines. [akpm@linux-foundation.org: build fix] Signed-off-by: Eric Dumazet Acked-by: John Stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/clocksource.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index daa4940cc0f1..2665ca04cf8f 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -52,6 +53,9 @@ struct clocksource; * @xtime_interval: Used internally by timekeeping core, please ignore. */ struct clocksource { + /* + * First part of structure is read mostly + */ char *name; struct list_head list; int rating; @@ -63,8 +67,15 @@ struct clocksource { cycle_t (*vread)(void); /* timekeeping specific data, ignore */ - cycle_t cycle_last, cycle_interval; - u64 xtime_nsec, xtime_interval; + cycle_t cycle_interval; + u64 xtime_interval; + /* + * Second part is written at each timer interrupt + * Keep it in a different cache line to dirty no + * more than one cache line. + */ + cycle_t cycle_last ____cacheline_aligned_in_smp; + u64 xtime_nsec; s64 error; #ifdef CONFIG_CLOCKSOURCE_WATCHDOG -- cgit v1.2.3 From 8524070b7982d76258942275908b7434cfcab4b4 Mon Sep 17 00:00:00 2001 From: john stultz Date: Tue, 8 May 2007 00:27:59 -0700 Subject: Move timekeeping code to timekeeping.c Move the timekeeping code out of kernel/timer.c and into kernel/time/timekeeping.c. I made no cleanups or other changes in transit. 
[akpm@linux-foundation.org: build fix] Signed-off-by: John Stultz Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/time.h | 1 + kernel/time/Makefile | 2 +- kernel/time/timekeeping.c | 476 ++++++++++++++++++++++++++++++++++++++++++++++ kernel/timer.c | 459 +------------------------------------------- 4 files changed, 479 insertions(+), 459 deletions(-) create mode 100644 kernel/time/timekeeping.c (limited to 'include/linux') diff --git a/include/linux/time.h b/include/linux/time.h index 8ea8dea713c7..50e31b1659a7 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -119,6 +119,7 @@ extern void getnstimeofday(struct timespec *tv); extern struct timespec timespec_trunc(struct timespec t, unsigned gran); extern int timekeeping_is_continuous(void); +extern void update_wall_time(void); /** * timespec_to_ns - Convert timespec to nanoseconds diff --git a/kernel/time/Makefile b/kernel/time/Makefile index 93bccba1f265..99b6034fc86b 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile @@ -1,4 +1,4 @@ -obj-y += ntp.o clocksource.o jiffies.o timer_list.o +obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c new file mode 100644 index 000000000000..f9217bf644f6 --- /dev/null +++ b/kernel/time/timekeeping.c @@ -0,0 +1,476 @@ +/* + * linux/kernel/time/timekeeping.c + * + * Kernel timekeeping code and accessor functions + * + * This code was moved from linux/kernel/timer.c. + * Please see that file for copyright and history logs. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * This read-write spinlock protects us from races in SMP while + * playing with xtime and avenrun. + */ +__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); + +EXPORT_SYMBOL(xtime_lock); + + +/* + * The current time + * wall_to_monotonic is what we need to add to xtime (or xtime corrected + * for sub jiffie times) to get to monotonic time. Monotonic is pegged + * at zero at system boot time, so wall_to_monotonic will be negative, + * however, we will ALWAYS keep the tv_nsec part positive so we can use + * the usual normalization. + */ +struct timespec xtime __attribute__ ((aligned (16))); +struct timespec wall_to_monotonic __attribute__ ((aligned (16))); + +EXPORT_SYMBOL(xtime); + + +static struct clocksource *clock; /* pointer to current clocksource */ + + +#ifdef CONFIG_GENERIC_TIME +/** + * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook + * + * private function, must hold xtime_lock lock when being + * called. Returns the number of nanoseconds since the + * last call to update_wall_time() (adjusted by NTP scaling) + */ +static inline s64 __get_nsec_offset(void) +{ + cycle_t cycle_now, cycle_delta; + s64 ns_offset; + + /* read clocksource: */ + cycle_now = clocksource_read(clock); + + /* calculate the delta since the last update_wall_time: */ + cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; + + /* convert to nanoseconds: */ + ns_offset = cyc2ns(clock, cycle_delta); + + return ns_offset; +} + +/** + * __get_realtime_clock_ts - Returns the time of day in a timespec + * @ts: pointer to the timespec to be set + * + * Returns the time of day in a timespec. Used by + * do_gettimeofday() and get_realtime_clock_ts(). 
+ */ +static inline void __get_realtime_clock_ts(struct timespec *ts) +{ + unsigned long seq; + s64 nsecs; + + do { + seq = read_seqbegin(&xtime_lock); + + *ts = xtime; + nsecs = __get_nsec_offset(); + + } while (read_seqretry(&xtime_lock, seq)); + + timespec_add_ns(ts, nsecs); +} + +/** + * getnstimeofday - Returns the time of day in a timespec + * @ts: pointer to the timespec to be set + * + * Returns the time of day in a timespec. + */ +void getnstimeofday(struct timespec *ts) +{ + __get_realtime_clock_ts(ts); +} + +EXPORT_SYMBOL(getnstimeofday); + +/** + * do_gettimeofday - Returns the time of day in a timeval + * @tv: pointer to the timeval to be set + * + * NOTE: Users should be converted to using get_realtime_clock_ts() + */ +void do_gettimeofday(struct timeval *tv) +{ + struct timespec now; + + __get_realtime_clock_ts(&now); + tv->tv_sec = now.tv_sec; + tv->tv_usec = now.tv_nsec/1000; +} + +EXPORT_SYMBOL(do_gettimeofday); +/** + * do_settimeofday - Sets the time of day + * @tv: pointer to the timespec variable containing the new time + * + * Sets the time of day to the new time and update NTP and notify hrtimers + */ +int do_settimeofday(struct timespec *tv) +{ + unsigned long flags; + time_t wtm_sec, sec = tv->tv_sec; + long wtm_nsec, nsec = tv->tv_nsec; + + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + + write_seqlock_irqsave(&xtime_lock, flags); + + nsec -= __get_nsec_offset(); + + wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); + wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); + + set_normalized_timespec(&xtime, sec, nsec); + set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); + + clock->error = 0; + ntp_clear(); + + update_vsyscall(&xtime, clock); + + write_sequnlock_irqrestore(&xtime_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); + + return 0; +} + +EXPORT_SYMBOL(do_settimeofday); + +/** + * change_clocksource - Swaps clocksources if a new one is available + * + * Accumulates current time interval and initializes new clocksource + */ +static void change_clocksource(void) +{ + struct clocksource *new; + cycle_t now; + u64 nsec; + + new = clocksource_get_next(); + + if (clock == new) + return; + + now = clocksource_read(new); + nsec = __get_nsec_offset(); + timespec_add_ns(&xtime, nsec); + + clock = new; + clock->cycle_last = now; + + clock->error = 0; + clock->xtime_nsec = 0; + clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); + + tick_clock_notify(); + + printk(KERN_INFO "Time: %s clocksource has been installed.\n", + clock->name); +} +#else +static inline void change_clocksource(void) { } +#endif + +/** + * timekeeping_is_continuous - check to see if timekeeping is free running + */ +int timekeeping_is_continuous(void) +{ + unsigned long seq; + int ret; + + do { + seq = read_seqbegin(&xtime_lock); + + ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; + + } while (read_seqretry(&xtime_lock, seq)); + + return ret; +} + +/** + * read_persistent_clock - Return time in seconds from the persistent clock. + * + * Weak dummy function for arches that do not yet support it. + * Returns seconds from epoch using the battery backed persistent clock. + * Returns zero if unsupported. + * + * XXX - Do be sure to remove it once all arches implement it. 
+ */ +unsigned long __attribute__((weak)) read_persistent_clock(void) +{ + return 0; +} + +/* + * timekeeping_init - Initializes the clocksource and common timekeeping values + */ +void __init timekeeping_init(void) +{ + unsigned long flags; + unsigned long sec = read_persistent_clock(); + + write_seqlock_irqsave(&xtime_lock, flags); + + ntp_clear(); + + clock = clocksource_get_next(); + clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); + clock->cycle_last = clocksource_read(clock); + + xtime.tv_sec = sec; + xtime.tv_nsec = 0; + set_normalized_timespec(&wall_to_monotonic, + -xtime.tv_sec, -xtime.tv_nsec); + + write_sequnlock_irqrestore(&xtime_lock, flags); +} + +/* flag for if timekeeping is suspended */ +static int timekeeping_suspended; +/* time in seconds when suspend began */ +static unsigned long timekeeping_suspend_time; + +/** + * timekeeping_resume - Resumes the generic timekeeping subsystem. + * @dev: unused + * + * This is for the generic clocksource timekeeping. + * xtime/wall_to_monotonic/jiffies/etc are + * still managed by arch specific suspend/resume code. + */ +static int timekeeping_resume(struct sys_device *dev) +{ + unsigned long flags; + unsigned long now = read_persistent_clock(); + + write_seqlock_irqsave(&xtime_lock, flags); + + if (now && (now > timekeeping_suspend_time)) { + unsigned long sleep_length = now - timekeeping_suspend_time; + + xtime.tv_sec += sleep_length; + wall_to_monotonic.tv_sec -= sleep_length; + } + /* re-base the last cycle value */ + clock->cycle_last = clocksource_read(clock); + clock->error = 0; + timekeeping_suspended = 0; + write_sequnlock_irqrestore(&xtime_lock, flags); + + touch_softlockup_watchdog(); + + clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL); + + /* Resume hrtimers */ + hres_timers_resume(); + + return 0; +} + +static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) +{ + unsigned long flags; + + write_seqlock_irqsave(&xtime_lock, flags); + timekeeping_suspended = 1; + timekeeping_suspend_time = read_persistent_clock(); + write_sequnlock_irqrestore(&xtime_lock, flags); + + clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); + + return 0; +} + +/* sysfs resume/suspend bits for timekeeping */ +static struct sysdev_class timekeeping_sysclass = { + .resume = timekeeping_resume, + .suspend = timekeeping_suspend, + set_kset_name("timekeeping"), +}; + +static struct sys_device device_timer = { + .id = 0, + .cls = &timekeeping_sysclass, +}; + +static int __init timekeeping_init_device(void) +{ + int error = sysdev_class_register(&timekeeping_sysclass); + if (!error) + error = sysdev_register(&device_timer); + return error; +} + +device_initcall(timekeeping_init_device); + +/* + * If the error is already larger, we look ahead even further + * to compensate for late or lost adjustments. + */ +static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, + s64 *offset) +{ + s64 tick_error, i; + u32 look_ahead, adj; + s32 error2, mult; + + /* + * Use the current error value to determine how much to look ahead. + * The larger the error the slower we adjust for it to avoid problems + * with losing too many ticks, otherwise we would overadjust and + * produce an even larger error. The smaller the adjustment the + * faster we try to adjust for it, as lost ticks can do less harm + * here. This is tuned so that an error of about 1 msec is adusted + * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks). 
+ */ + error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ); + error2 = abs(error2); + for (look_ahead = 0; error2 > 0; look_ahead++) + error2 >>= 2; + + /* + * Now calculate the error in (1 << look_ahead) ticks, but first + * remove the single look ahead already included in the error. + */ + tick_error = current_tick_length() >> + (TICK_LENGTH_SHIFT - clock->shift + 1); + tick_error -= clock->xtime_interval >> 1; + error = ((error - tick_error) >> look_ahead) + tick_error; + + /* Finally calculate the adjustment shift value. */ + i = *interval; + mult = 1; + if (error < 0) { + error = -error; + *interval = -*interval; + *offset = -*offset; + mult = -1; + } + for (adj = 0; error > i; adj++) + error >>= 1; + + *interval <<= adj; + *offset <<= adj; + return mult << adj; +} + +/* + * Adjust the multiplier to reduce the error value, + * this is optimized for the most common adjustments of -1,0,1, + * for other values we can do a bit more work. + */ +static void clocksource_adjust(struct clocksource *clock, s64 offset) +{ + s64 error, interval = clock->cycle_interval; + int adj; + + error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1); + if (error > interval) { + error >>= 2; + if (likely(error <= interval)) + adj = 1; + else + adj = clocksource_bigadjust(error, &interval, &offset); + } else if (error < -interval) { + error >>= 2; + if (likely(error >= -interval)) { + adj = -1; + interval = -interval; + offset = -offset; + } else + adj = clocksource_bigadjust(error, &interval, &offset); + } else + return; + + clock->mult += adj; + clock->xtime_interval += interval; + clock->xtime_nsec -= offset; + clock->error -= (interval - offset) << + (TICK_LENGTH_SHIFT - clock->shift); +} + +/** + * update_wall_time - Uses the current clocksource to increment the wall time + * + * Called from the timer interrupt, must hold a write on xtime_lock. + */ +void update_wall_time(void) +{ + cycle_t offset; + + /* Make sure we're fully resumed: */ + if (unlikely(timekeeping_suspended)) + return; + +#ifdef CONFIG_GENERIC_TIME + offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask; +#else + offset = clock->cycle_interval; +#endif + clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; + + /* normally this loop will run just once, however in the + * case of lost or late ticks, it will accumulate correctly. 
+ */ + while (offset >= clock->cycle_interval) { + /* accumulate one interval */ + clock->xtime_nsec += clock->xtime_interval; + clock->cycle_last += clock->cycle_interval; + offset -= clock->cycle_interval; + + if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) { + clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift; + xtime.tv_sec++; + second_overflow(); + } + + /* interpolator bits */ + time_interpolator_update(clock->xtime_interval + >> clock->shift); + + /* accumulate error between NTP and clock interval */ + clock->error += current_tick_length(); + clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift); + } + + /* correct the clock when NTP error is too big */ + clocksource_adjust(clock, offset); + + /* store full nanoseconds into xtime */ + xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift; + clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift; + + /* check to see if there is a new clocksource to use */ + change_clocksource(); + update_vsyscall(&xtime, clock); +} diff --git a/kernel/timer.c b/kernel/timer.c index dbe966feff2f..ba41af2bb6cc 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1,7 +1,7 @@ /* * linux/kernel/timer.c * - * Kernel internal timers, kernel timekeeping, basic process system calls + * Kernel internal timers, basic process system calls * * Copyright (C) 1991, 1992 Linus Torvalds * @@ -794,455 +794,6 @@ unsigned long next_timer_interrupt(void) #endif -/******************************************************************/ - -/* - * The current time - * wall_to_monotonic is what we need to add to xtime (or xtime corrected - * for sub jiffie times) to get to monotonic time. Monotonic is pegged - * at zero at system boot time, so wall_to_monotonic will be negative, - * however, we will ALWAYS keep the tv_nsec part positive so we can use - * the usual normalization. - */ -struct timespec xtime __attribute__ ((aligned (16))); -struct timespec wall_to_monotonic __attribute__ ((aligned (16))); - -EXPORT_SYMBOL(xtime); - - -/* XXX - all of this timekeeping code should be later moved to time.c */ -#include -static struct clocksource *clock; /* pointer to current clocksource */ - -#ifdef CONFIG_GENERIC_TIME -/** - * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook - * - * private function, must hold xtime_lock lock when being - * called. Returns the number of nanoseconds since the - * last call to update_wall_time() (adjusted by NTP scaling) - */ -static inline s64 __get_nsec_offset(void) -{ - cycle_t cycle_now, cycle_delta; - s64 ns_offset; - - /* read clocksource: */ - cycle_now = clocksource_read(clock); - - /* calculate the delta since the last update_wall_time: */ - cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; - - /* convert to nanoseconds: */ - ns_offset = cyc2ns(clock, cycle_delta); - - return ns_offset; -} - -/** - * __get_realtime_clock_ts - Returns the time of day in a timespec - * @ts: pointer to the timespec to be set - * - * Returns the time of day in a timespec. Used by - * do_gettimeofday() and get_realtime_clock_ts(). - */ -static inline void __get_realtime_clock_ts(struct timespec *ts) -{ - unsigned long seq; - s64 nsecs; - - do { - seq = read_seqbegin(&xtime_lock); - - *ts = xtime; - nsecs = __get_nsec_offset(); - - } while (read_seqretry(&xtime_lock, seq)); - - timespec_add_ns(ts, nsecs); -} - -/** - * getnstimeofday - Returns the time of day in a timespec - * @ts: pointer to the timespec to be set - * - * Returns the time of day in a timespec. 
- */ -void getnstimeofday(struct timespec *ts) -{ - __get_realtime_clock_ts(ts); -} - -EXPORT_SYMBOL(getnstimeofday); - -/** - * do_gettimeofday - Returns the time of day in a timeval - * @tv: pointer to the timeval to be set - * - * NOTE: Users should be converted to using get_realtime_clock_ts() - */ -void do_gettimeofday(struct timeval *tv) -{ - struct timespec now; - - __get_realtime_clock_ts(&now); - tv->tv_sec = now.tv_sec; - tv->tv_usec = now.tv_nsec/1000; -} - -EXPORT_SYMBOL(do_gettimeofday); -/** - * do_settimeofday - Sets the time of day - * @tv: pointer to the timespec variable containing the new time - * - * Sets the time of day to the new time and update NTP and notify hrtimers - */ -int do_settimeofday(struct timespec *tv) -{ - unsigned long flags; - time_t wtm_sec, sec = tv->tv_sec; - long wtm_nsec, nsec = tv->tv_nsec; - - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - - write_seqlock_irqsave(&xtime_lock, flags); - - nsec -= __get_nsec_offset(); - - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); - - set_normalized_timespec(&xtime, sec, nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); - - clock->error = 0; - ntp_clear(); - - update_vsyscall(&xtime, clock); - - write_sequnlock_irqrestore(&xtime_lock, flags); - - /* signal hrtimers about time change */ - clock_was_set(); - - return 0; -} - -EXPORT_SYMBOL(do_settimeofday); - -/** - * change_clocksource - Swaps clocksources if a new one is available - * - * Accumulates current time interval and initializes new clocksource - */ -static void change_clocksource(void) -{ - struct clocksource *new; - cycle_t now; - u64 nsec; - - new = clocksource_get_next(); - - if (clock == new) - return; - - now = clocksource_read(new); - nsec = __get_nsec_offset(); - timespec_add_ns(&xtime, nsec); - - clock = new; - clock->cycle_last = now; - - clock->error = 0; - clock->xtime_nsec = 0; - clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); - - tick_clock_notify(); - - printk(KERN_INFO "Time: %s clocksource has been installed.\n", - clock->name); -} -#else -static inline void change_clocksource(void) { } -#endif - -/** - * timekeeping_is_continuous - check to see if timekeeping is free running - */ -int timekeeping_is_continuous(void) -{ - unsigned long seq; - int ret; - - do { - seq = read_seqbegin(&xtime_lock); - - ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; - - } while (read_seqretry(&xtime_lock, seq)); - - return ret; -} - -/** - * read_persistent_clock - Return time in seconds from the persistent clock. - * - * Weak dummy function for arches that do not yet support it. - * Returns seconds from epoch using the battery backed persistent clock. - * Returns zero if unsupported. - * - * XXX - Do be sure to remove it once all arches implement it. 
- */ -unsigned long __attribute__((weak)) read_persistent_clock(void) -{ - return 0; -} - -/* - * timekeeping_init - Initializes the clocksource and common timekeeping values - */ -void __init timekeeping_init(void) -{ - unsigned long flags; - unsigned long sec = read_persistent_clock(); - - write_seqlock_irqsave(&xtime_lock, flags); - - ntp_clear(); - - clock = clocksource_get_next(); - clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); - clock->cycle_last = clocksource_read(clock); - - xtime.tv_sec = sec; - xtime.tv_nsec = 0; - set_normalized_timespec(&wall_to_monotonic, - -xtime.tv_sec, -xtime.tv_nsec); - - write_sequnlock_irqrestore(&xtime_lock, flags); -} - -/* flag for if timekeeping is suspended */ -static int timekeeping_suspended; -/* time in seconds when suspend began */ -static unsigned long timekeeping_suspend_time; - -/** - * timekeeping_resume - Resumes the generic timekeeping subsystem. - * @dev: unused - * - * This is for the generic clocksource timekeeping. - * xtime/wall_to_monotonic/jiffies/etc are - * still managed by arch specific suspend/resume code. - */ -static int timekeeping_resume(struct sys_device *dev) -{ - unsigned long flags; - unsigned long now = read_persistent_clock(); - - write_seqlock_irqsave(&xtime_lock, flags); - - if (now && (now > timekeeping_suspend_time)) { - unsigned long sleep_length = now - timekeeping_suspend_time; - - xtime.tv_sec += sleep_length; - wall_to_monotonic.tv_sec -= sleep_length; - } - /* re-base the last cycle value */ - clock->cycle_last = clocksource_read(clock); - clock->error = 0; - timekeeping_suspended = 0; - write_sequnlock_irqrestore(&xtime_lock, flags); - - touch_softlockup_watchdog(); - - clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL); - - /* Resume hrtimers */ - hres_timers_resume(); - - return 0; -} - -static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) -{ - unsigned long flags; - - write_seqlock_irqsave(&xtime_lock, flags); - timekeeping_suspended = 1; - timekeeping_suspend_time = read_persistent_clock(); - write_sequnlock_irqrestore(&xtime_lock, flags); - - clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); - - return 0; -} - -/* sysfs resume/suspend bits for timekeeping */ -static struct sysdev_class timekeeping_sysclass = { - .resume = timekeeping_resume, - .suspend = timekeeping_suspend, - set_kset_name("timekeeping"), -}; - -static struct sys_device device_timer = { - .id = 0, - .cls = &timekeeping_sysclass, -}; - -static int __init timekeeping_init_device(void) -{ - int error = sysdev_class_register(&timekeeping_sysclass); - if (!error) - error = sysdev_register(&device_timer); - return error; -} - -device_initcall(timekeeping_init_device); - -/* - * If the error is already larger, we look ahead even further - * to compensate for late or lost adjustments. - */ -static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, - s64 *offset) -{ - s64 tick_error, i; - u32 look_ahead, adj; - s32 error2, mult; - - /* - * Use the current error value to determine how much to look ahead. - * The larger the error the slower we adjust for it to avoid problems - * with losing too many ticks, otherwise we would overadjust and - * produce an even larger error. The smaller the adjustment the - * faster we try to adjust for it, as lost ticks can do less harm - * here. This is tuned so that an error of about 1 msec is adusted - * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks). 
- */ - error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ); - error2 = abs(error2); - for (look_ahead = 0; error2 > 0; look_ahead++) - error2 >>= 2; - - /* - * Now calculate the error in (1 << look_ahead) ticks, but first - * remove the single look ahead already included in the error. - */ - tick_error = current_tick_length() >> - (TICK_LENGTH_SHIFT - clock->shift + 1); - tick_error -= clock->xtime_interval >> 1; - error = ((error - tick_error) >> look_ahead) + tick_error; - - /* Finally calculate the adjustment shift value. */ - i = *interval; - mult = 1; - if (error < 0) { - error = -error; - *interval = -*interval; - *offset = -*offset; - mult = -1; - } - for (adj = 0; error > i; adj++) - error >>= 1; - - *interval <<= adj; - *offset <<= adj; - return mult << adj; -} - -/* - * Adjust the multiplier to reduce the error value, - * this is optimized for the most common adjustments of -1,0,1, - * for other values we can do a bit more work. - */ -static void clocksource_adjust(struct clocksource *clock, s64 offset) -{ - s64 error, interval = clock->cycle_interval; - int adj; - - error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1); - if (error > interval) { - error >>= 2; - if (likely(error <= interval)) - adj = 1; - else - adj = clocksource_bigadjust(error, &interval, &offset); - } else if (error < -interval) { - error >>= 2; - if (likely(error >= -interval)) { - adj = -1; - interval = -interval; - offset = -offset; - } else - adj = clocksource_bigadjust(error, &interval, &offset); - } else - return; - - clock->mult += adj; - clock->xtime_interval += interval; - clock->xtime_nsec -= offset; - clock->error -= (interval - offset) << - (TICK_LENGTH_SHIFT - clock->shift); -} - -/** - * update_wall_time - Uses the current clocksource to increment the wall time - * - * Called from the timer interrupt, must hold a write on xtime_lock. - */ -static void update_wall_time(void) -{ - cycle_t offset; - - /* Make sure we're fully resumed: */ - if (unlikely(timekeeping_suspended)) - return; - -#ifdef CONFIG_GENERIC_TIME - offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask; -#else - offset = clock->cycle_interval; -#endif - clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; - - /* normally this loop will run just once, however in the - * case of lost or late ticks, it will accumulate correctly. - */ - while (offset >= clock->cycle_interval) { - /* accumulate one interval */ - clock->xtime_nsec += clock->xtime_interval; - clock->cycle_last += clock->cycle_interval; - offset -= clock->cycle_interval; - - if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) { - clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift; - xtime.tv_sec++; - second_overflow(); - } - - /* interpolator bits */ - time_interpolator_update(clock->xtime_interval - >> clock->shift); - - /* accumulate error between NTP and clock interval */ - clock->error += current_tick_length(); - clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift); - } - - /* correct the clock when NTP error is too big */ - clocksource_adjust(clock, offset); - - /* store full nanoseconds into xtime */ - xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift; - clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift; - - /* check to see if there is a new clocksource to use */ - change_clocksource(); - update_vsyscall(&xtime, clock); -} - /* * Called from the timer interrupt handler to charge one tick to the current * process. user_tick is 1 if the tick is user time, 0 for system. 
@@ -1305,14 +856,6 @@ static inline void calc_load(unsigned long ticks) } } -/* - * This read-write spinlock protects us from races in SMP while - * playing with xtime and avenrun. - */ -__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); - -EXPORT_SYMBOL(xtime_lock); - /* * This function runs timers and the timer-tq in bottom half context. */ -- cgit v1.2.3 From 04c9167f91e309c9c4ea982992aa08e83b2eb42e Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Tue, 8 May 2007 00:28:05 -0700 Subject: add touch_all_softlockup_watchdogs() Add touch_all_softlockup_watchdogs() to allow the softlockup watchdog timers on all cpus to be updated. This is used to prevent sysrq-t from generating a spurious watchdog message when generating lots of output. Softlockup watchdogs use sched_clock() as its timebase, which is inherently per-cpu (at least, when it is measuring unstolen time). Because of this, it isn't possible for one CPU to directly update the other CPU's timers, but it is possible to tell the other CPUs to do update themselves appropriately. Signed-off-by: Jeremy Fitzhardinge Acked-by: Chris Lalancette Signed-off-by: Prarit Bhargava Cc: Rick Lindsley Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 4 ++++ kernel/sched.c | 2 ++ kernel/softlockup.c | 15 +++++++++++++-- 3 files changed, 19 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index d464bd0d6578..6312521df2c1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -226,6 +226,7 @@ extern void scheduler_tick(void); extern void softlockup_tick(void); extern void spawn_softlockup_task(void); extern void touch_softlockup_watchdog(void); +extern void touch_all_softlockup_watchdogs(void); #else static inline void softlockup_tick(void) { @@ -236,6 +237,9 @@ static inline void spawn_softlockup_task(void) static inline void touch_softlockup_watchdog(void) { } +static inline void touch_all_softlockup_watchdogs(void) +{ +} #endif diff --git a/kernel/sched.c b/kernel/sched.c index 0227f1625a75..5530ed211f72 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4750,6 +4750,8 @@ void show_state_filter(unsigned long state_filter) show_task(p); } while_each_thread(g, p); + touch_all_softlockup_watchdogs(); + read_unlock(&tasklist_lock); /* * Only show locks if all tasks are dumped: diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 5ea631742dbc..8fa7040247ad 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -50,6 +50,16 @@ void touch_softlockup_watchdog(void) } EXPORT_SYMBOL(touch_softlockup_watchdog); +void touch_all_softlockup_watchdogs(void) +{ + int cpu; + + /* Cause each CPU to re-update its timestamp rather than complain */ + for_each_online_cpu(cpu) + per_cpu(touch_timestamp, cpu) = 0; +} +EXPORT_SYMBOL(touch_all_softlockup_watchdogs); + /* * This callback runs from the timer interrupt, and checks * whether the watchdog thread has hung or not: @@ -61,9 +71,10 @@ void softlockup_tick(void) unsigned long print_timestamp; unsigned long now; - /* watchdog task hasn't updated timestamp yet */ - if (touch_timestamp == 0) + if (touch_timestamp == 0) { + touch_softlockup_watchdog(); return; + } print_timestamp = per_cpu(print_timestamp, this_cpu); -- cgit v1.2.3 From 73285082745045bcd64333c1fbaa88f8490f2626 Mon Sep 17 00:00:00 2001 From: Ken Chen Date: Tue, 8 May 2007 00:28:20 -0700 Subject: remove artificial software max_loop limit Remove artificial maximum 256 loop device 
that can be created due to a legacy device number limit. Searching through lkml archive, there are several instances where users complained about the artificial limit that the loop driver impose. There is no reason to have such limit. This patch rid the limit entirely and make loop device and associated block queue instantiation on demand. With on-demand instantiation, it also gives the benefit of not wasting memory if these devices are not in use (compare to current implementation that always create 8 loop devices), a net improvement in both areas. This version is both tested with creation of large number of loop devices and is compatible with existing losetup/mount user land tools. There are a number of people who worked on this and provided valuable suggestions, in no particular order, by: Jens Axboe Jan Engelhardt Christoph Hellwig Thomas M Signed-off-by: Ken Chen Cc: Jan Engelhardt Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/loop.c | 184 ++++++++++++++++++++++++++++++--------------------- include/linux/loop.h | 2 + 2 files changed, 112 insertions(+), 74 deletions(-) (limited to 'include/linux') diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 0d4ccd4a0957..af6d7274a7cc 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -77,9 +77,8 @@ #include -static int max_loop = 8; -static struct loop_device *loop_dev; -static struct gendisk **disks; +static LIST_HEAD(loop_devices); +static DEFINE_MUTEX(loop_devices_mutex); /* * Transfer functions @@ -183,7 +182,7 @@ figure_loop_size(struct loop_device *lo) if (unlikely((loff_t)x != size)) return -EFBIG; - set_capacity(disks[lo->lo_number], x); + set_capacity(lo->lo_disk, x); return 0; } @@ -812,7 +811,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file, lo->lo_queue->queuedata = lo; lo->lo_queue->unplug_fn = loop_unplug; - set_capacity(disks[lo->lo_number], size); + set_capacity(lo->lo_disk, size); bd_set_size(bdev, size << 9); set_blocksize(bdev, lo_blocksize); @@ -832,7 +831,7 @@ out_clr: lo->lo_device = NULL; lo->lo_backing_file = NULL; lo->lo_flags = 0; - set_capacity(disks[lo->lo_number], 0); + set_capacity(lo->lo_disk, 0); invalidate_bdev(bdev); bd_set_size(bdev, 0); mapping_set_gfp_mask(mapping, lo->old_gfp_mask); @@ -918,7 +917,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); invalidate_bdev(bdev); - set_capacity(disks[lo->lo_number], 0); + set_capacity(lo->lo_disk, 0); bd_set_size(bdev, 0); mapping_set_gfp_mask(filp->f_mapping, gfp); lo->lo_state = Lo_unbound; @@ -1322,6 +1321,18 @@ static long lo_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a } #endif +static struct loop_device *loop_find_dev(int number) +{ + struct loop_device *lo; + + list_for_each_entry(lo, &loop_devices, lo_list) { + if (lo->lo_number == number) + return lo; + } + return NULL; +} + +static struct loop_device *loop_init_one(int i); static int lo_open(struct inode *inode, struct file *file) { struct loop_device *lo = inode->i_bdev->bd_disk->private_data; @@ -1330,6 +1341,11 @@ static int lo_open(struct inode *inode, struct file *file) lo->lo_refcnt++; mutex_unlock(&lo->lo_ctl_mutex); + mutex_lock(&loop_devices_mutex); + if (!loop_find_dev(lo->lo_number + 1)) + loop_init_one(lo->lo_number + 1); + mutex_unlock(&loop_devices_mutex); + return 0; } @@ -1357,8 +1373,9 @@ static struct block_device_operations lo_fops = { /* * And now the 
modules code and kernel interface. */ +static int max_loop; module_param(max_loop, int, 0); -MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)"); +MODULE_PARM_DESC(max_loop, "obsolete, loop device is created on-demand"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); @@ -1383,7 +1400,7 @@ int loop_unregister_transfer(int number) xfer_funcs[n] = NULL; - for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) { + list_for_each_entry(lo, &loop_devices, lo_list) { mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_encryption == xfer) @@ -1398,91 +1415,110 @@ int loop_unregister_transfer(int number) EXPORT_SYMBOL(loop_register_transfer); EXPORT_SYMBOL(loop_unregister_transfer); -static int __init loop_init(void) +static struct loop_device *loop_init_one(int i) +{ + struct loop_device *lo; + struct gendisk *disk; + + lo = kzalloc(sizeof(*lo), GFP_KERNEL); + if (!lo) + goto out; + + lo->lo_queue = blk_alloc_queue(GFP_KERNEL); + if (!lo->lo_queue) + goto out_free_dev; + + disk = lo->lo_disk = alloc_disk(1); + if (!disk) + goto out_free_queue; + + mutex_init(&lo->lo_ctl_mutex); + lo->lo_number = i; + lo->lo_thread = NULL; + init_waitqueue_head(&lo->lo_event); + spin_lock_init(&lo->lo_lock); + disk->major = LOOP_MAJOR; + disk->first_minor = i; + disk->fops = &lo_fops; + disk->private_data = lo; + disk->queue = lo->lo_queue; + sprintf(disk->disk_name, "loop%d", i); + add_disk(disk); + list_add_tail(&lo->lo_list, &loop_devices); + return lo; + +out_free_queue: + blk_cleanup_queue(lo->lo_queue); +out_free_dev: + kfree(lo); +out: + return ERR_PTR(-ENOMEM); +} + +static void loop_del_one(struct loop_device *lo) { - int i; + del_gendisk(lo->lo_disk); + blk_cleanup_queue(lo->lo_queue); + put_disk(lo->lo_disk); + list_del(&lo->lo_list); + kfree(lo); +} - if (max_loop < 1 || max_loop > 256) { - printk(KERN_WARNING "loop: invalid max_loop (must be between" - " 1 and 256), using default (8)\n"); - max_loop = 8; - } +static struct kobject *loop_probe(dev_t dev, int *part, void *data) +{ + unsigned int number = dev & MINORMASK; + struct loop_device *lo; + + mutex_lock(&loop_devices_mutex); + lo = loop_find_dev(number); + if (lo == NULL) + lo = loop_init_one(number); + mutex_unlock(&loop_devices_mutex); + + *part = 0; + if (IS_ERR(lo)) + return (void *)lo; + else + return &lo->lo_disk->kobj; +} + +static int __init loop_init(void) +{ + struct loop_device *lo; if (register_blkdev(LOOP_MAJOR, "loop")) return -EIO; + blk_register_region(MKDEV(LOOP_MAJOR, 0), 1UL << MINORBITS, + THIS_MODULE, loop_probe, NULL, NULL); - loop_dev = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL); - if (!loop_dev) - goto out_mem1; - memset(loop_dev, 0, max_loop * sizeof(struct loop_device)); + lo = loop_init_one(0); + if (IS_ERR(lo)) + goto out; - disks = kmalloc(max_loop * sizeof(struct gendisk *), GFP_KERNEL); - if (!disks) - goto out_mem2; + if (max_loop) { + printk(KERN_INFO "loop: the max_loop option is obsolete " + "and will be removed in March 2008\n"); - for (i = 0; i < max_loop; i++) { - disks[i] = alloc_disk(1); - if (!disks[i]) - goto out_mem3; } - - for (i = 0; i < max_loop; i++) { - struct loop_device *lo = &loop_dev[i]; - struct gendisk *disk = disks[i]; - - memset(lo, 0, sizeof(*lo)); - lo->lo_queue = blk_alloc_queue(GFP_KERNEL); - if (!lo->lo_queue) - goto out_mem4; - mutex_init(&lo->lo_ctl_mutex); - lo->lo_number = i; - lo->lo_thread = NULL; - init_waitqueue_head(&lo->lo_event); - spin_lock_init(&lo->lo_lock); - disk->major = LOOP_MAJOR; - disk->first_minor = i; - disk->fops = 
&lo_fops; - sprintf(disk->disk_name, "loop%d", i); - disk->private_data = lo; - disk->queue = lo->lo_queue; - } - - /* We cannot fail after we call this, so another loop!*/ - for (i = 0; i < max_loop; i++) - add_disk(disks[i]); - printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop); + printk(KERN_INFO "loop: module loaded\n"); return 0; -out_mem4: - while (i--) - blk_cleanup_queue(loop_dev[i].lo_queue); - i = max_loop; -out_mem3: - while (i--) - put_disk(disks[i]); - kfree(disks); -out_mem2: - kfree(loop_dev); -out_mem1: +out: unregister_blkdev(LOOP_MAJOR, "loop"); printk(KERN_ERR "loop: ran out of memory\n"); return -ENOMEM; } -static void loop_exit(void) +static void __exit loop_exit(void) { - int i; + struct loop_device *lo, *next; - for (i = 0; i < max_loop; i++) { - del_gendisk(disks[i]); - blk_cleanup_queue(loop_dev[i].lo_queue); - put_disk(disks[i]); - } + list_for_each_entry_safe(lo, next, &loop_devices, lo_list) + loop_del_one(lo); + + blk_unregister_region(MKDEV(LOOP_MAJOR, 0), 1UL << MINORBITS); if (unregister_blkdev(LOOP_MAJOR, "loop")) printk(KERN_WARNING "loop: cannot unregister blkdev\n"); - - kfree(disks); - kfree(loop_dev); } module_init(loop_init); diff --git a/include/linux/loop.h b/include/linux/loop.h index 191a595055f0..0b99b31f017b 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h @@ -64,6 +64,8 @@ struct loop_device { wait_queue_head_t lo_event; request_queue_t *lo_queue; + struct gendisk *lo_disk; + struct list_head lo_list; }; #endif /* __KERNEL__ */ -- cgit v1.2.3 From 6672f76a5a1878d42264c1deba8f1ab52b4618d9 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 8 May 2007 00:28:22 -0700 Subject: kdump/kexec: calculate note size at compile time Currently the size of the per-cpu region reserved to save crash notes is set by the per-architecture value MAX_NOTE_BYTES, which in turn is currently set to 1024 on all supported architectures. While testing ia64 I recently discovered that this value is in fact too small. The particular setup I was using actually needs 1172 bytes. This led to a very tedious failure mode where the tail of one elf note would overwrite the head of another if they ended up being allocated sequentially by kmalloc, which was often the case. It seems to me that a far better approach is to calculate the size that the area needs to be. This patch does just that. If a simpler stop-gap patch for ia64 needs to be squeezed into 2.6.21(.X), it should be as easy as making MAX_NOTE_BYTES larger in arch/asm-ia64/kexec.h. Perhaps 2048 would be a good choice. However, I think that the approach in this patch is a much more robust idea.
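For illustration, a stand-alone sketch of the arithmetic this patch moves to compile time. The 12-byte note header and the 4-byte rounding follow the ELF note format mirrored by the new KEXEC_NOTE_BYTES macro; the prstatus size plugged in below is an assumed example value, since the real sizeof(struct elf_prstatus) is architecture dependent.

  #include <stdio.h>

  /* same rounding the kernel's ALIGN() macro performs */
  #define ALIGN4(x)  (((x) + 3UL) & ~3UL)

  int main(void)
  {
          unsigned long head = ALIGN4(12);              /* sizeof(struct elf_note) */
          unsigned long name = ALIGN4(sizeof("CORE"));  /* note name, padded */
          unsigned long desc = ALIGN4(1100);            /* assumed prstatus size */

          /* one PRSTATUS note plus the empty terminating note header */
          printf("per-cpu crash note area: %lu bytes\n", 2 * head + name + desc);
          return 0;
  }

With the assumed descriptor size above the result already exceeds the old 1024-byte MAX_NOTE_BYTES, which is the class of overflow the changelog describes.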
Acked-by: Vivek Goyal Signed-off-by: Simon Horman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/crash.c | 2 +- include/asm-arm/kexec.h | 2 -- include/asm-i386/kexec.h | 2 -- include/asm-ia64/kexec.h | 2 -- include/asm-mips/kexec.h | 2 -- include/asm-powerpc/kexec.h | 2 -- include/asm-s390/kexec.h | 2 -- include/asm-sh/kexec.h | 2 -- include/asm-x86_64/kexec.h | 2 -- include/linux/kexec.h | 17 ++++++++++++++++- kernel/kexec.c | 4 ++-- 11 files changed, 19 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index 3d51a3f77017..aeb79fb28f0b 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -74,7 +74,7 @@ crash_save_this_cpu(void) buf = (u64 *) per_cpu_ptr(crash_notes, cpu); if (!buf) return; - buf = append_elf_note(buf, "CORE", NT_PRSTATUS, prstatus, + buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, prstatus, sizeof(*prstatus)); final_note(buf); } diff --git a/include/asm-arm/kexec.h b/include/asm-arm/kexec.h index 8c1c6162a80c..b5b030ef633d 100644 --- a/include/asm-arm/kexec.h +++ b/include/asm-arm/kexec.h @@ -16,8 +16,6 @@ #ifndef __ASSEMBLY__ -#define MAX_NOTE_BYTES 1024 - struct kimage; /* Provide a dummy definition to avoid build failures. */ static inline void crash_setup_regs(struct pt_regs *newregs, diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h index bcb5b21de2d2..4b9dc9e6b701 100644 --- a/include/asm-i386/kexec.h +++ b/include/asm-i386/kexec.h @@ -45,8 +45,6 @@ /* We can also handle crash dumps from 64 bit kernel. */ #define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64) -#define MAX_NOTE_BYTES 1024 - /* CPU does not save ss and esp on stack if execution is already * running in kernel mode at the time of NMI occurrence. This code * fixes it. diff --git a/include/asm-ia64/kexec.h b/include/asm-ia64/kexec.h index 41299ddfee30..541be835fc5a 100644 --- a/include/asm-ia64/kexec.h +++ b/include/asm-ia64/kexec.h @@ -14,8 +14,6 @@ /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_IA_64 -#define MAX_NOTE_BYTES 1024 - #define kexec_flush_icache_page(page) do { \ unsigned long page_addr = (unsigned long)page_address(page); \ flush_icache_range(page_addr, page_addr + PAGE_SIZE); \ diff --git a/include/asm-mips/kexec.h b/include/asm-mips/kexec.h index b25267ebcb09..cdbab43b7d3a 100644 --- a/include/asm-mips/kexec.h +++ b/include/asm-mips/kexec.h @@ -21,8 +21,6 @@ /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_MIPS -#define MAX_NOTE_BYTES 1024 - static inline void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) { diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h index 11cbdf81fd2e..b6f817b8ba3d 100644 --- a/include/asm-powerpc/kexec.h +++ b/include/asm-powerpc/kexec.h @@ -108,8 +108,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) { } #endif /* !__powerpc64 __ */ -#define MAX_NOTE_BYTES 1024 - extern void kexec_smp_wait(void); /* get and clear naca physid, wait for master to copy new code to 0 */ extern int crashing_cpu; diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h index 9c35c8ad1afd..7592af708b41 100644 --- a/include/asm-s390/kexec.h +++ b/include/asm-s390/kexec.h @@ -34,8 +34,6 @@ /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_S390 -#define MAX_NOTE_BYTES 1024 - /* Provide a dummy definition to avoid build failures. 
*/ static inline void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) { } diff --git a/include/asm-sh/kexec.h b/include/asm-sh/kexec.h index da36a7548601..00f4260ef09b 100644 --- a/include/asm-sh/kexec.h +++ b/include/asm-sh/kexec.h @@ -26,8 +26,6 @@ /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_SH -#define MAX_NOTE_BYTES 1024 - static inline void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) { diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h index 5fab957e1091..738e581b67f8 100644 --- a/include/asm-x86_64/kexec.h +++ b/include/asm-x86_64/kexec.h @@ -48,8 +48,6 @@ /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_X86_64 -#define MAX_NOTE_BYTES 1024 - /* * Saving the registers of the cpu on which panic occured in * crash_kexec to save a valid sp. The registers of other cpus diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 696e5ec63f77..8c2c7fcd58ce 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include /* Verify architecture specific macros are defined */ @@ -31,6 +33,19 @@ #error KEXEC_ARCH not defined #endif +#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4) +#define KEXEC_CORE_NOTE_NAME "CORE" +#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4) +#define KEXEC_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4) +/* + * The per-cpu notes area is a list of notes terminated by a "NULL" + * note header. For kdump, the code in vmcore.c runs in the context + * of the second kernel to combine them into one note. + */ +#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \ + KEXEC_CORE_NOTE_NAME_BYTES + \ + KEXEC_CORE_NOTE_DESC_BYTES ) + /* * This structure is used to hold the arguments that are used when loading * kernel binaries. @@ -136,7 +151,7 @@ extern struct kimage *kexec_crash_image; /* Location of a reserved region to hold the crash kernel. */ extern struct resource crashk_res; -typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; +typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4]; extern note_buf_t *crash_notes; diff --git a/kernel/kexec.c b/kernel/kexec.c index 2a59c8a01ae0..25db14b89e82 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1118,8 +1118,8 @@ void crash_save_cpu(struct pt_regs *regs, int cpu) memset(&prstatus, 0, sizeof(prstatus)); prstatus.pr_pid = current->pid; elf_core_copy_regs(&prstatus.pr_reg, regs); - buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, - sizeof(prstatus)); + buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, + &prstatus, sizeof(prstatus)); final_note(buf); } -- cgit v1.2.3 From 0f95b7fc839bc3272b1bf2325d8748a649bd3534 Mon Sep 17 00:00:00 2001 From: Ananth N Mavinakayanahalli Date: Tue, 8 May 2007 00:28:27 -0700 Subject: Kprobes: print details of kretprobe on assertion failure In certain cases like when the real return address can't be found or when the number of tracked calls to a kretprobed function is less than the number of returns, we may not be able to find the correct return address after processing a kretprobe. Currently we just do a BUG_ON, but no information is provided about the actual failing kretprobe. Print out details of the kretprobe before calling BUG(). 
Signed-off-by: Ananth N Mavinakayanahalli Cc: Prasanna S Panchamukhi Cc: Jim Keniston Cc: Anil S Keshavamurthy Cc: Maneesh Soni Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/kernel/kprobes.c | 3 +-- arch/ia64/kernel/kprobes.c | 3 ++- arch/powerpc/kernel/kprobes.c | 2 +- arch/x86_64/kernel/kprobes.c | 2 +- include/linux/kprobes.h | 10 ++++++++++ 5 files changed, 15 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index e00f75ecf1a8..3fbef288c376 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c @@ -449,8 +449,7 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs) break; } - BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); - + kretprobe_assert(ri, orig_ret_address, trampoline_address); spin_unlock_irqrestore(&kretprobe_lock, flags); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 779fe00c9025..353689edebd5 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -444,7 +444,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) break; } - BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); + kretprobe_assert(ri, orig_ret_address, trampoline_address); + regs->cr_iip = orig_ret_address; reset_current_kprobe(); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 2a2c696dcc7a..3d54ad7dd1f9 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -336,7 +336,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) break; } - BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); + kretprobe_assert(ri, orig_ret_address, trampoline_address); regs->nip = orig_ret_address; reset_current_kprobe(); diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 90e7008cd899..5841ba5f479b 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c @@ -447,7 +447,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) break; } - BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); + kretprobe_assert(ri, orig_ret_address, trampoline_address); regs->rip = orig_ret_address; reset_current_kprobe(); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index f26460700379..850ee871e353 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -157,6 +157,16 @@ struct kretprobe_instance { struct task_struct *task; }; +static inline void kretprobe_assert(struct kretprobe_instance *ri, + unsigned long orig_ret_address, unsigned long trampoline_address) +{ + if (!orig_ret_address || (orig_ret_address == trampoline_address)) { + printk("kretprobe BUG!: Processing kretprobe %p @ %p\n", + ri->rp, ri->rp->kp.addr); + BUG(); + } +} + extern spinlock_t kretprobe_lock; extern struct mutex kprobe_mutex; extern int arch_prepare_kprobe(struct kprobe *p); -- cgit v1.2.3 From ae84e324709d6320ed8c1fd7b1736fcbaf26df95 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 8 May 2007 00:28:38 -0700 Subject: Simplify module_get_kallsym() by dropping length arg module_get_kallsym() could in theory truncate module symbol name to fit in buffer, but nobody does this. Always use KSYM_NAME_LEN + 1 bytes for name. Suggested by lg^WRusty. 
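A hypothetical caller, sketched against the prototype this patch leaves behind and assuming the usual headers from this tree (linux/module.h, linux/kallsyms.h, linux/seq_file.h); the point is simply that the name buffer is now always KSYM_NAME_LEN + 1 bytes, as in the kernel/kallsyms.c hunk above.

  static void show_module_symbol(struct seq_file *m, unsigned int symnum)
  {
          char name[KSYM_NAME_LEN + 1];   /* fixed size; no length argument any more */
          unsigned long value;
          char type;
          struct module *owner;

          owner = module_get_kallsym(symnum, &value, &type, name);
          if (owner)
                  seq_printf(m, "%lx %c %s [%s]\n", value, type, name,
                             module_name(owner));
  }

Note that the very next patch in this series drops the returned struct module * as well, because dereferencing it after module_get_kallsym() has released module_mutex is racy.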
Signed-off-by: Alexey Dobriyan Acked-by: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/module.h | 5 ++--- kernel/kallsyms.c | 2 +- kernel/module.c | 5 +++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index f0b0faf42d5d..799930216626 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -373,7 +373,7 @@ int is_module_address(unsigned long addr); /* Returns module and fills in value, defined and namebuf, or NULL if symnum out of range. */ struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *name, size_t namelen); + char *type, char *name); /* Look for this name: can be of form module:name. */ unsigned long module_kallsyms_lookup_name(const char *name); @@ -529,8 +529,7 @@ static inline const char *module_address_lookup(unsigned long addr, static inline struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *name, - size_t namelen) + char *type, char *name) { return NULL; } diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 5a0de8409739..d086c91d44ed 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -312,7 +312,7 @@ static int get_ksymbol_mod(struct kallsym_iter *iter) { iter->owner = module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value, &iter->type, - iter->name, sizeof(iter->name)); + iter->name); if (iter->owner == NULL) return 0; diff --git a/kernel/module.c b/kernel/module.c index 9bdbd1217a6f..43a529a1fa48 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -2124,7 +2125,7 @@ const char *module_address_lookup(unsigned long addr, } struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *name, size_t namelen) + char *type, char *name) { struct module *mod; @@ -2134,7 +2135,7 @@ struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, *value = mod->symtab[symnum].st_value; *type = mod->symtab[symnum].st_info; strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, - namelen); + KSYM_NAME_LEN + 1); mutex_unlock(&module_mutex); return mod; } -- cgit v1.2.3 From ea07890a680273b25127129fb555aac0d9324bea Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 8 May 2007 00:28:39 -0700 Subject: Fix race between rmmod and cat /proc/kallsyms module_get_kallsym() leaks "struct module *" outside of module_mutex, which is a no-no, because the module can disappear right after the mutex is unlocked. Copy all needed information from inside module_mutex into caller-supplied space.
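The window being closed can be sketched as an interleaving; the columns and exact call sites are illustrative, not taken from the changelog:

  /*
   * reader (cat /proc/kallsyms)              writer (rmmod)
   * ---------------------------              --------------
   * mutex_lock(&module_mutex);
   * mod = <module owning symnum>;
   * mutex_unlock(&module_mutex);
   *                                          free_module(mod);
   * seq_printf(..., module_name(mod));   <-- use after free
   */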
[bunk@stusta.de: is_exported() can now become static] Signed-off-by: Alexey Dobriyan Cc: Rusty Russell Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/module.h | 21 +++++++-------------- kernel/kallsyms.c | 30 +++++++++++++++--------------- kernel/module.c | 12 +++++++----- 3 files changed, 29 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index 799930216626..58d5a10cdf0d 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -370,16 +370,14 @@ struct module *module_text_address(unsigned long addr); struct module *__module_text_address(unsigned long addr); int is_module_address(unsigned long addr); -/* Returns module and fills in value, defined and namebuf, or NULL if +/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if symnum out of range. */ -struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *name); +int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *name, char *module_name, int *exported); /* Look for this name: can be of form module:name. */ unsigned long module_kallsyms_lookup_name(const char *name); -int is_exported(const char *name, const struct module *mod); - extern void __module_put_and_exit(struct module *mod, long code) __attribute__((noreturn)); #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); @@ -527,11 +525,11 @@ static inline const char *module_address_lookup(unsigned long addr, return NULL; } -static inline struct module *module_get_kallsym(unsigned int symnum, - unsigned long *value, - char *type, char *name) +static inline int module_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, + char *module_name, int *exported) { - return NULL; + return -ERANGE; } static inline unsigned long module_kallsyms_lookup_name(const char *name) @@ -539,11 +537,6 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name) return 0; } -static inline int is_exported(const char *name, const struct module *mod) -{ - return 0; -} - static inline int register_module_notifier(struct notifier_block * nb) { /* no events will happen anyway, so this can always succeed */ diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index d086c91d44ed..f1ea6f66ac6c 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -301,25 +301,20 @@ void __print_symbol(const char *fmt, unsigned long address) struct kallsym_iter { loff_t pos; - struct module *owner; unsigned long value; unsigned int nameoff; /* If iterating in core kernel symbols */ char type; char name[KSYM_NAME_LEN+1]; + char module_name[MODULE_NAME_LEN + 1]; + int exported; }; static int get_ksymbol_mod(struct kallsym_iter *iter) { - iter->owner = module_get_kallsym(iter->pos - kallsyms_num_syms, - &iter->value, &iter->type, - iter->name); - if (iter->owner == NULL) + if (module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value, + &iter->type, iter->name, iter->module_name, + &iter->exported) < 0) return 0; - - /* Label it "global" if it is exported, "local" if not exported. */ - iter->type = is_exported(iter->name, iter->owner) - ? 
toupper(iter->type) : tolower(iter->type); - return 1; } @@ -328,7 +323,7 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter) { unsigned off = iter->nameoff; - iter->owner = NULL; + iter->module_name[0] = '\0'; iter->value = kallsyms_addresses[iter->pos]; iter->type = kallsyms_get_symbol_type(off); @@ -392,12 +387,17 @@ static int s_show(struct seq_file *m, void *p) if (!iter->name[0]) return 0; - if (iter->owner) + if (iter->module_name[0]) { + char type; + + /* Label it "global" if it is exported, + * "local" if not exported. */ + type = iter->exported ? toupper(iter->type) : + tolower(iter->type); seq_printf(m, "%0*lx %c %s\t[%s]\n", (int)(2*sizeof(void*)), - iter->value, iter->type, iter->name, - module_name(iter->owner)); - else + iter->value, type, iter->name, iter->module_name); + } else seq_printf(m, "%0*lx %c %s\n", (int)(2*sizeof(void*)), iter->value, iter->type, iter->name); diff --git a/kernel/module.c b/kernel/module.c index 43a529a1fa48..5ee65994a3bc 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1472,7 +1472,7 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs, } #ifdef CONFIG_KALLSYMS -int is_exported(const char *name, const struct module *mod) +static int is_exported(const char *name, const struct module *mod) { if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab)) return 1; @@ -2124,8 +2124,8 @@ const char *module_address_lookup(unsigned long addr, return NULL; } -struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *name) +int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *name, char *module_name, int *exported) { struct module *mod; @@ -2136,13 +2136,15 @@ struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, *type = mod->symtab[symnum].st_info; strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, KSYM_NAME_LEN + 1); + strlcpy(module_name, mod->name, MODULE_NAME_LEN + 1); + *exported = is_exported(name, mod); mutex_unlock(&module_mutex); - return mod; + return 0; } symnum -= mod->num_symtab; } mutex_unlock(&module_mutex); - return NULL; + return -ERANGE; } static unsigned long mod_find_symname(struct module *mod, const char *name) -- cgit v1.2.3 From 9d65cb4a1718a072898c7a57a3bc61b2dc4bcd4d Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 8 May 2007 00:28:43 -0700 Subject: Fix race between cat /proc/*/wchan and rmmod et al kallsyms_lookup() can go iterating over modules list unprotected which is OK for emergency situations (oops), but not OK for regular stuff like /proc/*/wchan. Introduce lookup_symbol_name()/lookup_module_symbol_name() which copy symbol name into caller-supplied buffer or return -ERANGE. All copying is done with module_mutex held, so... 
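The conversion pattern this patch applies at each call site (fs/proc/base.c and the timer debug files below) is roughly the following; the buffer is caller-supplied and a negative return means the address did not resolve to a symbol in the core kernel or any module:

  char symname[KSYM_NAME_LEN + 1];

  if (lookup_symbol_name(addr, symname) < 0)
          seq_printf(m, "<%p>", (void *)addr);    /* fall back to the raw address */
  else
          seq_printf(m, "%s", symname);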
Signed-off-by: Alexey Dobriyan Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/base.c | 11 +++++------ include/linux/kallsyms.h | 7 +++++++ include/linux/module.h | 6 ++++++ kernel/kallsyms.c | 17 +++++++++++++++++ kernel/module.c | 23 +++++++++++++++++++++++ kernel/time/timer_list.c | 11 ++++------- kernel/time/timer_stats.c | 10 ++++------ 7 files changed, 66 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/base.c b/fs/proc/base.c index 3b4fe21c7e94..0c2052c79243 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -278,16 +278,15 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer) */ static int proc_pid_wchan(struct task_struct *task, char *buffer) { - const char *sym_name; unsigned long wchan; - char namebuf[KSYM_NAME_LEN+1]; + char symname[KSYM_NAME_LEN+1]; wchan = get_wchan(task); - sym_name = kallsyms_lookup(wchan, NULL, NULL, NULL, namebuf); - if (sym_name) - return sprintf(buffer, "%s", sym_name); - return sprintf(buffer, "%lu", wchan); + if (lookup_symbol_name(wchan, symname) < 0) + return sprintf(buffer, "%lu", wchan); + else + return sprintf(buffer, "%s", symname); } #endif /* CONFIG_KALLSYMS */ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 3e3b92dabe3b..ae0117a95cfd 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -30,6 +30,8 @@ extern int sprint_symbol(char *buffer, unsigned long address); /* Look up a kernel symbol and print it to the kernel messages. */ extern void __print_symbol(const char *fmt, unsigned long address); +int lookup_symbol_name(unsigned long addr, char *symname); + #else /* !CONFIG_KALLSYMS */ static inline unsigned long kallsyms_lookup_name(const char *name) @@ -58,6 +60,11 @@ static inline int sprint_symbol(char *buffer, unsigned long addr) return 0; } +static inline int lookup_symbol_name(unsigned long addr, char *symname) +{ + return -ERANGE; +} + /* Stupid that this does nothing, but I didn't create this mess. */ #define __print_symbol(fmt, addr) #endif /*CONFIG_KALLSYMS*/ diff --git a/include/linux/module.h b/include/linux/module.h index 58d5a10cdf0d..099ae5932c68 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -454,6 +454,7 @@ const char *module_address_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname); +int lookup_module_symbol_name(unsigned long addr, char *symname); /* For extable.c to search modules' exception tables. 
*/ const struct exception_table_entry *search_module_extables(unsigned long addr); @@ -525,6 +526,11 @@ static inline const char *module_address_lookup(unsigned long addr, return NULL; } +static inline int lookup_module_symbol_name(unsigned long addr, char *symname) +{ + return -ERANGE; +} + static inline int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *name, char *module_name, int *exported) diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index f66da025cb7f..4e2ec191a127 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -269,6 +269,23 @@ const char *kallsyms_lookup(unsigned long addr, return NULL; } +int lookup_symbol_name(unsigned long addr, char *symname) +{ + symname[0] = '\0'; + symname[KSYM_NAME_LEN] = '\0'; + + if (is_ksym_addr(addr)) { + unsigned long pos; + + pos = get_symbol_pos(addr, NULL, NULL); + /* Grab name */ + kallsyms_expand_symbol(get_symbol_offset(pos), symname); + return 0; + } + /* see if it's in a module */ + return lookup_module_symbol_name(addr, symname); +} + /* Look up a kernel symbol and return it in a text buffer. */ int sprint_symbol(char *buffer, unsigned long address) { diff --git a/kernel/module.c b/kernel/module.c index bf4dccadf7b8..3da76ad32d78 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2126,6 +2126,29 @@ const char *module_address_lookup(unsigned long addr, return NULL; } +int lookup_module_symbol_name(unsigned long addr, char *symname) +{ + struct module *mod; + + mutex_lock(&module_mutex); + list_for_each_entry(mod, &modules, list) { + if (within(addr, mod->module_init, mod->init_size) || + within(addr, mod->module_core, mod->core_size)) { + const char *sym; + + sym = get_ksymbol(mod, addr, NULL, NULL); + if (!sym) + goto out; + strlcpy(symname, sym, KSYM_NAME_LEN + 1); + mutex_unlock(&module_mutex); + return 0; + } + } +out: + mutex_unlock(&module_mutex); + return -ERANGE; +} + int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *name, char *module_name, int *exported) { diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index fe9314a89f20..b734ca4bc75e 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -38,15 +38,12 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases); static void print_name_offset(struct seq_file *m, void *sym) { - unsigned long addr = (unsigned long)sym; - char namebuf[KSYM_NAME_LEN+1]; - const char *sym_name; + char symname[KSYM_NAME_LEN+1]; - sym_name = kallsyms_lookup(addr, NULL, NULL, NULL, namebuf); - if (sym_name) - SEQ_printf(m, "%s", sym_name); - else + if (lookup_symbol_name((unsigned long)sym, symname) < 0) SEQ_printf(m, "<%p>", sym); + else + SEQ_printf(m, "%s", symname); } static void diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index 946ed45f7d2f..868f1bceb07f 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c @@ -257,14 +257,12 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, static void print_name_offset(struct seq_file *m, unsigned long addr) { - char namebuf[KSYM_NAME_LEN+1]; - const char *sym_name; + char symname[KSYM_NAME_LEN+1]; - sym_name = kallsyms_lookup(addr, NULL, NULL, NULL, namebuf); - if (sym_name) - seq_printf(m, "%s", sym_name); - else + if (lookup_symbol_name(addr, symname) < 0) seq_printf(m, "<%p>", (void *)addr); + else + seq_printf(m, "%s", symname); } static int tstats_show(struct seq_file *m, void *v) -- cgit v1.2.3 From a5c43dae7ae38c2a6b3e9a819bcf45f010bf6a4a Mon Sep 17 00:00:00 2001 From: Alexey 
Dobriyan Date: Tue, 8 May 2007 00:28:47 -0700 Subject: Fix race between cat /proc/slab_allocators and rmmod Same story as with cat /proc/*/wchan race vs rmmod race, only /proc/slab_allocators want more info than just symbol name. Signed-off-by: Alexey Dobriyan Acked-by: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kallsyms.h | 6 ++++++ include/linux/module.h | 6 ++++++ kernel/kallsyms.c | 19 +++++++++++++++++++ kernel/module.c | 27 +++++++++++++++++++++++++++ mm/slab.c | 10 +++------- 5 files changed, 61 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index ae0117a95cfd..12178d2c882b 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -31,6 +31,7 @@ extern int sprint_symbol(char *buffer, unsigned long address); extern void __print_symbol(const char *fmt, unsigned long address); int lookup_symbol_name(unsigned long addr, char *symname); +int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); #else /* !CONFIG_KALLSYMS */ @@ -65,6 +66,11 @@ static inline int lookup_symbol_name(unsigned long addr, char *symname) return -ERANGE; } +static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name) +{ + return -ERANGE; +} + /* Stupid that this does nothing, but I didn't create this mess. */ #define __print_symbol(fmt, addr) #endif /*CONFIG_KALLSYMS*/ diff --git a/include/linux/module.h b/include/linux/module.h index 099ae5932c68..6d3dc9c4ff96 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -455,6 +455,7 @@ const char *module_address_lookup(unsigned long addr, unsigned long *offset, char **modname); int lookup_module_symbol_name(unsigned long addr, char *symname); +int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); /* For extable.c to search modules' exception tables. */ const struct exception_table_entry *search_module_extables(unsigned long addr); @@ -531,6 +532,11 @@ static inline int lookup_module_symbol_name(unsigned long addr, char *symname) return -ERANGE; } +static inline int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name) +{ + return -ERANGE; +} + static inline int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *name, char *module_name, int *exported) diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 4e2ec191a127..c36581b40c04 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -286,6 +286,25 @@ int lookup_symbol_name(unsigned long addr, char *symname) return lookup_module_symbol_name(addr, symname); } +int lookup_symbol_attrs(unsigned long addr, unsigned long *size, + unsigned long *offset, char *modname, char *name) +{ + name[0] = '\0'; + name[KSYM_NAME_LEN] = '\0'; + + if (is_ksym_addr(addr)) { + unsigned long pos; + + pos = get_symbol_pos(addr, size, offset); + /* Grab name */ + kallsyms_expand_symbol(get_symbol_offset(pos), name); + modname[0] = '\0'; + return 0; + } + /* see if it's in a module */ + return lookup_module_symbol_attrs(addr, size, offset, modname, name); +} + /* Look up a kernel symbol and return it in a text buffer. 
*/ int sprint_symbol(char *buffer, unsigned long address) { diff --git a/kernel/module.c b/kernel/module.c index 3da76ad32d78..d36e45477fac 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2149,6 +2149,33 @@ out: return -ERANGE; } +int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, + unsigned long *offset, char *modname, char *name) +{ + struct module *mod; + + mutex_lock(&module_mutex); + list_for_each_entry(mod, &modules, list) { + if (within(addr, mod->module_init, mod->init_size) || + within(addr, mod->module_core, mod->core_size)) { + const char *sym; + + sym = get_ksymbol(mod, addr, size, offset); + if (!sym) + goto out; + if (modname) + strlcpy(modname, mod->name, MODULE_NAME_LEN + 1); + if (name) + strlcpy(name, sym, KSYM_NAME_LEN + 1); + mutex_unlock(&module_mutex); + return 0; + } + } +out: + mutex_unlock(&module_mutex); + return -ERANGE; +} + int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *name, char *module_name, int *exported) { diff --git a/mm/slab.c b/mm/slab.c index 1115e2065bfc..acda7e2d66e4 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4432,16 +4432,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) static void show_symbol(struct seq_file *m, unsigned long address) { #ifdef CONFIG_KALLSYMS - char *modname; - const char *name; unsigned long offset, size; - char namebuf[KSYM_NAME_LEN+1]; - - name = kallsyms_lookup(address, &size, &offset, &modname, namebuf); + char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1]; - if (name) { + if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) { seq_printf(m, "%s+%#lx/%#lx", name, offset, size); - if (modname) + if (modname[0]) seq_printf(m, " [%s]", modname); return; } -- cgit v1.2.3 From 0e8638e2ace18eb6b814a63fe087106be05ca267 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 8 May 2007 00:28:56 -0700 Subject: Deprecate SA_xxx interrupt flags -V2 The deprecation of the SA_xxx interrupt flags did not emit deprecated warnings. Andrew said about the removal of the deprecated flag defines: > This is going to break a lot of external stuff. We should have found > a way to make usage of SA_* emit deprecated warnings (or _some_ > warning) to warn people of impending doom. But I can't immediately > find a way of doing that. if we _can_ find a way of doing this, I > suspect we'll need to do it, and give people another six months. It's > going to get ugly out there. We shall see... Define the deprecated flags as a call to a __deprecated inline function so a warning is emitted on compile time. Extend the reprieve of out of tree drivers to 9/2007. Signed-off-by: Thomas Gleixner Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/feature-removal-schedule.txt | 2 +- include/linux/interrupt.h | 30 ++++++++++++++++++------------ 2 files changed, 19 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index a67d617429c7..5a1f0319add1 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -156,7 +156,7 @@ Who: Greg Kroah-Hartman --------------------------- What: Interrupt only SA_* flags -When: Januar 2007 +When: September 2007 Why: The interrupt related SA_* flags are replaced by IRQF_* to move them out of the signal namespace. 
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 2067a7ef6e62..80e63d8e9b15 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -54,20 +54,26 @@ #define IRQF_NOBALANCING 0x00000800 /* - * Migration helpers. Scheduled for removal in 1/2007 + * Migration helpers. Scheduled for removal in 9/2007 * Do not use for new code ! */ -#define SA_INTERRUPT IRQF_DISABLED -#define SA_SAMPLE_RANDOM IRQF_SAMPLE_RANDOM -#define SA_SHIRQ IRQF_SHARED -#define SA_PROBEIRQ IRQF_PROBE_SHARED -#define SA_PERCPU IRQF_PERCPU - -#define SA_TRIGGER_LOW IRQF_TRIGGER_LOW -#define SA_TRIGGER_HIGH IRQF_TRIGGER_HIGH -#define SA_TRIGGER_FALLING IRQF_TRIGGER_FALLING -#define SA_TRIGGER_RISING IRQF_TRIGGER_RISING -#define SA_TRIGGER_MASK IRQF_TRIGGER_MASK +static inline +unsigned long __deprecated deprecated_irq_flag(unsigned long flag) +{ + return flag; +} + +#define SA_INTERRUPT deprecated_irq_flag(IRQF_DISABLED) +#define SA_SAMPLE_RANDOM deprecated_irq_flag(IRQF_SAMPLE_RANDOM) +#define SA_SHIRQ deprecated_irq_flag(IRQF_SHARED) +#define SA_PROBEIRQ deprecated_irq_flag(IRQF_PROBE_SHARED) +#define SA_PERCPU deprecated_irq_flag(IRQF_PERCPU) + +#define SA_TRIGGER_LOW deprecated_irq_flag(IRQF_TRIGGER_LOW) +#define SA_TRIGGER_HIGH deprecated_irq_flag(IRQF_TRIGGER_HIGH) +#define SA_TRIGGER_FALLING deprecated_irq_flag(IRQF_TRIGGER_FALLING) +#define SA_TRIGGER_RISING deprecated_irq_flag(IRQF_TRIGGER_RISING) +#define SA_TRIGGER_MASK deprecated_irq_flag(IRQF_TRIGGER_MASK) typedef irqreturn_t (*irq_handler_t)(int, void *); -- cgit v1.2.3 From 644fd4f5de9ca147daeb6dc5f844b44ec3d58b47 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 8 May 2007 00:29:07 -0700 Subject: merge compat_ioctl.h into compat_ioctl.c Now that there is no arch-specific compat ioctl handling left there is no point in having a separate compat_ioctl.h, so merge it into compat_ioctl.c Signed-off-by: Christoph Hellwig Acked-by: Arnd Bergmann Acked-by: David S.
Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/compat_ioctl.c | 829 +++++++++++++++++++++++++++++++++++++++++- include/linux/compat_ioctl.h | 830 ------------------------------------------- 2 files changed, 828 insertions(+), 831 deletions(-) delete mode 100644 include/linux/compat_ioctl.h (limited to 'include/linux') diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index c870364b0b47..a0c3d2044e84 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -2405,7 +2405,834 @@ lp_timeout_trans(unsigned int fd, unsigned int cmd, unsigned long arg) #define IGNORE_IOCTL(cmd) COMPATIBLE_IOCTL(cmd) struct ioctl_trans ioctl_start[] = { -#include +/* compatible ioctls first */ +COMPATIBLE_IOCTL(0x4B50) /* KDGHWCLK - not in the kernel, but don't complain */ +COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain */ + +/* Big T */ +COMPATIBLE_IOCTL(TCGETA) +COMPATIBLE_IOCTL(TCSETA) +COMPATIBLE_IOCTL(TCSETAW) +COMPATIBLE_IOCTL(TCSETAF) +COMPATIBLE_IOCTL(TCSBRK) +ULONG_IOCTL(TCSBRKP) +COMPATIBLE_IOCTL(TCXONC) +COMPATIBLE_IOCTL(TCFLSH) +COMPATIBLE_IOCTL(TCGETS) +COMPATIBLE_IOCTL(TCSETS) +COMPATIBLE_IOCTL(TCSETSW) +COMPATIBLE_IOCTL(TCSETSF) +COMPATIBLE_IOCTL(TIOCLINUX) +COMPATIBLE_IOCTL(TIOCSBRK) +COMPATIBLE_IOCTL(TIOCCBRK) +ULONG_IOCTL(TIOCMIWAIT) +COMPATIBLE_IOCTL(TIOCGICOUNT) +/* Little t */ +COMPATIBLE_IOCTL(TIOCGETD) +COMPATIBLE_IOCTL(TIOCSETD) +COMPATIBLE_IOCTL(TIOCEXCL) +COMPATIBLE_IOCTL(TIOCNXCL) +COMPATIBLE_IOCTL(TIOCCONS) +COMPATIBLE_IOCTL(TIOCGSOFTCAR) +COMPATIBLE_IOCTL(TIOCSSOFTCAR) +COMPATIBLE_IOCTL(TIOCSWINSZ) +COMPATIBLE_IOCTL(TIOCGWINSZ) +COMPATIBLE_IOCTL(TIOCMGET) +COMPATIBLE_IOCTL(TIOCMBIC) +COMPATIBLE_IOCTL(TIOCMBIS) +COMPATIBLE_IOCTL(TIOCMSET) +COMPATIBLE_IOCTL(TIOCPKT) +COMPATIBLE_IOCTL(TIOCNOTTY) +COMPATIBLE_IOCTL(TIOCSTI) +COMPATIBLE_IOCTL(TIOCOUTQ) +COMPATIBLE_IOCTL(TIOCSPGRP) +COMPATIBLE_IOCTL(TIOCGPGRP) +ULONG_IOCTL(TIOCSCTTY) +COMPATIBLE_IOCTL(TIOCGPTN) +COMPATIBLE_IOCTL(TIOCSPTLCK) +COMPATIBLE_IOCTL(TIOCSERGETLSR) +/* Little f */ +COMPATIBLE_IOCTL(FIOCLEX) +COMPATIBLE_IOCTL(FIONCLEX) +COMPATIBLE_IOCTL(FIOASYNC) +COMPATIBLE_IOCTL(FIONBIO) +COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */ +/* 0x00 */ +COMPATIBLE_IOCTL(FIBMAP) +COMPATIBLE_IOCTL(FIGETBSZ) +/* 0x03 -- HD/IDE ioctl's used by hdparm and friends. + * Some need translations, these do not. 
+ */ +COMPATIBLE_IOCTL(HDIO_GET_IDENTITY) +COMPATIBLE_IOCTL(HDIO_DRIVE_TASK) +COMPATIBLE_IOCTL(HDIO_DRIVE_CMD) +ULONG_IOCTL(HDIO_SET_MULTCOUNT) +ULONG_IOCTL(HDIO_SET_UNMASKINTR) +ULONG_IOCTL(HDIO_SET_KEEPSETTINGS) +ULONG_IOCTL(HDIO_SET_32BIT) +ULONG_IOCTL(HDIO_SET_NOWERR) +ULONG_IOCTL(HDIO_SET_DMA) +ULONG_IOCTL(HDIO_SET_PIO_MODE) +ULONG_IOCTL(HDIO_SET_NICE) +ULONG_IOCTL(HDIO_SET_WCACHE) +ULONG_IOCTL(HDIO_SET_ACOUSTIC) +ULONG_IOCTL(HDIO_SET_BUSSTATE) +ULONG_IOCTL(HDIO_SET_ADDRESS) +COMPATIBLE_IOCTL(HDIO_SCAN_HWIF) +/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */ +COMPATIBLE_IOCTL(0x330) +/* 0x02 -- Floppy ioctls */ +COMPATIBLE_IOCTL(FDMSGON) +COMPATIBLE_IOCTL(FDMSGOFF) +COMPATIBLE_IOCTL(FDSETEMSGTRESH) +COMPATIBLE_IOCTL(FDFLUSH) +COMPATIBLE_IOCTL(FDWERRORCLR) +COMPATIBLE_IOCTL(FDSETMAXERRS) +COMPATIBLE_IOCTL(FDGETMAXERRS) +COMPATIBLE_IOCTL(FDGETDRVTYP) +COMPATIBLE_IOCTL(FDEJECT) +COMPATIBLE_IOCTL(FDCLRPRM) +COMPATIBLE_IOCTL(FDFMTBEG) +COMPATIBLE_IOCTL(FDFMTEND) +COMPATIBLE_IOCTL(FDRESET) +COMPATIBLE_IOCTL(FDTWADDLE) +COMPATIBLE_IOCTL(FDFMTTRK) +COMPATIBLE_IOCTL(FDRAWCMD) +/* 0x12 */ +#ifdef CONFIG_BLOCK +COMPATIBLE_IOCTL(BLKRASET) +COMPATIBLE_IOCTL(BLKROSET) +COMPATIBLE_IOCTL(BLKROGET) +COMPATIBLE_IOCTL(BLKRRPART) +COMPATIBLE_IOCTL(BLKFLSBUF) +COMPATIBLE_IOCTL(BLKSECTSET) +COMPATIBLE_IOCTL(BLKSSZGET) +COMPATIBLE_IOCTL(BLKTRACESTART) +COMPATIBLE_IOCTL(BLKTRACESTOP) +COMPATIBLE_IOCTL(BLKTRACESETUP) +COMPATIBLE_IOCTL(BLKTRACETEARDOWN) +ULONG_IOCTL(BLKRASET) +ULONG_IOCTL(BLKFRASET) +#endif +/* RAID */ +COMPATIBLE_IOCTL(RAID_VERSION) +COMPATIBLE_IOCTL(GET_ARRAY_INFO) +COMPATIBLE_IOCTL(GET_DISK_INFO) +COMPATIBLE_IOCTL(PRINT_RAID_DEBUG) +COMPATIBLE_IOCTL(RAID_AUTORUN) +COMPATIBLE_IOCTL(CLEAR_ARRAY) +COMPATIBLE_IOCTL(ADD_NEW_DISK) +ULONG_IOCTL(HOT_REMOVE_DISK) +COMPATIBLE_IOCTL(SET_ARRAY_INFO) +COMPATIBLE_IOCTL(SET_DISK_INFO) +COMPATIBLE_IOCTL(WRITE_RAID_INFO) +COMPATIBLE_IOCTL(UNPROTECT_ARRAY) +COMPATIBLE_IOCTL(PROTECT_ARRAY) +ULONG_IOCTL(HOT_ADD_DISK) +ULONG_IOCTL(SET_DISK_FAULTY) +COMPATIBLE_IOCTL(RUN_ARRAY) +COMPATIBLE_IOCTL(STOP_ARRAY) +COMPATIBLE_IOCTL(STOP_ARRAY_RO) +COMPATIBLE_IOCTL(RESTART_ARRAY_RW) +COMPATIBLE_IOCTL(GET_BITMAP_FILE) +ULONG_IOCTL(SET_BITMAP_FILE) +/* DM */ +COMPATIBLE_IOCTL(DM_VERSION_32) +COMPATIBLE_IOCTL(DM_REMOVE_ALL_32) +COMPATIBLE_IOCTL(DM_LIST_DEVICES_32) +COMPATIBLE_IOCTL(DM_DEV_CREATE_32) +COMPATIBLE_IOCTL(DM_DEV_REMOVE_32) +COMPATIBLE_IOCTL(DM_DEV_RENAME_32) +COMPATIBLE_IOCTL(DM_DEV_SUSPEND_32) +COMPATIBLE_IOCTL(DM_DEV_STATUS_32) +COMPATIBLE_IOCTL(DM_DEV_WAIT_32) +COMPATIBLE_IOCTL(DM_TABLE_LOAD_32) +COMPATIBLE_IOCTL(DM_TABLE_CLEAR_32) +COMPATIBLE_IOCTL(DM_TABLE_DEPS_32) +COMPATIBLE_IOCTL(DM_TABLE_STATUS_32) +COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32) +COMPATIBLE_IOCTL(DM_TARGET_MSG_32) +COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32) +COMPATIBLE_IOCTL(DM_VERSION) +COMPATIBLE_IOCTL(DM_REMOVE_ALL) +COMPATIBLE_IOCTL(DM_LIST_DEVICES) +COMPATIBLE_IOCTL(DM_DEV_CREATE) +COMPATIBLE_IOCTL(DM_DEV_REMOVE) +COMPATIBLE_IOCTL(DM_DEV_RENAME) +COMPATIBLE_IOCTL(DM_DEV_SUSPEND) +COMPATIBLE_IOCTL(DM_DEV_STATUS) +COMPATIBLE_IOCTL(DM_DEV_WAIT) +COMPATIBLE_IOCTL(DM_TABLE_LOAD) +COMPATIBLE_IOCTL(DM_TABLE_CLEAR) +COMPATIBLE_IOCTL(DM_TABLE_DEPS) +COMPATIBLE_IOCTL(DM_TABLE_STATUS) +COMPATIBLE_IOCTL(DM_LIST_VERSIONS) +COMPATIBLE_IOCTL(DM_TARGET_MSG) +COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY) +/* Big K */ +COMPATIBLE_IOCTL(PIO_FONT) +COMPATIBLE_IOCTL(GIO_FONT) +ULONG_IOCTL(KDSIGACCEPT) +COMPATIBLE_IOCTL(KDGETKEYCODE) +COMPATIBLE_IOCTL(KDSETKEYCODE) +ULONG_IOCTL(KIOCSOUND) +ULONG_IOCTL(KDMKTONE) 
+COMPATIBLE_IOCTL(KDGKBTYPE) +ULONG_IOCTL(KDSETMODE) +COMPATIBLE_IOCTL(KDGETMODE) +ULONG_IOCTL(KDSKBMODE) +COMPATIBLE_IOCTL(KDGKBMODE) +ULONG_IOCTL(KDSKBMETA) +COMPATIBLE_IOCTL(KDGKBMETA) +COMPATIBLE_IOCTL(KDGKBENT) +COMPATIBLE_IOCTL(KDSKBENT) +COMPATIBLE_IOCTL(KDGKBSENT) +COMPATIBLE_IOCTL(KDSKBSENT) +COMPATIBLE_IOCTL(KDGKBDIACR) +COMPATIBLE_IOCTL(KDSKBDIACR) +COMPATIBLE_IOCTL(KDKBDREP) +COMPATIBLE_IOCTL(KDGKBLED) +ULONG_IOCTL(KDSKBLED) +COMPATIBLE_IOCTL(KDGETLED) +ULONG_IOCTL(KDSETLED) +COMPATIBLE_IOCTL(GIO_SCRNMAP) +COMPATIBLE_IOCTL(PIO_SCRNMAP) +COMPATIBLE_IOCTL(GIO_UNISCRNMAP) +COMPATIBLE_IOCTL(PIO_UNISCRNMAP) +COMPATIBLE_IOCTL(PIO_FONTRESET) +COMPATIBLE_IOCTL(PIO_UNIMAPCLR) +/* Big S */ +COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN) +COMPATIBLE_IOCTL(SCSI_IOCTL_DOORLOCK) +COMPATIBLE_IOCTL(SCSI_IOCTL_DOORUNLOCK) +COMPATIBLE_IOCTL(SCSI_IOCTL_TEST_UNIT_READY) +COMPATIBLE_IOCTL(SCSI_IOCTL_GET_BUS_NUMBER) +COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND) +COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST) +COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI) +/* Big T */ +COMPATIBLE_IOCTL(TUNSETNOCSUM) +COMPATIBLE_IOCTL(TUNSETDEBUG) +COMPATIBLE_IOCTL(TUNSETPERSIST) +COMPATIBLE_IOCTL(TUNSETOWNER) +/* Big V */ +COMPATIBLE_IOCTL(VT_SETMODE) +COMPATIBLE_IOCTL(VT_GETMODE) +COMPATIBLE_IOCTL(VT_GETSTATE) +COMPATIBLE_IOCTL(VT_OPENQRY) +ULONG_IOCTL(VT_ACTIVATE) +ULONG_IOCTL(VT_WAITACTIVE) +ULONG_IOCTL(VT_RELDISP) +ULONG_IOCTL(VT_DISALLOCATE) +COMPATIBLE_IOCTL(VT_RESIZE) +COMPATIBLE_IOCTL(VT_RESIZEX) +COMPATIBLE_IOCTL(VT_LOCKSWITCH) +COMPATIBLE_IOCTL(VT_UNLOCKSWITCH) +COMPATIBLE_IOCTL(VT_GETHIFONTMASK) +/* Little p (/dev/rtc, /dev/envctrl, etc.) */ +COMPATIBLE_IOCTL(RTC_AIE_ON) +COMPATIBLE_IOCTL(RTC_AIE_OFF) +COMPATIBLE_IOCTL(RTC_UIE_ON) +COMPATIBLE_IOCTL(RTC_UIE_OFF) +COMPATIBLE_IOCTL(RTC_PIE_ON) +COMPATIBLE_IOCTL(RTC_PIE_OFF) +COMPATIBLE_IOCTL(RTC_WIE_ON) +COMPATIBLE_IOCTL(RTC_WIE_OFF) +COMPATIBLE_IOCTL(RTC_ALM_SET) +COMPATIBLE_IOCTL(RTC_ALM_READ) +COMPATIBLE_IOCTL(RTC_RD_TIME) +COMPATIBLE_IOCTL(RTC_SET_TIME) +COMPATIBLE_IOCTL(RTC_WKALM_SET) +COMPATIBLE_IOCTL(RTC_WKALM_RD) +/* + * These two are only for the sbus rtc driver, but + * hwclock tries them on every rtc device first when + * running on sparc. On other architectures the entries + * are useless but harmless. 
+ */ +COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */ +COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */ +/* Little m */ +COMPATIBLE_IOCTL(MTIOCTOP) +/* Socket level stuff */ +COMPATIBLE_IOCTL(FIOQSIZE) +COMPATIBLE_IOCTL(FIOSETOWN) +COMPATIBLE_IOCTL(SIOCSPGRP) +COMPATIBLE_IOCTL(FIOGETOWN) +COMPATIBLE_IOCTL(SIOCGPGRP) +COMPATIBLE_IOCTL(SIOCATMARK) +COMPATIBLE_IOCTL(SIOCSIFLINK) +COMPATIBLE_IOCTL(SIOCSIFENCAP) +COMPATIBLE_IOCTL(SIOCGIFENCAP) +COMPATIBLE_IOCTL(SIOCSIFNAME) +COMPATIBLE_IOCTL(SIOCSARP) +COMPATIBLE_IOCTL(SIOCGARP) +COMPATIBLE_IOCTL(SIOCDARP) +COMPATIBLE_IOCTL(SIOCSRARP) +COMPATIBLE_IOCTL(SIOCGRARP) +COMPATIBLE_IOCTL(SIOCDRARP) +COMPATIBLE_IOCTL(SIOCADDDLCI) +COMPATIBLE_IOCTL(SIOCDELDLCI) +COMPATIBLE_IOCTL(SIOCGMIIPHY) +COMPATIBLE_IOCTL(SIOCGMIIREG) +COMPATIBLE_IOCTL(SIOCSMIIREG) +COMPATIBLE_IOCTL(SIOCGIFVLAN) +COMPATIBLE_IOCTL(SIOCSIFVLAN) +COMPATIBLE_IOCTL(SIOCBRADDBR) +COMPATIBLE_IOCTL(SIOCBRDELBR) +/* SG stuff */ +COMPATIBLE_IOCTL(SG_SET_TIMEOUT) +COMPATIBLE_IOCTL(SG_GET_TIMEOUT) +COMPATIBLE_IOCTL(SG_EMULATED_HOST) +ULONG_IOCTL(SG_SET_TRANSFORM) +COMPATIBLE_IOCTL(SG_GET_TRANSFORM) +COMPATIBLE_IOCTL(SG_SET_RESERVED_SIZE) +COMPATIBLE_IOCTL(SG_GET_RESERVED_SIZE) +COMPATIBLE_IOCTL(SG_GET_SCSI_ID) +COMPATIBLE_IOCTL(SG_SET_FORCE_LOW_DMA) +COMPATIBLE_IOCTL(SG_GET_LOW_DMA) +COMPATIBLE_IOCTL(SG_SET_FORCE_PACK_ID) +COMPATIBLE_IOCTL(SG_GET_PACK_ID) +COMPATIBLE_IOCTL(SG_GET_NUM_WAITING) +COMPATIBLE_IOCTL(SG_SET_DEBUG) +COMPATIBLE_IOCTL(SG_GET_SG_TABLESIZE) +COMPATIBLE_IOCTL(SG_GET_COMMAND_Q) +COMPATIBLE_IOCTL(SG_SET_COMMAND_Q) +COMPATIBLE_IOCTL(SG_GET_VERSION_NUM) +COMPATIBLE_IOCTL(SG_NEXT_CMD_LEN) +COMPATIBLE_IOCTL(SG_SCSI_RESET) +COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE) +COMPATIBLE_IOCTL(SG_SET_KEEP_ORPHAN) +COMPATIBLE_IOCTL(SG_GET_KEEP_ORPHAN) +/* PPP stuff */ +COMPATIBLE_IOCTL(PPPIOCGFLAGS) +COMPATIBLE_IOCTL(PPPIOCSFLAGS) +COMPATIBLE_IOCTL(PPPIOCGASYNCMAP) +COMPATIBLE_IOCTL(PPPIOCSASYNCMAP) +COMPATIBLE_IOCTL(PPPIOCGUNIT) +COMPATIBLE_IOCTL(PPPIOCGRASYNCMAP) +COMPATIBLE_IOCTL(PPPIOCSRASYNCMAP) +COMPATIBLE_IOCTL(PPPIOCGMRU) +COMPATIBLE_IOCTL(PPPIOCSMRU) +COMPATIBLE_IOCTL(PPPIOCSMAXCID) +COMPATIBLE_IOCTL(PPPIOCGXASYNCMAP) +COMPATIBLE_IOCTL(PPPIOCSXASYNCMAP) +COMPATIBLE_IOCTL(PPPIOCXFERUNIT) +/* PPPIOCSCOMPRESS is translated */ +COMPATIBLE_IOCTL(PPPIOCGNPMODE) +COMPATIBLE_IOCTL(PPPIOCSNPMODE) +COMPATIBLE_IOCTL(PPPIOCGDEBUG) +COMPATIBLE_IOCTL(PPPIOCSDEBUG) +/* PPPIOCSPASS is translated */ +/* PPPIOCSACTIVE is translated */ +/* PPPIOCGIDLE is translated */ +COMPATIBLE_IOCTL(PPPIOCNEWUNIT) +COMPATIBLE_IOCTL(PPPIOCATTACH) +COMPATIBLE_IOCTL(PPPIOCDETACH) +COMPATIBLE_IOCTL(PPPIOCSMRRU) +COMPATIBLE_IOCTL(PPPIOCCONNECT) +COMPATIBLE_IOCTL(PPPIOCDISCONN) +COMPATIBLE_IOCTL(PPPIOCATTCHAN) +COMPATIBLE_IOCTL(PPPIOCGCHAN) +/* PPPOX */ +COMPATIBLE_IOCTL(PPPOEIOCSFWD) +COMPATIBLE_IOCTL(PPPOEIOCDFWD) +/* LP */ +COMPATIBLE_IOCTL(LPGETSTATUS) +/* ppdev */ +COMPATIBLE_IOCTL(PPSETMODE) +COMPATIBLE_IOCTL(PPRSTATUS) +COMPATIBLE_IOCTL(PPRCONTROL) +COMPATIBLE_IOCTL(PPWCONTROL) +COMPATIBLE_IOCTL(PPFCONTROL) +COMPATIBLE_IOCTL(PPRDATA) +COMPATIBLE_IOCTL(PPWDATA) +COMPATIBLE_IOCTL(PPCLAIM) +COMPATIBLE_IOCTL(PPRELEASE) +COMPATIBLE_IOCTL(PPYIELD) +COMPATIBLE_IOCTL(PPEXCL) +COMPATIBLE_IOCTL(PPDATADIR) +COMPATIBLE_IOCTL(PPNEGOT) +COMPATIBLE_IOCTL(PPWCTLONIRQ) +COMPATIBLE_IOCTL(PPCLRIRQ) +COMPATIBLE_IOCTL(PPSETPHASE) +COMPATIBLE_IOCTL(PPGETMODES) +COMPATIBLE_IOCTL(PPGETMODE) +COMPATIBLE_IOCTL(PPGETPHASE) +COMPATIBLE_IOCTL(PPGETFLAGS) +COMPATIBLE_IOCTL(PPSETFLAGS) +/* CDROM stuff */ +COMPATIBLE_IOCTL(CDROMPAUSE) 
+COMPATIBLE_IOCTL(CDROMRESUME) +COMPATIBLE_IOCTL(CDROMPLAYMSF) +COMPATIBLE_IOCTL(CDROMPLAYTRKIND) +COMPATIBLE_IOCTL(CDROMREADTOCHDR) +COMPATIBLE_IOCTL(CDROMREADTOCENTRY) +COMPATIBLE_IOCTL(CDROMSTOP) +COMPATIBLE_IOCTL(CDROMSTART) +COMPATIBLE_IOCTL(CDROMEJECT) +COMPATIBLE_IOCTL(CDROMVOLCTRL) +COMPATIBLE_IOCTL(CDROMSUBCHNL) +ULONG_IOCTL(CDROMEJECT_SW) +COMPATIBLE_IOCTL(CDROMMULTISESSION) +COMPATIBLE_IOCTL(CDROM_GET_MCN) +COMPATIBLE_IOCTL(CDROMRESET) +COMPATIBLE_IOCTL(CDROMVOLREAD) +COMPATIBLE_IOCTL(CDROMSEEK) +COMPATIBLE_IOCTL(CDROMPLAYBLK) +COMPATIBLE_IOCTL(CDROMCLOSETRAY) +ULONG_IOCTL(CDROM_SET_OPTIONS) +ULONG_IOCTL(CDROM_CLEAR_OPTIONS) +ULONG_IOCTL(CDROM_SELECT_SPEED) +ULONG_IOCTL(CDROM_SELECT_DISC) +ULONG_IOCTL(CDROM_MEDIA_CHANGED) +ULONG_IOCTL(CDROM_DRIVE_STATUS) +COMPATIBLE_IOCTL(CDROM_DISC_STATUS) +COMPATIBLE_IOCTL(CDROM_CHANGER_NSLOTS) +ULONG_IOCTL(CDROM_LOCKDOOR) +ULONG_IOCTL(CDROM_DEBUG) +COMPATIBLE_IOCTL(CDROM_GET_CAPABILITY) +/* Ignore cdrom.h about these next 5 ioctls, they absolutely do + * not take a struct cdrom_read, instead they take a struct cdrom_msf + * which is compatible. + */ +COMPATIBLE_IOCTL(CDROMREADMODE2) +COMPATIBLE_IOCTL(CDROMREADMODE1) +COMPATIBLE_IOCTL(CDROMREADRAW) +COMPATIBLE_IOCTL(CDROMREADCOOKED) +COMPATIBLE_IOCTL(CDROMREADALL) +/* DVD ioctls */ +COMPATIBLE_IOCTL(DVD_READ_STRUCT) +COMPATIBLE_IOCTL(DVD_WRITE_STRUCT) +COMPATIBLE_IOCTL(DVD_AUTH) +/* pktcdvd */ +COMPATIBLE_IOCTL(PACKET_CTRL_CMD) +/* Big A */ +/* sparc only */ +/* Big Q for sound/OSS */ +COMPATIBLE_IOCTL(SNDCTL_SEQ_RESET) +COMPATIBLE_IOCTL(SNDCTL_SEQ_SYNC) +COMPATIBLE_IOCTL(SNDCTL_SYNTH_INFO) +COMPATIBLE_IOCTL(SNDCTL_SEQ_CTRLRATE) +COMPATIBLE_IOCTL(SNDCTL_SEQ_GETOUTCOUNT) +COMPATIBLE_IOCTL(SNDCTL_SEQ_GETINCOUNT) +COMPATIBLE_IOCTL(SNDCTL_SEQ_PERCMODE) +COMPATIBLE_IOCTL(SNDCTL_FM_LOAD_INSTR) +COMPATIBLE_IOCTL(SNDCTL_SEQ_TESTMIDI) +COMPATIBLE_IOCTL(SNDCTL_SEQ_RESETSAMPLES) +COMPATIBLE_IOCTL(SNDCTL_SEQ_NRSYNTHS) +COMPATIBLE_IOCTL(SNDCTL_SEQ_NRMIDIS) +COMPATIBLE_IOCTL(SNDCTL_MIDI_INFO) +COMPATIBLE_IOCTL(SNDCTL_SEQ_THRESHOLD) +COMPATIBLE_IOCTL(SNDCTL_SYNTH_MEMAVL) +COMPATIBLE_IOCTL(SNDCTL_FM_4OP_ENABLE) +COMPATIBLE_IOCTL(SNDCTL_SEQ_PANIC) +COMPATIBLE_IOCTL(SNDCTL_SEQ_OUTOFBAND) +COMPATIBLE_IOCTL(SNDCTL_SEQ_GETTIME) +COMPATIBLE_IOCTL(SNDCTL_SYNTH_ID) +COMPATIBLE_IOCTL(SNDCTL_SYNTH_CONTROL) +COMPATIBLE_IOCTL(SNDCTL_SYNTH_REMOVESAMPLE) +/* Big T for sound/OSS */ +COMPATIBLE_IOCTL(SNDCTL_TMR_TIMEBASE) +COMPATIBLE_IOCTL(SNDCTL_TMR_START) +COMPATIBLE_IOCTL(SNDCTL_TMR_STOP) +COMPATIBLE_IOCTL(SNDCTL_TMR_CONTINUE) +COMPATIBLE_IOCTL(SNDCTL_TMR_TEMPO) +COMPATIBLE_IOCTL(SNDCTL_TMR_SOURCE) +COMPATIBLE_IOCTL(SNDCTL_TMR_METRONOME) +COMPATIBLE_IOCTL(SNDCTL_TMR_SELECT) +/* Little m for sound/OSS */ +COMPATIBLE_IOCTL(SNDCTL_MIDI_PRETIME) +COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUMODE) +COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUCMD) +/* Big P for sound/OSS */ +COMPATIBLE_IOCTL(SNDCTL_DSP_RESET) +COMPATIBLE_IOCTL(SNDCTL_DSP_SYNC) +COMPATIBLE_IOCTL(SNDCTL_DSP_SPEED) +COMPATIBLE_IOCTL(SNDCTL_DSP_STEREO) +COMPATIBLE_IOCTL(SNDCTL_DSP_GETBLKSIZE) +COMPATIBLE_IOCTL(SNDCTL_DSP_CHANNELS) +COMPATIBLE_IOCTL(SOUND_PCM_WRITE_FILTER) +COMPATIBLE_IOCTL(SNDCTL_DSP_POST) +COMPATIBLE_IOCTL(SNDCTL_DSP_SUBDIVIDE) +COMPATIBLE_IOCTL(SNDCTL_DSP_SETFRAGMENT) +COMPATIBLE_IOCTL(SNDCTL_DSP_GETFMTS) +COMPATIBLE_IOCTL(SNDCTL_DSP_SETFMT) +COMPATIBLE_IOCTL(SNDCTL_DSP_GETOSPACE) +COMPATIBLE_IOCTL(SNDCTL_DSP_GETISPACE) +COMPATIBLE_IOCTL(SNDCTL_DSP_NONBLOCK) +COMPATIBLE_IOCTL(SNDCTL_DSP_GETCAPS) +COMPATIBLE_IOCTL(SNDCTL_DSP_GETTRIGGER) 
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETTRIGGER) +COMPATIBLE_IOCTL(SNDCTL_DSP_GETIPTR) +COMPATIBLE_IOCTL(SNDCTL_DSP_GETOPTR) +/* SNDCTL_DSP_MAPINBUF, XXX needs translation */ +/* SNDCTL_DSP_MAPOUTBUF, XXX needs translation */ +COMPATIBLE_IOCTL(SNDCTL_DSP_SETSYNCRO) +COMPATIBLE_IOCTL(SNDCTL_DSP_SETDUPLEX) +COMPATIBLE_IOCTL(SNDCTL_DSP_GETODELAY) +COMPATIBLE_IOCTL(SNDCTL_DSP_PROFILE) +COMPATIBLE_IOCTL(SOUND_PCM_READ_RATE) +COMPATIBLE_IOCTL(SOUND_PCM_READ_CHANNELS) +COMPATIBLE_IOCTL(SOUND_PCM_READ_BITS) +COMPATIBLE_IOCTL(SOUND_PCM_READ_FILTER) +/* Big C for sound/OSS */ +COMPATIBLE_IOCTL(SNDCTL_COPR_RESET) +COMPATIBLE_IOCTL(SNDCTL_COPR_LOAD) +COMPATIBLE_IOCTL(SNDCTL_COPR_RDATA) +COMPATIBLE_IOCTL(SNDCTL_COPR_RCODE) +COMPATIBLE_IOCTL(SNDCTL_COPR_WDATA) +COMPATIBLE_IOCTL(SNDCTL_COPR_WCODE) +COMPATIBLE_IOCTL(SNDCTL_COPR_RUN) +COMPATIBLE_IOCTL(SNDCTL_COPR_HALT) +COMPATIBLE_IOCTL(SNDCTL_COPR_SENDMSG) +COMPATIBLE_IOCTL(SNDCTL_COPR_RCVMSG) +/* Big M for sound/OSS */ +COMPATIBLE_IOCTL(SOUND_MIXER_READ_VOLUME) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_BASS) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_TREBLE) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_SYNTH) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_PCM) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_SPEAKER) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_MIC) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_CD) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_IMIX) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_ALTPCM) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECLEV) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_IGAIN) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_OGAIN) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE1) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE2) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE3) +COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL1)) +COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL2)) +COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL3)) +COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEIN)) +COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEOUT)) +COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_VIDEO)) +COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_RADIO)) +COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_MONITOR)) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_MUTE) +/* SOUND_MIXER_READ_ENHANCE, same value as READ_MUTE */ +/* SOUND_MIXER_READ_LOUD, same value as READ_MUTE */ +COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECSRC) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_DEVMASK) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECMASK) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_STEREODEVS) +COMPATIBLE_IOCTL(SOUND_MIXER_READ_CAPS) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_VOLUME) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_BASS) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_TREBLE) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SYNTH) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_PCM) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SPEAKER) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MIC) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_CD) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IMIX) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_ALTPCM) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECLEV) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IGAIN) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_OGAIN) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE1) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE2) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE3) +COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL1)) +COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL2)) +COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL3)) +COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEIN)) +COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEOUT)) +COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_VIDEO)) +COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_RADIO)) 
+COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_MONITOR)) +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MUTE) +/* SOUND_MIXER_WRITE_ENHANCE, same value as WRITE_MUTE */ +/* SOUND_MIXER_WRITE_LOUD, same value as WRITE_MUTE */ +COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECSRC) +COMPATIBLE_IOCTL(SOUND_MIXER_INFO) +COMPATIBLE_IOCTL(SOUND_OLD_MIXER_INFO) +COMPATIBLE_IOCTL(SOUND_MIXER_ACCESS) +COMPATIBLE_IOCTL(SOUND_MIXER_AGC) +COMPATIBLE_IOCTL(SOUND_MIXER_3DSE) +COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE1) +COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE2) +COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE3) +COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE4) +COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5) +COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS) +COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS) +COMPATIBLE_IOCTL(OSS_GETVERSION) +/* AUTOFS */ +ULONG_IOCTL(AUTOFS_IOC_READY) +ULONG_IOCTL(AUTOFS_IOC_FAIL) +COMPATIBLE_IOCTL(AUTOFS_IOC_CATATONIC) +COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER) +COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE) +COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI) +COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOSUBVER) +COMPATIBLE_IOCTL(AUTOFS_IOC_ASKREGHOST) +COMPATIBLE_IOCTL(AUTOFS_IOC_TOGGLEREGHOST) +COMPATIBLE_IOCTL(AUTOFS_IOC_ASKUMOUNT) +/* Raw devices */ +COMPATIBLE_IOCTL(RAW_SETBIND) +COMPATIBLE_IOCTL(RAW_GETBIND) +/* SMB ioctls which do not need any translations */ +COMPATIBLE_IOCTL(SMB_IOC_NEWCONN) +/* Little a */ +COMPATIBLE_IOCTL(ATMSIGD_CTRL) +COMPATIBLE_IOCTL(ATMARPD_CTRL) +COMPATIBLE_IOCTL(ATMLEC_CTRL) +COMPATIBLE_IOCTL(ATMLEC_MCAST) +COMPATIBLE_IOCTL(ATMLEC_DATA) +COMPATIBLE_IOCTL(ATM_SETSC) +COMPATIBLE_IOCTL(SIOCSIFATMTCP) +COMPATIBLE_IOCTL(SIOCMKCLIP) +COMPATIBLE_IOCTL(ATMARP_MKIP) +COMPATIBLE_IOCTL(ATMARP_SETENTRY) +COMPATIBLE_IOCTL(ATMARP_ENCAP) +COMPATIBLE_IOCTL(ATMTCP_CREATE) +COMPATIBLE_IOCTL(ATMTCP_REMOVE) +COMPATIBLE_IOCTL(ATMMPC_CTRL) +COMPATIBLE_IOCTL(ATMMPC_DATA) +/* Watchdog */ +COMPATIBLE_IOCTL(WDIOC_GETSUPPORT) +COMPATIBLE_IOCTL(WDIOC_GETSTATUS) +COMPATIBLE_IOCTL(WDIOC_GETBOOTSTATUS) +COMPATIBLE_IOCTL(WDIOC_GETTEMP) +COMPATIBLE_IOCTL(WDIOC_SETOPTIONS) +COMPATIBLE_IOCTL(WDIOC_KEEPALIVE) +COMPATIBLE_IOCTL(WDIOC_SETTIMEOUT) +COMPATIBLE_IOCTL(WDIOC_GETTIMEOUT) +/* Big R */ +COMPATIBLE_IOCTL(RNDGETENTCNT) +COMPATIBLE_IOCTL(RNDADDTOENTCNT) +COMPATIBLE_IOCTL(RNDGETPOOL) +COMPATIBLE_IOCTL(RNDADDENTROPY) +COMPATIBLE_IOCTL(RNDZAPENTCNT) +COMPATIBLE_IOCTL(RNDCLEARPOOL) +/* Bluetooth */ +COMPATIBLE_IOCTL(HCIDEVUP) +COMPATIBLE_IOCTL(HCIDEVDOWN) +COMPATIBLE_IOCTL(HCIDEVRESET) +COMPATIBLE_IOCTL(HCIDEVRESTAT) +COMPATIBLE_IOCTL(HCIGETDEVLIST) +COMPATIBLE_IOCTL(HCIGETDEVINFO) +COMPATIBLE_IOCTL(HCIGETCONNLIST) +COMPATIBLE_IOCTL(HCIGETCONNINFO) +COMPATIBLE_IOCTL(HCISETRAW) +COMPATIBLE_IOCTL(HCISETSCAN) +COMPATIBLE_IOCTL(HCISETAUTH) +COMPATIBLE_IOCTL(HCISETENCRYPT) +COMPATIBLE_IOCTL(HCISETPTYPE) +COMPATIBLE_IOCTL(HCISETLINKPOL) +COMPATIBLE_IOCTL(HCISETLINKMODE) +COMPATIBLE_IOCTL(HCISETACLMTU) +COMPATIBLE_IOCTL(HCISETSCOMTU) +COMPATIBLE_IOCTL(HCIINQUIRY) +COMPATIBLE_IOCTL(HCIUARTSETPROTO) +COMPATIBLE_IOCTL(HCIUARTGETPROTO) +COMPATIBLE_IOCTL(RFCOMMCREATEDEV) +COMPATIBLE_IOCTL(RFCOMMRELEASEDEV) +COMPATIBLE_IOCTL(RFCOMMGETDEVLIST) +COMPATIBLE_IOCTL(RFCOMMGETDEVINFO) +COMPATIBLE_IOCTL(RFCOMMSTEALDLC) +COMPATIBLE_IOCTL(BNEPCONNADD) +COMPATIBLE_IOCTL(BNEPCONNDEL) +COMPATIBLE_IOCTL(BNEPGETCONNLIST) +COMPATIBLE_IOCTL(BNEPGETCONNINFO) +COMPATIBLE_IOCTL(CMTPCONNADD) +COMPATIBLE_IOCTL(CMTPCONNDEL) +COMPATIBLE_IOCTL(CMTPGETCONNLIST) +COMPATIBLE_IOCTL(CMTPGETCONNINFO) +COMPATIBLE_IOCTL(HIDPCONNADD) +COMPATIBLE_IOCTL(HIDPCONNDEL) +COMPATIBLE_IOCTL(HIDPGETCONNLIST) +COMPATIBLE_IOCTL(HIDPGETCONNINFO) 
+/* CAPI */ +COMPATIBLE_IOCTL(CAPI_REGISTER) +COMPATIBLE_IOCTL(CAPI_GET_MANUFACTURER) +COMPATIBLE_IOCTL(CAPI_GET_VERSION) +COMPATIBLE_IOCTL(CAPI_GET_SERIAL) +COMPATIBLE_IOCTL(CAPI_GET_PROFILE) +COMPATIBLE_IOCTL(CAPI_MANUFACTURER_CMD) +COMPATIBLE_IOCTL(CAPI_GET_ERRCODE) +COMPATIBLE_IOCTL(CAPI_INSTALLED) +COMPATIBLE_IOCTL(CAPI_GET_FLAGS) +COMPATIBLE_IOCTL(CAPI_SET_FLAGS) +COMPATIBLE_IOCTL(CAPI_CLR_FLAGS) +COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT) +COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT) +/* Siemens Gigaset */ +COMPATIBLE_IOCTL(GIGASET_REDIR) +COMPATIBLE_IOCTL(GIGASET_CONFIG) +COMPATIBLE_IOCTL(GIGASET_BRKCHARS) +COMPATIBLE_IOCTL(GIGASET_VERSION) +/* Misc. */ +COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */ +COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */ +COMPATIBLE_IOCTL(PCIIOC_CONTROLLER) +COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_IO) +COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_MEM) +COMPATIBLE_IOCTL(PCIIOC_WRITE_COMBINE) +/* USB */ +COMPATIBLE_IOCTL(USBDEVFS_RESETEP) +COMPATIBLE_IOCTL(USBDEVFS_SETINTERFACE) +COMPATIBLE_IOCTL(USBDEVFS_SETCONFIGURATION) +COMPATIBLE_IOCTL(USBDEVFS_GETDRIVER) +COMPATIBLE_IOCTL(USBDEVFS_DISCARDURB) +COMPATIBLE_IOCTL(USBDEVFS_CLAIMINTERFACE) +COMPATIBLE_IOCTL(USBDEVFS_RELEASEINTERFACE) +COMPATIBLE_IOCTL(USBDEVFS_CONNECTINFO) +COMPATIBLE_IOCTL(USBDEVFS_HUB_PORTINFO) +COMPATIBLE_IOCTL(USBDEVFS_RESET) +COMPATIBLE_IOCTL(USBDEVFS_SUBMITURB32) +COMPATIBLE_IOCTL(USBDEVFS_REAPURB32) +COMPATIBLE_IOCTL(USBDEVFS_REAPURBNDELAY32) +COMPATIBLE_IOCTL(USBDEVFS_CLEAR_HALT) +/* MTD */ +COMPATIBLE_IOCTL(MEMGETINFO) +COMPATIBLE_IOCTL(MEMERASE) +COMPATIBLE_IOCTL(MEMLOCK) +COMPATIBLE_IOCTL(MEMUNLOCK) +COMPATIBLE_IOCTL(MEMGETREGIONCOUNT) +COMPATIBLE_IOCTL(MEMGETREGIONINFO) +COMPATIBLE_IOCTL(MEMGETBADBLOCK) +COMPATIBLE_IOCTL(MEMSETBADBLOCK) +/* NBD */ +ULONG_IOCTL(NBD_SET_SOCK) +ULONG_IOCTL(NBD_SET_BLKSIZE) +ULONG_IOCTL(NBD_SET_SIZE) +COMPATIBLE_IOCTL(NBD_DO_IT) +COMPATIBLE_IOCTL(NBD_CLEAR_SOCK) +COMPATIBLE_IOCTL(NBD_CLEAR_QUE) +COMPATIBLE_IOCTL(NBD_PRINT_DEBUG) +ULONG_IOCTL(NBD_SET_SIZE_BLOCKS) +COMPATIBLE_IOCTL(NBD_DISCONNECT) +/* i2c */ +COMPATIBLE_IOCTL(I2C_SLAVE) +COMPATIBLE_IOCTL(I2C_SLAVE_FORCE) +COMPATIBLE_IOCTL(I2C_TENBIT) +COMPATIBLE_IOCTL(I2C_PEC) +COMPATIBLE_IOCTL(I2C_RETRIES) +COMPATIBLE_IOCTL(I2C_TIMEOUT) +/* wireless */ +COMPATIBLE_IOCTL(SIOCSIWCOMMIT) +COMPATIBLE_IOCTL(SIOCGIWNAME) +COMPATIBLE_IOCTL(SIOCSIWNWID) +COMPATIBLE_IOCTL(SIOCGIWNWID) +COMPATIBLE_IOCTL(SIOCSIWFREQ) +COMPATIBLE_IOCTL(SIOCGIWFREQ) +COMPATIBLE_IOCTL(SIOCSIWMODE) +COMPATIBLE_IOCTL(SIOCGIWMODE) +COMPATIBLE_IOCTL(SIOCSIWSENS) +COMPATIBLE_IOCTL(SIOCGIWSENS) +COMPATIBLE_IOCTL(SIOCSIWRANGE) +COMPATIBLE_IOCTL(SIOCSIWPRIV) +COMPATIBLE_IOCTL(SIOCGIWPRIV) +COMPATIBLE_IOCTL(SIOCSIWSTATS) +COMPATIBLE_IOCTL(SIOCGIWSTATS) +COMPATIBLE_IOCTL(SIOCSIWAP) +COMPATIBLE_IOCTL(SIOCGIWAP) +COMPATIBLE_IOCTL(SIOCSIWSCAN) +COMPATIBLE_IOCTL(SIOCSIWRATE) +COMPATIBLE_IOCTL(SIOCGIWRATE) +COMPATIBLE_IOCTL(SIOCSIWRTS) +COMPATIBLE_IOCTL(SIOCGIWRTS) +COMPATIBLE_IOCTL(SIOCSIWFRAG) +COMPATIBLE_IOCTL(SIOCGIWFRAG) +COMPATIBLE_IOCTL(SIOCSIWTXPOW) +COMPATIBLE_IOCTL(SIOCGIWTXPOW) +COMPATIBLE_IOCTL(SIOCSIWRETRY) +COMPATIBLE_IOCTL(SIOCGIWRETRY) +COMPATIBLE_IOCTL(SIOCSIWPOWER) +COMPATIBLE_IOCTL(SIOCGIWPOWER) +/* hiddev */ +COMPATIBLE_IOCTL(HIDIOCGVERSION) +COMPATIBLE_IOCTL(HIDIOCAPPLICATION) +COMPATIBLE_IOCTL(HIDIOCGDEVINFO) +COMPATIBLE_IOCTL(HIDIOCGSTRING) +COMPATIBLE_IOCTL(HIDIOCINITREPORT) +COMPATIBLE_IOCTL(HIDIOCGREPORT) +COMPATIBLE_IOCTL(HIDIOCSREPORT) +COMPATIBLE_IOCTL(HIDIOCGREPORTINFO) +COMPATIBLE_IOCTL(HIDIOCGFIELDINFO) +COMPATIBLE_IOCTL(HIDIOCGUSAGE) 
+COMPATIBLE_IOCTL(HIDIOCSUSAGE) +COMPATIBLE_IOCTL(HIDIOCGUCODE) +COMPATIBLE_IOCTL(HIDIOCGFLAG) +COMPATIBLE_IOCTL(HIDIOCSFLAG) +COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINDEX) +COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINFO) +/* dvb */ +COMPATIBLE_IOCTL(AUDIO_STOP) +COMPATIBLE_IOCTL(AUDIO_PLAY) +COMPATIBLE_IOCTL(AUDIO_PAUSE) +COMPATIBLE_IOCTL(AUDIO_CONTINUE) +COMPATIBLE_IOCTL(AUDIO_SELECT_SOURCE) +COMPATIBLE_IOCTL(AUDIO_SET_MUTE) +COMPATIBLE_IOCTL(AUDIO_SET_AV_SYNC) +COMPATIBLE_IOCTL(AUDIO_SET_BYPASS_MODE) +COMPATIBLE_IOCTL(AUDIO_CHANNEL_SELECT) +COMPATIBLE_IOCTL(AUDIO_GET_STATUS) +COMPATIBLE_IOCTL(AUDIO_GET_CAPABILITIES) +COMPATIBLE_IOCTL(AUDIO_CLEAR_BUFFER) +COMPATIBLE_IOCTL(AUDIO_SET_ID) +COMPATIBLE_IOCTL(AUDIO_SET_MIXER) +COMPATIBLE_IOCTL(AUDIO_SET_STREAMTYPE) +COMPATIBLE_IOCTL(AUDIO_SET_EXT_ID) +COMPATIBLE_IOCTL(AUDIO_SET_ATTRIBUTES) +COMPATIBLE_IOCTL(AUDIO_SET_KARAOKE) +COMPATIBLE_IOCTL(DMX_START) +COMPATIBLE_IOCTL(DMX_STOP) +COMPATIBLE_IOCTL(DMX_SET_FILTER) +COMPATIBLE_IOCTL(DMX_SET_PES_FILTER) +COMPATIBLE_IOCTL(DMX_SET_BUFFER_SIZE) +COMPATIBLE_IOCTL(DMX_GET_PES_PIDS) +COMPATIBLE_IOCTL(DMX_GET_CAPS) +COMPATIBLE_IOCTL(DMX_SET_SOURCE) +COMPATIBLE_IOCTL(DMX_GET_STC) +COMPATIBLE_IOCTL(FE_GET_INFO) +COMPATIBLE_IOCTL(FE_DISEQC_RESET_OVERLOAD) +COMPATIBLE_IOCTL(FE_DISEQC_SEND_MASTER_CMD) +COMPATIBLE_IOCTL(FE_DISEQC_RECV_SLAVE_REPLY) +COMPATIBLE_IOCTL(FE_DISEQC_SEND_BURST) +COMPATIBLE_IOCTL(FE_SET_TONE) +COMPATIBLE_IOCTL(FE_SET_VOLTAGE) +COMPATIBLE_IOCTL(FE_ENABLE_HIGH_LNB_VOLTAGE) +COMPATIBLE_IOCTL(FE_READ_STATUS) +COMPATIBLE_IOCTL(FE_READ_BER) +COMPATIBLE_IOCTL(FE_READ_SIGNAL_STRENGTH) +COMPATIBLE_IOCTL(FE_READ_SNR) +COMPATIBLE_IOCTL(FE_READ_UNCORRECTED_BLOCKS) +COMPATIBLE_IOCTL(FE_SET_FRONTEND) +COMPATIBLE_IOCTL(FE_GET_FRONTEND) +COMPATIBLE_IOCTL(FE_GET_EVENT) +COMPATIBLE_IOCTL(FE_DISHNETWORK_SEND_LEGACY_CMD) +COMPATIBLE_IOCTL(VIDEO_STOP) +COMPATIBLE_IOCTL(VIDEO_PLAY) +COMPATIBLE_IOCTL(VIDEO_FREEZE) +COMPATIBLE_IOCTL(VIDEO_CONTINUE) +COMPATIBLE_IOCTL(VIDEO_SELECT_SOURCE) +COMPATIBLE_IOCTL(VIDEO_SET_BLANK) +COMPATIBLE_IOCTL(VIDEO_GET_STATUS) +COMPATIBLE_IOCTL(VIDEO_SET_DISPLAY_FORMAT) +COMPATIBLE_IOCTL(VIDEO_FAST_FORWARD) +COMPATIBLE_IOCTL(VIDEO_SLOWMOTION) +COMPATIBLE_IOCTL(VIDEO_GET_CAPABILITIES) +COMPATIBLE_IOCTL(VIDEO_CLEAR_BUFFER) +COMPATIBLE_IOCTL(VIDEO_SET_ID) +COMPATIBLE_IOCTL(VIDEO_SET_STREAMTYPE) +COMPATIBLE_IOCTL(VIDEO_SET_FORMAT) +COMPATIBLE_IOCTL(VIDEO_SET_SYSTEM) +COMPATIBLE_IOCTL(VIDEO_SET_HIGHLIGHT) +COMPATIBLE_IOCTL(VIDEO_SET_SPU) +COMPATIBLE_IOCTL(VIDEO_GET_NAVI) +COMPATIBLE_IOCTL(VIDEO_SET_ATTRIBUTES) +COMPATIBLE_IOCTL(VIDEO_GET_SIZE) +COMPATIBLE_IOCTL(VIDEO_GET_FRAME_RATE) + +/* now things that need handlers */ HANDLE_IOCTL(MEMREADOOB32, mtd_rw_oob) HANDLE_IOCTL(MEMWRITEOOB32, mtd_rw_oob) #ifdef CONFIG_NET diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h deleted file mode 100644 index c26c3adcfacf..000000000000 --- a/include/linux/compat_ioctl.h +++ /dev/null @@ -1,830 +0,0 @@ -/* List here explicitly which ioctl's are known to have - * compatible types passed or none at all... Please include - * only stuff that is compatible on *all architectures*. 
- */ - -COMPATIBLE_IOCTL(0x4B50) /* KDGHWCLK - not in the kernel, but don't complain */ -COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain */ - -/* Big T */ -COMPATIBLE_IOCTL(TCGETA) -COMPATIBLE_IOCTL(TCSETA) -COMPATIBLE_IOCTL(TCSETAW) -COMPATIBLE_IOCTL(TCSETAF) -COMPATIBLE_IOCTL(TCSBRK) -ULONG_IOCTL(TCSBRKP) -COMPATIBLE_IOCTL(TCXONC) -COMPATIBLE_IOCTL(TCFLSH) -COMPATIBLE_IOCTL(TCGETS) -COMPATIBLE_IOCTL(TCSETS) -COMPATIBLE_IOCTL(TCSETSW) -COMPATIBLE_IOCTL(TCSETSF) -COMPATIBLE_IOCTL(TIOCLINUX) -COMPATIBLE_IOCTL(TIOCSBRK) -COMPATIBLE_IOCTL(TIOCCBRK) -ULONG_IOCTL(TIOCMIWAIT) -COMPATIBLE_IOCTL(TIOCGICOUNT) -/* Little t */ -COMPATIBLE_IOCTL(TIOCGETD) -COMPATIBLE_IOCTL(TIOCSETD) -COMPATIBLE_IOCTL(TIOCEXCL) -COMPATIBLE_IOCTL(TIOCNXCL) -COMPATIBLE_IOCTL(TIOCCONS) -COMPATIBLE_IOCTL(TIOCGSOFTCAR) -COMPATIBLE_IOCTL(TIOCSSOFTCAR) -COMPATIBLE_IOCTL(TIOCSWINSZ) -COMPATIBLE_IOCTL(TIOCGWINSZ) -COMPATIBLE_IOCTL(TIOCMGET) -COMPATIBLE_IOCTL(TIOCMBIC) -COMPATIBLE_IOCTL(TIOCMBIS) -COMPATIBLE_IOCTL(TIOCMSET) -COMPATIBLE_IOCTL(TIOCPKT) -COMPATIBLE_IOCTL(TIOCNOTTY) -COMPATIBLE_IOCTL(TIOCSTI) -COMPATIBLE_IOCTL(TIOCOUTQ) -COMPATIBLE_IOCTL(TIOCSPGRP) -COMPATIBLE_IOCTL(TIOCGPGRP) -ULONG_IOCTL(TIOCSCTTY) -COMPATIBLE_IOCTL(TIOCGPTN) -COMPATIBLE_IOCTL(TIOCSPTLCK) -COMPATIBLE_IOCTL(TIOCSERGETLSR) -/* Little f */ -COMPATIBLE_IOCTL(FIOCLEX) -COMPATIBLE_IOCTL(FIONCLEX) -COMPATIBLE_IOCTL(FIOASYNC) -COMPATIBLE_IOCTL(FIONBIO) -COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */ -/* 0x00 */ -COMPATIBLE_IOCTL(FIBMAP) -COMPATIBLE_IOCTL(FIGETBSZ) -/* 0x03 -- HD/IDE ioctl's used by hdparm and friends. - * Some need translations, these do not. - */ -COMPATIBLE_IOCTL(HDIO_GET_IDENTITY) -COMPATIBLE_IOCTL(HDIO_DRIVE_TASK) -COMPATIBLE_IOCTL(HDIO_DRIVE_CMD) -ULONG_IOCTL(HDIO_SET_MULTCOUNT) -ULONG_IOCTL(HDIO_SET_UNMASKINTR) -ULONG_IOCTL(HDIO_SET_KEEPSETTINGS) -ULONG_IOCTL(HDIO_SET_32BIT) -ULONG_IOCTL(HDIO_SET_NOWERR) -ULONG_IOCTL(HDIO_SET_DMA) -ULONG_IOCTL(HDIO_SET_PIO_MODE) -ULONG_IOCTL(HDIO_SET_NICE) -ULONG_IOCTL(HDIO_SET_WCACHE) -ULONG_IOCTL(HDIO_SET_ACOUSTIC) -ULONG_IOCTL(HDIO_SET_BUSSTATE) -ULONG_IOCTL(HDIO_SET_ADDRESS) -COMPATIBLE_IOCTL(HDIO_SCAN_HWIF) -/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */ -COMPATIBLE_IOCTL(0x330) -/* 0x02 -- Floppy ioctls */ -COMPATIBLE_IOCTL(FDMSGON) -COMPATIBLE_IOCTL(FDMSGOFF) -COMPATIBLE_IOCTL(FDSETEMSGTRESH) -COMPATIBLE_IOCTL(FDFLUSH) -COMPATIBLE_IOCTL(FDWERRORCLR) -COMPATIBLE_IOCTL(FDSETMAXERRS) -COMPATIBLE_IOCTL(FDGETMAXERRS) -COMPATIBLE_IOCTL(FDGETDRVTYP) -COMPATIBLE_IOCTL(FDEJECT) -COMPATIBLE_IOCTL(FDCLRPRM) -COMPATIBLE_IOCTL(FDFMTBEG) -COMPATIBLE_IOCTL(FDFMTEND) -COMPATIBLE_IOCTL(FDRESET) -COMPATIBLE_IOCTL(FDTWADDLE) -COMPATIBLE_IOCTL(FDFMTTRK) -COMPATIBLE_IOCTL(FDRAWCMD) -/* 0x12 */ -#ifdef CONFIG_BLOCK -COMPATIBLE_IOCTL(BLKRASET) -COMPATIBLE_IOCTL(BLKROSET) -COMPATIBLE_IOCTL(BLKROGET) -COMPATIBLE_IOCTL(BLKRRPART) -COMPATIBLE_IOCTL(BLKFLSBUF) -COMPATIBLE_IOCTL(BLKSECTSET) -COMPATIBLE_IOCTL(BLKSSZGET) -COMPATIBLE_IOCTL(BLKTRACESTART) -COMPATIBLE_IOCTL(BLKTRACESTOP) -COMPATIBLE_IOCTL(BLKTRACESETUP) -COMPATIBLE_IOCTL(BLKTRACETEARDOWN) -ULONG_IOCTL(BLKRASET) -ULONG_IOCTL(BLKFRASET) -#endif -/* RAID */ -COMPATIBLE_IOCTL(RAID_VERSION) -COMPATIBLE_IOCTL(GET_ARRAY_INFO) -COMPATIBLE_IOCTL(GET_DISK_INFO) -COMPATIBLE_IOCTL(PRINT_RAID_DEBUG) -COMPATIBLE_IOCTL(RAID_AUTORUN) -COMPATIBLE_IOCTL(CLEAR_ARRAY) -COMPATIBLE_IOCTL(ADD_NEW_DISK) -ULONG_IOCTL(HOT_REMOVE_DISK) -COMPATIBLE_IOCTL(SET_ARRAY_INFO) -COMPATIBLE_IOCTL(SET_DISK_INFO) -COMPATIBLE_IOCTL(WRITE_RAID_INFO) 
-COMPATIBLE_IOCTL(UNPROTECT_ARRAY) -COMPATIBLE_IOCTL(PROTECT_ARRAY) -ULONG_IOCTL(HOT_ADD_DISK) -ULONG_IOCTL(SET_DISK_FAULTY) -COMPATIBLE_IOCTL(RUN_ARRAY) -COMPATIBLE_IOCTL(STOP_ARRAY) -COMPATIBLE_IOCTL(STOP_ARRAY_RO) -COMPATIBLE_IOCTL(RESTART_ARRAY_RW) -COMPATIBLE_IOCTL(GET_BITMAP_FILE) -ULONG_IOCTL(SET_BITMAP_FILE) -/* DM */ -COMPATIBLE_IOCTL(DM_VERSION_32) -COMPATIBLE_IOCTL(DM_REMOVE_ALL_32) -COMPATIBLE_IOCTL(DM_LIST_DEVICES_32) -COMPATIBLE_IOCTL(DM_DEV_CREATE_32) -COMPATIBLE_IOCTL(DM_DEV_REMOVE_32) -COMPATIBLE_IOCTL(DM_DEV_RENAME_32) -COMPATIBLE_IOCTL(DM_DEV_SUSPEND_32) -COMPATIBLE_IOCTL(DM_DEV_STATUS_32) -COMPATIBLE_IOCTL(DM_DEV_WAIT_32) -COMPATIBLE_IOCTL(DM_TABLE_LOAD_32) -COMPATIBLE_IOCTL(DM_TABLE_CLEAR_32) -COMPATIBLE_IOCTL(DM_TABLE_DEPS_32) -COMPATIBLE_IOCTL(DM_TABLE_STATUS_32) -COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32) -COMPATIBLE_IOCTL(DM_TARGET_MSG_32) -COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32) -COMPATIBLE_IOCTL(DM_VERSION) -COMPATIBLE_IOCTL(DM_REMOVE_ALL) -COMPATIBLE_IOCTL(DM_LIST_DEVICES) -COMPATIBLE_IOCTL(DM_DEV_CREATE) -COMPATIBLE_IOCTL(DM_DEV_REMOVE) -COMPATIBLE_IOCTL(DM_DEV_RENAME) -COMPATIBLE_IOCTL(DM_DEV_SUSPEND) -COMPATIBLE_IOCTL(DM_DEV_STATUS) -COMPATIBLE_IOCTL(DM_DEV_WAIT) -COMPATIBLE_IOCTL(DM_TABLE_LOAD) -COMPATIBLE_IOCTL(DM_TABLE_CLEAR) -COMPATIBLE_IOCTL(DM_TABLE_DEPS) -COMPATIBLE_IOCTL(DM_TABLE_STATUS) -COMPATIBLE_IOCTL(DM_LIST_VERSIONS) -COMPATIBLE_IOCTL(DM_TARGET_MSG) -COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY) -/* Big K */ -COMPATIBLE_IOCTL(PIO_FONT) -COMPATIBLE_IOCTL(GIO_FONT) -ULONG_IOCTL(KDSIGACCEPT) -COMPATIBLE_IOCTL(KDGETKEYCODE) -COMPATIBLE_IOCTL(KDSETKEYCODE) -ULONG_IOCTL(KIOCSOUND) -ULONG_IOCTL(KDMKTONE) -COMPATIBLE_IOCTL(KDGKBTYPE) -ULONG_IOCTL(KDSETMODE) -COMPATIBLE_IOCTL(KDGETMODE) -ULONG_IOCTL(KDSKBMODE) -COMPATIBLE_IOCTL(KDGKBMODE) -ULONG_IOCTL(KDSKBMETA) -COMPATIBLE_IOCTL(KDGKBMETA) -COMPATIBLE_IOCTL(KDGKBENT) -COMPATIBLE_IOCTL(KDSKBENT) -COMPATIBLE_IOCTL(KDGKBSENT) -COMPATIBLE_IOCTL(KDSKBSENT) -COMPATIBLE_IOCTL(KDGKBDIACR) -COMPATIBLE_IOCTL(KDSKBDIACR) -COMPATIBLE_IOCTL(KDKBDREP) -COMPATIBLE_IOCTL(KDGKBLED) -ULONG_IOCTL(KDSKBLED) -COMPATIBLE_IOCTL(KDGETLED) -ULONG_IOCTL(KDSETLED) -COMPATIBLE_IOCTL(GIO_SCRNMAP) -COMPATIBLE_IOCTL(PIO_SCRNMAP) -COMPATIBLE_IOCTL(GIO_UNISCRNMAP) -COMPATIBLE_IOCTL(PIO_UNISCRNMAP) -COMPATIBLE_IOCTL(PIO_FONTRESET) -COMPATIBLE_IOCTL(PIO_UNIMAPCLR) -/* Big S */ -COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN) -COMPATIBLE_IOCTL(SCSI_IOCTL_DOORLOCK) -COMPATIBLE_IOCTL(SCSI_IOCTL_DOORUNLOCK) -COMPATIBLE_IOCTL(SCSI_IOCTL_TEST_UNIT_READY) -COMPATIBLE_IOCTL(SCSI_IOCTL_GET_BUS_NUMBER) -COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND) -COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST) -COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI) -/* Big T */ -COMPATIBLE_IOCTL(TUNSETNOCSUM) -COMPATIBLE_IOCTL(TUNSETDEBUG) -COMPATIBLE_IOCTL(TUNSETPERSIST) -COMPATIBLE_IOCTL(TUNSETOWNER) -/* Big V */ -COMPATIBLE_IOCTL(VT_SETMODE) -COMPATIBLE_IOCTL(VT_GETMODE) -COMPATIBLE_IOCTL(VT_GETSTATE) -COMPATIBLE_IOCTL(VT_OPENQRY) -ULONG_IOCTL(VT_ACTIVATE) -ULONG_IOCTL(VT_WAITACTIVE) -ULONG_IOCTL(VT_RELDISP) -ULONG_IOCTL(VT_DISALLOCATE) -COMPATIBLE_IOCTL(VT_RESIZE) -COMPATIBLE_IOCTL(VT_RESIZEX) -COMPATIBLE_IOCTL(VT_LOCKSWITCH) -COMPATIBLE_IOCTL(VT_UNLOCKSWITCH) -COMPATIBLE_IOCTL(VT_GETHIFONTMASK) -/* Little p (/dev/rtc, /dev/envctrl, etc.) 
*/ -COMPATIBLE_IOCTL(RTC_AIE_ON) -COMPATIBLE_IOCTL(RTC_AIE_OFF) -COMPATIBLE_IOCTL(RTC_UIE_ON) -COMPATIBLE_IOCTL(RTC_UIE_OFF) -COMPATIBLE_IOCTL(RTC_PIE_ON) -COMPATIBLE_IOCTL(RTC_PIE_OFF) -COMPATIBLE_IOCTL(RTC_WIE_ON) -COMPATIBLE_IOCTL(RTC_WIE_OFF) -COMPATIBLE_IOCTL(RTC_ALM_SET) -COMPATIBLE_IOCTL(RTC_ALM_READ) -COMPATIBLE_IOCTL(RTC_RD_TIME) -COMPATIBLE_IOCTL(RTC_SET_TIME) -COMPATIBLE_IOCTL(RTC_WKALM_SET) -COMPATIBLE_IOCTL(RTC_WKALM_RD) -/* - * These two are only for the sbus rtc driver, but - * hwclock tries them on every rtc device first when - * running on sparc. On other architectures the entries - * are useless but harmless. - */ -COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */ -COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */ -/* Little m */ -COMPATIBLE_IOCTL(MTIOCTOP) -/* Socket level stuff */ -COMPATIBLE_IOCTL(FIOQSIZE) -COMPATIBLE_IOCTL(FIOSETOWN) -COMPATIBLE_IOCTL(SIOCSPGRP) -COMPATIBLE_IOCTL(FIOGETOWN) -COMPATIBLE_IOCTL(SIOCGPGRP) -COMPATIBLE_IOCTL(SIOCATMARK) -COMPATIBLE_IOCTL(SIOCSIFLINK) -COMPATIBLE_IOCTL(SIOCSIFENCAP) -COMPATIBLE_IOCTL(SIOCGIFENCAP) -COMPATIBLE_IOCTL(SIOCSIFNAME) -COMPATIBLE_IOCTL(SIOCSARP) -COMPATIBLE_IOCTL(SIOCGARP) -COMPATIBLE_IOCTL(SIOCDARP) -COMPATIBLE_IOCTL(SIOCSRARP) -COMPATIBLE_IOCTL(SIOCGRARP) -COMPATIBLE_IOCTL(SIOCDRARP) -COMPATIBLE_IOCTL(SIOCADDDLCI) -COMPATIBLE_IOCTL(SIOCDELDLCI) -COMPATIBLE_IOCTL(SIOCGMIIPHY) -COMPATIBLE_IOCTL(SIOCGMIIREG) -COMPATIBLE_IOCTL(SIOCSMIIREG) -COMPATIBLE_IOCTL(SIOCGIFVLAN) -COMPATIBLE_IOCTL(SIOCSIFVLAN) -COMPATIBLE_IOCTL(SIOCBRADDBR) -COMPATIBLE_IOCTL(SIOCBRDELBR) -/* SG stuff */ -COMPATIBLE_IOCTL(SG_SET_TIMEOUT) -COMPATIBLE_IOCTL(SG_GET_TIMEOUT) -COMPATIBLE_IOCTL(SG_EMULATED_HOST) -ULONG_IOCTL(SG_SET_TRANSFORM) -COMPATIBLE_IOCTL(SG_GET_TRANSFORM) -COMPATIBLE_IOCTL(SG_SET_RESERVED_SIZE) -COMPATIBLE_IOCTL(SG_GET_RESERVED_SIZE) -COMPATIBLE_IOCTL(SG_GET_SCSI_ID) -COMPATIBLE_IOCTL(SG_SET_FORCE_LOW_DMA) -COMPATIBLE_IOCTL(SG_GET_LOW_DMA) -COMPATIBLE_IOCTL(SG_SET_FORCE_PACK_ID) -COMPATIBLE_IOCTL(SG_GET_PACK_ID) -COMPATIBLE_IOCTL(SG_GET_NUM_WAITING) -COMPATIBLE_IOCTL(SG_SET_DEBUG) -COMPATIBLE_IOCTL(SG_GET_SG_TABLESIZE) -COMPATIBLE_IOCTL(SG_GET_COMMAND_Q) -COMPATIBLE_IOCTL(SG_SET_COMMAND_Q) -COMPATIBLE_IOCTL(SG_GET_VERSION_NUM) -COMPATIBLE_IOCTL(SG_NEXT_CMD_LEN) -COMPATIBLE_IOCTL(SG_SCSI_RESET) -COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE) -COMPATIBLE_IOCTL(SG_SET_KEEP_ORPHAN) -COMPATIBLE_IOCTL(SG_GET_KEEP_ORPHAN) -/* PPP stuff */ -COMPATIBLE_IOCTL(PPPIOCGFLAGS) -COMPATIBLE_IOCTL(PPPIOCSFLAGS) -COMPATIBLE_IOCTL(PPPIOCGASYNCMAP) -COMPATIBLE_IOCTL(PPPIOCSASYNCMAP) -COMPATIBLE_IOCTL(PPPIOCGUNIT) -COMPATIBLE_IOCTL(PPPIOCGRASYNCMAP) -COMPATIBLE_IOCTL(PPPIOCSRASYNCMAP) -COMPATIBLE_IOCTL(PPPIOCGMRU) -COMPATIBLE_IOCTL(PPPIOCSMRU) -COMPATIBLE_IOCTL(PPPIOCSMAXCID) -COMPATIBLE_IOCTL(PPPIOCGXASYNCMAP) -COMPATIBLE_IOCTL(PPPIOCSXASYNCMAP) -COMPATIBLE_IOCTL(PPPIOCXFERUNIT) -/* PPPIOCSCOMPRESS is translated */ -COMPATIBLE_IOCTL(PPPIOCGNPMODE) -COMPATIBLE_IOCTL(PPPIOCSNPMODE) -COMPATIBLE_IOCTL(PPPIOCGDEBUG) -COMPATIBLE_IOCTL(PPPIOCSDEBUG) -/* PPPIOCSPASS is translated */ -/* PPPIOCSACTIVE is translated */ -/* PPPIOCGIDLE is translated */ -COMPATIBLE_IOCTL(PPPIOCNEWUNIT) -COMPATIBLE_IOCTL(PPPIOCATTACH) -COMPATIBLE_IOCTL(PPPIOCDETACH) -COMPATIBLE_IOCTL(PPPIOCSMRRU) -COMPATIBLE_IOCTL(PPPIOCCONNECT) -COMPATIBLE_IOCTL(PPPIOCDISCONN) -COMPATIBLE_IOCTL(PPPIOCATTCHAN) -COMPATIBLE_IOCTL(PPPIOCGCHAN) -/* PPPOX */ -COMPATIBLE_IOCTL(PPPOEIOCSFWD) -COMPATIBLE_IOCTL(PPPOEIOCDFWD) -/* LP */ -COMPATIBLE_IOCTL(LPGETSTATUS) -/* ppdev */ 
-COMPATIBLE_IOCTL(PPSETMODE) -COMPATIBLE_IOCTL(PPRSTATUS) -COMPATIBLE_IOCTL(PPRCONTROL) -COMPATIBLE_IOCTL(PPWCONTROL) -COMPATIBLE_IOCTL(PPFCONTROL) -COMPATIBLE_IOCTL(PPRDATA) -COMPATIBLE_IOCTL(PPWDATA) -COMPATIBLE_IOCTL(PPCLAIM) -COMPATIBLE_IOCTL(PPRELEASE) -COMPATIBLE_IOCTL(PPYIELD) -COMPATIBLE_IOCTL(PPEXCL) -COMPATIBLE_IOCTL(PPDATADIR) -COMPATIBLE_IOCTL(PPNEGOT) -COMPATIBLE_IOCTL(PPWCTLONIRQ) -COMPATIBLE_IOCTL(PPCLRIRQ) -COMPATIBLE_IOCTL(PPSETPHASE) -COMPATIBLE_IOCTL(PPGETMODES) -COMPATIBLE_IOCTL(PPGETMODE) -COMPATIBLE_IOCTL(PPGETPHASE) -COMPATIBLE_IOCTL(PPGETFLAGS) -COMPATIBLE_IOCTL(PPSETFLAGS) -/* CDROM stuff */ -COMPATIBLE_IOCTL(CDROMPAUSE) -COMPATIBLE_IOCTL(CDROMRESUME) -COMPATIBLE_IOCTL(CDROMPLAYMSF) -COMPATIBLE_IOCTL(CDROMPLAYTRKIND) -COMPATIBLE_IOCTL(CDROMREADTOCHDR) -COMPATIBLE_IOCTL(CDROMREADTOCENTRY) -COMPATIBLE_IOCTL(CDROMSTOP) -COMPATIBLE_IOCTL(CDROMSTART) -COMPATIBLE_IOCTL(CDROMEJECT) -COMPATIBLE_IOCTL(CDROMVOLCTRL) -COMPATIBLE_IOCTL(CDROMSUBCHNL) -ULONG_IOCTL(CDROMEJECT_SW) -COMPATIBLE_IOCTL(CDROMMULTISESSION) -COMPATIBLE_IOCTL(CDROM_GET_MCN) -COMPATIBLE_IOCTL(CDROMRESET) -COMPATIBLE_IOCTL(CDROMVOLREAD) -COMPATIBLE_IOCTL(CDROMSEEK) -COMPATIBLE_IOCTL(CDROMPLAYBLK) -COMPATIBLE_IOCTL(CDROMCLOSETRAY) -ULONG_IOCTL(CDROM_SET_OPTIONS) -ULONG_IOCTL(CDROM_CLEAR_OPTIONS) -ULONG_IOCTL(CDROM_SELECT_SPEED) -ULONG_IOCTL(CDROM_SELECT_DISC) -ULONG_IOCTL(CDROM_MEDIA_CHANGED) -ULONG_IOCTL(CDROM_DRIVE_STATUS) -COMPATIBLE_IOCTL(CDROM_DISC_STATUS) -COMPATIBLE_IOCTL(CDROM_CHANGER_NSLOTS) -ULONG_IOCTL(CDROM_LOCKDOOR) -ULONG_IOCTL(CDROM_DEBUG) -COMPATIBLE_IOCTL(CDROM_GET_CAPABILITY) -/* Ignore cdrom.h about these next 5 ioctls, they absolutely do - * not take a struct cdrom_read, instead they take a struct cdrom_msf - * which is compatible. - */ -COMPATIBLE_IOCTL(CDROMREADMODE2) -COMPATIBLE_IOCTL(CDROMREADMODE1) -COMPATIBLE_IOCTL(CDROMREADRAW) -COMPATIBLE_IOCTL(CDROMREADCOOKED) -COMPATIBLE_IOCTL(CDROMREADALL) -/* DVD ioctls */ -COMPATIBLE_IOCTL(DVD_READ_STRUCT) -COMPATIBLE_IOCTL(DVD_WRITE_STRUCT) -COMPATIBLE_IOCTL(DVD_AUTH) -/* pktcdvd */ -COMPATIBLE_IOCTL(PACKET_CTRL_CMD) -/* Big A */ -/* sparc only */ -/* Big Q for sound/OSS */ -COMPATIBLE_IOCTL(SNDCTL_SEQ_RESET) -COMPATIBLE_IOCTL(SNDCTL_SEQ_SYNC) -COMPATIBLE_IOCTL(SNDCTL_SYNTH_INFO) -COMPATIBLE_IOCTL(SNDCTL_SEQ_CTRLRATE) -COMPATIBLE_IOCTL(SNDCTL_SEQ_GETOUTCOUNT) -COMPATIBLE_IOCTL(SNDCTL_SEQ_GETINCOUNT) -COMPATIBLE_IOCTL(SNDCTL_SEQ_PERCMODE) -COMPATIBLE_IOCTL(SNDCTL_FM_LOAD_INSTR) -COMPATIBLE_IOCTL(SNDCTL_SEQ_TESTMIDI) -COMPATIBLE_IOCTL(SNDCTL_SEQ_RESETSAMPLES) -COMPATIBLE_IOCTL(SNDCTL_SEQ_NRSYNTHS) -COMPATIBLE_IOCTL(SNDCTL_SEQ_NRMIDIS) -COMPATIBLE_IOCTL(SNDCTL_MIDI_INFO) -COMPATIBLE_IOCTL(SNDCTL_SEQ_THRESHOLD) -COMPATIBLE_IOCTL(SNDCTL_SYNTH_MEMAVL) -COMPATIBLE_IOCTL(SNDCTL_FM_4OP_ENABLE) -COMPATIBLE_IOCTL(SNDCTL_SEQ_PANIC) -COMPATIBLE_IOCTL(SNDCTL_SEQ_OUTOFBAND) -COMPATIBLE_IOCTL(SNDCTL_SEQ_GETTIME) -COMPATIBLE_IOCTL(SNDCTL_SYNTH_ID) -COMPATIBLE_IOCTL(SNDCTL_SYNTH_CONTROL) -COMPATIBLE_IOCTL(SNDCTL_SYNTH_REMOVESAMPLE) -/* Big T for sound/OSS */ -COMPATIBLE_IOCTL(SNDCTL_TMR_TIMEBASE) -COMPATIBLE_IOCTL(SNDCTL_TMR_START) -COMPATIBLE_IOCTL(SNDCTL_TMR_STOP) -COMPATIBLE_IOCTL(SNDCTL_TMR_CONTINUE) -COMPATIBLE_IOCTL(SNDCTL_TMR_TEMPO) -COMPATIBLE_IOCTL(SNDCTL_TMR_SOURCE) -COMPATIBLE_IOCTL(SNDCTL_TMR_METRONOME) -COMPATIBLE_IOCTL(SNDCTL_TMR_SELECT) -/* Little m for sound/OSS */ -COMPATIBLE_IOCTL(SNDCTL_MIDI_PRETIME) -COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUMODE) -COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUCMD) -/* Big P for sound/OSS */ -COMPATIBLE_IOCTL(SNDCTL_DSP_RESET) 
-COMPATIBLE_IOCTL(SNDCTL_DSP_SYNC) -COMPATIBLE_IOCTL(SNDCTL_DSP_SPEED) -COMPATIBLE_IOCTL(SNDCTL_DSP_STEREO) -COMPATIBLE_IOCTL(SNDCTL_DSP_GETBLKSIZE) -COMPATIBLE_IOCTL(SNDCTL_DSP_CHANNELS) -COMPATIBLE_IOCTL(SOUND_PCM_WRITE_FILTER) -COMPATIBLE_IOCTL(SNDCTL_DSP_POST) -COMPATIBLE_IOCTL(SNDCTL_DSP_SUBDIVIDE) -COMPATIBLE_IOCTL(SNDCTL_DSP_SETFRAGMENT) -COMPATIBLE_IOCTL(SNDCTL_DSP_GETFMTS) -COMPATIBLE_IOCTL(SNDCTL_DSP_SETFMT) -COMPATIBLE_IOCTL(SNDCTL_DSP_GETOSPACE) -COMPATIBLE_IOCTL(SNDCTL_DSP_GETISPACE) -COMPATIBLE_IOCTL(SNDCTL_DSP_NONBLOCK) -COMPATIBLE_IOCTL(SNDCTL_DSP_GETCAPS) -COMPATIBLE_IOCTL(SNDCTL_DSP_GETTRIGGER) -COMPATIBLE_IOCTL(SNDCTL_DSP_SETTRIGGER) -COMPATIBLE_IOCTL(SNDCTL_DSP_GETIPTR) -COMPATIBLE_IOCTL(SNDCTL_DSP_GETOPTR) -/* SNDCTL_DSP_MAPINBUF, XXX needs translation */ -/* SNDCTL_DSP_MAPOUTBUF, XXX needs translation */ -COMPATIBLE_IOCTL(SNDCTL_DSP_SETSYNCRO) -COMPATIBLE_IOCTL(SNDCTL_DSP_SETDUPLEX) -COMPATIBLE_IOCTL(SNDCTL_DSP_GETODELAY) -COMPATIBLE_IOCTL(SNDCTL_DSP_PROFILE) -COMPATIBLE_IOCTL(SOUND_PCM_READ_RATE) -COMPATIBLE_IOCTL(SOUND_PCM_READ_CHANNELS) -COMPATIBLE_IOCTL(SOUND_PCM_READ_BITS) -COMPATIBLE_IOCTL(SOUND_PCM_READ_FILTER) -/* Big C for sound/OSS */ -COMPATIBLE_IOCTL(SNDCTL_COPR_RESET) -COMPATIBLE_IOCTL(SNDCTL_COPR_LOAD) -COMPATIBLE_IOCTL(SNDCTL_COPR_RDATA) -COMPATIBLE_IOCTL(SNDCTL_COPR_RCODE) -COMPATIBLE_IOCTL(SNDCTL_COPR_WDATA) -COMPATIBLE_IOCTL(SNDCTL_COPR_WCODE) -COMPATIBLE_IOCTL(SNDCTL_COPR_RUN) -COMPATIBLE_IOCTL(SNDCTL_COPR_HALT) -COMPATIBLE_IOCTL(SNDCTL_COPR_SENDMSG) -COMPATIBLE_IOCTL(SNDCTL_COPR_RCVMSG) -/* Big M for sound/OSS */ -COMPATIBLE_IOCTL(SOUND_MIXER_READ_VOLUME) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_BASS) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_TREBLE) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_SYNTH) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_PCM) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_SPEAKER) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_MIC) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_CD) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_IMIX) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_ALTPCM) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECLEV) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_IGAIN) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_OGAIN) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE1) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE2) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE3) -COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL1)) -COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL2)) -COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL3)) -COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEIN)) -COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEOUT)) -COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_VIDEO)) -COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_RADIO)) -COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_MONITOR)) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_MUTE) -/* SOUND_MIXER_READ_ENHANCE, same value as READ_MUTE */ -/* SOUND_MIXER_READ_LOUD, same value as READ_MUTE */ -COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECSRC) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_DEVMASK) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECMASK) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_STEREODEVS) -COMPATIBLE_IOCTL(SOUND_MIXER_READ_CAPS) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_VOLUME) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_BASS) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_TREBLE) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SYNTH) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_PCM) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SPEAKER) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MIC) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_CD) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IMIX) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_ALTPCM) 
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECLEV) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IGAIN) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_OGAIN) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE1) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE2) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE3) -COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL1)) -COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL2)) -COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL3)) -COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEIN)) -COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEOUT)) -COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_VIDEO)) -COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_RADIO)) -COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_MONITOR)) -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MUTE) -/* SOUND_MIXER_WRITE_ENHANCE, same value as WRITE_MUTE */ -/* SOUND_MIXER_WRITE_LOUD, same value as WRITE_MUTE */ -COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECSRC) -COMPATIBLE_IOCTL(SOUND_MIXER_INFO) -COMPATIBLE_IOCTL(SOUND_OLD_MIXER_INFO) -COMPATIBLE_IOCTL(SOUND_MIXER_ACCESS) -COMPATIBLE_IOCTL(SOUND_MIXER_AGC) -COMPATIBLE_IOCTL(SOUND_MIXER_3DSE) -COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE1) -COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE2) -COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE3) -COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE4) -COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5) -COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS) -COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS) -COMPATIBLE_IOCTL(OSS_GETVERSION) -/* AUTOFS */ -ULONG_IOCTL(AUTOFS_IOC_READY) -ULONG_IOCTL(AUTOFS_IOC_FAIL) -COMPATIBLE_IOCTL(AUTOFS_IOC_CATATONIC) -COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER) -COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE) -COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI) -COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOSUBVER) -COMPATIBLE_IOCTL(AUTOFS_IOC_ASKREGHOST) -COMPATIBLE_IOCTL(AUTOFS_IOC_TOGGLEREGHOST) -COMPATIBLE_IOCTL(AUTOFS_IOC_ASKUMOUNT) -/* Raw devices */ -COMPATIBLE_IOCTL(RAW_SETBIND) -COMPATIBLE_IOCTL(RAW_GETBIND) -/* SMB ioctls which do not need any translations */ -COMPATIBLE_IOCTL(SMB_IOC_NEWCONN) -/* Little a */ -COMPATIBLE_IOCTL(ATMSIGD_CTRL) -COMPATIBLE_IOCTL(ATMARPD_CTRL) -COMPATIBLE_IOCTL(ATMLEC_CTRL) -COMPATIBLE_IOCTL(ATMLEC_MCAST) -COMPATIBLE_IOCTL(ATMLEC_DATA) -COMPATIBLE_IOCTL(ATM_SETSC) -COMPATIBLE_IOCTL(SIOCSIFATMTCP) -COMPATIBLE_IOCTL(SIOCMKCLIP) -COMPATIBLE_IOCTL(ATMARP_MKIP) -COMPATIBLE_IOCTL(ATMARP_SETENTRY) -COMPATIBLE_IOCTL(ATMARP_ENCAP) -COMPATIBLE_IOCTL(ATMTCP_CREATE) -COMPATIBLE_IOCTL(ATMTCP_REMOVE) -COMPATIBLE_IOCTL(ATMMPC_CTRL) -COMPATIBLE_IOCTL(ATMMPC_DATA) -/* Watchdog */ -COMPATIBLE_IOCTL(WDIOC_GETSUPPORT) -COMPATIBLE_IOCTL(WDIOC_GETSTATUS) -COMPATIBLE_IOCTL(WDIOC_GETBOOTSTATUS) -COMPATIBLE_IOCTL(WDIOC_GETTEMP) -COMPATIBLE_IOCTL(WDIOC_SETOPTIONS) -COMPATIBLE_IOCTL(WDIOC_KEEPALIVE) -COMPATIBLE_IOCTL(WDIOC_SETTIMEOUT) -COMPATIBLE_IOCTL(WDIOC_GETTIMEOUT) -/* Big R */ -COMPATIBLE_IOCTL(RNDGETENTCNT) -COMPATIBLE_IOCTL(RNDADDTOENTCNT) -COMPATIBLE_IOCTL(RNDGETPOOL) -COMPATIBLE_IOCTL(RNDADDENTROPY) -COMPATIBLE_IOCTL(RNDZAPENTCNT) -COMPATIBLE_IOCTL(RNDCLEARPOOL) -/* Bluetooth */ -COMPATIBLE_IOCTL(HCIDEVUP) -COMPATIBLE_IOCTL(HCIDEVDOWN) -COMPATIBLE_IOCTL(HCIDEVRESET) -COMPATIBLE_IOCTL(HCIDEVRESTAT) -COMPATIBLE_IOCTL(HCIGETDEVLIST) -COMPATIBLE_IOCTL(HCIGETDEVINFO) -COMPATIBLE_IOCTL(HCIGETCONNLIST) -COMPATIBLE_IOCTL(HCIGETCONNINFO) -COMPATIBLE_IOCTL(HCISETRAW) -COMPATIBLE_IOCTL(HCISETSCAN) -COMPATIBLE_IOCTL(HCISETAUTH) -COMPATIBLE_IOCTL(HCISETENCRYPT) -COMPATIBLE_IOCTL(HCISETPTYPE) -COMPATIBLE_IOCTL(HCISETLINKPOL) -COMPATIBLE_IOCTL(HCISETLINKMODE) -COMPATIBLE_IOCTL(HCISETACLMTU) -COMPATIBLE_IOCTL(HCISETSCOMTU) -COMPATIBLE_IOCTL(HCIINQUIRY) 
-COMPATIBLE_IOCTL(HCIUARTSETPROTO) -COMPATIBLE_IOCTL(HCIUARTGETPROTO) -COMPATIBLE_IOCTL(RFCOMMCREATEDEV) -COMPATIBLE_IOCTL(RFCOMMRELEASEDEV) -COMPATIBLE_IOCTL(RFCOMMGETDEVLIST) -COMPATIBLE_IOCTL(RFCOMMGETDEVINFO) -COMPATIBLE_IOCTL(RFCOMMSTEALDLC) -COMPATIBLE_IOCTL(BNEPCONNADD) -COMPATIBLE_IOCTL(BNEPCONNDEL) -COMPATIBLE_IOCTL(BNEPGETCONNLIST) -COMPATIBLE_IOCTL(BNEPGETCONNINFO) -COMPATIBLE_IOCTL(CMTPCONNADD) -COMPATIBLE_IOCTL(CMTPCONNDEL) -COMPATIBLE_IOCTL(CMTPGETCONNLIST) -COMPATIBLE_IOCTL(CMTPGETCONNINFO) -COMPATIBLE_IOCTL(HIDPCONNADD) -COMPATIBLE_IOCTL(HIDPCONNDEL) -COMPATIBLE_IOCTL(HIDPGETCONNLIST) -COMPATIBLE_IOCTL(HIDPGETCONNINFO) -/* CAPI */ -COMPATIBLE_IOCTL(CAPI_REGISTER) -COMPATIBLE_IOCTL(CAPI_GET_MANUFACTURER) -COMPATIBLE_IOCTL(CAPI_GET_VERSION) -COMPATIBLE_IOCTL(CAPI_GET_SERIAL) -COMPATIBLE_IOCTL(CAPI_GET_PROFILE) -COMPATIBLE_IOCTL(CAPI_MANUFACTURER_CMD) -COMPATIBLE_IOCTL(CAPI_GET_ERRCODE) -COMPATIBLE_IOCTL(CAPI_INSTALLED) -COMPATIBLE_IOCTL(CAPI_GET_FLAGS) -COMPATIBLE_IOCTL(CAPI_SET_FLAGS) -COMPATIBLE_IOCTL(CAPI_CLR_FLAGS) -COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT) -COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT) -/* Siemens Gigaset */ -COMPATIBLE_IOCTL(GIGASET_REDIR) -COMPATIBLE_IOCTL(GIGASET_CONFIG) -COMPATIBLE_IOCTL(GIGASET_BRKCHARS) -COMPATIBLE_IOCTL(GIGASET_VERSION) -/* Misc. */ -COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */ -COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */ -COMPATIBLE_IOCTL(PCIIOC_CONTROLLER) -COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_IO) -COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_MEM) -COMPATIBLE_IOCTL(PCIIOC_WRITE_COMBINE) -/* USB */ -COMPATIBLE_IOCTL(USBDEVFS_RESETEP) -COMPATIBLE_IOCTL(USBDEVFS_SETINTERFACE) -COMPATIBLE_IOCTL(USBDEVFS_SETCONFIGURATION) -COMPATIBLE_IOCTL(USBDEVFS_GETDRIVER) -COMPATIBLE_IOCTL(USBDEVFS_DISCARDURB) -COMPATIBLE_IOCTL(USBDEVFS_CLAIMINTERFACE) -COMPATIBLE_IOCTL(USBDEVFS_RELEASEINTERFACE) -COMPATIBLE_IOCTL(USBDEVFS_CONNECTINFO) -COMPATIBLE_IOCTL(USBDEVFS_HUB_PORTINFO) -COMPATIBLE_IOCTL(USBDEVFS_RESET) -COMPATIBLE_IOCTL(USBDEVFS_SUBMITURB32) -COMPATIBLE_IOCTL(USBDEVFS_REAPURB32) -COMPATIBLE_IOCTL(USBDEVFS_REAPURBNDELAY32) -COMPATIBLE_IOCTL(USBDEVFS_CLEAR_HALT) -/* MTD */ -COMPATIBLE_IOCTL(MEMGETINFO) -COMPATIBLE_IOCTL(MEMERASE) -COMPATIBLE_IOCTL(MEMLOCK) -COMPATIBLE_IOCTL(MEMUNLOCK) -COMPATIBLE_IOCTL(MEMGETREGIONCOUNT) -COMPATIBLE_IOCTL(MEMGETREGIONINFO) -COMPATIBLE_IOCTL(MEMGETBADBLOCK) -COMPATIBLE_IOCTL(MEMSETBADBLOCK) -/* NBD */ -ULONG_IOCTL(NBD_SET_SOCK) -ULONG_IOCTL(NBD_SET_BLKSIZE) -ULONG_IOCTL(NBD_SET_SIZE) -COMPATIBLE_IOCTL(NBD_DO_IT) -COMPATIBLE_IOCTL(NBD_CLEAR_SOCK) -COMPATIBLE_IOCTL(NBD_CLEAR_QUE) -COMPATIBLE_IOCTL(NBD_PRINT_DEBUG) -ULONG_IOCTL(NBD_SET_SIZE_BLOCKS) -COMPATIBLE_IOCTL(NBD_DISCONNECT) -/* i2c */ -COMPATIBLE_IOCTL(I2C_SLAVE) -COMPATIBLE_IOCTL(I2C_SLAVE_FORCE) -COMPATIBLE_IOCTL(I2C_TENBIT) -COMPATIBLE_IOCTL(I2C_PEC) -COMPATIBLE_IOCTL(I2C_RETRIES) -COMPATIBLE_IOCTL(I2C_TIMEOUT) -/* wireless */ -COMPATIBLE_IOCTL(SIOCSIWCOMMIT) -COMPATIBLE_IOCTL(SIOCGIWNAME) -COMPATIBLE_IOCTL(SIOCSIWNWID) -COMPATIBLE_IOCTL(SIOCGIWNWID) -COMPATIBLE_IOCTL(SIOCSIWFREQ) -COMPATIBLE_IOCTL(SIOCGIWFREQ) -COMPATIBLE_IOCTL(SIOCSIWMODE) -COMPATIBLE_IOCTL(SIOCGIWMODE) -COMPATIBLE_IOCTL(SIOCSIWSENS) -COMPATIBLE_IOCTL(SIOCGIWSENS) -COMPATIBLE_IOCTL(SIOCSIWRANGE) -COMPATIBLE_IOCTL(SIOCSIWPRIV) -COMPATIBLE_IOCTL(SIOCGIWPRIV) -COMPATIBLE_IOCTL(SIOCSIWSTATS) -COMPATIBLE_IOCTL(SIOCGIWSTATS) -COMPATIBLE_IOCTL(SIOCSIWAP) -COMPATIBLE_IOCTL(SIOCGIWAP) -COMPATIBLE_IOCTL(SIOCSIWSCAN) -COMPATIBLE_IOCTL(SIOCSIWRATE) -COMPATIBLE_IOCTL(SIOCGIWRATE) -COMPATIBLE_IOCTL(SIOCSIWRTS) 
-COMPATIBLE_IOCTL(SIOCGIWRTS) -COMPATIBLE_IOCTL(SIOCSIWFRAG) -COMPATIBLE_IOCTL(SIOCGIWFRAG) -COMPATIBLE_IOCTL(SIOCSIWTXPOW) -COMPATIBLE_IOCTL(SIOCGIWTXPOW) -COMPATIBLE_IOCTL(SIOCSIWRETRY) -COMPATIBLE_IOCTL(SIOCGIWRETRY) -COMPATIBLE_IOCTL(SIOCSIWPOWER) -COMPATIBLE_IOCTL(SIOCGIWPOWER) -/* hiddev */ -COMPATIBLE_IOCTL(HIDIOCGVERSION) -COMPATIBLE_IOCTL(HIDIOCAPPLICATION) -COMPATIBLE_IOCTL(HIDIOCGDEVINFO) -COMPATIBLE_IOCTL(HIDIOCGSTRING) -COMPATIBLE_IOCTL(HIDIOCINITREPORT) -COMPATIBLE_IOCTL(HIDIOCGREPORT) -COMPATIBLE_IOCTL(HIDIOCSREPORT) -COMPATIBLE_IOCTL(HIDIOCGREPORTINFO) -COMPATIBLE_IOCTL(HIDIOCGFIELDINFO) -COMPATIBLE_IOCTL(HIDIOCGUSAGE) -COMPATIBLE_IOCTL(HIDIOCSUSAGE) -COMPATIBLE_IOCTL(HIDIOCGUCODE) -COMPATIBLE_IOCTL(HIDIOCGFLAG) -COMPATIBLE_IOCTL(HIDIOCSFLAG) -COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINDEX) -COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINFO) -/* dvb */ -COMPATIBLE_IOCTL(AUDIO_STOP) -COMPATIBLE_IOCTL(AUDIO_PLAY) -COMPATIBLE_IOCTL(AUDIO_PAUSE) -COMPATIBLE_IOCTL(AUDIO_CONTINUE) -COMPATIBLE_IOCTL(AUDIO_SELECT_SOURCE) -COMPATIBLE_IOCTL(AUDIO_SET_MUTE) -COMPATIBLE_IOCTL(AUDIO_SET_AV_SYNC) -COMPATIBLE_IOCTL(AUDIO_SET_BYPASS_MODE) -COMPATIBLE_IOCTL(AUDIO_CHANNEL_SELECT) -COMPATIBLE_IOCTL(AUDIO_GET_STATUS) -COMPATIBLE_IOCTL(AUDIO_GET_CAPABILITIES) -COMPATIBLE_IOCTL(AUDIO_CLEAR_BUFFER) -COMPATIBLE_IOCTL(AUDIO_SET_ID) -COMPATIBLE_IOCTL(AUDIO_SET_MIXER) -COMPATIBLE_IOCTL(AUDIO_SET_STREAMTYPE) -COMPATIBLE_IOCTL(AUDIO_SET_EXT_ID) -COMPATIBLE_IOCTL(AUDIO_SET_ATTRIBUTES) -COMPATIBLE_IOCTL(AUDIO_SET_KARAOKE) -COMPATIBLE_IOCTL(DMX_START) -COMPATIBLE_IOCTL(DMX_STOP) -COMPATIBLE_IOCTL(DMX_SET_FILTER) -COMPATIBLE_IOCTL(DMX_SET_PES_FILTER) -COMPATIBLE_IOCTL(DMX_SET_BUFFER_SIZE) -COMPATIBLE_IOCTL(DMX_GET_PES_PIDS) -COMPATIBLE_IOCTL(DMX_GET_CAPS) -COMPATIBLE_IOCTL(DMX_SET_SOURCE) -COMPATIBLE_IOCTL(DMX_GET_STC) -COMPATIBLE_IOCTL(FE_GET_INFO) -COMPATIBLE_IOCTL(FE_DISEQC_RESET_OVERLOAD) -COMPATIBLE_IOCTL(FE_DISEQC_SEND_MASTER_CMD) -COMPATIBLE_IOCTL(FE_DISEQC_RECV_SLAVE_REPLY) -COMPATIBLE_IOCTL(FE_DISEQC_SEND_BURST) -COMPATIBLE_IOCTL(FE_SET_TONE) -COMPATIBLE_IOCTL(FE_SET_VOLTAGE) -COMPATIBLE_IOCTL(FE_ENABLE_HIGH_LNB_VOLTAGE) -COMPATIBLE_IOCTL(FE_READ_STATUS) -COMPATIBLE_IOCTL(FE_READ_BER) -COMPATIBLE_IOCTL(FE_READ_SIGNAL_STRENGTH) -COMPATIBLE_IOCTL(FE_READ_SNR) -COMPATIBLE_IOCTL(FE_READ_UNCORRECTED_BLOCKS) -COMPATIBLE_IOCTL(FE_SET_FRONTEND) -COMPATIBLE_IOCTL(FE_GET_FRONTEND) -COMPATIBLE_IOCTL(FE_GET_EVENT) -COMPATIBLE_IOCTL(FE_DISHNETWORK_SEND_LEGACY_CMD) -COMPATIBLE_IOCTL(VIDEO_STOP) -COMPATIBLE_IOCTL(VIDEO_PLAY) -COMPATIBLE_IOCTL(VIDEO_FREEZE) -COMPATIBLE_IOCTL(VIDEO_CONTINUE) -COMPATIBLE_IOCTL(VIDEO_SELECT_SOURCE) -COMPATIBLE_IOCTL(VIDEO_SET_BLANK) -COMPATIBLE_IOCTL(VIDEO_GET_STATUS) -COMPATIBLE_IOCTL(VIDEO_SET_DISPLAY_FORMAT) -COMPATIBLE_IOCTL(VIDEO_FAST_FORWARD) -COMPATIBLE_IOCTL(VIDEO_SLOWMOTION) -COMPATIBLE_IOCTL(VIDEO_GET_CAPABILITIES) -COMPATIBLE_IOCTL(VIDEO_CLEAR_BUFFER) -COMPATIBLE_IOCTL(VIDEO_SET_ID) -COMPATIBLE_IOCTL(VIDEO_SET_STREAMTYPE) -COMPATIBLE_IOCTL(VIDEO_SET_FORMAT) -COMPATIBLE_IOCTL(VIDEO_SET_SYSTEM) -COMPATIBLE_IOCTL(VIDEO_SET_HIGHLIGHT) -COMPATIBLE_IOCTL(VIDEO_SET_SPU) -COMPATIBLE_IOCTL(VIDEO_GET_NAVI) -COMPATIBLE_IOCTL(VIDEO_SET_ATTRIBUTES) -COMPATIBLE_IOCTL(VIDEO_GET_SIZE) -COMPATIBLE_IOCTL(VIDEO_GET_FRAME_RATE) -- cgit v1.2.3 From e729aa16b168fb202d1a20f936028cb7c2a0278d Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Tue, 8 May 2007 00:29:13 -0700 Subject: Pad irq_desc to internode cacheline size We noticed a drop in n/w performance due to the irq_desc being cacheline aligned 
rather than internode aligned. We see 50% of expected performance when two e1000 nics local to two different nodes have consecutive irq descriptors allocated, due to false sharing. Note that this patch does away with cacheline padding for the UP case, as it does not seem useful for UP configurations. Signed-off-by: Ravikiran Thirumalai Signed-off-by: Shai Fultheim Cc: "Siddha, Suresh B" Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/irq.h | 4 +--- kernel/irq/handle.c | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index a6899402b522..1695054e8c63 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -147,8 +147,6 @@ struct irq_chip { * @dir: /proc/irq/ procfs entry * @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP * @name: flow handler name for /proc/interrupts output - * - * Pad this out to 32 bytes for cache and indexing reasons. */ struct irq_desc { irq_flow_handler_t handle_irq; @@ -175,7 +173,7 @@ struct irq_desc { struct proc_dir_entry *dir; #endif const char *name; -} ____cacheline_aligned; +} ____cacheline_internodealigned_in_smp; extern struct irq_desc irq_desc[NR_IRQS]; diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index aff1f0fabb0d..515ad40bde15 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -48,7 +48,7 @@ handle_bad_irq(unsigned int irq, struct irq_desc *desc) * * Controller mappings for all interrupt sources: */ -struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned = { +struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { [0 ... NR_IRQS-1] = { .status = IRQ_DISABLED, .chip = &no_irq_chip, -- cgit v1.2.3 From 6272e2667965dfb5b59199f462cd0f001fb304a6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 8 May 2007 00:29:21 -0700 Subject: cleanup compat ioctl handling Merge all compat ioctl handling into compat_ioctl.c instead of splitting it over compat.c and compat_ioctl.c. 
This also allows to get rid of ioctl32.h Signed-off-by: Christoph Hellwig Looks-good-to: Andi Kleen Acked-by: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/compat.c | 162 +-------------------------------------------- fs/compat_ioctl.c | 170 ++++++++++++++++++++++++++++++++++++++++++++++-- fs/internal.h | 10 --- include/linux/ioctl32.h | 17 ----- include/net/sock.h | 9 --- 5 files changed, 166 insertions(+), 202 deletions(-) delete mode 100644 include/linux/ioctl32.h (limited to 'include/linux') diff --git a/fs/compat.c b/fs/compat.c index a32e2ae737fb..9f6248dfd9d9 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -25,10 +25,8 @@ #include #include #include -#include #include #include -#include /* for SIOCDEVPRIVATE */ #include #include #include @@ -46,13 +44,12 @@ #include #include #include +#include #include #include #include #include -#include /* siocdevprivate_ioctl */ - #include #include #include @@ -313,163 +310,6 @@ out: return error; } -/* ioctl32 stuff, used by sparc64, parisc, s390x, ppc64, x86_64, MIPS */ - -#define IOCTL_HASHSIZE 256 -static struct ioctl_trans *ioctl32_hash_table[IOCTL_HASHSIZE]; - -static inline unsigned long ioctl32_hash(unsigned long cmd) -{ - return (((cmd >> 6) ^ (cmd >> 4) ^ cmd)) % IOCTL_HASHSIZE; -} - -static void ioctl32_insert_translation(struct ioctl_trans *trans) -{ - unsigned long hash; - struct ioctl_trans *t; - - hash = ioctl32_hash (trans->cmd); - if (!ioctl32_hash_table[hash]) - ioctl32_hash_table[hash] = trans; - else { - t = ioctl32_hash_table[hash]; - while (t->next) - t = t->next; - trans->next = NULL; - t->next = trans; - } -} - -static int __init init_sys32_ioctl(void) -{ - int i; - - for (i = 0; i < ioctl_table_size; i++) { - if (ioctl_start[i].next != 0) { - printk("ioctl translation %d bad\n",i); - return -1; - } - - ioctl32_insert_translation(&ioctl_start[i]); - } - return 0; -} - -__initcall(init_sys32_ioctl); - -static void compat_ioctl_error(struct file *filp, unsigned int fd, - unsigned int cmd, unsigned long arg) -{ - char buf[10]; - char *fn = "?"; - char *path; - - /* find the name of the device. */ - path = (char *)__get_free_page(GFP_KERNEL); - if (path) { - fn = d_path(filp->f_path.dentry, filp->f_path.mnt, path, PAGE_SIZE); - if (IS_ERR(fn)) - fn = "?"; - } - - sprintf(buf,"'%c'", (cmd>>_IOC_TYPESHIFT) & _IOC_TYPEMASK); - if (!isprint(buf[1])) - sprintf(buf, "%02x", buf[1]); - compat_printk("ioctl32(%s:%d): Unknown cmd fd(%d) " - "cmd(%08x){t:%s;sz:%u} arg(%08x) on %s\n", - current->comm, current->pid, - (int)fd, (unsigned int)cmd, buf, - (cmd >> _IOC_SIZESHIFT) & _IOC_SIZEMASK, - (unsigned int)arg, fn); - - if (path) - free_page((unsigned long)path); -} - -asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, - unsigned long arg) -{ - struct file *filp; - int error = -EBADF; - struct ioctl_trans *t; - int fput_needed; - - filp = fget_light(fd, &fput_needed); - if (!filp) - goto out; - - /* RED-PEN how should LSM module know it's handling 32bit? */ - error = security_file_ioctl(filp, cmd, arg); - if (error) - goto out_fput; - - /* - * To allow the compat_ioctl handlers to be self contained - * we need to check the common ioctls here first. - * Just handle them with the standard handlers below. 
- */ - switch (cmd) { - case FIOCLEX: - case FIONCLEX: - case FIONBIO: - case FIOASYNC: - case FIOQSIZE: - break; - - case FIBMAP: - case FIGETBSZ: - case FIONREAD: - if (S_ISREG(filp->f_path.dentry->d_inode->i_mode)) - break; - /*FALL THROUGH*/ - - default: - if (filp->f_op && filp->f_op->compat_ioctl) { - error = filp->f_op->compat_ioctl(filp, cmd, arg); - if (error != -ENOIOCTLCMD) - goto out_fput; - } - - if (!filp->f_op || - (!filp->f_op->ioctl && !filp->f_op->unlocked_ioctl)) - goto do_ioctl; - break; - } - - for (t = ioctl32_hash_table[ioctl32_hash(cmd)]; t; t = t->next) { - if (t->cmd == cmd) - goto found_handler; - } - - if (S_ISSOCK(filp->f_path.dentry->d_inode->i_mode) && - cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { - error = siocdevprivate_ioctl(fd, cmd, arg); - } else { - static int count; - - if (++count <= 50) - compat_ioctl_error(filp, fd, cmd, arg); - error = -EINVAL; - } - - goto out_fput; - - found_handler: - if (t->handler) { - lock_kernel(); - error = t->handler(fd, cmd, arg, filp); - unlock_kernel(); - goto out_fput; - } - - do_ioctl: - error = vfs_ioctl(filp, fd, cmd, arg); - out_fput: - fput_light(filp, fput_needed); - out: - return error; -} - static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl) { if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) || diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index a0c3d2044e84..6972d242fbdf 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -57,7 +57,6 @@ #include #include #include -#include #include #include #include @@ -65,7 +64,6 @@ #include #include -#include /* siocdevprivate_ioctl */ #include #include #include @@ -474,7 +472,7 @@ static int bond_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) }; } -int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) +static int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) { struct ifreq __user *u_ifreq64; struct ifreq32 __user *u_ifreq32 = compat_ptr(arg); @@ -2384,6 +2382,16 @@ lp_timeout_trans(unsigned int fd, unsigned int cmd, unsigned long arg) return sys_ioctl(fd, cmd, (unsigned long)tn); } + +typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int, + unsigned long, struct file *); + +struct ioctl_trans { + unsigned long cmd; + ioctl_trans_handler_t handler; + struct ioctl_trans *next; +}; + #define HANDLE_IOCTL(cmd,handler) \ { (cmd), (ioctl_trans_handler_t)(handler) }, @@ -2404,7 +2412,7 @@ lp_timeout_trans(unsigned int fd, unsigned int cmd, unsigned long arg) Most other reasons are not valid. */ #define IGNORE_IOCTL(cmd) COMPATIBLE_IOCTL(cmd) -struct ioctl_trans ioctl_start[] = { +static struct ioctl_trans ioctl_start[] = { /* compatible ioctls first */ COMPATIBLE_IOCTL(0x4B50) /* KDGHWCLK - not in the kernel, but don't complain */ COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain */ @@ -3464,4 +3472,156 @@ IGNORE_IOCTL(VFAT_IOCTL_READDIR_BOTH32) IGNORE_IOCTL(VFAT_IOCTL_READDIR_SHORT32) }; -int ioctl_table_size = ARRAY_SIZE(ioctl_start); +#define IOCTL_HASHSIZE 256 +static struct ioctl_trans *ioctl32_hash_table[IOCTL_HASHSIZE]; + +static inline unsigned long ioctl32_hash(unsigned long cmd) +{ + return (((cmd >> 6) ^ (cmd >> 4) ^ cmd)) % IOCTL_HASHSIZE; +} + +static void compat_ioctl_error(struct file *filp, unsigned int fd, + unsigned int cmd, unsigned long arg) +{ + char buf[10]; + char *fn = "?"; + char *path; + + /* find the name of the device. 
*/ + path = (char *)__get_free_page(GFP_KERNEL); + if (path) { + fn = d_path(filp->f_path.dentry, filp->f_path.mnt, path, PAGE_SIZE); + if (IS_ERR(fn)) + fn = "?"; + } + + sprintf(buf,"'%c'", (cmd>>_IOC_TYPESHIFT) & _IOC_TYPEMASK); + if (!isprint(buf[1])) + sprintf(buf, "%02x", buf[1]); + compat_printk("ioctl32(%s:%d): Unknown cmd fd(%d) " + "cmd(%08x){t:%s;sz:%u} arg(%08x) on %s\n", + current->comm, current->pid, + (int)fd, (unsigned int)cmd, buf, + (cmd >> _IOC_SIZESHIFT) & _IOC_SIZEMASK, + (unsigned int)arg, fn); + + if (path) + free_page((unsigned long)path); +} + +asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, + unsigned long arg) +{ + struct file *filp; + int error = -EBADF; + struct ioctl_trans *t; + int fput_needed; + + filp = fget_light(fd, &fput_needed); + if (!filp) + goto out; + + /* RED-PEN how should LSM module know it's handling 32bit? */ + error = security_file_ioctl(filp, cmd, arg); + if (error) + goto out_fput; + + /* + * To allow the compat_ioctl handlers to be self contained + * we need to check the common ioctls here first. + * Just handle them with the standard handlers below. + */ + switch (cmd) { + case FIOCLEX: + case FIONCLEX: + case FIONBIO: + case FIOASYNC: + case FIOQSIZE: + break; + + case FIBMAP: + case FIGETBSZ: + case FIONREAD: + if (S_ISREG(filp->f_path.dentry->d_inode->i_mode)) + break; + /*FALL THROUGH*/ + + default: + if (filp->f_op && filp->f_op->compat_ioctl) { + error = filp->f_op->compat_ioctl(filp, cmd, arg); + if (error != -ENOIOCTLCMD) + goto out_fput; + } + + if (!filp->f_op || + (!filp->f_op->ioctl && !filp->f_op->unlocked_ioctl)) + goto do_ioctl; + break; + } + + for (t = ioctl32_hash_table[ioctl32_hash(cmd)]; t; t = t->next) { + if (t->cmd == cmd) + goto found_handler; + } + + if (S_ISSOCK(filp->f_path.dentry->d_inode->i_mode) && + cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { + error = siocdevprivate_ioctl(fd, cmd, arg); + } else { + static int count; + + if (++count <= 50) + compat_ioctl_error(filp, fd, cmd, arg); + error = -EINVAL; + } + + goto out_fput; + + found_handler: + if (t->handler) { + lock_kernel(); + error = t->handler(fd, cmd, arg, filp); + unlock_kernel(); + goto out_fput; + } + + do_ioctl: + error = vfs_ioctl(filp, fd, cmd, arg); + out_fput: + fput_light(filp, fput_needed); + out: + return error; +} + +static void ioctl32_insert_translation(struct ioctl_trans *trans) +{ + unsigned long hash; + struct ioctl_trans *t; + + hash = ioctl32_hash (trans->cmd); + if (!ioctl32_hash_table[hash]) + ioctl32_hash_table[hash] = trans; + else { + t = ioctl32_hash_table[hash]; + while (t->next) + t = t->next; + trans->next = NULL; + t->next = trans; + } +} + +static int __init init_sys32_ioctl(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ioctl_start); i++) { + if (ioctl_start[i].next != 0) { + printk("ioctl translation %d bad\n",i); + return -1; + } + + ioctl32_insert_translation(&ioctl_start[i]); + } + return 0; +} +__initcall(init_sys32_ioctl); diff --git a/fs/internal.h b/fs/internal.h index ea00126c9a59..392e8ccd6fc4 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -9,8 +9,6 @@ * 2 of the License, or (at your option) any later version. 
*/ -#include - struct super_block; /* @@ -41,14 +39,6 @@ static inline int sb_is_blkdev_sb(struct super_block *sb) */ extern void __init chrdev_init(void); -/* - * compat_ioctl.c - */ -#ifdef CONFIG_COMPAT -extern struct ioctl_trans ioctl_start[]; -extern int ioctl_table_size; -#endif - /* * namespace.c */ diff --git a/include/linux/ioctl32.h b/include/linux/ioctl32.h deleted file mode 100644 index 948809d99917..000000000000 --- a/include/linux/ioctl32.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef IOCTL32_H -#define IOCTL32_H 1 - -#include /* for __deprecated */ - -struct file; - -typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int, - unsigned long, struct file *); - -struct ioctl_trans { - unsigned long cmd; - ioctl_trans_handler_t handler; - struct ioctl_trans *next; -}; - -#endif diff --git a/include/net/sock.h b/include/net/sock.h index 25c37e34bfdc..689b886038da 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1361,15 +1361,6 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; -#ifdef CONFIG_NET -int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); -#else -static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) -{ - return -ENODEV; -} -#endif - extern void sk_init(void); #ifdef CONFIG_SYSCTL -- cgit v1.2.3 From 49a4ec188f9a96c9a5567956718213d38a456a19 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 8 May 2007 00:29:39 -0700 Subject: fix hotplug for legacy platform drivers We've had various reports of some legacy "probe the hardware" style platform drivers having nasty problems with hotplug support. The core issue is that those legacy drivers don't fully conform to the driver model. They assume a role that should be the responsibility of infrastructure code: creating device nodes. The "modprobe" step in hotplugging relies on drivers to have split those roles into different modules. The lack of this split causes the problems. When a driver creates nodes for devices that don't exist (sending a hotplug event), then exits (aborting one modprobe) before the "modprobe $MODALIAS" step completes (by failing, since it's in the middle of a modprobe), the result can be an endless loop of modprobe invocations ... badness. This fix uses the newish per-device flag controlling issuance of "add" events. (A previous version of this patch used a per-device "driver can hotplug" flag, which only scrubbed $MODALIAS from the environment rather than suppressing the entire hotplug event.) It also shrinks that flag to one bit, saving a word in "struct device". So the net of this patch is removing some nasty failures with legacy drivers, while retaining hotplug capability for the majority of platform drivers. 
Signed-off-by: David Brownell Cc: Greg KH Cc: Andres Salomon Cc: Dominik Brodowski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/platform.c | 18 ++++++++++++++++++ drivers/pcmcia/pxa2xx_mainstone.c | 1 + drivers/pcmcia/pxa2xx_sharpsl.c | 1 + include/linux/device.h | 4 ++-- 4 files changed, 22 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 17b5ece8f82c..eb84d9d44645 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -160,6 +160,11 @@ static void platform_device_release(struct device *dev) * * Create a platform device object which can have other objects attached * to it, and which will have attached objects freed when it is released. + * + * This device will be marked as not supporting hotpluggable drivers; no + * device add/remove uevents will be generated. In the unusual case that + * the device isn't being dynamically allocated as a legacy "probe the + * hardware" driver, infrastructure code should reverse this marking. */ struct platform_device *platform_device_alloc(const char *name, unsigned int id) { @@ -172,6 +177,12 @@ struct platform_device *platform_device_alloc(const char *name, unsigned int id) pa->pdev.id = id; device_initialize(&pa->pdev.dev); pa->pdev.dev.release = platform_device_release; + + /* prevent hotplug "modprobe $(MODALIAS)" from causing trouble in + * legacy probe-the-hardware drivers, which don't properly split + * out device enumeration logic from drivers. + */ + pa->pdev.dev.uevent_suppress = 1; } return pa ? &pa->pdev : NULL; @@ -351,6 +362,13 @@ EXPORT_SYMBOL_GPL(platform_device_unregister); * memory allocated for the device allows drivers using such devices * to be unloaded iwithout waiting for the last reference to the device * to be dropped. + * + * This interface is primarily intended for use with legacy drivers + * which probe hardware directly. Because such drivers create sysfs + * device nodes themselves, rather than letting system infrastructure + * handle such device enumeration tasks, they don't fully conform to + * the Linux driver model. In particular, when such drivers are built + * as modules, they can't be "hotplugged". 
*/ struct platform_device *platform_device_register_simple(char *name, unsigned int id, struct resource *res, unsigned int num) diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c index fda06941e730..383107ba4bd3 100644 --- a/drivers/pcmcia/pxa2xx_mainstone.c +++ b/drivers/pcmcia/pxa2xx_mainstone.c @@ -175,6 +175,7 @@ static int __init mst_pcmcia_init(void) if (!mst_pcmcia_device) return -ENOMEM; + mst_pcmcia_device->dev.uevent_suppress = 0; mst_pcmcia_device->dev.platform_data = &mst_pcmcia_ops; ret = platform_device_add(mst_pcmcia_device); diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c index b7b9e149c5b9..a2daa3f531b2 100644 --- a/drivers/pcmcia/pxa2xx_sharpsl.c +++ b/drivers/pcmcia/pxa2xx_sharpsl.c @@ -261,6 +261,7 @@ static int __init sharpsl_pcmcia_init(void) if (!sharpsl_pcmcia_device) return -ENOMEM; + sharpsl_pcmcia_device->dev.uevent_suppress = 0; sharpsl_pcmcia_device->dev.platform_data = &sharpsl_pcmcia_ops; sharpsl_pcmcia_device->dev.parent = platform_scoop_config->devs[0].dev; diff --git a/include/linux/device.h b/include/linux/device.h index 6579068134d1..2e1a2988b7e1 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -412,12 +412,13 @@ struct device { struct klist_node knode_parent; /* node in sibling list */ struct klist_node knode_driver; struct klist_node knode_bus; - struct device * parent; + struct device *parent; struct kobject kobj; char bus_id[BUS_ID_SIZE]; /* position on parent bus */ struct device_type *type; unsigned is_registered:1; + unsigned uevent_suppress:1; struct device_attribute uevent_attr; struct device_attribute *devt_attr; @@ -458,7 +459,6 @@ struct device { struct class *class; dev_t devt; /* dev_t, creates the sysfs "dev" */ struct attribute_group **groups; /* optional groups */ - int uevent_suppress; void (*release)(struct device * dev); }; -- cgit v1.2.3 From 873ec746158403af82c57ce26780166aafc159e1 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 8 May 2007 00:29:57 -0700 Subject: EFI: warn only for pre-1.00 system tables We used to warn unless the EFI system table major revision was exactly 1. But EFI 2.00 firmware is starting to appear, and the 2.00 changes don't affect anything in Linux. Signed-off-by: Bjorn Helgaas Cc: Andi Kleen Cc: "Luck, Tony" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/kernel/efi.c | 12 +++++------- arch/ia64/hp/sim/boot/fw-emu.c | 2 +- arch/ia64/kernel/efi.c | 10 +++++----- include/linux/efi.h | 1 - 4 files changed, 11 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c index dd9e7faafa7c..a1808022ea19 100644 --- a/arch/i386/kernel/efi.c +++ b/arch/i386/kernel/efi.c @@ -347,14 +347,12 @@ void __init efi_init(void) printk(KERN_ERR PFX "Woah! Couldn't map the EFI system table.\n"); if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) printk(KERN_ERR PFX "Woah! 
EFI system table signature incorrect\n"); - if ((efi.systab->hdr.revision ^ EFI_SYSTEM_TABLE_REVISION) >> 16 != 0) - printk(KERN_ERR PFX - "Warning: EFI system table major version mismatch: " - "got %d.%02d, expected %d.%02d\n", + if ((efi.systab->hdr.revision >> 16) == 0) + printk(KERN_ERR PFX "Warning: EFI system table version " + "%d.%02d, expected 1.00 or greater\n", efi.systab->hdr.revision >> 16, - efi.systab->hdr.revision & 0xffff, - EFI_SYSTEM_TABLE_REVISION >> 16, - EFI_SYSTEM_TABLE_REVISION & 0xffff); + efi.systab->hdr.revision & 0xffff); + /* * Grab some details from the system table */ diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c index 5a0a7afcfc3a..300acd913d9c 100644 --- a/arch/ia64/hp/sim/boot/fw-emu.c +++ b/arch/ia64/hp/sim/boot/fw-emu.c @@ -287,7 +287,7 @@ sys_fw_init (const char *args, int arglen) memset(efi_systab, 0, sizeof(efi_systab)); efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE; - efi_systab->hdr.revision = EFI_SYSTEM_TABLE_REVISION; + efi_systab->hdr.revision = ((1 << 16) | 00); efi_systab->hdr.headersize = sizeof(efi_systab->hdr); efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0"); efi_systab->fw_revision = 1; diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 78d29b79947d..75ec3478d8a2 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -445,11 +445,11 @@ efi_init (void) panic("Woah! Can't find EFI system table.\n"); if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) panic("Woah! EFI system table signature incorrect\n"); - if ((efi.systab->hdr.revision ^ EFI_SYSTEM_TABLE_REVISION) >> 16 != 0) - printk(KERN_WARNING "Warning: EFI system table major version mismatch: " - "got %d.%02d, expected %d.%02d\n", - efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, - EFI_SYSTEM_TABLE_REVISION >> 16, EFI_SYSTEM_TABLE_REVISION & 0xffff); + if ((efi.systab->hdr.revision >> 16) == 0) + printk(KERN_WARNING "Warning: EFI system table version " + "%d.%02d, expected 1.00 or greater\n", + efi.systab->hdr.revision >> 16, + efi.systab->hdr.revision & 0xffff); config_tables = __va(efi.systab->tables); diff --git a/include/linux/efi.h b/include/linux/efi.h index f8ebd7c1ddb3..0b9579a4cd42 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -213,7 +213,6 @@ typedef struct { } efi_config_table_t; #define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL) -#define EFI_SYSTEM_TABLE_REVISION ((1 << 16) | 00) typedef struct { efi_table_hdr_t hdr; -- cgit v1.2.3 From b32e41bb97971161ad34ea69364c4f9ec3909151 Mon Sep 17 00:00:00 2001 From: Milind Arun Choudhary Date: Tue, 8 May 2007 00:30:08 -0700 Subject: SPIN_LOCK_UNLOCKED cleanup in init_task.h SPIN_LOCK_UNLOCKED cleanup,use __SPIN_LOCK_UNLOCKED instead Signed-off-by: Milind Arun Choudhary Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/init_task.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/init_task.h b/include/linux/init_task.h index a2d95ff50e9b..795102309bf1 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -138,7 +138,7 @@ extern struct group_info init_groups; .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ - .pi_lock = SPIN_LOCK_UNLOCKED, \ + .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ INIT_TRACE_IRQFLAGS \ INIT_LOCKDEP \ } -- cgit v1.2.3 From b5e618181a927210f8be1d3d2249d31904ba358d Mon Sep 17 00:00:00 2001 From: Pavel 
Emelianov Date: Tue, 8 May 2007 00:30:19 -0700 Subject: Introduce a handy list_first_entry macro There are many places in the kernel where the construction like foo = list_entry(head->next, struct foo_struct, list); are used. The code might look more descriptive and neat if using the macro list_first_entry(head, type, member) \ list_entry((head)->next, type, member) Here is the macro itself and the examples of its usage in the generic code. If it will turn out to be useful, I can prepare the set of patches to inject in into arch-specific code, drivers, networking, etc. Signed-off-by: Pavel Emelianov Signed-off-by: Kirill Korotaev Cc: Randy Dunlap Cc: Andi Kleen Cc: Zach Brown Cc: Davide Libenzi Cc: John McCutchan Cc: Thomas Gleixner Cc: Ingo Molnar Cc: john stultz Cc: Ram Pai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dquot.c | 2 +- fs/eventpoll.c | 6 +++--- fs/inode.c | 2 +- fs/inotify.c | 2 +- fs/namespace.c | 4 ++-- fs/pnode.c | 2 +- include/linux/list.h | 11 +++++++++++ kernel/posix-cpu-timers.c | 14 +++++++------- kernel/timer.c | 4 ++-- 9 files changed, 29 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/fs/dquot.c b/fs/dquot.c index dcc5a582e1f1..3a995841de90 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -474,7 +474,7 @@ int vfs_quota_sync(struct super_block *sb, int type) spin_lock(&dq_list_lock); dirty = &dqopt->info[cnt].dqi_dirty_list; while (!list_empty(dirty)) { - dquot = list_entry(dirty->next, struct dquot, dq_dirty); + dquot = list_first_entry(dirty, struct dquot, dq_dirty); /* Dirty and inactive can be only bad dquot... */ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { clear_dquot_dirty(dquot); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index d0b86060cc19..b5c7ca584939 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -453,7 +453,7 @@ void eventpoll_release_file(struct file *file) mutex_lock(&epmutex); while (!list_empty(lsthead)) { - epi = list_entry(lsthead->next, struct epitem, fllink); + epi = list_first_entry(lsthead, struct epitem, fllink); ep = epi->ep; list_del_init(&epi->fllink); @@ -1143,7 +1143,7 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi) if (nwait) { while (!list_empty(lsthead)) { - pwq = list_entry(lsthead->next, struct eppoll_entry, llink); + pwq = list_first_entry(lsthead, struct eppoll_entry, llink); list_del_init(&pwq->llink); remove_wait_queue(pwq->whead, &pwq->wait); @@ -1359,7 +1359,7 @@ static int ep_send_events(struct eventpoll *ep, struct list_head *txlist, * read. 
*/ for (eventcnt = 0; !list_empty(txlist) && eventcnt < maxevents;) { - epi = list_entry(txlist->next, struct epitem, rdllink); + epi = list_first_entry(txlist, struct epitem, rdllink); prefetch(epi->rdllink.next); /* diff --git a/fs/inode.c b/fs/inode.c index 81508b0a3a70..410f235c337b 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -275,7 +275,7 @@ static void dispose_list(struct list_head *head) while (!list_empty(head)) { struct inode *inode; - inode = list_entry(head->next, struct inode, i_list); + inode = list_first_entry(head, struct inode, i_list); list_del(&inode->i_list); if (inode->i_data.nrpages) diff --git a/fs/inotify.c b/fs/inotify.c index f5099d86fd91..7457501b9565 100644 --- a/fs/inotify.c +++ b/fs/inotify.c @@ -509,7 +509,7 @@ void inotify_destroy(struct inotify_handle *ih) mutex_unlock(&ih->mutex); break; } - watch = list_entry(watches->next, struct inotify_watch, h_list); + watch = list_first_entry(watches, struct inotify_watch, h_list); get_inotify_watch(watch); mutex_unlock(&ih->mutex); diff --git a/fs/namespace.c b/fs/namespace.c index c5b88100d914..72bb1062bfe7 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -499,7 +499,7 @@ void release_mounts(struct list_head *head) { struct vfsmount *mnt; while (!list_empty(head)) { - mnt = list_entry(head->next, struct vfsmount, mnt_hash); + mnt = list_first_entry(head, struct vfsmount, mnt_hash); list_del_init(&mnt->mnt_hash); if (mnt->mnt_parent != mnt) { struct dentry *dentry; @@ -1177,7 +1177,7 @@ static void expire_mount_list(struct list_head *graveyard, struct list_head *mou while (!list_empty(graveyard)) { LIST_HEAD(umounts); - mnt = list_entry(graveyard->next, struct vfsmount, mnt_expire); + mnt = list_first_entry(graveyard, struct vfsmount, mnt_expire); list_del_init(&mnt->mnt_expire); /* don't do anything if the namespace is dead - all the diff --git a/fs/pnode.c b/fs/pnode.c index 56aacead8362..89940f243fc2 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -59,7 +59,7 @@ static int do_make_slave(struct vfsmount *mnt) } else { struct list_head *p = &mnt->mnt_slave_list; while (!list_empty(p)) { - slave_mnt = list_entry(p->next, + slave_mnt = list_first_entry(p, struct vfsmount, mnt_slave); list_del_init(&slave_mnt->mnt_slave); slave_mnt->mnt_master = NULL; diff --git a/include/linux/list.h b/include/linux/list.h index f9d71eab05ee..9202703be2a4 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -425,6 +425,17 @@ static inline void list_splice_init_rcu(struct list_head *list, #define list_entry(ptr, type, member) \ container_of(ptr, type, member) +/** + * list_first_entry - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + /** * list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. 
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 657f77697415..1de710e18373 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -971,7 +971,7 @@ static void check_thread_timers(struct task_struct *tsk, maxfire = 20; tsk->it_prof_expires = cputime_zero; while (!list_empty(timers)) { - struct cpu_timer_list *t = list_entry(timers->next, + struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) { @@ -986,7 +986,7 @@ static void check_thread_timers(struct task_struct *tsk, maxfire = 20; tsk->it_virt_expires = cputime_zero; while (!list_empty(timers)) { - struct cpu_timer_list *t = list_entry(timers->next, + struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) { @@ -1001,7 +1001,7 @@ static void check_thread_timers(struct task_struct *tsk, maxfire = 20; tsk->it_sched_expires = 0; while (!list_empty(timers)) { - struct cpu_timer_list *t = list_entry(timers->next, + struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); if (!--maxfire || tsk->sched_time < t->expires.sched) { @@ -1057,7 +1057,7 @@ static void check_process_timers(struct task_struct *tsk, maxfire = 20; prof_expires = cputime_zero; while (!list_empty(timers)) { - struct cpu_timer_list *t = list_entry(timers->next, + struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) { @@ -1072,7 +1072,7 @@ static void check_process_timers(struct task_struct *tsk, maxfire = 20; virt_expires = cputime_zero; while (!list_empty(timers)) { - struct cpu_timer_list *t = list_entry(timers->next, + struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); if (!--maxfire || cputime_lt(utime, t->expires.cpu)) { @@ -1087,7 +1087,7 @@ static void check_process_timers(struct task_struct *tsk, maxfire = 20; sched_expires = 0; while (!list_empty(timers)) { - struct cpu_timer_list *t = list_entry(timers->next, + struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); if (!--maxfire || sched_time < t->expires.sched) { @@ -1400,7 +1400,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, */ head = &tsk->signal->cpu_timers[clock_idx]; if (list_empty(head) || - cputime_ge(list_entry(head->next, + cputime_ge(list_first_entry(head, struct cpu_timer_list, entry)->expires.cpu, *newval)) { /* diff --git a/kernel/timer.c b/kernel/timer.c index ba41af2bb6cc..7a6448340f90 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -629,7 +629,7 @@ static inline void __run_timers(tvec_base_t *base) void (*fn)(unsigned long); unsigned long data; - timer = list_entry(head->next,struct timer_list,entry); + timer = list_first_entry(head, struct timer_list,entry); fn = timer->function; data = timer->data; @@ -1248,7 +1248,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head) struct timer_list *timer; while (!list_empty(head)) { - timer = list_entry(head->next, struct timer_list, entry); + timer = list_first_entry(head, struct timer_list, entry); detach_timer(timer, 0); timer_set_base(timer, new_base); internal_add_timer(new_base, timer); -- cgit v1.2.3 From d1ab824be43842ae7429ab1df37153e1cebb4d32 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 8 May 2007 00:30:22 -0700 Subject: Document SPIN_LOCK_UNLOCKED/RW_LOCK_UNLOCKED deprecation 
Apparently it's not cool anymore to use SPIN/RW_LOCK_UNLOCKED. There's some mention of this in Documentation/spinlocks.txt, but that only talks about dynamic initialisation. A comment in the code mentioning the preferred usage would be good IMHO. [akpm@linux-foundation.org: add reason for deprecation] Signed-off-by: Michael Ellerman Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/spinlock_types.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index dc5fb69e4de9..210549ba4ef4 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -85,6 +85,12 @@ typedef struct { RW_DEP_MAP_INIT(lockname) } #endif +/* + * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and + * are hence deprecated. + * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or + * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. + */ #define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) #define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) -- cgit v1.2.3 From 28be5abb400e5e082f5225105fdc69337ec0c0b4 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 8 May 2007 00:30:33 -0700 Subject: ext3: copy i_flags to inode flags on write A patch that stores inode flags such as S_IMMUTABLE, S_APPEND, etc. from i_flags to EXT3_I(inode)->i_flags when inode is written to disk. The same thing is done on GETFLAGS ioctl. Quota code changes these flags on quota files (to make it harder for sysadmin to screw himself) and these changes were not correctly propagated into the filesystem (especially, lsattr did not show them and users were wondering...). Propagate flags such as S_APPEND, S_IMMUTABLE, etc. from i_flags into ext3-specific i_flags. Hence, when someone sets these flags via a different interface than ioctl, they are stored correctly. 
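For illustration only (not part of the patch; the function below is a made-up sketch), this is roughly the path being fixed: code such as the quota core sets the VFS-level flags directly, and ext3_get_inode_flags() now copies them into the ext3-specific flags when the inode is written out or EXT3_IOC_GETFLAGS is issued, so lsattr finally agrees with reality:

	#include <linux/fs.h>

	/* Sketch of a caller that bypasses EXT3_IOC_SETFLAGS, as the quota
	 * code does when protecting its quota files. */
	static void example_protect_file(struct inode *inode)
	{
		inode->i_flags |= S_NOATIME | S_IMMUTABLE;	/* VFS flags only */
		mark_inode_dirty(inode);
		/* ext3_do_update_inode() -> ext3_get_inode_flags() now turns
		 * these into EXT3_NOATIME_FL | EXT3_IMMUTABLE_FL on disk. */
	}
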
Signed-off-by: Jan Kara Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext3/inode.c | 20 ++++++++++++++++++++ fs/ext3/ioctl.c | 1 + include/linux/ext3_fs.h | 1 + 3 files changed, 22 insertions(+) (limited to 'include/linux') diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index f9bcddbd2ef1..e1bb03171986 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -2580,6 +2580,25 @@ void ext3_set_inode_flags(struct inode *inode) inode->i_flags |= S_DIRSYNC; } +/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */ +void ext3_get_inode_flags(struct ext3_inode_info *ei) +{ + unsigned int flags = ei->vfs_inode.i_flags; + + ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL| + EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL); + if (flags & S_SYNC) + ei->i_flags |= EXT3_SYNC_FL; + if (flags & S_APPEND) + ei->i_flags |= EXT3_APPEND_FL; + if (flags & S_IMMUTABLE) + ei->i_flags |= EXT3_IMMUTABLE_FL; + if (flags & S_NOATIME) + ei->i_flags |= EXT3_NOATIME_FL; + if (flags & S_DIRSYNC) + ei->i_flags |= EXT3_DIRSYNC_FL; +} + void ext3_read_inode(struct inode * inode) { struct ext3_iloc iloc; @@ -2735,6 +2754,7 @@ static int ext3_do_update_inode(handle_t *handle, if (ei->i_state & EXT3_STATE_NEW) memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size); + ext3_get_inode_flags(ei); raw_inode->i_mode = cpu_to_le16(inode->i_mode); if(!(test_opt(inode->i_sb, NO_UID32))) { raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c index 9b8090d94e6c..965006dba6be 100644 --- a/fs/ext3/ioctl.c +++ b/fs/ext3/ioctl.c @@ -28,6 +28,7 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, switch (cmd) { case EXT3_IOC_GETFLAGS: + ext3_get_inode_flags(ei); flags = ei->i_flags & EXT3_FL_USER_VISIBLE; return put_user(flags, (int __user *) arg); case EXT3_IOC_SETFLAGS: { diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 4eb18ac510ae..ece49a804fe1 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -824,6 +824,7 @@ extern int ext3_change_inode_journal_flag(struct inode *, int); extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *); extern void ext3_truncate (struct inode *); extern void ext3_set_inode_flags(struct inode *); +extern void ext3_get_inode_flags(struct ext3_inode_info *); extern void ext3_set_aops(struct inode *inode); /* ioctl.c */ -- cgit v1.2.3 From 28ec039c21839914389975b896160a815ffd8b83 Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Tue, 8 May 2007 00:31:01 -0700 Subject: fat: don't use free_clusters for fat32 It seems that the recent Windows changed specification, and it's undocumented. Windows doesn't update ->free_clusters correctly. This patch doesn't use ->free_clusters by default. (instead, add "usefree" for forcing to use it) Signed-off-by: OGAWA Hirofumi Cc: Juergen Beisert Cc: Andreas Schwab Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/vfat.txt | 7 +++++++ fs/fat/inode.c | 14 +++++++++++--- include/linux/msdos_fs.h | 3 ++- 3 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt index 069cb1094300..fcc123ffa252 100644 --- a/Documentation/filesystems/vfat.txt +++ b/Documentation/filesystems/vfat.txt @@ -57,6 +57,13 @@ nonumtail= -- When creating 8.3 aliases, normally the alias will currently exist in the directory, 'longfile.txt' will be the short alias instead of 'longfi~1.txt'. 
+usefree -- Use the "free clusters" value stored on FSINFO. It'll + be used to determine number of free clusters without + scanning disk. But it's not used by default, because + recent Windows don't update it correctly in some + case. If you are sure the "free clusters" on FSINFO is + correct, by this option you can avoid scanning disk. + quiet -- Stops printing certain warning messages. check=s|r|n -- Case sensitivity checking setting. diff --git a/fs/fat/inode.c b/fs/fat/inode.c index a8129e82d594..2c55e8dce793 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -825,6 +825,8 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt) } if (opts->name_check != 'n') seq_printf(m, ",check=%c", opts->name_check); + if (opts->usefree) + seq_puts(m, ",usefree"); if (opts->quiet) seq_puts(m, ",quiet"); if (opts->showexec) @@ -850,7 +852,7 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt) enum { Opt_check_n, Opt_check_r, Opt_check_s, Opt_uid, Opt_gid, - Opt_umask, Opt_dmask, Opt_fmask, Opt_codepage, Opt_nocase, + Opt_umask, Opt_dmask, Opt_fmask, Opt_codepage, Opt_usefree, Opt_nocase, Opt_quiet, Opt_showexec, Opt_debug, Opt_immutable, Opt_dots, Opt_nodots, Opt_charset, Opt_shortname_lower, Opt_shortname_win95, @@ -872,6 +874,7 @@ static match_table_t fat_tokens = { {Opt_dmask, "dmask=%o"}, {Opt_fmask, "fmask=%o"}, {Opt_codepage, "codepage=%u"}, + {Opt_usefree, "usefree"}, {Opt_nocase, "nocase"}, {Opt_quiet, "quiet"}, {Opt_showexec, "showexec"}, @@ -951,7 +954,7 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug, opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0; opts->utf8 = opts->unicode_xlate = 0; opts->numtail = 1; - opts->nocase = 0; + opts->usefree = opts->nocase = 0; *debug = 0; if (!options) @@ -979,6 +982,9 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug, case Opt_check_n: opts->name_check = 'n'; break; + case Opt_usefree: + opts->usefree = 1; + break; case Opt_nocase: if (!is_vfat) opts->nocase = 1; @@ -1304,7 +1310,9 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, le32_to_cpu(fsinfo->signature2), sbi->fsinfo_sector); } else { - sbi->free_clusters = le32_to_cpu(fsinfo->free_clusters); + if (sbi->options.usefree) + sbi->free_clusters = + le32_to_cpu(fsinfo->free_clusters); sbi->prev_free = le32_to_cpu(fsinfo->next_cluster); } diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index fa253fa73aa3..0e09c005dda8 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -205,7 +205,8 @@ struct fat_mount_options { numtail:1, /* Does first alias have a numeric '~1' type tail? */ atari:1, /* Use Atari GEMDOS variation of MS-DOS fs */ flush:1, /* write things quickly */ - nocase:1; /* Does this need case conversion? 0=need case conversion*/ + nocase:1, /* Does this need case conversion? 0=need case conversion*/ + usefree:1; /* Use free_clusters for FAT32 */ }; #define FAT_HASH_BITS 8 -- cgit v1.2.3 From 6df95fd7ad9a842c1688d2b83bdcb7c82e9c8630 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 8 May 2007 00:31:11 -0700 Subject: consolidate asm/const.h to linux/const.h Make a global linux/const.h header file instead of having multiple, per-arch files, and convert current users of asm/const.h to use linux/const.h. Built on x86_64 and sparc64. [akpm@linux-foundation.org: fix include/asm-x86_64/Kbuild] Signed-off-by: Randy Dunlap Signed-off-by: David S. 
Miller Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-sparc64/Kbuild | 1 - include/asm-sparc64/const.h | 19 ------------------- include/asm-sparc64/lsu.h | 2 +- include/asm-sparc64/mmu.h | 2 +- include/asm-sparc64/page.h | 2 +- include/asm-sparc64/pgtable.h | 2 +- include/asm-sparc64/pstate.h | 2 +- include/asm-sparc64/sfafsr.h | 2 +- include/asm-x86_64/Kbuild | 1 - include/asm-x86_64/const.h | 20 -------------------- include/asm-x86_64/page.h | 2 +- include/asm-x86_64/pgtable.h | 2 +- include/linux/Kbuild | 1 + include/linux/const.h | 19 +++++++++++++++++++ 14 files changed, 28 insertions(+), 49 deletions(-) delete mode 100644 include/asm-sparc64/const.h delete mode 100644 include/asm-x86_64/const.h create mode 100644 include/linux/const.h (limited to 'include/linux') diff --git a/include/asm-sparc64/Kbuild b/include/asm-sparc64/Kbuild index a7f44408c93b..854fd3a65acf 100644 --- a/include/asm-sparc64/Kbuild +++ b/include/asm-sparc64/Kbuild @@ -8,7 +8,6 @@ header-y += apb.h header-y += asi.h header-y += bbc.h header-y += bpp.h -header-y += const.h header-y += display7seg.h header-y += envctrl.h header-y += ipc.h diff --git a/include/asm-sparc64/const.h b/include/asm-sparc64/const.h deleted file mode 100644 index 8ad902b2ce04..000000000000 --- a/include/asm-sparc64/const.h +++ /dev/null @@ -1,19 +0,0 @@ -/* const.h: Macros for dealing with constants. */ - -#ifndef _SPARC64_CONST_H -#define _SPARC64_CONST_H - -/* Some constant macros are used in both assembler and - * C code. Therefore we cannot annotate them always with - * 'UL' and other type specificers unilaterally. We - * use the following macros to deal with this. - */ - -#ifdef __ASSEMBLY__ -#define _AC(X,Y) X -#else -#define _AC(X,Y) (X##Y) -#endif - - -#endif /* !(_SPARC64_CONST_H) */ diff --git a/include/asm-sparc64/lsu.h b/include/asm-sparc64/lsu.h index e5329c7f5833..79f109840c39 100644 --- a/include/asm-sparc64/lsu.h +++ b/include/asm-sparc64/lsu.h @@ -2,7 +2,7 @@ #ifndef _SPARC64_LSU_H #define _SPARC64_LSU_H -#include +#include /* LSU Control Register */ #define LSU_CONTROL_PM _AC(0x000001fe00000000,UL) /* Phys-watchpoint byte mask*/ diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 70af4b6ce136..8abc58f0f9d7 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -1,8 +1,8 @@ #ifndef __MMU_H #define __MMU_H +#include #include -#include #include #define CTX_NR_BITS 13 diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index ff736eafa64d..7af1077451ff 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -5,7 +5,7 @@ #ifdef __KERNEL__ -#include +#include #if defined(CONFIG_SPARC64_PAGE_SIZE_8KB) #define PAGE_SHIFT 13 diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 46705ef47d27..9e80ad43b29c 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -15,13 +15,13 @@ #include #include +#include #include #include #include #include #include #include -#include /* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB). * The page copy blockops can use 0x2000000 to 0x4000000. diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h index 49a7924a89ab..f3c45484c636 100644 --- a/include/asm-sparc64/pstate.h +++ b/include/asm-sparc64/pstate.h @@ -2,7 +2,7 @@ #ifndef _SPARC64_PSTATE_H #define _SPARC64_PSTATE_H -#include +#include /* The V9 PSTATE Register (with SpitFire extensions). 
* diff --git a/include/asm-sparc64/sfafsr.h b/include/asm-sparc64/sfafsr.h index 2f792c20b53c..e96137b04a4f 100644 --- a/include/asm-sparc64/sfafsr.h +++ b/include/asm-sparc64/sfafsr.h @@ -1,7 +1,7 @@ #ifndef _SPARC64_SFAFSR_H #define _SPARC64_SFAFSR_H -#include +#include /* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */ diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild index 89ad1fc27c8b..75a2deffca68 100644 --- a/include/asm-x86_64/Kbuild +++ b/include/asm-x86_64/Kbuild @@ -19,4 +19,3 @@ unifdef-y += mce.h unifdef-y += msr.h unifdef-y += mtrr.h unifdef-y += vsyscall.h -unifdef-y += const.h diff --git a/include/asm-x86_64/const.h b/include/asm-x86_64/const.h deleted file mode 100644 index 54fb08f3db9b..000000000000 --- a/include/asm-x86_64/const.h +++ /dev/null @@ -1,20 +0,0 @@ -/* const.h: Macros for dealing with constants. */ - -#ifndef _X86_64_CONST_H -#define _X86_64_CONST_H - -/* Some constant macros are used in both assembler and - * C code. Therefore we cannot annotate them always with - * 'UL' and other type specificers unilaterally. We - * use the following macros to deal with this. - */ - -#ifdef __ASSEMBLY__ -#define _AC(X,Y) X -#else -#define __AC(X,Y) (X##Y) -#define _AC(X,Y) __AC(X,Y) -#endif - - -#endif /* !(_X86_64_CONST_H) */ diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index 4d04e2479569..d7b101bb4a5f 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h @@ -1,7 +1,7 @@ #ifndef _X86_64_PAGE_H #define _X86_64_PAGE_H -#include +#include /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index b21083891ce8..4e86aca57aea 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -1,7 +1,7 @@ #ifndef _X86_64_PGTABLE_H #define _X86_64_PGTABLE_H -#include +#include #ifndef __ASSEMBLY__ /* diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 2c2b6fc91e30..d61983db4149 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -45,6 +45,7 @@ header-y += coda_psdev.h header-y += coff.h header-y += comstats.h header-y += consolemap.h +header-y += const.h header-y += cycx_cfm.h header-y += dlm_device.h header-y += dm-ioctl.h diff --git a/include/linux/const.h b/include/linux/const.h new file mode 100644 index 000000000000..07b300bfe34b --- /dev/null +++ b/include/linux/const.h @@ -0,0 +1,19 @@ +/* const.h: Macros for dealing with constants. */ + +#ifndef _LINUX_CONST_H +#define _LINUX_CONST_H + +/* Some constant macros are used in both assembler and + * C code. Therefore we cannot annotate them always with + * 'UL' and other type specifiers unilaterally. We + * use the following macros to deal with this. 
+ */ + +#ifdef __ASSEMBLY__ +#define _AC(X,Y) X +#else +#define __AC(X,Y) (X##Y) +#define _AC(X,Y) __AC(X,Y) +#endif + +#endif /* !(_LINUX_CONST_H) */ -- cgit v1.2.3 From f19b121e21c1b032f6c612d2b9b499151f7b661b Mon Sep 17 00:00:00 2001 From: "akpm@linux-foundation.org" Date: Tue, 8 May 2007 00:31:22 -0700 Subject: Driver for the Maxim DS1WM, a 1-wire bus master ASIC core Cc: Matt Reimer [akpm@linux-foundation.org: kconfig update] Signed-off-by: Matt Reimer Signed-off-by: Evgeniy Polyakov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/w1/masters/Kconfig | 8 + drivers/w1/masters/Makefile | 2 +- drivers/w1/masters/ds1wm.c | 468 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/ds1wm.h | 11 ++ 4 files changed, 488 insertions(+), 1 deletion(-) create mode 100644 drivers/w1/masters/ds1wm.c create mode 100644 include/linux/ds1wm.h (limited to 'include/linux') diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig index 2fb425536eae..8f779338f744 100644 --- a/drivers/w1/masters/Kconfig +++ b/drivers/w1/masters/Kconfig @@ -35,5 +35,13 @@ config W1_MASTER_DS2482 This driver can also be built as a module. If so, the module will be called ds2482. +config W1_MASTER_DS1WM + tristate "Maxim DS1WM 1-wire busmaster" + depends on W1 && ARM + help + Say Y here to enable the DS1WM 1-wire driver, such as that + in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like + hx4700. + endmenu diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile index 4cee256a8134..11551b328186 100644 --- a/drivers/w1/masters/Makefile +++ b/drivers/w1/masters/Makefile @@ -5,4 +5,4 @@ obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o - +obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c new file mode 100644 index 000000000000..763bc73e5070 --- /dev/null +++ b/drivers/w1/masters/ds1wm.c @@ -0,0 +1,468 @@ +/* + * 1-wire busmaster driver for DS1WM and ASICs with embedded DS1WMs + * such as HP iPAQs (including h5xxx, h2200, and devices with ASIC3 + * like hx4700). + * + * Copyright (c) 2004-2005, Szabolcs Gyurko + * Copyright (c) 2004-2007, Matt Reimer + * + * Use consistent with the GNU GPL is permitted, + * provided that this copyright notice is + * preserved in its entirety in all copies and derived works. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "../w1.h" +#include "../w1_int.h" + + +#define DS1WM_CMD 0x00 /* R/W 4 bits command */ +#define DS1WM_DATA 0x01 /* R/W 8 bits, transmit/receive buffer */ +#define DS1WM_INT 0x02 /* R/W interrupt status */ +#define DS1WM_INT_EN 0x03 /* R/W interrupt enable */ +#define DS1WM_CLKDIV 0x04 /* R/W 5 bits of divisor and pre-scale */ + +#define DS1WM_CMD_1W_RESET (1 << 0) /* force reset on 1-wire bus */ +#define DS1WM_CMD_SRA (1 << 1) /* enable Search ROM accelerator mode */ +#define DS1WM_CMD_DQ_OUTPUT (1 << 2) /* write only - forces bus low */ +#define DS1WM_CMD_DQ_INPUT (1 << 3) /* read only - reflects state of bus */ +#define DS1WM_CMD_RST (1 << 5) /* software reset */ +#define DS1WM_CMD_OD (1 << 7) /* overdrive */ + +#define DS1WM_INT_PD (1 << 0) /* presence detect */ +#define DS1WM_INT_PDR (1 << 1) /* presence detect result */ +#define DS1WM_INT_TBE (1 << 2) /* tx buffer empty */ +#define DS1WM_INT_TSRE (1 << 3) /* tx shift register empty */ +#define DS1WM_INT_RBF (1 << 4) /* rx buffer full */ +#define DS1WM_INT_RSRF (1 << 5) /* rx shift register full */ + +#define DS1WM_INTEN_EPD (1 << 0) /* enable presence detect int */ +#define DS1WM_INTEN_IAS (1 << 1) /* INTR active state */ +#define DS1WM_INTEN_ETBE (1 << 2) /* enable tx buffer empty int */ +#define DS1WM_INTEN_ETMT (1 << 3) /* enable tx shift register empty int */ +#define DS1WM_INTEN_ERBF (1 << 4) /* enable rx buffer full int */ +#define DS1WM_INTEN_ERSRF (1 << 5) /* enable rx shift register full int */ +#define DS1WM_INTEN_DQO (1 << 6) /* enable direct bus driving ops */ + + +#define DS1WM_TIMEOUT (HZ * 5) + +static struct { + unsigned long freq; + unsigned long divisor; +} freq[] = { + { 4000000, 0x8 }, + { 5000000, 0x2 }, + { 6000000, 0x5 }, + { 7000000, 0x3 }, + { 8000000, 0xc }, + { 10000000, 0x6 }, + { 12000000, 0x9 }, + { 14000000, 0x7 }, + { 16000000, 0x10 }, + { 20000000, 0xa }, + { 24000000, 0xd }, + { 28000000, 0xb }, + { 32000000, 0x14 }, + { 40000000, 0xe }, + { 48000000, 0x11 }, + { 56000000, 0xf }, + { 64000000, 0x18 }, + { 80000000, 0x12 }, + { 96000000, 0x15 }, + { 112000000, 0x13 }, + { 128000000, 0x1c }, +}; + +struct ds1wm_data { + void *map; + int bus_shift; /* # of shifts to calc register offsets */ + struct platform_device *pdev; + struct ds1wm_platform_data *pdata; + int irq; + int active_high; + struct clk *clk; + int slave_present; + void *reset_complete; + void *read_complete; + void *write_complete; + u8 read_byte; /* last byte received */ +}; + +static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, + u8 val) +{ + __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); +} + +static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) +{ + return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); +} + + +static irqreturn_t ds1wm_isr(int isr, void *data) +{ + struct ds1wm_data *ds1wm_data = data; + u8 intr = ds1wm_read_register(ds1wm_data, DS1WM_INT); + + ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 
0 : 1; + + if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) + complete(ds1wm_data->reset_complete); + + if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) + complete(ds1wm_data->write_complete); + + if (intr & DS1WM_INT_RBF) { + ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data, + DS1WM_DATA); + if (ds1wm_data->read_complete) + complete(ds1wm_data->read_complete); + } + + return IRQ_HANDLED; +} + +static int ds1wm_reset(struct ds1wm_data *ds1wm_data) +{ + unsigned long timeleft; + DECLARE_COMPLETION_ONSTACK(reset_done); + + ds1wm_data->reset_complete = &reset_done; + + ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD | + (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); + + ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET); + + timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); + ds1wm_data->reset_complete = NULL; + if (!timeleft) { + dev_dbg(&ds1wm_data->pdev->dev, "reset failed\n"); + return 1; + } + + /* Wait for the end of the reset. According to the specs, the time + * from when the interrupt is asserted to the end of the reset is: + * tRSTH - tPDH - tPDL - tPDI + * 625 us - 60 us - 240 us - 100 ns = 324.9 us + * + * We'll wait a bit longer just to be sure. + */ + udelay(500); + + ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, + DS1WM_INTEN_ERBF | DS1WM_INTEN_ETMT | DS1WM_INTEN_EPD | + (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); + + if (!ds1wm_data->slave_present) { + dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); + return 1; + } + + return 0; +} + +static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) +{ + DECLARE_COMPLETION_ONSTACK(write_done); + ds1wm_data->write_complete = &write_done; + + ds1wm_write_register(ds1wm_data, DS1WM_DATA, data); + + wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT); + ds1wm_data->write_complete = NULL; + + return 0; +} + +static int ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data) +{ + DECLARE_COMPLETION_ONSTACK(read_done); + ds1wm_data->read_complete = &read_done; + + ds1wm_write(ds1wm_data, write_data); + wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT); + ds1wm_data->read_complete = NULL; + + return ds1wm_data->read_byte; +} + +static int ds1wm_find_divisor(int gclk) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(freq); i++) + if (gclk <= freq[i].freq) + return freq[i].divisor; + + return 0; +} + +static void ds1wm_up(struct ds1wm_data *ds1wm_data) +{ + int gclk, divisor; + + if (ds1wm_data->pdata->enable) + ds1wm_data->pdata->enable(ds1wm_data->pdev); + + gclk = clk_get_rate(ds1wm_data->clk); + clk_enable(ds1wm_data->clk); + divisor = ds1wm_find_divisor(gclk); + if (divisor == 0) { + dev_err(&ds1wm_data->pdev->dev, + "no suitable divisor for %dHz clock\n", gclk); + return; + } + ds1wm_write_register(ds1wm_data, DS1WM_CLKDIV, divisor); + + /* Let the w1 clock stabilize. */ + msleep(1); + + ds1wm_reset(ds1wm_data); +} + +static void ds1wm_down(struct ds1wm_data *ds1wm_data) +{ + ds1wm_reset(ds1wm_data); + + /* Disable interrupts. */ + ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, + ds1wm_data->active_high ? 
DS1WM_INTEN_IAS : 0); + + if (ds1wm_data->pdata->disable) + ds1wm_data->pdata->disable(ds1wm_data->pdev); + + clk_disable(ds1wm_data->clk); +} + +/* --------------------------------------------------------------------- */ +/* w1 methods */ + +static u8 ds1wm_read_byte(void *data) +{ + struct ds1wm_data *ds1wm_data = data; + + return ds1wm_read(ds1wm_data, 0xff); +} + +static void ds1wm_write_byte(void *data, u8 byte) +{ + struct ds1wm_data *ds1wm_data = data; + + ds1wm_write(ds1wm_data, byte); +} + +static u8 ds1wm_reset_bus(void *data) +{ + struct ds1wm_data *ds1wm_data = data; + + ds1wm_reset(ds1wm_data); + + return 0; +} + +static void ds1wm_search(void *data, u8 search_type, + w1_slave_found_callback slave_found) +{ + struct ds1wm_data *ds1wm_data = data; + int i; + unsigned long long rom_id; + + /* XXX We need to iterate for multiple devices per the DS1WM docs. + * See http://www.maxim-ic.com/appnotes.cfm/appnote_number/120. */ + if (ds1wm_reset(ds1wm_data)) + return; + + ds1wm_write(ds1wm_data, search_type); + ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA); + + for (rom_id = 0, i = 0; i < 16; i++) { + + unsigned char resp, r, d; + + resp = ds1wm_read(ds1wm_data, 0x00); + + r = ((resp & 0x02) >> 1) | + ((resp & 0x08) >> 2) | + ((resp & 0x20) >> 3) | + ((resp & 0x80) >> 4); + + d = ((resp & 0x01) >> 0) | + ((resp & 0x04) >> 1) | + ((resp & 0x10) >> 2) | + ((resp & 0x40) >> 3); + + rom_id |= (unsigned long long) r << (i * 4); + + } + dev_dbg(&ds1wm_data->pdev->dev, "found 0x%08llX", rom_id); + + ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA); + ds1wm_reset(ds1wm_data); + + slave_found(ds1wm_data, rom_id); +} + +/* --------------------------------------------------------------------- */ + +static struct w1_bus_master ds1wm_master = { + .read_byte = ds1wm_read_byte, + .write_byte = ds1wm_write_byte, + .reset_bus = ds1wm_reset_bus, + .search = ds1wm_search, +}; + +static int ds1wm_probe(struct platform_device *pdev) +{ + struct ds1wm_data *ds1wm_data; + struct ds1wm_platform_data *plat; + struct resource *res; + int ret; + + if (!pdev) + return -ENODEV; + + ds1wm_data = kzalloc(sizeof (*ds1wm_data), GFP_KERNEL); + if (!ds1wm_data) + return -ENOMEM; + + platform_set_drvdata(pdev, ds1wm_data); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENXIO; + goto err0; + } + ds1wm_data->map = ioremap(res->start, res->end - res->start + 1); + if (!ds1wm_data->map) { + ret = -ENOMEM; + goto err0; + } + plat = pdev->dev.platform_data; + ds1wm_data->bus_shift = plat->bus_shift; + ds1wm_data->pdev = pdev; + ds1wm_data->pdata = plat; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + ret = -ENXIO; + goto err1; + } + ds1wm_data->irq = res->start; + ds1wm_data->active_high = (res->flags & IORESOURCE_IRQ_HIGHEDGE) ? + 1 : 0; + + set_irq_type(ds1wm_data->irq, ds1wm_data->active_high ? 
+ IRQ_TYPE_EDGE_RISING : IRQ_TYPE_EDGE_FALLING); + + ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, + "ds1wm", ds1wm_data); + if (ret) + goto err1; + + ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); + if (!ds1wm_data->clk) { + ret = -ENOENT; + goto err2; + } + + ds1wm_up(ds1wm_data); + + ds1wm_master.data = (void *)ds1wm_data; + + ret = w1_add_master_device(&ds1wm_master); + if (ret) + goto err3; + + return 0; + +err3: + ds1wm_down(ds1wm_data); + clk_put(ds1wm_data->clk); +err2: + free_irq(ds1wm_data->irq, ds1wm_data); +err1: + iounmap(ds1wm_data->map); +err0: + kfree(ds1wm_data); + + return ret; +} + +#ifdef CONFIG_PM +static int ds1wm_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev); + + ds1wm_down(ds1wm_data); + + return 0; +} + +static int ds1wm_resume(struct platform_device *pdev) +{ + struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev); + + ds1wm_up(ds1wm_data); + + return 0; +} +#else +#define ds1wm_suspend NULL +#define ds1wm_resume NULL +#endif + +static int ds1wm_remove(struct platform_device *pdev) +{ + struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev); + + w1_remove_master_device(&ds1wm_master); + ds1wm_down(ds1wm_data); + clk_put(ds1wm_data->clk); + free_irq(ds1wm_data->irq, ds1wm_data); + iounmap(ds1wm_data->map); + kfree(ds1wm_data); + + return 0; +} + +static struct platform_driver ds1wm_driver = { + .driver = { + .name = "ds1wm", + }, + .probe = ds1wm_probe, + .remove = ds1wm_remove, + .suspend = ds1wm_suspend, + .resume = ds1wm_resume +}; + +static int __init ds1wm_init(void) +{ + printk("DS1WM w1 busmaster driver - (c) 2004 Szabolcs Gyurko\n"); + return platform_driver_register(&ds1wm_driver); +} + +static void __exit ds1wm_exit(void) +{ + platform_driver_unregister(&ds1wm_driver); +} + +module_init(ds1wm_init); +module_exit(ds1wm_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Szabolcs Gyurko , " + "Matt Reimer "); +MODULE_DESCRIPTION("DS1WM w1 busmaster driver"); diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h new file mode 100644 index 000000000000..31f6e3c427fb --- /dev/null +++ b/include/linux/ds1wm.h @@ -0,0 +1,11 @@ +/* platform data for the DS1WM driver */ + +struct ds1wm_platform_data { + int bus_shift; /* number of shifts needed to calculate the + * offset between DS1WM registers; + * e.g. 
on h5xxx and h2200 this is 2 + * (registers aligned to 4-byte boundaries), + * while on hx4700 this is 1 */ + void (*enable)(struct platform_device *pdev); + void (*disable)(struct platform_device *pdev); +}; -- cgit v1.2.3 From cef2cf07273d12ac3453d2baff096423f17b7403 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 8 May 2007 00:31:45 -0700 Subject: Misc: add sensable phantom driver Add sensable phantom driver Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ioctl-number.txt | 3 +- MAINTAINERS | 5 + drivers/misc/Kconfig | 9 + drivers/misc/Makefile | 1 + drivers/misc/phantom.c | 463 +++++++++++++++++++++++++++++++++++++++++ include/linux/Kbuild | 1 + include/linux/phantom.h | 42 ++++ 7 files changed, 523 insertions(+), 1 deletion(-) create mode 100644 drivers/misc/phantom.c create mode 100644 include/linux/phantom.h (limited to 'include/linux') diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt index 8f750c0efed5..3de7d379cf07 100644 --- a/Documentation/ioctl-number.txt +++ b/Documentation/ioctl-number.txt @@ -138,7 +138,8 @@ Code Seq# Include File Comments 'm' 00-1F net/irda/irmod.h conflict! 'n' 00-7F linux/ncp_fs.h 'n' E0-FF video/matrox.h matroxfb -'p' 00-3F linux/mc146818rtc.h +'p' 00-0F linux/phantom.h conflict! (OpenHaptics needs this) +'p' 00-3F linux/mc146818rtc.h conflict! 'p' 40-7F linux/nvram.h 'p' 80-9F user-space parport diff --git a/MAINTAINERS b/MAINTAINERS index 36e48a4e29da..69f3f66091b4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3108,6 +3108,11 @@ L: selinux@tycho.nsa.gov (subscribers-only, general discussion) W: http://www.nsa.gov/selinux S: Supported +SENSABLE PHANTOM +P: Jiri Slaby +M: jirislaby@gmail.com +S: Maintained + SERIAL ATA (SATA) SUBSYSTEM: P: Jeff Garzik M: jgarzik@pobox.com diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index bfb02c1a45ae..877e7909a0e5 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -25,6 +25,15 @@ config IBM_ASM information on the specific driver level and support statement for your IBM server. +config PHANTOM + tristate "Sensable PHANToM" + depends on PCI + help + Say Y here if you want to build a driver for Sensable PHANToM device. + + If you choose to build module, its name will be phantom. If unsure, + say N here. + If unsure, say N. diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index ece6baf76bc7..5b6d46de005c 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_BLINK) += blink.o obj-$(CONFIG_LKDTM) += lkdtm.o obj-$(CONFIG_TIFM_CORE) += tifm_core.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o +obj-$(CONFIG_PHANTOM) += phantom.o obj-$(CONFIG_SGI_IOC4) += ioc4.o obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c new file mode 100644 index 000000000000..35b139b0e5f2 --- /dev/null +++ b/drivers/misc/phantom.c @@ -0,0 +1,463 @@ +/* + * Copyright (C) 2005-2007 Jiri Slaby + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * You need an userspace library to cooperate with this driver. 
It (and other + * info) may be obtained here: + * http://www.fi.muni.cz/~xslaby/phantom.html + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define PHANTOM_VERSION "n0.9.5" + +#define PHANTOM_MAX_MINORS 8 + +#define PHN_IRQCTL 0x4c /* irq control in caddr space */ + +#define PHB_RUNNING 1 + +static struct class *phantom_class; +static int phantom_major; + +struct phantom_device { + unsigned int opened; + void __iomem *caddr; + u32 __iomem *iaddr; + u32 __iomem *oaddr; + unsigned long status; + atomic_t counter; + + wait_queue_head_t wait; + struct cdev cdev; + + struct mutex open_lock; +}; + +static unsigned char phantom_devices[PHANTOM_MAX_MINORS]; + +static int phantom_status(struct phantom_device *dev, unsigned long newstat) +{ + pr_debug("phantom_status %lx %lx\n", dev->status, newstat); + + if (!(dev->status & PHB_RUNNING) && (newstat & PHB_RUNNING)) { + atomic_set(&dev->counter, 0); + iowrite32(PHN_CTL_IRQ, dev->iaddr + PHN_CONTROL); + iowrite32(0x43, dev->caddr + PHN_IRQCTL); + } else if ((dev->status & PHB_RUNNING) && !(newstat & PHB_RUNNING)) + iowrite32(0, dev->caddr + PHN_IRQCTL); + + dev->status = newstat; + + return 0; +} + +/* + * File ops + */ + +static int phantom_ioctl(struct inode *inode, struct file *file, u_int cmd, + u_long arg) +{ + struct phantom_device *dev = file->private_data; + struct phm_regs rs; + struct phm_reg r; + void __user *argp = (void __user *)arg; + unsigned int i; + + if (_IOC_TYPE(cmd) != PH_IOC_MAGIC || + _IOC_NR(cmd) > PH_IOC_MAXNR) + return -ENOTTY; + + switch (cmd) { + case PHN_SET_REG: + if (copy_from_user(&r, argp, sizeof(r))) + return -EFAULT; + + if (r.reg > 7) + return -EINVAL; + + if (r.reg == PHN_CONTROL && (r.value & PHN_CTL_IRQ) && + phantom_status(dev, dev->status | PHB_RUNNING)) + return -ENODEV; + + pr_debug("phantom: writing %x to %u\n", r.value, r.reg); + iowrite32(r.value, dev->iaddr + r.reg); + + if (r.reg == PHN_CONTROL && !(r.value & PHN_CTL_IRQ)) + phantom_status(dev, dev->status & ~PHB_RUNNING); + break; + case PHN_SET_REGS: + if (copy_from_user(&rs, argp, sizeof(rs))) + return -EFAULT; + + pr_debug("phantom: SRS %u regs %x\n", rs.count, rs.mask); + for (i = 0; i < min(rs.count, 8U); i++) + if ((1 << i) & rs.mask) + iowrite32(rs.values[i], dev->oaddr + i); + break; + case PHN_GET_REG: + if (copy_from_user(&r, argp, sizeof(r))) + return -EFAULT; + + if (r.reg > 7) + return -EINVAL; + + r.value = ioread32(dev->iaddr + r.reg); + + if (copy_to_user(argp, &r, sizeof(r))) + return -EFAULT; + break; + case PHN_GET_REGS: + if (copy_from_user(&rs, argp, sizeof(rs))) + return -EFAULT; + + pr_debug("phantom: GRS %u regs %x\n", rs.count, rs.mask); + for (i = 0; i < min(rs.count, 8U); i++) + if ((1 << i) & rs.mask) + rs.values[i] = ioread32(dev->iaddr + i); + + if (copy_to_user(argp, &rs, sizeof(rs))) + return -EFAULT; + break; + default: + return -ENOTTY; + } + + return 0; +} + +static int phantom_open(struct inode *inode, struct file *file) +{ + struct phantom_device *dev = container_of(inode->i_cdev, + struct phantom_device, cdev); + + nonseekable_open(inode, file); + + if (mutex_lock_interruptible(&dev->open_lock)) + return -ERESTARTSYS; + + if (dev->opened) { + mutex_unlock(&dev->open_lock); + return -EINVAL; + } + + file->private_data = dev; + + dev->opened++; + mutex_unlock(&dev->open_lock); + + return 0; +} + +static int phantom_release(struct inode *inode, struct file *file) +{ + struct phantom_device *dev = file->private_data; + + 
mutex_lock(&dev->open_lock); + + dev->opened = 0; + phantom_status(dev, dev->status & ~PHB_RUNNING); + + mutex_unlock(&dev->open_lock); + + return 0; +} + +static unsigned int phantom_poll(struct file *file, poll_table *wait) +{ + struct phantom_device *dev = file->private_data; + unsigned int mask = 0; + + pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter)); + poll_wait(file, &dev->wait, wait); + if (atomic_read(&dev->counter)) { + mask = POLLIN | POLLRDNORM; + atomic_dec(&dev->counter); + } else if ((dev->status & PHB_RUNNING) == 0) + mask = POLLIN | POLLRDNORM | POLLERR; + pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter)); + + return mask; +} + +static struct file_operations phantom_file_ops = { + .open = phantom_open, + .release = phantom_release, + .ioctl = phantom_ioctl, + .poll = phantom_poll, +}; + +static irqreturn_t phantom_isr(int irq, void *data) +{ + struct phantom_device *dev = data; + + if (!(ioread32(dev->iaddr + PHN_CONTROL) & PHN_CTL_IRQ)) + return IRQ_NONE; + + iowrite32(0, dev->iaddr); + iowrite32(0xc0, dev->iaddr); + + atomic_inc(&dev->counter); + wake_up_interruptible(&dev->wait); + + return IRQ_HANDLED; +} + +/* + * Init and deinit driver + */ + +static unsigned int __devinit phantom_get_free(void) +{ + unsigned int i; + + for (i = 0; i < PHANTOM_MAX_MINORS; i++) + if (phantom_devices[i] == 0) + break; + + return i; +} + +static int __devinit phantom_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct phantom_device *pht; + unsigned int minor; + int retval; + + retval = pci_enable_device(pdev); + if (retval) + goto err; + + minor = phantom_get_free(); + if (minor == PHANTOM_MAX_MINORS) { + dev_err(&pdev->dev, "too many devices found!\n"); + retval = -EIO; + goto err_dis; + } + + phantom_devices[minor] = 1; + + retval = pci_request_regions(pdev, "phantom"); + if (retval) + goto err_null; + + retval = -ENOMEM; + pht = kzalloc(sizeof(*pht), GFP_KERNEL); + if (pht == NULL) { + dev_err(&pdev->dev, "unable to allocate device\n"); + goto err_reg; + } + + pht->caddr = pci_iomap(pdev, 0, 0); + if (pht->caddr == NULL) { + dev_err(&pdev->dev, "can't remap conf space\n"); + goto err_fr; + } + pht->iaddr = pci_iomap(pdev, 2, 0); + if (pht->iaddr == NULL) { + dev_err(&pdev->dev, "can't remap input space\n"); + goto err_unmc; + } + pht->oaddr = pci_iomap(pdev, 3, 0); + if (pht->oaddr == NULL) { + dev_err(&pdev->dev, "can't remap output space\n"); + goto err_unmi; + } + + mutex_init(&pht->open_lock); + init_waitqueue_head(&pht->wait); + cdev_init(&pht->cdev, &phantom_file_ops); + pht->cdev.owner = THIS_MODULE; + + iowrite32(0, pht->caddr + PHN_IRQCTL); + retval = request_irq(pdev->irq, phantom_isr, + IRQF_SHARED | IRQF_DISABLED, "phantom", pht); + if (retval) { + dev_err(&pdev->dev, "can't establish ISR\n"); + goto err_unmo; + } + + retval = cdev_add(&pht->cdev, MKDEV(phantom_major, minor), 1); + if (retval) { + dev_err(&pdev->dev, "chardev registration failed\n"); + goto err_irq; + } + + if (IS_ERR(device_create(phantom_class, &pdev->dev, MKDEV(phantom_major, + minor), "phantom%u", minor))) + dev_err(&pdev->dev, "can't create device\n"); + + pci_set_drvdata(pdev, pht); + + return 0; +err_irq: + free_irq(pdev->irq, pht); +err_unmo: + pci_iounmap(pdev, pht->oaddr); +err_unmi: + pci_iounmap(pdev, pht->iaddr); +err_unmc: + pci_iounmap(pdev, pht->caddr); +err_fr: + kfree(pht); +err_reg: + pci_release_regions(pdev); +err_null: + phantom_devices[minor] = 0; +err_dis: + pci_disable_device(pdev); +err: + return retval; +} + +static void 
__devexit phantom_remove(struct pci_dev *pdev) +{ + struct phantom_device *pht = pci_get_drvdata(pdev); + unsigned int minor = MINOR(pht->cdev.dev); + + device_destroy(phantom_class, MKDEV(phantom_major, minor)); + + cdev_del(&pht->cdev); + + iowrite32(0, pht->caddr + PHN_IRQCTL); + free_irq(pdev->irq, pht); + + pci_iounmap(pdev, pht->oaddr); + pci_iounmap(pdev, pht->iaddr); + pci_iounmap(pdev, pht->caddr); + + kfree(pht); + + pci_release_regions(pdev); + + phantom_devices[minor] = 0; + + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int phantom_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct phantom_device *dev = pci_get_drvdata(pdev); + + iowrite32(0, dev->caddr + PHN_IRQCTL); + + return 0; +} + +static int phantom_resume(struct pci_dev *pdev) +{ + struct phantom_device *dev = pci_get_drvdata(pdev); + + iowrite32(0, dev->caddr + PHN_IRQCTL); + + return 0; +} +#else +#define phantom_suspend NULL +#define phantom_resume NULL +#endif + +static struct pci_device_id phantom_pci_tbl[] __devinitdata = { + { PCI_DEVICE(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050), + .class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, phantom_pci_tbl); + +static struct pci_driver phantom_pci_driver = { + .name = "phantom", + .id_table = phantom_pci_tbl, + .probe = phantom_probe, + .remove = __devexit_p(phantom_remove), + .suspend = phantom_suspend, + .resume = phantom_resume +}; + +static ssize_t phantom_show_version(struct class *cls, char *buf) +{ + return sprintf(buf, PHANTOM_VERSION "\n"); +} + +static CLASS_ATTR(version, 0444, phantom_show_version, NULL); + +static int __init phantom_init(void) +{ + int retval; + dev_t dev; + + phantom_class = class_create(THIS_MODULE, "phantom"); + if (IS_ERR(phantom_class)) { + retval = PTR_ERR(phantom_class); + printk(KERN_ERR "phantom: can't register phantom class\n"); + goto err; + } + retval = class_create_file(phantom_class, &class_attr_version); + if (retval) { + printk(KERN_ERR "phantom: can't create sysfs version file\n"); + goto err_class; + } + + retval = alloc_chrdev_region(&dev, 0, PHANTOM_MAX_MINORS, "phantom"); + if (retval) { + printk(KERN_ERR "phantom: can't register character device\n"); + goto err_attr; + } + phantom_major = MAJOR(dev); + + retval = pci_register_driver(&phantom_pci_driver); + if (retval) { + printk(KERN_ERR "phantom: can't register pci driver\n"); + goto err_unchr; + } + + printk(KERN_INFO "Phantom Linux Driver, version " PHANTOM_VERSION ", " + "init OK\n"); + + return 0; +err_unchr: + unregister_chrdev_region(dev, PHANTOM_MAX_MINORS); +err_attr: + class_remove_file(phantom_class, &class_attr_version); +err_class: + class_destroy(phantom_class); +err: + return retval; +} + +static void __exit phantom_exit(void) +{ + pci_unregister_driver(&phantom_pci_driver); + + unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS); + + class_remove_file(phantom_class, &class_attr_version); + class_destroy(phantom_class); + + pr_debug("phantom: module successfully removed\n"); +} + +module_init(phantom_init); +module_exit(phantom_exit); + +MODULE_AUTHOR("Jiri Slaby "); +MODULE_DESCRIPTION("Sensable Phantom driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PHANTOM_VERSION); diff --git a/include/linux/Kbuild b/include/linux/Kbuild index d61983db4149..a124c32f55f3 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -121,6 +121,7 @@ header-y += pci_regs.h header-y += personality.h header-y += pfkeyv2.h header-y += pg.h +header-y += phantom.h header-y += 
pkt_cls.h header-y += pkt_sched.h header-y += posix_types.h diff --git a/include/linux/phantom.h b/include/linux/phantom.h new file mode 100644 index 000000000000..d3ebbfae6903 --- /dev/null +++ b/include/linux/phantom.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2005-2007 Jiri Slaby + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __PHANTOM_H +#define __PHANTOM_H + +#include + +/* PHN_(G/S)ET_REG param */ +struct phm_reg { + __u32 reg; + __u32 value; +}; + +/* PHN_(G/S)ET_REGS param */ +struct phm_regs { + __u32 count; + __u32 mask; + __u32 values[8]; +}; + +#define PH_IOC_MAGIC 'p' +#define PHN_GET_REG _IOWR(PH_IOC_MAGIC, 0, struct phm_reg *) +#define PHN_SET_REG _IOW (PH_IOC_MAGIC, 1, struct phm_reg *) +#define PHN_GET_REGS _IOWR(PH_IOC_MAGIC, 2, struct phm_regs *) +#define PHN_SET_REGS _IOW (PH_IOC_MAGIC, 3, struct phm_regs *) +#define PH_IOC_MAXNR 3 + +#define PHN_CONTROL 0x6 /* control byte in iaddr space */ +#define PHN_CTL_AMP 0x1 /* switch after torques change */ +#define PHN_CTL_BUT 0x2 /* is button switched */ +#define PHN_CTL_IRQ 0x10 /* is irq enabled */ + +#define PHN_ZERO_FORCE 2048 /* zero torque on motor */ + +#endif -- cgit v1.2.3 From 6d4d8c0aa255c7b4bdf0fb693ec015b56204bbb3 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 8 May 2007 00:31:49 -0700 Subject: Add taskstats.h to kbuild Add taskstats.h to include/linux/Kbuild, make headers_install would then pickup taskstats.h. This needs to be done as taskstats.h is a user interface header. Signed-off-by: Balbir Singh Signed-off-by: David Woodhouse Cc: Don Zickus Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/Kbuild b/include/linux/Kbuild index a124c32f55f3..715386142626 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -141,6 +141,7 @@ header-y += sockios.h header-y += som.h header-y += sound.h header-y += synclink.h +header-y += taskstats.h header-y += telephony.h header-y += termios.h header-y += ticable.h -- cgit v1.2.3 From acd64b737567d88a907bb09d7e982ac5e6ad6a7b Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 8 May 2007 00:31:51 -0700 Subject: hide spinlock in linux/quota.h behind __KERNEL__ Signed-off-by: Mike Frysinger Acked-by: Jan Kara Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/quota.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/quota.h b/include/linux/quota.h index 77db80a953d6..62439828395e 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -44,8 +44,6 @@ typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ typedef __u64 qsize_t; /* Type in which we store sizes */ -extern spinlock_t dq_data_lock; - /* Size of blocks in which are counted size limits */ #define QUOTABLOCK_BITS 10 #define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) @@ -139,6 +137,8 @@ struct if_dqinfo { #include #include +extern spinlock_t dq_data_lock; + /* Maximal numbers of writes for quota operation (insert/delete/update) * (over VFS all formats) */ #define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC) -- cgit v1.2.3 From ce0be1273d1473a5a7b57bf0b4995b40c22d6b54 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov 
Date: Tue, 8 May 2007 00:31:55 -0700 Subject: clockchips.h: kernel-doc fix Fix misnamed fields of 'struct clock_event_device' in the kernel-doc comment. Convert the acronyms to uppercase, while at it... Signed-off-by: Sergei Shtylyov Acked-by: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/clockchips.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 4ea7e7bcfafe..8486e78f7335 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -54,17 +54,17 @@ enum clock_event_nofitiers { /** * struct clock_event_device - clock event device descriptor * @name: ptr to clock event name - * @hints: usage hints + * @features: features * @max_delta_ns: maximum delta value in ns * @min_delta_ns: minimum delta value in ns * @mult: nanosecond to cycles multiplier * @shift: nanoseconds to cycles divisor (power of two) * @rating: variable to rate clock event devices - * @irq: irq number (only for non cpu local devices) - * @cpumask: cpumask to indicate for which cpus this device works - * @set_next_event: set next event + * @irq: IRQ number (only for non CPU local devices) + * @cpumask: cpumask to indicate for which CPUs this device works + * @set_next_event: set next event function * @set_mode: set mode function - * @evthandler: Assigned by the framework to be called by the low + * @event_handler: Assigned by the framework to be called by the low * level handler of the event source * @broadcast: function to broadcast events * @list: list head for the management code -- cgit v1.2.3 From 814a8d50eb1d88cedcef97567be53ee0d4512631 Mon Sep 17 00:00:00 2001 From: Andrea Paterniani Date: Tue, 8 May 2007 00:32:15 -0700 Subject: /dev/spidevB.C interface Add a filesystem API for stack. The initial version of this interface is purely synchronous. dbrownell@users.sourceforge.net: Cleaned up, bugfixed; much simplified; added preliminary documentation. Works with mdev given CONFIG_SYSFS_DEPRECATED; and presumably udev. Updated SPI_IOC_MESSAGE ioctl to full spi_message semantics, supporting groups of one or more transfers (each of which may be full duplex if desired). This is marked as EXPERIMENTAL with an explicit disclaimer that the API (notably the ioctls) is subject to change. Signed-off-by: Andrea Paterniani Signed-off-by: David Brownell Cc: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/spi/spidev | 307 ++++++++++++++++++++++++ drivers/spi/Kconfig | 9 + drivers/spi/Makefile | 1 + drivers/spi/spidev.c | 584 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/Kbuild | 1 + include/linux/spi/Kbuild | 1 + include/linux/spi/spidev.h | 124 ++++++++++ 7 files changed, 1027 insertions(+) create mode 100644 Documentation/spi/spidev create mode 100644 drivers/spi/spidev.c create mode 100644 include/linux/spi/Kbuild create mode 100644 include/linux/spi/spidev.h (limited to 'include/linux') diff --git a/Documentation/spi/spidev b/Documentation/spi/spidev new file mode 100644 index 000000000000..5c8e1b988a08 --- /dev/null +++ b/Documentation/spi/spidev @@ -0,0 +1,307 @@ +SPI devices have a limited userspace API, supporting basic half-duplex +read() and write() access to SPI slave devices. Using ioctl() requests, +full duplex transfers and device I/O configuration are also available. 
+ + #include + #include + #include + #include + #include + +Some reasons you might want to use this programming interface include: + + * Prototyping in an environment that's not crash-prone; stray pointers + in userspace won't normally bring down any Linux system. + + * Developing simple protocols used to talk to microcontrollers acting + as SPI slaves, which you may need to change quite often. + +Of course there are drivers that can never be written in userspace, because +they need to access kernel interfaces (such as IRQ handlers or other layers +of the driver stack) that are not accessible to userspace. + + +DEVICE CREATION, DRIVER BINDING +=============================== +The simplest way to arrange to use this driver is to just list it in the +spi_board_info for a device as the driver it should use: the "modalias" +entry is "spidev", matching the name of the driver exposing this API. +Set up the other device characteristics (bits per word, SPI clocking, +chipselect polarity, etc) as usual, so you won't always need to override +them later. + +(Sysfs also supports userspace driven binding/unbinding of drivers to +devices. That mechanism might be supported here in the future.) + +When you do that, the sysfs node for the SPI device will include a child +device node with a "dev" attribute that will be understood by udev or mdev. +(Larger systems will have "udev". Smaller ones may configure "mdev" into +busybox; it's less featureful, but often enough.) For a SPI device with +chipselect C on bus B, you should see: + + /dev/spidevB.C ... character special device, major number 153 with + a dynamically chosen minor device number. This is the node + that userspace programs will open, created by "udev" or "mdev". + + /sys/devices/.../spiB.C ... as usual, the SPI device node will + be a child of its SPI master controller. + + /sys/class/spidev/spidevB.C ... created when the "spidev" driver + binds to that device. (Directory or symlink, based on whether + or not you enabled the "deprecated sysfs files" Kconfig option.) + +Do not try to manage the /dev character device special file nodes by hand. +That's error prone, and you'd need to pay careful attention to system +security issues; udev/mdev should already be configured securely. + +If you unbind the "spidev" driver from that device, those two "spidev" nodes +(in sysfs and in /dev) should automatically be removed (respectively by the +kernel and by udev/mdev). You can unbind by removing the "spidev" driver +module, which will affect all devices using this driver. You can also unbind +by having kernel code remove the SPI device, probably by removing the driver +for its SPI controller (so its spi_master vanishes). + +Since this is a standard Linux device driver -- even though it just happens +to expose a low level API to userspace -- it can be associated with any number +of devices at a time. Just provide one spi_board_info record for each such +SPI device, and you'll get a /dev device node for each device. + + +BASIC CHARACTER DEVICE API +========================== +Normal open() and close() operations on /dev/spidevB.D files work as you +would expect. + +Standard read() and write() operations are obviously only half-duplex, and +the chipselect is deactivated between those operations. Full-duplex access, +and composite operation without chipselect de-activation, is available using +the SPI_IOC_MESSAGE(N) request. 
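+
+As an illustration only, a minimal half-duplex exchange might look like the
+fragment below (the device node name and the 0x9f request byte are just
+placeholders, <stdio.h> is assumed in addition to the headers listed above,
+and whether such a write-then-read sequence is meaningful at all depends on
+the protocol of the slave chip):
+
+	int fd = open("/dev/spidevB.C", O_RDWR);
+	unsigned char cmd = 0x9f;		/* example request byte */
+	unsigned char resp[3];
+
+	if (fd < 0) {
+		perror("open");
+	} else {
+		/* chipselect is deactivated between these two calls */
+		if (write(fd, &cmd, sizeof cmd) != sizeof cmd ||
+		    read(fd, resp, sizeof resp) != sizeof resp)
+			perror("spidev I/O");
+		close(fd);
+	}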
+ +Several ioctl() requests let your driver read or override the device's current +settings for data transfer parameters: + + SPI_IOC_RD_MODE, SPI_IOC_WR_MODE ... pass a pointer to a byte which will + return (RD) or assign (WR) the SPI transfer mode. Use the constants + SPI_MODE_0..SPI_MODE_3; or if you prefer you can combine SPI_CPOL + (clock polarity, idle high iff this is set) or SPI_CPHA (clock phase, + sample on trailing edge iff this is set) flags. + + SPI_IOC_RD_LSB_FIRST, SPI_IOC_WR_LSB_FIRST ... pass a pointer to a byte + which will return (RD) or assign (WR) the bit justification used to + transfer SPI words. Zero indicates MSB-first; other values indicate + the less common LSB-first encoding. In both cases the specified value + is right-justified in each word, so that unused (TX) or undefined (RX) + bits are in the MSBs. + + SPI_IOC_RD_BITS_PER_WORD, SPI_IOC_WR_BITS_PER_WORD ... pass a pointer to + a byte which will return (RD) or assign (WR) the number of bits in + each SPI transfer word. The value zero signifies eight bits. + + SPI_IOC_RD_MAX_SPEED_HZ, SPI_IOC_WR_MAX_SPEED_HZ ... pass a pointer to a + u32 which will return (RD) or assign (WR) the maximum SPI transfer + speed, in Hz. The controller can't necessarily assign that specific + clock speed. + +NOTES: + + - At this time there is no async I/O support; everything is purely + synchronous. + + - There's currently no way to report the actual bit rate used to + shift data to/from a given device. + + - From userspace, you can't currently change the chip select polarity; + that could corrupt transfers to other devices sharing the SPI bus. + Each SPI device is deselected when it's not in active use, allowing + other drivers to talk to other devices. + + - There's a limit on the number of bytes each I/O request can transfer + to the SPI device. It defaults to one page, but that can be changed + using a module parameter. + + - Because SPI has no low-level transfer acknowledgement, you usually + won't see any I/O errors when talking to a non-existent device. + + +FULL DUPLEX CHARACTER DEVICE API +================================ + +See the sample program below for one example showing the use of the full +duplex programming interface. (Although it doesn't perform a full duplex +transfer.) The model is the same as that used in the kernel spi_sync() +request; the individual transfers offer the same capabilities as are +available to kernel drivers (except that it's not asynchronous). + +The example shows one half-duplex RPC-style request and response message. +These requests commonly require that the chip not be deselected between +the request and response. Several such requests could be chained into +a single kernel request, even allowing the chip to be deselected after +each response. (Other protocol options include changing the word size +and bitrate for each transfer segment.) + +To make a full duplex request, provide both rx_buf and tx_buf for the +same transfer. It's even OK if those are the same buffer. 
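+
+For instance, a single transfer that shifts four bytes out of tx[] while
+simultaneously capturing the slave's reply into rx[] might be set up as
+below (a sketch only: "fd" is assumed to be an open spidev node, the data
+bytes are arbitrary, and error handling is as in the sample program that
+follows):
+
+	struct spi_ioc_transfer xfer;
+	unsigned char tx[4] = { 0x01, 0x02, 0x03, 0x04 };
+	unsigned char rx[4];
+	int status;
+
+	memset(&xfer, 0, sizeof xfer);
+	xfer.tx_buf = (unsigned long) tx;	/* full duplex: both buffers set */
+	xfer.rx_buf = (unsigned long) rx;
+	xfer.len = sizeof tx;
+
+	status = ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
+	if (status < 0)
+		perror("SPI_IOC_MESSAGE");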
+ + +SAMPLE PROGRAM +============== + +-------------------------------- CUT HERE +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + + +static int verbose; + +static void do_read(int fd, int len) +{ + unsigned char buf[32], *bp; + int status; + + /* read at least 2 bytes, no more than 32 */ + if (len < 2) + len = 2; + else if (len > sizeof(buf)) + len = sizeof(buf); + memset(buf, 0, sizeof buf); + + status = read(fd, buf, len); + if (status < 0) { + perror("read"); + return; + } + if (status != len) { + fprintf(stderr, "short read\n"); + return; + } + + printf("read(%2d, %2d): %02x %02x,", len, status, + buf[0], buf[1]); + status -= 2; + bp = buf + 2; + while (status-- > 0) + printf(" %02x", *bp++); + printf("\n"); +} + +static void do_msg(int fd, int len) +{ + struct spi_ioc_transfer xfer[2]; + unsigned char buf[32], *bp; + int status; + + memset(xfer, 0, sizeof xfer); + memset(buf, 0, sizeof buf); + + if (len > sizeof buf) + len = sizeof buf; + + buf[0] = 0xaa; + xfer[0].tx_buf = (__u64) buf; + xfer[0].len = 1; + + xfer[1].rx_buf = (__u64) buf; + xfer[1].len = len; + + status = ioctl(fd, SPI_IOC_MESSAGE(2), xfer); + if (status < 0) { + perror("SPI_IOC_MESSAGE"); + return; + } + + printf("response(%2d, %2d): ", len, status); + for (bp = buf; len; len--) + printf(" %02x", *bp++); + printf("\n"); +} + +static void dumpstat(const char *name, int fd) +{ + __u8 mode, lsb, bits; + __u32 speed; + + if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0) { + perror("SPI rd_mode"); + return; + } + if (ioctl(fd, SPI_IOC_RD_LSB_FIRST, &lsb) < 0) { + perror("SPI rd_lsb_fist"); + return; + } + if (ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits) < 0) { + perror("SPI bits_per_word"); + return; + } + if (ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed) < 0) { + perror("SPI max_speed_hz"); + return; + } + + printf("%s: spi mode %d, %d bits %sper word, %d Hz max\n", + name, mode, bits, lsb ? "(lsb first) " : "", speed); +} + +int main(int argc, char **argv) +{ + int c; + int readcount = 0; + int msglen = 0; + int fd; + const char *name; + + while ((c = getopt(argc, argv, "hm:r:v")) != EOF) { + switch (c) { + case 'm': + msglen = atoi(optarg); + if (msglen < 0) + goto usage; + continue; + case 'r': + readcount = atoi(optarg); + if (readcount < 0) + goto usage; + continue; + case 'v': + verbose++; + continue; + case 'h': + case '?': +usage: + fprintf(stderr, + "usage: %s [-h] [-m N] [-r N] /dev/spidevB.D\n", + argv[0]); + return 1; + } + } + + if ((optind + 1) != argc) + goto usage; + name = argv[optind]; + + fd = open(name, O_RDWR); + if (fd < 0) { + perror("open"); + return 1; + } + + dumpstat(name, fd); + + if (msglen) + do_msg(fd, msglen); + + if (readcount) + do_read(fd, readcount); + + close(fd); + return 0; +} diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 4a012d9acbff..584ed9f74700 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -159,6 +159,15 @@ config SPI_AT25 This driver can also be built as a module. If so, the module will be called at25. +config SPI_SPIDEV + tristate "User mode SPI device driver support" + depends on SPI_MASTER && EXPERIMENTAL + help + This supports user mode SPI protocol drivers. + + Note that this application programming interface is EXPERIMENTAL + and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes. 
+ # # Add new SPI protocol masters in alphabetical order above this line # diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index a95ade857a2f..4cc5e99dd59a 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o # SPI protocol drivers (device/link on bus) obj-$(CONFIG_SPI_AT25) += at25.o +obj-$(CONFIG_SPI_SPIDEV) += spidev.o # ... add above this line ... # SPI slave controller drivers (upstream link) diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c new file mode 100644 index 000000000000..c0a6dce800a3 --- /dev/null +++ b/drivers/spi/spidev.c @@ -0,0 +1,584 @@ +/* + * spidev.c -- simple synchronous userspace interface to SPI devices + * + * Copyright (C) 2006 SWAPP + * Andrea Paterniani + * Copyright (C) 2007 David Brownell (simplification, cleanup) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + + +/* + * This supports acccess to SPI devices using normal userspace I/O calls. + * Note that while traditional UNIX/POSIX I/O semantics are half duplex, + * and often mask message boundaries, full SPI support requires full duplex + * transfers. There are several kinds of of internal message boundaries to + * handle chipselect management and other protocol options. + * + * SPI has a character major number assigned. We allocate minor numbers + * dynamically using a bitmask. You must use hotplug tools, such as udev + * (or mdev with busybox) to create and destroy the /dev/spidevB.C device + * nodes, since there is no fixed association of minor numbers with any + * particular SPI bus or device. + */ +#define SPIDEV_MAJOR 153 /* assigned */ +#define N_SPI_MINORS 32 /* ... 
up to 256 */ + +static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG]; + + +/* Bit masks for spi_device.mode management */ +#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL) + + +struct spidev_data { + struct device dev; + struct spi_device *spi; + struct list_head device_entry; + + struct mutex buf_lock; + unsigned users; + u8 *buffer; +}; + +static LIST_HEAD(device_list); +static DEFINE_MUTEX(device_list_lock); + +static unsigned bufsiz = 4096; +module_param(bufsiz, uint, S_IRUGO); +MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message"); + +/*-------------------------------------------------------------------------*/ + +/* Read-only message with current device setup */ +static ssize_t +spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) +{ + struct spidev_data *spidev; + struct spi_device *spi; + ssize_t status = 0; + + /* chipselect only toggles at start or end of operation */ + if (count > bufsiz) + return -EMSGSIZE; + + spidev = filp->private_data; + spi = spidev->spi; + + mutex_lock(&spidev->buf_lock); + status = spi_read(spi, spidev->buffer, count); + if (status == 0) { + unsigned long missing; + + missing = copy_to_user(buf, spidev->buffer, count); + if (count && missing == count) + status = -EFAULT; + else + status = count - missing; + } + mutex_unlock(&spidev->buf_lock); + + return status; +} + +/* Write-only message with current device setup */ +static ssize_t +spidev_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct spidev_data *spidev; + struct spi_device *spi; + ssize_t status = 0; + unsigned long missing; + + /* chipselect only toggles at start or end of operation */ + if (count > bufsiz) + return -EMSGSIZE; + + spidev = filp->private_data; + spi = spidev->spi; + + mutex_lock(&spidev->buf_lock); + missing = copy_from_user(spidev->buffer, buf, count); + if (missing == 0) { + status = spi_write(spi, spidev->buffer, count); + if (status == 0) + status = count; + } else + status = -EFAULT; + mutex_unlock(&spidev->buf_lock); + + return status; +} + +static int spidev_message(struct spidev_data *spidev, + struct spi_ioc_transfer *u_xfers, unsigned n_xfers) +{ + struct spi_message msg; + struct spi_transfer *k_xfers; + struct spi_transfer *k_tmp; + struct spi_ioc_transfer *u_tmp; + struct spi_device *spi = spidev->spi; + unsigned n, total; + u8 *buf; + int status = -EFAULT; + + spi_message_init(&msg); + k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL); + if (k_xfers == NULL) + return -ENOMEM; + + /* Construct spi_message, copying any tx data to bounce buffer. + * We walk the array of user-provided transfers, using each one + * to initialize a kernel version of the same transfer. 
+ */ + mutex_lock(&spidev->buf_lock); + buf = spidev->buffer; + total = 0; + for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; + n; + n--, k_tmp++, u_tmp++) { + k_tmp->len = u_tmp->len; + + if (u_tmp->rx_buf) { + k_tmp->rx_buf = buf; + if (!access_ok(VERIFY_WRITE, u_tmp->rx_buf, u_tmp->len)) + goto done; + } + if (u_tmp->tx_buf) { + k_tmp->tx_buf = buf; + if (copy_from_user(buf, (const u8 __user *)u_tmp->tx_buf, + u_tmp->len)) + goto done; + } + + total += k_tmp->len; + if (total > bufsiz) { + status = -EMSGSIZE; + goto done; + } + buf += k_tmp->len; + + k_tmp->cs_change = !!u_tmp->cs_change; + k_tmp->bits_per_word = u_tmp->bits_per_word; + k_tmp->delay_usecs = u_tmp->delay_usecs; + k_tmp->speed_hz = u_tmp->speed_hz; +#ifdef VERBOSE + dev_dbg(&spi->dev, + " xfer len %zd %s%s%s%dbits %u usec %uHz\n", + u_tmp->len, + u_tmp->rx_buf ? "rx " : "", + u_tmp->tx_buf ? "tx " : "", + u_tmp->cs_change ? "cs " : "", + u_tmp->bits_per_word ? : spi->bits_per_word, + u_tmp->delay_usecs, + u_tmp->speed_hz ? : spi->max_speed_hz); +#endif + spi_message_add_tail(k_tmp, &msg); + } + + status = spi_sync(spi, &msg); + if (status < 0) + goto done; + + /* copy any rx data out of bounce buffer */ + buf = spidev->buffer; + for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { + if (u_tmp->rx_buf) { + if (__copy_to_user((u8 __user *)u_tmp->rx_buf, buf, + u_tmp->len)) { + status = -EFAULT; + goto done; + } + } + buf += u_tmp->len; + } + status = total; + +done: + mutex_unlock(&spidev->buf_lock); + kfree(k_xfers); + return status; +} + +static int +spidev_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + int retval = 0; + struct spidev_data *spidev; + struct spi_device *spi; + u32 tmp; + unsigned n_ioc; + struct spi_ioc_transfer *ioc; + + /* Check type and command number */ + if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC) + return -ENOTTY; + + /* Check access direction once here; don't repeat below. + * IOC_DIR is from the user perspective, while access_ok is + * from the kernel perspective; so they look reversed. + */ + if (_IOC_DIR(cmd) & _IOC_READ) + err = !access_ok(VERIFY_WRITE, + (void __user *)arg, _IOC_SIZE(cmd)); + if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE) + err = !access_ok(VERIFY_READ, + (void __user *)arg, _IOC_SIZE(cmd)); + if (err) + return -EFAULT; + + spidev = filp->private_data; + spi = spidev->spi; + + switch (cmd) { + /* read requests */ + case SPI_IOC_RD_MODE: + retval = __put_user(spi->mode & SPI_MODE_MASK, + (__u8 __user *)arg); + break; + case SPI_IOC_RD_LSB_FIRST: + retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 
1 : 0, + (__u8 __user *)arg); + break; + case SPI_IOC_RD_BITS_PER_WORD: + retval = __put_user(spi->bits_per_word, (__u8 __user *)arg); + break; + case SPI_IOC_RD_MAX_SPEED_HZ: + retval = __put_user(spi->max_speed_hz, (__u32 __user *)arg); + break; + + /* write requests */ + case SPI_IOC_WR_MODE: + retval = __get_user(tmp, (u8 __user *)arg); + if (retval == 0) { + u8 save = spi->mode; + + if (tmp & ~SPI_MODE_MASK) { + retval = -EINVAL; + break; + } + + tmp |= spi->mode & ~SPI_MODE_MASK; + spi->mode = (u8)tmp; + retval = spi_setup(spi); + if (retval < 0) + spi->mode = save; + else + dev_dbg(&spi->dev, "spi mode %02x\n", tmp); + } + break; + case SPI_IOC_WR_LSB_FIRST: + retval = __get_user(tmp, (__u8 __user *)arg); + if (retval == 0) { + u8 save = spi->mode; + + if (tmp) + spi->mode |= SPI_LSB_FIRST; + else + spi->mode &= ~SPI_LSB_FIRST; + retval = spi_setup(spi); + if (retval < 0) + spi->mode = save; + else + dev_dbg(&spi->dev, "%csb first\n", + tmp ? 'l' : 'm'); + } + break; + case SPI_IOC_WR_BITS_PER_WORD: + retval = __get_user(tmp, (__u8 __user *)arg); + if (retval == 0) { + u8 save = spi->bits_per_word; + + spi->bits_per_word = tmp; + retval = spi_setup(spi); + if (retval < 0) + spi->bits_per_word = save; + else + dev_dbg(&spi->dev, "%d bits per word\n", tmp); + } + break; + case SPI_IOC_WR_MAX_SPEED_HZ: + retval = __get_user(tmp, (__u32 __user *)arg); + if (retval == 0) { + u32 save = spi->max_speed_hz; + + spi->max_speed_hz = tmp; + retval = spi_setup(spi); + if (retval < 0) + spi->max_speed_hz = save; + else + dev_dbg(&spi->dev, "%d Hz (max)\n", tmp); + } + break; + + default: + /* segmented and/or full-duplex I/O request */ + if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) + || _IOC_DIR(cmd) != _IOC_WRITE) + return -ENOTTY; + + tmp = _IOC_SIZE(cmd); + if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) { + retval = -EINVAL; + break; + } + n_ioc = tmp / sizeof(struct spi_ioc_transfer); + if (n_ioc == 0) + break; + + /* copy into scratch area */ + ioc = kmalloc(tmp, GFP_KERNEL); + if (!ioc) { + retval = -ENOMEM; + break; + } + if (__copy_from_user(ioc, (void __user *)arg, tmp)) { + retval = -EFAULT; + break; + } + + /* translate to spi_message, execute */ + retval = spidev_message(spidev, ioc, n_ioc); + kfree(ioc); + break; + } + return retval; +} + +static int spidev_open(struct inode *inode, struct file *filp) +{ + struct spidev_data *spidev; + int status = -ENXIO; + + mutex_lock(&device_list_lock); + + list_for_each_entry(spidev, &device_list, device_entry) { + if (spidev->dev.devt == inode->i_rdev) { + status = 0; + break; + } + } + if (status == 0) { + if (!spidev->buffer) { + spidev->buffer = kmalloc(bufsiz, GFP_KERNEL); + if (!spidev->buffer) { + dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); + status = -ENOMEM; + } + } + if (status == 0) { + spidev->users++; + filp->private_data = spidev; + nonseekable_open(inode, filp); + } + } else + pr_debug("spidev: nothing for minor %d\n", iminor(inode)); + + mutex_unlock(&device_list_lock); + return status; +} + +static int spidev_release(struct inode *inode, struct file *filp) +{ + struct spidev_data *spidev; + int status = 0; + + mutex_lock(&device_list_lock); + spidev = filp->private_data; + filp->private_data = NULL; + spidev->users--; + if (!spidev->users) { + kfree(spidev->buffer); + spidev->buffer = NULL; + } + mutex_unlock(&device_list_lock); + + return status; +} + +static struct file_operations spidev_fops = { + .owner = THIS_MODULE, + /* REVISIT switch to aio primitives, so that userspace + * gets more complete API coverage. 
It'll simplify things + * too, except for the locking. + */ + .write = spidev_write, + .read = spidev_read, + .ioctl = spidev_ioctl, + .open = spidev_open, + .release = spidev_release, +}; + +/*-------------------------------------------------------------------------*/ + +/* The main reason to have this class is to make mdev/udev create the + * /dev/spidevB.C character device nodes exposing our userspace API. + * It also simplifies memory management. + */ + +static void spidev_classdev_release(struct device *dev) +{ + struct spidev_data *spidev; + + spidev = container_of(dev, struct spidev_data, dev); + kfree(spidev); +} + +static struct class spidev_class = { + .name = "spidev", + .owner = THIS_MODULE, + .dev_release = spidev_classdev_release, +}; + +/*-------------------------------------------------------------------------*/ + +static int spidev_probe(struct spi_device *spi) +{ + struct spidev_data *spidev; + int status; + unsigned long minor; + + /* Allocate driver data */ + spidev = kzalloc(sizeof(*spidev), GFP_KERNEL); + if (!spidev) + return -ENOMEM; + + /* Initialize the driver data */ + spidev->spi = spi; + mutex_init(&spidev->buf_lock); + + INIT_LIST_HEAD(&spidev->device_entry); + + /* If we can allocate a minor number, hook up this device. + * Reusing minors is fine so long as udev or mdev is working. + */ + mutex_lock(&device_list_lock); + minor = find_first_zero_bit(minors, ARRAY_SIZE(minors)); + if (minor < N_SPI_MINORS) { + spidev->dev.parent = &spi->dev; + spidev->dev.class = &spidev_class; + spidev->dev.devt = MKDEV(SPIDEV_MAJOR, minor); + snprintf(spidev->dev.bus_id, sizeof spidev->dev.bus_id, + "spidev%d.%d", + spi->master->bus_num, spi->chip_select); + status = device_register(&spidev->dev); + } else { + dev_dbg(&spi->dev, "no minor number available!\n"); + status = -ENODEV; + } + if (status == 0) { + set_bit(minor, minors); + dev_set_drvdata(&spi->dev, spidev); + list_add(&spidev->device_entry, &device_list); + } + mutex_unlock(&device_list_lock); + + if (status != 0) + kfree(spidev); + + return status; +} + +static int spidev_remove(struct spi_device *spi) +{ + struct spidev_data *spidev = dev_get_drvdata(&spi->dev); + + mutex_lock(&device_list_lock); + + list_del(&spidev->device_entry); + dev_set_drvdata(&spi->dev, NULL); + clear_bit(MINOR(spidev->dev.devt), minors); + device_unregister(&spidev->dev); + + mutex_unlock(&device_list_lock); + + return 0; +} + +static struct spi_driver spidev_spi = { + .driver = { + .name = "spidev", + .owner = THIS_MODULE, + }, + .probe = spidev_probe, + .remove = __devexit_p(spidev_remove), + + /* NOTE: suspend/resume methods are not necessary here. + * We don't do anything except pass the requests to/from + * the underlying controller. The refrigerator handles + * most issues; the controller driver handles the rest. + */ +}; + +/*-------------------------------------------------------------------------*/ + +static int __init spidev_init(void) +{ + int status; + + /* Claim our 256 reserved device numbers. Then register a class + * that will key udev/mdev to add/remove /dev nodes. Last, register + * the driver which manages those device numbers. 
+ */ + BUILD_BUG_ON(N_SPI_MINORS > 256); + status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops); + if (status < 0) + return status; + + status = class_register(&spidev_class); + if (status < 0) { + unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); + return status; + } + + status = spi_register_driver(&spidev_spi); + if (status < 0) { + class_unregister(&spidev_class); + unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); + } + return status; +} +module_init(spidev_init); + +static void __exit spidev_exit(void) +{ + spi_unregister_driver(&spidev_spi); + class_unregister(&spidev_class); + unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); +} +module_exit(spidev_exit); + +MODULE_AUTHOR("Andrea Paterniani, "); +MODULE_DESCRIPTION("User mode SPI device interface"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 715386142626..ccc6fc157064 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -4,6 +4,7 @@ header-y += hdlc/ header-y += isdn/ header-y += nfsd/ header-y += raid/ +header-y += spi/ header-y += sunrpc/ header-y += tc_act/ header-y += netfilter/ diff --git a/include/linux/spi/Kbuild b/include/linux/spi/Kbuild new file mode 100644 index 000000000000..d375a082986e --- /dev/null +++ b/include/linux/spi/Kbuild @@ -0,0 +1 @@ +header-y += spidev.h diff --git a/include/linux/spi/spidev.h b/include/linux/spi/spidev.h new file mode 100644 index 000000000000..7d700be57490 --- /dev/null +++ b/include/linux/spi/spidev.h @@ -0,0 +1,124 @@ +/* + * include/linux/spi/spidev.h + * + * Copyright (C) 2006 SWAPP + * Andrea Paterniani + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef SPIDEV_H +#define SPIDEV_H + + +/* User space versions of kernel symbols for SPI clocking modes, + * matching + */ + +#define SPI_CPHA 0x01 +#define SPI_CPOL 0x02 + +#define SPI_MODE_0 (0|0) +#define SPI_MODE_1 (0|SPI_CPHA) +#define SPI_MODE_2 (SPI_CPOL|0) +#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) + + +/*---------------------------------------------------------------------------*/ + +/* IOCTL commands */ + +#define SPI_IOC_MAGIC 'k' + +/** + * struct spi_ioc_transfer - describes a single SPI transfer + * @tx_buf: Holds pointer to userspace buffer with transmit data, or null. + * If no data is provided, zeroes are shifted out. + * @rx_buf: Holds pointer to userspace buffer for receive data, or null. + * @len: Length of tx and rx buffers, in bytes. + * @speed_hz: Temporary override of the device's bitrate. + * @bits_per_word: Temporary override of the device's wordsize. + * @delay_usecs: If nonzero, how long to delay after the last bit transfer + * before optionally deselecting the device before the next transfer. + * @cs_change: True to deselect device before starting the next transfer. 
+ * + * This structure is mapped directly to the kernel spi_transfer structure; + * the fields have the same meanings, except of course that the pointers + * are in a different address space (and may be of different sizes in some + * cases, such as 32-bit i386 userspace over a 64-bit x86_64 kernel). + * Zero-initialize the structure, including currently unused fields, to + * accomodate potential future updates. + * + * SPI_IOC_MESSAGE gives userspace the equivalent of kernel spi_sync(). + * Pass it an array of related transfers, they'll execute together. + * Each transfer may be half duplex (either direction) or full duplex. + * + * struct spi_ioc_transfer mesg[4]; + * ... + * status = ioctl(fd, SPI_IOC_MESSAGE(4), mesg); + * + * So for example one transfer might send a nine bit command (right aligned + * in a 16-bit word), the next could read a block of 8-bit data before + * terminating that command by temporarily deselecting the chip; the next + * could send a different nine bit command (re-selecting the chip), and the + * last transfer might write some register values. + */ +struct spi_ioc_transfer { + __u64 tx_buf; + __u64 rx_buf; + + __u32 len; + __u32 speed_hz; + + __u16 delay_usecs; + __u8 bits_per_word; + __u8 cs_change; + __u32 pad; + + /* If the contents of 'struct spi_ioc_transfer' ever change + * incompatibly, then the ioctl number (currently 0) must change; + * ioctls with constant size fields get a bit more in the way of + * error checking than ones (like this) where that field varies. + * + * NOTE: struct layout is the same in 64bit and 32bit userspace. + */ +}; + +/* not all platforms use or _IOC_TYPECHECK() ... */ +#define SPI_MSGSIZE(N) \ + ((((N)*(sizeof (struct spi_ioc_transfer))) < (1 << _IOC_SIZEBITS)) \ + ? ((N)*(sizeof (struct spi_ioc_transfer))) : 0) +#define SPI_IOC_MESSAGE(N) _IOW(SPI_IOC_MAGIC, 0, char[SPI_MSGSIZE(N)]) + + +/* Read / Write of SPI mode (SPI_MODE_0..SPI_MODE_3) */ +#define SPI_IOC_RD_MODE _IOR(SPI_IOC_MAGIC, 1, __u8) +#define SPI_IOC_WR_MODE _IOW(SPI_IOC_MAGIC, 1, __u8) + +/* Read / Write SPI bit justification */ +#define SPI_IOC_RD_LSB_FIRST _IOR(SPI_IOC_MAGIC, 2, __u8) +#define SPI_IOC_WR_LSB_FIRST _IOW(SPI_IOC_MAGIC, 2, __u8) + +/* Read / Write SPI device word length (1..N) */ +#define SPI_IOC_RD_BITS_PER_WORD _IOR(SPI_IOC_MAGIC, 3, __u8) +#define SPI_IOC_WR_BITS_PER_WORD _IOW(SPI_IOC_MAGIC, 3, __u8) + +/* Read / Write SPI device default max speed hz */ +#define SPI_IOC_RD_MAX_SPEED_HZ _IOR(SPI_IOC_MAGIC, 4, __u32) +#define SPI_IOC_WR_MAX_SPEED_HZ _IOW(SPI_IOC_MAGIC, 4, __u32) + + + +#endif /* SPIDEV_H */ -- cgit v1.2.3 From 33e34dc6ee2cb2cf2d50e65c5b825d9ebb8b9e66 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 8 May 2007 00:32:21 -0700 Subject: SPI kerneldoc Various documentation updates for the SPI infrastructure, to clarify things that may not have been clear, to cope with lack of editing, and fix omissions. Also, plug SPI into the kernel-api DocBook template, and fix all the resulting glitches in document generation. 
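For reference, the spidev character-device interface introduced above can be exercised from userspace roughly as sketched below. This is an illustrative fragment, not part of any patch here: the /dev/spidev1.0 node name, the 0x9f command byte, and the terse error handling are assumptions.

/* Hypothetical userspace sketch for the new /dev/spidevB.C API.
 * Assumes <linux/spi/spidev.h> is installed in the userspace headers
 * and that the board exposes /dev/spidev1.0.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

int main(void)
{
	uint8_t mode = SPI_MODE_0;
	uint8_t tx[3] = { 0x9f, 0x00, 0x00 };	/* example command byte plus padding */
	uint8_t rx[3] = { 0, 0, 0 };
	struct spi_ioc_transfer xfer;
	int fd = open("/dev/spidev1.0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0)
		return 1;

	memset(&xfer, 0, sizeof xfer);		/* zero unused and pad fields */
	xfer.tx_buf = (unsigned long) tx;	/* full duplex: tx shifts out ... */
	xfer.rx_buf = (unsigned long) rx;	/* ... while rx fills in */
	xfer.len = sizeof tx;

	if (ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 0)
		return 1;
	printf("got %02x %02x %02x\n", rx[0], rx[1], rx[2]);
	close(fd);
	return 0;
}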
Signed-off-by: David Brownell Cc: "Randy.Dunlap" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/DocBook/kernel-api.tmpl | 63 +++++++++++++++++++++++++++ Documentation/spi/spi-summary | 43 ++++++++++-------- drivers/spi/spi.c | 33 +++++++++++--- include/linux/spi/spi.h | 82 ++++++++++++++++++++++------------- 4 files changed, 166 insertions(+), 55 deletions(-) (limited to 'include/linux') diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl index b61dfc79e1b8..a2b2b4d187c5 100644 --- a/Documentation/DocBook/kernel-api.tmpl +++ b/Documentation/DocBook/kernel-api.tmpl @@ -576,4 +576,67 @@ X!Idrivers/video/console/fonts.c !Edrivers/input/ff-core.c !Edrivers/input/ff-memless.c + + + Serial Peripheral Interface (SPI) + + SPI is the "Serial Peripheral Interface", widely used with + embedded systems because it is a simple and efficient + interface: basically a multiplexed shift register. + Its three signal wires hold a clock (SCK, often in the range + of 1-20 MHz), a "Master Out, Slave In" (MOSI) data line, and + a "Master In, Slave Out" (MISO) data line. + SPI is a full duplex protocol; for each bit shifted out the + MOSI line (one per clock) another is shifted in on the MISO line. + Those bits are assembled into words of various sizes on the + way to and from system memory. + An additional chipselect line is usually active-low (nCS); + four signals are normally used for each peripheral, plus + sometimes an interrupt. + + + The SPI bus facilities listed here provide a generalized + interface to declare SPI busses and devices, manage them + according to the standard Linux driver model, and perform + input/output operations. + At this time, only "master" side interfaces are supported, + where Linux talks to SPI peripherals and does not implement + such a peripheral itself. + (Interfaces to support implementing SPI slaves would + necessarily look different.) + + + The programming interface is structured around two kinds of driver, + and two kinds of device. + A "Controller Driver" abstracts the controller hardware, which may + be as simple as a set of GPIO pins or as complex as a pair of FIFOs + connected to dual DMA engines on the other side of the SPI shift + register (maximizing throughput). Such drivers bridge between + whatever bus they sit on (often the platform bus) and SPI, and + expose the SPI side of their device as a + struct spi_master. + SPI devices are children of that master, represented as a + struct spi_device and manufactured from + struct spi_board_info descriptors which + are usually provided by board-specific initialization code. + A struct spi_driver is called a + "Protocol Driver", and is bound to a spi_device using normal + driver model calls. + + + The I/O model is a set of queued messages. Protocol drivers + submit one or more struct spi_message + objects, which are processed and completed asynchronously. + (There are synchronous wrappers, however.) Messages are + built from one or more struct spi_transfer + objects, each of which wraps a full duplex SPI transfer. + A variety of protocol tweaking options are needed, because + different chips adopt very different policies for how they + use the bits transferred with SPI. 
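To make the queued-message model just described concrete, a protocol driver might build and submit a two-transfer message roughly as follows. This is a hedged sketch, not text from the patch: the function name and parameters are assumptions, and per the spi_transfer documentation both buffers must live in dma-safe memory.

#include <linux/spi/spi.h>
#include <linux/string.h>

/* Illustrative only: send a command, then read back a block of data,
 * as one atomic spi_message built from two half-duplex spi_transfers.
 */
static int example_read_block(struct spi_device *spi,
			      const void *cmd, size_t cmd_len,
			      void *data, size_t data_len)
{
	struct spi_transfer t[2];
	struct spi_message m;

	memset(t, 0, sizeof t);
	spi_message_init(&m);

	t[0].tx_buf = cmd;		/* shift the command out on MOSI */
	t[0].len = cmd_len;
	spi_message_add_tail(&t[0], &m);

	t[1].rx_buf = data;		/* then shift the response in on MISO */
	t[1].len = data_len;
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);	/* synchronous wrapper; sleeps until done */
}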
+ +!Iinclude/linux/spi/spi.h +!Fdrivers/spi/spi.c spi_register_board_info +!Edrivers/spi/spi.c + + diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary index ecc7c9eb9f29..795fbb48ffa7 100644 --- a/Documentation/spi/spi-summary +++ b/Documentation/spi/spi-summary @@ -8,7 +8,7 @@ What is SPI? The "Serial Peripheral Interface" (SPI) is a synchronous four wire serial link used to connect microcontrollers to sensors, memory, and peripherals. -The three signal wires hold a clock (SCLK, often on the order of 10 MHz), +The three signal wires hold a clock (SCK, often on the order of 10 MHz), and parallel data lines with "Master Out, Slave In" (MOSI) or "Master In, Slave Out" (MISO) signals. (Other names are also used.) There are four clocking modes through which data is exchanged; mode-0 and mode-3 are most @@ -22,7 +22,7 @@ other signals, often including an interrupt to the master. Unlike serial busses like USB or SMBUS, even low level protocols for SPI slave functions are usually not interoperable between vendors -(except for cases like SPI memory chips). +(except for commodities like SPI memory chips). - SPI may be used for request/response style device protocols, as with touchscreen sensors and memory chips. @@ -77,8 +77,9 @@ cards without needing a special purpose MMC/SD/SDIO controller. How do these driver programming interfaces work? ------------------------------------------------ The header file includes kerneldoc, as does the -main source code, and you should certainly read that. This is just -an overview, so you get the big picture before the details. +main source code, and you should certainly read that chapter of the +kernel API document. This is just an overview, so you get the big +picture before those details. SPI requests always go into I/O queues. Requests for a given SPI device are always executed in FIFO order, and complete asynchronously through @@ -88,7 +89,7 @@ a command and then reading its response. There are two types of SPI driver, here called: - Controller drivers ... these are often built in to System-On-Chip + Controller drivers ... controllers may be built in to System-On-Chip processors, and often support both Master and Slave roles. These drivers touch hardware registers and may use DMA. Or they can be PIO bitbangers, needing just GPIO pins. @@ -108,18 +109,18 @@ those two types of driver. At this writing, Linux has no slave side programming interface. There is a minimal core of SPI programming interfaces, focussing on -using driver model to connect controller and protocol drivers using +using the driver model to connect controller and protocol drivers using device tables provided by board specific initialization code. SPI shows up in sysfs in several locations: - /sys/devices/.../CTLR/spiB.C ... spi_device for on bus "B", + /sys/devices/.../CTLR/spiB.C ... spi_device on bus "B", chipselect C, accessed through CTLR. /sys/devices/.../CTLR/spiB.C/modalias ... identifies the driver that should be used with this device (for hotplug/coldplug) /sys/bus/spi/devices/spiB.C ... symlink to the physical - spiB-C device + spiB.C device /sys/bus/spi/drivers/D ... driver for one or more spi*.* devices @@ -240,7 +241,7 @@ The board_info should provide enough information to let the system work without the chip's driver being loaded. The most troublesome aspect of that is likely the SPI_CS_HIGH bit in the spi_device.mode field, since sharing a bus with a device that interprets chipselect "backwards" is -not possible. 
+not possible until the infrastructure knows how to deselect it. Then your board initialization code would register that table with the SPI infrastructure, so that it's available later when the SPI master controller @@ -268,16 +269,14 @@ board info based on the board that was hotplugged. Of course, you'd later call at least spi_unregister_device() when that board is removed. When Linux includes support for MMC/SD/SDIO/DataFlash cards through SPI, those -configurations will also be dynamic. Fortunately, those devices all support -basic device identification probes, so that support should hotplug normally. +configurations will also be dynamic. Fortunately, such devices all support +basic device identification probes, so they should hotplug normally. How do I write an "SPI Protocol Driver"? ---------------------------------------- -All SPI drivers are currently kernel drivers. A userspace driver API -would just be another kernel driver, probably offering some lowlevel -access through aio_read(), aio_write(), and ioctl() calls and using the -standard userspace sysfs mechanisms to bind to a given SPI device. +Most SPI drivers are currently kernel drivers, but there's also support +for userspace drivers. Here we talk only about kernel drivers. SPI protocol drivers somewhat resemble platform device drivers: @@ -319,7 +318,8 @@ might look like this unless you're creating a class_device: As soon as it enters probe(), the driver may issue I/O requests to the SPI device using "struct spi_message". When remove() returns, -the driver guarantees that it won't submit any more such messages. +or after probe() fails, the driver guarantees that it won't submit +any more such messages. - An spi_message is a sequence of protocol operations, executed as one atomic sequence. SPI driver controls include: @@ -368,7 +368,8 @@ the driver guarantees that it won't submit any more such messages. Some drivers may need to modify spi_device characteristics like the transfer mode, wordsize, or clock rate. This is done with spi_setup(), which would normally be called from probe() before the first I/O is -done to the device. +done to the device. However, that can also be called at any time +that no message is pending for that device. While "spi_device" would be the bottom boundary of the driver, the upper boundaries might include sysfs (especially for sensor readings), @@ -445,11 +446,15 @@ SPI MASTER METHODS This sets up the device clock rate, SPI mode, and word sizes. Drivers may change the defaults provided by board_info, and then call spi_setup(spi) to invoke this routine. It may sleep. + Unless each SPI slave has its own configuration registers, don't + change them right away ... otherwise drivers could corrupt I/O + that's in progress for other SPI devices. master->transfer(struct spi_device *spi, struct spi_message *message) This must not sleep. Its responsibility is arrange that the - transfer happens and its complete() callback is issued; the two - will normally happen later, after other transfers complete. + transfer happens and its complete() callback is issued. The two + will normally happen later, after other transfers complete, and + if the controller is idle it will need to be kickstarted. 
master->cleanup(struct spi_device *spi) Your controller driver may use spi_device.controller_state to hold diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 6657331eed93..c3219b29b5ac 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -152,6 +152,11 @@ static void spi_drv_shutdown(struct device *dev) sdrv->shutdown(to_spi_device(dev)); } +/** + * spi_register_driver - register a SPI driver + * @sdrv: the driver to register + * Context: can sleep + */ int spi_register_driver(struct spi_driver *sdrv) { sdrv->driver.bus = &spi_bus_type; @@ -183,7 +188,13 @@ static LIST_HEAD(board_list); static DECLARE_MUTEX(board_lock); -/* On typical mainboards, this is purely internal; and it's not needed +/** + * spi_new_device - instantiate one new SPI device + * @master: Controller to which device is connected + * @chip: Describes the SPI device + * Context: can sleep + * + * On typical mainboards, this is purely internal; and it's not needed * after board init creates the hard-wired devices. Some development * platforms may not be able to use spi_register_board_info though, and * this is exported so that for example a USB or parport based adapter @@ -251,7 +262,12 @@ fail: } EXPORT_SYMBOL_GPL(spi_new_device); -/* +/** + * spi_register_board_info - register SPI devices for a given board + * @info: array of chip descriptors + * @n: how many descriptors are provided + * Context: can sleep + * * Board-specific early init code calls this (probably during arch_initcall) * with segments of the SPI device table. Any device nodes are created later, * after the relevant parent SPI controller (bus_num) is defined. We keep @@ -337,9 +353,10 @@ static struct class spi_master_class = { /** * spi_alloc_master - allocate SPI master controller * @dev: the controller, possibly using the platform_bus - * @size: how much driver-private data to preallocate; the pointer to this + * @size: how much zeroed driver-private data to allocate; the pointer to this * memory is in the class_data field of the returned class_device, * accessible with spi_master_get_devdata(). + * Context: can sleep * * This call is used only by SPI master controller drivers, which are the * only ones directly touching chip registers. It's how they allocate @@ -375,6 +392,7 @@ EXPORT_SYMBOL_GPL(spi_alloc_master); /** * spi_register_master - register SPI master controller * @master: initialized master, originally from spi_alloc_master() + * Context: can sleep * * SPI master controllers connect to their drivers using some non-SPI bus, * such as the platform bus. The final stage of probe() in that code @@ -437,6 +455,7 @@ static int __unregister(struct device *dev, void *unused) /** * spi_unregister_master - unregister SPI master controller * @master: the master being unregistered + * Context: can sleep * * This call is used only by SPI master controller drivers, which are the * only ones directly touching chip registers. @@ -455,6 +474,7 @@ EXPORT_SYMBOL_GPL(spi_unregister_master); /** * spi_busnum_to_master - look up master associated with bus_num * @bus_num: the master's bus number + * Context: can sleep * * This call may be used with devices that are registered after * arch init time. It returns a refcounted pointer to the relevant @@ -492,6 +512,7 @@ static void spi_complete(void *arg) * spi_sync - blocking/synchronous SPI data transfers * @spi: device with which data will be exchanged * @message: describes the data transfers + * Context: can sleep * * This call may only be used from a context that may sleep. 
The sleep * is non-interruptible, and has no timeout. Low-overhead controller @@ -508,7 +529,7 @@ static void spi_complete(void *arg) * * The return value is a negative error code if the message could not be * submitted, else zero. When the value is zero, then message->status is - * also defined: it's the completion code for the transfer, either zero + * also defined; it's the completion code for the transfer, either zero * or a negative error code from the controller driver. */ int spi_sync(struct spi_device *spi, struct spi_message *message) @@ -538,6 +559,7 @@ static u8 *buf; * @n_tx: size of txbuf, in bytes * @rxbuf: buffer into which data will be read * @n_rx: size of rxbuf, in bytes (need not be dma-safe) + * Context: can sleep * * This performs a half duplex MicroWire style transaction with the * device, sending txbuf and then reading rxbuf. The return value @@ -545,7 +567,8 @@ static u8 *buf; * This call may only be used from a context that may sleep. * * Parameters to this routine are always copied using a small buffer; - * performance-sensitive or bulk transfer code should instead use + * portable code should never use this for more than 32 bytes. + * Performance-sensitive or bulk transfer code should instead use * spi_{async,sync}() calls with dma-safe buffers. */ int spi_write_then_read(struct spi_device *spi, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 4f0f8c2e58a5..b6bedc3ee95c 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -32,11 +32,12 @@ extern struct bus_type spi_bus_type; * @max_speed_hz: Maximum clock rate to be used with this chip * (on this board); may be changed by the device's driver. * The spi_transfer.speed_hz can override this for each transfer. - * @chip-select: Chipselect, distinguishing chips handled by "master". + * @chip_select: Chipselect, distinguishing chips handled by @master. * @mode: The spi mode defines how data is clocked out and in. * This may be changed by the device's driver. - * The "active low" default for chipselect mode can be overridden, - * as can the "MSB first" default for each word in a transfer. + * The "active low" default for chipselect mode can be overridden + * (by specifying SPI_CS_HIGH) as can the "MSB first" default for + * each word in a transfer (by specifying SPI_LSB_FIRST). * @bits_per_word: Data transfers involve one or more words; word sizes * like eight or 12 bits are common. In-memory wordsizes are * powers of two bytes (e.g. 20 bit samples use 32 bits). @@ -48,14 +49,18 @@ extern struct bus_type spi_bus_type; * @controller_state: Controller's runtime state * @controller_data: Board-specific definitions for controller, such as * FIFO initialization parameters; from board_info.controller_data + * @modalias: Name of the driver to use with this device, or an alias + * for that name. This appears in the sysfs "modalias" attribute + * for driver coldplugging, and in uevents used for hotplugging * - * An spi_device is used to interchange data between an SPI slave + * A @spi_device is used to interchange data between an SPI slave * (usually a discrete chip) and CPU memory. * - * In "dev", the platform_data is used to hold information about this + * In @dev, the platform_data is used to hold information about this * device that's meaningful to the device's protocol driver, but not * to its controller. One example might be an identifier for a chip - * variant with slightly different functionality. 
+ * variant with slightly different functionality; another might be + * information about how this particular board wires the chip's pins. */ struct spi_device { struct device dev; @@ -77,13 +82,15 @@ struct spi_device { void *controller_data; const char *modalias; - // likely need more hooks for more protocol options affecting how - // the controller talks to each chip, like: - // - memory packing (12 bit samples into low bits, others zeroed) - // - priority - // - drop chipselect after each word - // - chipselect delays - // - ... + /* + * likely need more hooks for more protocol options affecting how + * the controller talks to each chip, like: + * - memory packing (12 bit samples into low bits, others zeroed) + * - priority + * - drop chipselect after each word + * - chipselect delays + * - ... + */ }; static inline struct spi_device *to_spi_device(struct device *dev) @@ -146,6 +153,11 @@ static inline struct spi_driver *to_spi_driver(struct device_driver *drv) extern int spi_register_driver(struct spi_driver *sdrv); +/** + * spi_unregister_driver - reverse effect of spi_register_driver + * @sdrv: the driver to unregister + * Context: can sleep + */ static inline void spi_unregister_driver(struct spi_driver *sdrv) { if (sdrv) @@ -165,18 +177,20 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @setup: updates the device mode and clocking records used by a * device's SPI controller; protocol code may call this. This * must fail if an unrecognized or unsupported mode is requested. + * It's always safe to call this unless transfers are pending on + * the device whose settings are being modified. * @transfer: adds a message to the controller's transfer queue. * @cleanup: frees controller-specific state * - * Each SPI master controller can communicate with one or more spi_device + * Each SPI master controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals * but not chip select signals. Each device may be configured to use a * different clock rate, since those shared signals are ignored unless * the chip is selected. * * The driver for an SPI controller manages access to those devices through - * a queue of spi_message transactions, copyin data between CPU memory and - * an SPI slave device). For each such message it queues, it calls the + * a queue of spi_message transactions, copying data between CPU memory and + * an SPI slave device. For each such message it queues, it calls the * message's completion function when the transaction completes. */ struct spi_master { @@ -280,27 +294,27 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum); * struct spi_transfer - a read/write buffer pair * @tx_buf: data to be written (dma-safe memory), or NULL * @rx_buf: data to be read (dma-safe memory), or NULL - * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped - * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped + * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped + * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped * @len: size of rx and tx buffers (in bytes) * @speed_hz: Select a speed other then the device default for this - * transfer. If 0 the default (from spi_device) is used. + * transfer. If 0 the default (from @spi_device) is used. * @bits_per_word: select a bits_per_word other then the device default - * for this transfer. If 0 the default (from spi_device) is used. + * for this transfer. If 0 the default (from @spi_device) is used. 
* @cs_change: affects chipselect after this transfer completes * @delay_usecs: microseconds to delay after this transfer before * (optionally) changing the chipselect status, then starting - * the next transfer or completing this spi_message. - * @transfer_list: transfers are sequenced through spi_message.transfers + * the next transfer or completing this @spi_message. + * @transfer_list: transfers are sequenced through @spi_message.transfers * * SPI transfers always write the same number of bytes as they read. - * Protocol drivers should always provide rx_buf and/or tx_buf. + * Protocol drivers should always provide @rx_buf and/or @tx_buf. * In some cases, they may also want to provide DMA addresses for * the data being transferred; that may reduce overhead, when the * underlying driver uses dma. * * If the transmit buffer is null, zeroes will be shifted out - * while filling rx_buf. If the receive buffer is null, the data + * while filling @rx_buf. If the receive buffer is null, the data * shifted in will be discarded. Only "len" bytes shift out (or in). * It's an error to try to shift out a partial word. (For example, by * shifting out three bytes with word size of sixteen or twenty bits; @@ -309,7 +323,7 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum); * In-memory data values are always in native CPU byte order, translated * from the wire byte order (big-endian except with SPI_LSB_FIRST). So * for example when bits_per_word is sixteen, buffers are 2N bytes long - * and hold N sixteen bit words in CPU byte order. + * (@len = 2N) and hold N sixteen bit words in CPU byte order. * * When the word size of the SPI transfer is not a power-of-two multiple * of eight bits, those in-memory words include extra bits. In-memory @@ -318,7 +332,7 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum); * * All SPI transfers start with the relevant chipselect active. Normally * it stays selected until after the last transfer in a message. Drivers - * can affect the chipselect signal using cs_change: + * can affect the chipselect signal using cs_change. * * (i) If the transfer isn't the last one in the message, this flag is * used to make the chipselect briefly go inactive in the middle of the @@ -372,7 +386,7 @@ struct spi_transfer { * @queue: for use by whichever driver currently owns the message * @state: for use by whichever driver currently owns the message * - * An spi_message is used to execute an atomic sequence of data transfers, + * A @spi_message is used to execute an atomic sequence of data transfers, * each represented by a struct spi_transfer. The sequence is "atomic" * in the sense that no other spi_message may use that SPI bus until that * sequence completes. On some systems, many such sequences can execute as @@ -464,8 +478,9 @@ static inline void spi_message_free(struct spi_message *m) } /** - * spi_setup -- setup SPI mode and clock rate + * spi_setup - setup SPI mode and clock rate * @spi: the device whose settings are being modified + * Context: can sleep * * SPI protocol drivers may need to update the transfer mode if the * device doesn't work with the mode 0 default. They may likewise need @@ -474,7 +489,7 @@ static inline void spi_message_free(struct spi_message *m) * The changes take effect the next time the device is selected and data * is transferred to or from it. 
* - * Note that this call wil fail if the protocol driver specifies an option + * Note that this call will fail if the protocol driver specifies an option * that the underlying controller or its driver does not support. For * example, not all hardware supports wire transfers using nine bit words, * LSB-first wire encoding, or active-high chipselects. @@ -487,9 +502,10 @@ spi_setup(struct spi_device *spi) /** - * spi_async -- asynchronous SPI transfer + * spi_async - asynchronous SPI transfer * @spi: device with which data will be exchanged * @message: describes the data transfers, including completion callback + * Context: any (irqs may be blocked, etc) * * This call may be used in_irq and other contexts which can't sleep, * as well as from task contexts which can sleep. @@ -535,6 +551,7 @@ extern int spi_sync(struct spi_device *spi, struct spi_message *message); * @spi: device to which data will be written * @buf: data buffer * @len: data buffer size + * Context: can sleep * * This writes the buffer and returns zero or a negative error code. * Callable only from contexts that can sleep. @@ -558,8 +575,9 @@ spi_write(struct spi_device *spi, const u8 *buf, size_t len) * @spi: device from which data will be read * @buf: data buffer * @len: data buffer size + * Context: can sleep * - * This writes the buffer and returns zero or a negative error code. + * This reads the buffer and returns zero or a negative error code. * Callable only from contexts that can sleep. */ static inline int @@ -585,6 +603,7 @@ extern int spi_write_then_read(struct spi_device *spi, * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read * @spi: device with which data will be exchanged * @cmd: command to be written before data is read back + * Context: can sleep * * This returns the (unsigned) eight bit number returned by the * device, or else a negative error code. Callable only from @@ -605,6 +624,7 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read * @spi: device with which data will be exchanged * @cmd: command to be written before data is read back + * Context: can sleep * * This returns the (unsigned) sixteen bit number returned by the * device, or else a negative error code. 
Callable only from -- cgit v1.2.3 From 3a3a51d1f2efe10090cd779a66c4b3c8f57eaf9f Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Tue, 8 May 2007 00:32:33 -0700 Subject: make drivers/isdn/capi/capiutil.c:cdebbuf_alloc() static Signed-off-by: Adrian Bunk Acked-by: Karsten Keil Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/isdn/capi/capiutil.c | 8 +------- include/linux/isdn/capiutil.h | 1 - 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c index ad1e2702c2d1..22379b94e88f 100644 --- a/drivers/isdn/capi/capiutil.c +++ b/drivers/isdn/capi/capiutil.c @@ -855,7 +855,7 @@ static _cdebbuf *g_debbuf; static u_long g_debbuf_lock; static _cmsg *g_cmsg; -_cdebbuf *cdebbuf_alloc(void) +static _cdebbuf *cdebbuf_alloc(void) { _cdebbuf *cdb; @@ -989,11 +989,6 @@ _cdebbuf *capi_cmsg2str(_cmsg * cmsg) return &g_debbuf; } -_cdebbuf *cdebbuf_alloc(void) -{ - return &g_debbuf; -} - void cdebbuf_free(_cdebbuf *cdb) { } @@ -1009,7 +1004,6 @@ void __exit cdebug_exit(void) #endif -EXPORT_SYMBOL(cdebbuf_alloc); EXPORT_SYMBOL(cdebbuf_free); EXPORT_SYMBOL(capi_cmsg2message); EXPORT_SYMBOL(capi_message2cmsg); diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h index 63bd9cf821a7..5a52f2c94f3f 100644 --- a/include/linux/isdn/capiutil.h +++ b/include/linux/isdn/capiutil.h @@ -187,7 +187,6 @@ typedef struct { #define CDEBUG_SIZE 1024 #define CDEBUG_GSIZE 4096 -_cdebbuf *cdebbuf_alloc(void); void cdebbuf_free(_cdebbuf *cdb); int cdebug_init(void); void cdebug_exit(void); -- cgit v1.2.3 From a7e27d5dd396419dc6d6288db6a6d86cf3a94ba5 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 8 May 2007 00:32:40 -0700 Subject: sanitize linux/isdn_divertif.h for userspace the isdn_divertif contains kernel-only references so I've wrapped them in __KERNEL__ and add proper #include statements. 
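The general pattern being applied is sketched below with hypothetical names; it is not part of the patch. Kernel-only declarations go under __KERNEL__, and the header is moved from header-y to unifdef-y so that "make headers_install" runs it through unifdef and strips the kernel-only half for userspace, exactly as the patch below does for isdn_divertif.h.

/* hypothetical example.h, showing the userspace-sanitizing pattern */
#define EXAMPLE_REG_NAME	example_register_hook	/* userspace-visible part stays outside */

#ifdef __KERNEL__
#include <linux/types.h>

struct example_hook {			/* kernel-only: uses in-kernel types */
	int (*callback)(void *priv);
	void *priv;
};

extern int EXAMPLE_REG_NAME(struct example_hook *);
#endif /* __KERNEL__ */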
Signed-off-by: Mike Frysinger Cc: Karsten Keil Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/Kbuild | 2 +- include/linux/isdn_divertif.h | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/Kbuild b/include/linux/Kbuild index ccc6fc157064..94cc04a143f2 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -92,7 +92,6 @@ header-y += ip_mp_alg.h header-y += ipsec.h header-y += ipx.h header-y += irda.h -header-y += isdn_divertif.h header-y += iso_fs.h header-y += ixjuser.h header-y += jffs2.h @@ -242,6 +241,7 @@ unifdef-y += ipv6.h unifdef-y += ipv6_route.h unifdef-y += isdn.h unifdef-y += isdnif.h +unifdef-y += isdn_divertif.h unifdef-y += isdn_ppp.h unifdef-y += isicom.h unifdef-y += jbd.h diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h index 0e7e44ce8301..07821ca5955f 100644 --- a/include/linux/isdn_divertif.h +++ b/include/linux/isdn_divertif.h @@ -24,6 +24,10 @@ #define DIVERT_REL_ERR 0x04 /* module not registered */ #define DIVERT_REG_NAME isdn_register_divert +#ifdef __KERNEL__ +#include +#include + /***************************************************************/ /* structure exchanging data between isdn hl and divert module */ /***************************************************************/ @@ -40,3 +44,4 @@ typedef struct /* function register */ /*********************/ extern int DIVERT_REG_NAME(isdn_divert_if *); +#endif -- cgit v1.2.3 From 46cb4b7c88fa5517f64b5bee42939ea3614cddcb Mon Sep 17 00:00:00 2001 From: "Siddha, Suresh B" Date: Tue, 8 May 2007 00:32:51 -0700 Subject: sched: dynticks idle load balancing Fix the process idle load balancing in the presence of dynticks. cpus for which ticks are stopped will sleep till the next event wakes it up. Potentially these sleeps can be for large durations and during which today, there is no periodic idle load balancing being done. This patch nominates an owner among the idle cpus, which does the idle load balancing on behalf of the other idle cpus. And once all the cpus are completely idle, then we can stop this idle load balancing too. Checks added in fast path are minimized. Whenever there are busy cpus in the system, there will be an owner(idle cpu) doing the system wide idle load balancing. Open items: 1. Intelligent owner selection (like an idle core in a busy package). 2. Merge with rcu's nohz_cpu_mask? Signed-off-by: Suresh Siddha Acked-by: Ingo Molnar Cc: Thomas Gleixner Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 8 ++ kernel/sched.c | 223 ++++++++++++++++++++++++++++++++++++++++++++--- kernel/time/tick-sched.c | 9 ++ 3 files changed, 227 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 6312521df2c1..15ab3e039535 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -194,6 +194,14 @@ extern void sched_init_smp(void); extern void init_idle(struct task_struct *idle, int cpu); extern cpumask_t nohz_cpu_mask; +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) +extern int select_nohz_load_balancer(int cpu); +#else +static inline int select_nohz_load_balancer(int cpu) +{ + return 0; +} +#endif /* * Only dump TASK_* tasks. 
(0 for all tasks) diff --git a/kernel/sched.c b/kernel/sched.c index ba053d88c8c6..74599286230c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -224,6 +224,9 @@ struct rq { #ifdef CONFIG_SMP unsigned long cpu_load[3]; unsigned char idle_at_tick; +#ifdef CONFIG_NO_HZ + unsigned char in_nohz_recently; +#endif #endif unsigned long long nr_switches; @@ -1050,6 +1053,17 @@ static void resched_task(struct task_struct *p) if (!tsk_is_polling(p)) smp_send_reschedule(cpu); } + +static void resched_cpu(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + unsigned long flags; + + if (!spin_trylock_irqsave(&rq->lock, flags)) + return; + resched_task(cpu_curr(cpu)); + spin_unlock_irqrestore(&rq->lock, flags); +} #else static inline void resched_task(struct task_struct *p) { @@ -2658,6 +2672,12 @@ redo: double_rq_unlock(this_rq, busiest); local_irq_restore(flags); + /* + * some other cpu did the load balance for us. + */ + if (nr_moved && this_cpu != smp_processor_id()) + resched_cpu(this_cpu); + /* All tasks on this runqueue were pinned by CPU affinity */ if (unlikely(all_pinned)) { cpu_clear(cpu_of(busiest), cpus); @@ -2928,27 +2948,98 @@ static void update_load(struct rq *this_rq) } } +#ifdef CONFIG_NO_HZ +static struct { + atomic_t load_balancer; + cpumask_t cpu_mask; +} nohz ____cacheline_aligned = { + .load_balancer = ATOMIC_INIT(-1), + .cpu_mask = CPU_MASK_NONE, +}; + /* - * run_rebalance_domains is triggered when needed from the scheduler tick. + * This routine will try to nominate the ilb (idle load balancing) + * owner among the cpus whose ticks are stopped. ilb owner will do the idle + * load balancing on behalf of all those cpus. If all the cpus in the system + * go into this tickless mode, then there will be no ilb owner (as there is + * no need for one) and all the cpus will sleep till the next wakeup event + * arrives... + * + * For the ilb owner, tick is not stopped. And this tick will be used + * for idle load balancing. ilb owner will still be part of + * nohz.cpu_mask.. + * + * While stopping the tick, this cpu will become the ilb owner if there + * is no other owner. And will be the owner till that cpu becomes busy + * or if all cpus in the system stop their ticks at which point + * there is no need for ilb owner. * + * When the ilb owner becomes busy, it nominates another owner, during the + * next busy scheduler_tick() + */ +int select_nohz_load_balancer(int stop_tick) +{ + int cpu = smp_processor_id(); + + if (stop_tick) { + cpu_set(cpu, nohz.cpu_mask); + cpu_rq(cpu)->in_nohz_recently = 1; + + /* + * If we are going offline and still the leader, give up! 
+ */ + if (cpu_is_offline(cpu) && + atomic_read(&nohz.load_balancer) == cpu) { + if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) + BUG(); + return 0; + } + + /* time for ilb owner also to sleep */ + if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) { + if (atomic_read(&nohz.load_balancer) == cpu) + atomic_set(&nohz.load_balancer, -1); + return 0; + } + + if (atomic_read(&nohz.load_balancer) == -1) { + /* make me the ilb owner */ + if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1) + return 1; + } else if (atomic_read(&nohz.load_balancer) == cpu) + return 1; + } else { + if (!cpu_isset(cpu, nohz.cpu_mask)) + return 0; + + cpu_clear(cpu, nohz.cpu_mask); + + if (atomic_read(&nohz.load_balancer) == cpu) + if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) + BUG(); + } + return 0; +} +#endif + +static DEFINE_SPINLOCK(balancing); + +/* * It checks each scheduling domain to see if it is due to be balanced, * and initiates a balancing operation if so. * * Balancing parameters are set up in arch_init_sched_domains. */ -static DEFINE_SPINLOCK(balancing); - -static void run_rebalance_domains(struct softirq_action *h) +static inline void rebalance_domains(int cpu, enum idle_type idle) { - int this_cpu = smp_processor_id(), balance = 1; - struct rq *this_rq = cpu_rq(this_cpu); + int balance = 1; + struct rq *rq = cpu_rq(cpu); unsigned long interval; struct sched_domain *sd; - enum idle_type idle = this_rq->idle_at_tick ? SCHED_IDLE : NOT_IDLE; - /* Earliest time when we have to call run_rebalance_domains again */ + /* Earliest time when we have to do rebalance again */ unsigned long next_balance = jiffies + 60*HZ; - for_each_domain(this_cpu, sd) { + for_each_domain(cpu, sd) { if (!(sd->flags & SD_LOAD_BALANCE)) continue; @@ -2967,7 +3058,7 @@ static void run_rebalance_domains(struct softirq_action *h) } if (time_after_eq(jiffies, sd->last_balance + interval)) { - if (load_balance(this_cpu, this_rq, sd, idle, &balance)) { + if (load_balance(cpu, rq, sd, idle, &balance)) { /* * We've pulled tasks over so either we're no * longer idle, or one of our SMT siblings is @@ -2991,7 +3082,114 @@ out: if (!balance) break; } - this_rq->next_balance = next_balance; + rq->next_balance = next_balance; +} + +/* + * run_rebalance_domains is triggered when needed from the scheduler tick. + * In CONFIG_NO_HZ case, the idle load balance owner will do the + * rebalancing for all the cpus for whom scheduler ticks are stopped. + */ +static void run_rebalance_domains(struct softirq_action *h) +{ + int local_cpu = smp_processor_id(); + struct rq *local_rq = cpu_rq(local_cpu); + enum idle_type idle = local_rq->idle_at_tick ? SCHED_IDLE : NOT_IDLE; + + rebalance_domains(local_cpu, idle); + +#ifdef CONFIG_NO_HZ + /* + * If this cpu is the owner for idle load balancing, then do the + * balancing on behalf of the other idle cpus whose ticks are + * stopped. + */ + if (local_rq->idle_at_tick && + atomic_read(&nohz.load_balancer) == local_cpu) { + cpumask_t cpus = nohz.cpu_mask; + struct rq *rq; + int balance_cpu; + + cpu_clear(local_cpu, cpus); + for_each_cpu_mask(balance_cpu, cpus) { + /* + * If this cpu gets work to do, stop the load balancing + * work being done for other cpus. Next load + * balancing owner will pick it up. 
+ */ + if (need_resched()) + break; + + rebalance_domains(balance_cpu, SCHED_IDLE); + + rq = cpu_rq(balance_cpu); + if (time_after(local_rq->next_balance, rq->next_balance)) + local_rq->next_balance = rq->next_balance; + } + } +#endif +} + +/* + * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. + * + * In case of CONFIG_NO_HZ, this is the place where we nominate a new + * idle load balancing owner or decide to stop the periodic load balancing, + * if the whole system is idle. + */ +static inline void trigger_load_balance(int cpu) +{ + struct rq *rq = cpu_rq(cpu); +#ifdef CONFIG_NO_HZ + /* + * If we were in the nohz mode recently and busy at the current + * scheduler tick, then check if we need to nominate new idle + * load balancer. + */ + if (rq->in_nohz_recently && !rq->idle_at_tick) { + rq->in_nohz_recently = 0; + + if (atomic_read(&nohz.load_balancer) == cpu) { + cpu_clear(cpu, nohz.cpu_mask); + atomic_set(&nohz.load_balancer, -1); + } + + if (atomic_read(&nohz.load_balancer) == -1) { + /* + * simple selection for now: Nominate the + * first cpu in the nohz list to be the next + * ilb owner. + * + * TBD: Traverse the sched domains and nominate + * the nearest cpu in the nohz.cpu_mask. + */ + int ilb = first_cpu(nohz.cpu_mask); + + if (ilb != NR_CPUS) + resched_cpu(ilb); + } + } + + /* + * If this cpu is idle and doing idle load balancing for all the + * cpus with ticks stopped, is it time for that to stop? + */ + if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && + cpus_weight(nohz.cpu_mask) == num_online_cpus()) { + resched_cpu(cpu); + return; + } + + /* + * If this cpu is idle and the idle load balancing is done by + * someone else, then no need raise the SCHED_SOFTIRQ + */ + if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && + cpu_isset(cpu, nohz.cpu_mask)) + return; +#endif + if (time_after_eq(jiffies, rq->next_balance)) + raise_softirq(SCHED_SOFTIRQ); } #else /* @@ -3224,8 +3422,7 @@ void scheduler_tick(void) #ifdef CONFIG_SMP update_load(rq); rq->idle_at_tick = idle_at_tick; - if (time_after_eq(jiffies, rq->next_balance)) - raise_softirq(SCHED_SOFTIRQ); + trigger_load_balance(cpu); #endif } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f4fc867f467d..3483e6cb9549 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -217,6 +217,14 @@ void tick_nohz_stop_sched_tick(void) * the scheduler tick in nohz_restart_sched_tick. */ if (!ts->tick_stopped) { + if (select_nohz_load_balancer(1)) { + /* + * sched tick not stopped! + */ + cpu_clear(cpu, nohz_cpu_mask); + goto out; + } + ts->idle_tick = ts->sched_timer.expires; ts->tick_stopped = 1; ts->idle_jiffies = last_jiffies; @@ -285,6 +293,7 @@ void tick_nohz_restart_sched_tick(void) now = ktime_get(); local_irq_disable(); + select_nohz_load_balancer(0); tick_do_update_jiffies64(now); cpu_clear(cpu, nohz_cpu_mask); -- cgit v1.2.3 From 5517d86bea237c1d7078840182d9ebc0fe4c1afc Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 8 May 2007 00:32:57 -0700 Subject: Speed up divides by cpu_power in scheduler I noticed expensive divides done in try_to_wakeup() and find_busiest_group() on a bi dual core Opteron machine (total of 4 cores), moderatly loaded (15.000 context switch per second) oprofile numbers : CPU: AMD64 processors, speed 2600.05 MHz (estimated) Counted CPU_CLK_UNHALTED events (Cycles outside of halt state) with a unit mask of 0x00 (No unit mask) count 50000 samples % symbol name ... 
613914 1.0498 try_to_wake_up 834 0.0013 :ffffffff80227ae1: div %rcx 77513 0.1191 :ffffffff80227ae4: mov %rax,%r11 608893 1.0413 find_busiest_group 1841 0.0031 :ffffffff802260bf: div %rdi 140109 0.2394 :ffffffff802260c2: test %sil,%sil Some of these divides can use the reciprocal divides we introduced some time ago (currently used in slab AFAIK) We can assume a load will fit in a 32bits number, because with a SCHED_LOAD_SCALE=128 value, its still a theorical limit of 33554432 When/if we reach this limit one day, probably cpus will have a fast hardware divide and we can zap the reciprocal divide trick. Ingo suggested to rename cpu_power to __cpu_power to make clear it should not be modified without changing its reciprocal value too. I did not convert the divide in cpu_avg_load_per_task(), because tracking nr_running changes may be not worth it ? We could use a static table of 32 reciprocal values but it would add a conditional branch and table lookup. [akpm@linux-foundation.org: !SMP build fix] Signed-off-by: Eric Dumazet Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 8 ++++- kernel/sched.c | 83 +++++++++++++++++++++++++++++++++------------------ 2 files changed, 61 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 15ab3e039535..3d95c480f58d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -680,8 +680,14 @@ struct sched_group { /* * CPU power of this group, SCHED_LOAD_SCALE being max power for a * single CPU. This is read only (except for setup, hotplug CPU). + * Note : Never change cpu_power without recompute its reciprocal */ - unsigned long cpu_power; + unsigned int __cpu_power; + /* + * reciprocal value of cpu_power to avoid expensive divides + * (see include/linux/reciprocal_div.h) + */ + u32 reciprocal_cpu_power; }; struct sched_domain { diff --git a/kernel/sched.c b/kernel/sched.c index 74599286230c..e4a5888549a5 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -52,8 +52,9 @@ #include #include #include -#include +#include +#include #include /* @@ -181,6 +182,27 @@ static unsigned int static_prio_timeslice(int static_prio) return SCALE_PRIO(DEF_TIMESLICE, static_prio); } +#ifdef CONFIG_SMP +/* + * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) + * Since cpu_power is a 'constant', we can use a reciprocal divide. + */ +static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load) +{ + return reciprocal_divide(load, sg->reciprocal_cpu_power); +} + +/* + * Each time a sched group cpu_power is changed, + * we must compute its reciprocal value + */ +static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val) +{ + sg->__cpu_power += val; + sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power); +} +#endif + /* * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ] * to time slice values: [800ms ... 100ms ... 
5ms] @@ -1256,7 +1278,8 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) } /* Adjust by relative CPU power of the group */ - avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power; + avg_load = sg_div_cpu_power(group, + avg_load * SCHED_LOAD_SCALE); if (local_group) { this_load = avg_load; @@ -2367,12 +2390,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, } total_load += avg_load; - total_pwr += group->cpu_power; + total_pwr += group->__cpu_power; /* Adjust by relative CPU power of the group */ - avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power; + avg_load = sg_div_cpu_power(group, + avg_load * SCHED_LOAD_SCALE); - group_capacity = group->cpu_power / SCHED_LOAD_SCALE; + group_capacity = group->__cpu_power / SCHED_LOAD_SCALE; if (local_group) { this_load = avg_load; @@ -2483,8 +2507,8 @@ group_next: max_pull = min(max_load - avg_load, max_load - busiest_load_per_task); /* How much load to actually move to equalise the imbalance */ - *imbalance = min(max_pull * busiest->cpu_power, - (avg_load - this_load) * this->cpu_power) + *imbalance = min(max_pull * busiest->__cpu_power, + (avg_load - this_load) * this->__cpu_power) / SCHED_LOAD_SCALE; /* @@ -2518,28 +2542,29 @@ small_imbalance: * moving them. */ - pwr_now += busiest->cpu_power * - min(busiest_load_per_task, max_load); - pwr_now += this->cpu_power * - min(this_load_per_task, this_load); + pwr_now += busiest->__cpu_power * + min(busiest_load_per_task, max_load); + pwr_now += this->__cpu_power * + min(this_load_per_task, this_load); pwr_now /= SCHED_LOAD_SCALE; /* Amount of load we'd subtract */ - tmp = busiest_load_per_task * SCHED_LOAD_SCALE / - busiest->cpu_power; + tmp = sg_div_cpu_power(busiest, + busiest_load_per_task * SCHED_LOAD_SCALE); if (max_load > tmp) - pwr_move += busiest->cpu_power * + pwr_move += busiest->__cpu_power * min(busiest_load_per_task, max_load - tmp); /* Amount of load we'd add */ - if (max_load * busiest->cpu_power < + if (max_load * busiest->__cpu_power < busiest_load_per_task * SCHED_LOAD_SCALE) - tmp = max_load * busiest->cpu_power / this->cpu_power; + tmp = sg_div_cpu_power(this, + max_load * busiest->__cpu_power); else - tmp = busiest_load_per_task * SCHED_LOAD_SCALE / - this->cpu_power; - pwr_move += this->cpu_power * - min(this_load_per_task, this_load + tmp); + tmp = sg_div_cpu_power(this, + busiest_load_per_task * SCHED_LOAD_SCALE); + pwr_move += this->__cpu_power * + min(this_load_per_task, this_load + tmp); pwr_move /= SCHED_LOAD_SCALE; /* Move if we gain throughput */ @@ -5501,7 +5526,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) break; } - if (!group->cpu_power) { + if (!group->__cpu_power) { printk("\n"); printk(KERN_ERR "ERROR: domain->cpu_power not " "set\n"); @@ -5678,7 +5703,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map, continue; sg->cpumask = CPU_MASK_NONE; - sg->cpu_power = 0; + sg->__cpu_power = 0; for_each_cpu_mask(j, span) { if (group_fn(j, cpu_map, NULL) != group) @@ -6367,7 +6392,7 @@ next_sg: continue; } - sg->cpu_power += sd->groups->cpu_power; + sg_inc_cpu_power(sg, sd->groups->__cpu_power); } sg = sg->next; if (sg != group_head) @@ -6442,6 +6467,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) child = sd->child; + sd->groups->__cpu_power = 0; + /* * For perf policy, if the groups in child domain share resources * (for example cores sharing some portions of the cache hierarchy @@ -6452,18 +6479,16 @@ static void init_sched_groups_power(int cpu, 
struct sched_domain *sd) if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) && (child->flags & (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) { - sd->groups->cpu_power = SCHED_LOAD_SCALE; + sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE); return; } - sd->groups->cpu_power = 0; - /* * add cpu_power of each child group to this groups cpu_power */ group = child->groups; do { - sd->groups->cpu_power += group->cpu_power; + sg_inc_cpu_power(sd->groups, group->__cpu_power); group = group->next; } while (group != child->groups); } @@ -6623,7 +6648,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) sd = &per_cpu(node_domains, j); sd->groups = sg; } - sg->cpu_power = 0; + sg->__cpu_power = 0; sg->cpumask = nodemask; sg->next = sg; cpus_or(covered, covered, nodemask); @@ -6651,7 +6676,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) "Can not alloc domain group for node %d\n", j); goto error; } - sg->cpu_power = 0; + sg->__cpu_power = 0; sg->cpumask = tmp; sg->next = prev->next; cpus_or(covered, covered, tmp); -- cgit v1.2.3 From 1c710c896eb461895d3c399e15bb5f20b39c9073 Mon Sep 17 00:00:00 2001 From: Ulrich Drepper Date: Tue, 8 May 2007 00:33:25 -0700 Subject: utimensat implementation Implement utimensat(2) which is an extension to futimesat(2) in that it a) supports nano-second resolution for the timestamps b) allows to selectively ignore the atime/mtime value c) allows to selectively use the current time for either atime or mtime d) supports changing the atime/mtime of a symlink itself along the lines of the BSD lutimes(3) functions For this change the internally used do_utimes() functions was changed to accept a timespec time value and an additional flags parameter. Additionally the sys_utime function was changed to match compat_sys_utime which already use do_utimes instead of duplicating the work. Also, the completely missing futimensat() functionality is added. We have such a function in glibc but we have to resort to using /proc/self/fd/* which not everybody likes (chroot etc). 
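A focused sketch of the common case, using the same UTIME_NOW/UTIME_OMIT values and raw syscall() invocation as the full test application that follows (it assumes that application's includes and defines; the file name is an assumption): leave atime untouched while setting mtime to the current time.

	struct timespec ts[2];

	ts[0].tv_sec = 0;
	ts[0].tv_nsec = UTIME_OMIT;	/* do not touch atime */
	ts[1].tv_sec = 0;
	ts[1].tv_nsec = UTIME_NOW;	/* set mtime to "now" */

	/* tv_sec must be 0 when tv_nsec is UTIME_NOW or UTIME_OMIT,
	 * otherwise the syscall returns -EINVAL */
	if (syscall(__NR_utimensat, AT_FDCWD, "somefile", ts, 0) != 0)
		error(1, errno, "utimensat failed");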
Test application (the syscall number will need per-arch editing): #include #include #include #include #include #include #define __NR_utimensat 280 #define UTIME_NOW ((1l << 30) - 1l) #define UTIME_OMIT ((1l << 30) - 2l) int main(void) { int status = 0; int fd = open("ttt", O_RDWR|O_CREAT|O_EXCL, 0666); if (fd == -1) error (1, errno, "failed to create test file \"ttt\""); struct stat64 st1; if (fstat64 (fd, &st1) != 0) error (1, errno, "fstat failed"); struct timespec t[2]; t[0].tv_sec = 0; t[0].tv_nsec = 0; t[1].tv_sec = 0; t[1].tv_nsec = 0; if (syscall(__NR_utimensat, AT_FDCWD, "ttt", t, 0) != 0) error (1, errno, "utimensat failed"); struct stat64 st2; if (fstat64 (fd, &st2) != 0) error (1, errno, "fstat failed"); if (st2.st_atim.tv_sec != 0 || st2.st_atim.tv_nsec != 0) { puts ("atim not reset to zero"); status = 1; } if (st2.st_mtim.tv_sec != 0 || st2.st_mtim.tv_nsec != 0) { puts ("mtim not reset to zero"); status = 1; } if (status != 0) goto out; t[0] = st1.st_atim; t[1].tv_sec = 0; t[1].tv_nsec = UTIME_OMIT; if (syscall(__NR_utimensat, AT_FDCWD, "ttt", t, 0) != 0) error (1, errno, "utimensat failed"); if (fstat64 (fd, &st2) != 0) error (1, errno, "fstat failed"); if (st2.st_atim.tv_sec != st1.st_atim.tv_sec || st2.st_atim.tv_nsec != st1.st_atim.tv_nsec) { puts ("atim not set"); status = 1; } if (st2.st_mtim.tv_sec != 0 || st2.st_mtim.tv_nsec != 0) { puts ("mtim changed from zero"); status = 1; } if (status != 0) goto out; t[0].tv_sec = 0; t[0].tv_nsec = UTIME_OMIT; t[1] = st1.st_mtim; if (syscall(__NR_utimensat, AT_FDCWD, "ttt", t, 0) != 0) error (1, errno, "utimensat failed"); if (fstat64 (fd, &st2) != 0) error (1, errno, "fstat failed"); if (st2.st_atim.tv_sec != st1.st_atim.tv_sec || st2.st_atim.tv_nsec != st1.st_atim.tv_nsec) { puts ("mtim changed from original time"); status = 1; } if (st2.st_mtim.tv_sec != st1.st_mtim.tv_sec || st2.st_mtim.tv_nsec != st1.st_mtim.tv_nsec) { puts ("mtim not set"); status = 1; } if (status != 0) goto out; sleep (2); t[0].tv_sec = 0; t[0].tv_nsec = UTIME_NOW; t[1].tv_sec = 0; t[1].tv_nsec = UTIME_NOW; if (syscall(__NR_utimensat, AT_FDCWD, "ttt", t, 0) != 0) error (1, errno, "utimensat failed"); if (fstat64 (fd, &st2) != 0) error (1, errno, "fstat failed"); struct timeval tv; gettimeofday(&tv,NULL); if (st2.st_atim.tv_sec <= st1.st_atim.tv_sec || st2.st_atim.tv_sec > tv.tv_sec) { puts ("atim not set to NOW"); status = 1; } if (st2.st_mtim.tv_sec <= st1.st_mtim.tv_sec || st2.st_mtim.tv_sec > tv.tv_sec) { puts ("mtim not set to NOW"); status = 1; } if (symlink ("ttt", "tttsym") != 0) error (1, errno, "cannot create symlink"); t[0].tv_sec = 0; t[0].tv_nsec = 0; t[1].tv_sec = 0; t[1].tv_nsec = 0; if (syscall(__NR_utimensat, AT_FDCWD, "tttsym", t, AT_SYMLINK_NOFOLLOW) != 0) error (1, errno, "utimensat failed"); if (lstat64 ("tttsym", &st2) != 0) error (1, errno, "lstat failed"); if (st2.st_atim.tv_sec != 0 || st2.st_atim.tv_nsec != 0) { puts ("symlink atim not reset to zero"); status = 1; } if (st2.st_mtim.tv_sec != 0 || st2.st_mtim.tv_nsec != 0) { puts ("symlink mtim not reset to zero"); status = 1; } if (status != 0) goto out; t[0].tv_sec = 1; t[0].tv_nsec = 0; t[1].tv_sec = 1; t[1].tv_nsec = 0; if (syscall(__NR_utimensat, fd, NULL, t, 0) != 0) error (1, errno, "utimensat failed"); if (fstat64 (fd, &st2) != 0) error (1, errno, "fstat failed"); if (st2.st_atim.tv_sec != 1 || st2.st_atim.tv_nsec != 0) { puts ("atim not reset to one"); status = 1; } if (st2.st_mtim.tv_sec != 1 || st2.st_mtim.tv_nsec != 0) { puts ("mtim not reset to one"); status = 1; } if 
(status == 0) puts ("all OK"); out: close (fd); unlink ("ttt"); unlink ("tttsym"); return status; } [akpm@linux-foundation.org: add missing i386 syscall table entry] Signed-off-by: Ulrich Drepper Cc: Alexey Dobriyan Cc: Michael Kerrisk Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/kernel/osf_sys.c | 14 +++- arch/i386/kernel/syscall_table.S | 1 + arch/sparc64/kernel/sys_sparc32.c | 14 +++- arch/x86_64/ia32/ia32entry.S | 3 +- fs/compat.c | 43 ++++++++-- fs/utimes.c | 162 ++++++++++++++++++++++++-------------- include/asm-i386/unistd.h | 3 +- include/asm-x86_64/unistd.h | 2 + include/linux/stat.h | 3 + include/linux/time.h | 2 +- 10 files changed, 172 insertions(+), 75 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index ea405f5713ce..ce857158c1ea 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -953,15 +953,25 @@ osf_setitimer(int which, struct itimerval32 __user *in, struct itimerval32 __use asmlinkage int osf_utimes(char __user *filename, struct timeval32 __user *tvs) { - struct timeval ktvs[2]; + struct timespec tv[2]; if (tvs) { + struct timeval ktvs[2]; if (get_tv32(&ktvs[0], &tvs[0]) || get_tv32(&ktvs[1], &tvs[1])) return -EFAULT; + + if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 || + ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000) + return -EINVAL; + + tv[0].tv_sec = ktvs[0].tv_sec; + tv[0].tv_nsec = 1000 * ktvs[0].tv_usec; + tv[1].tv_sec = ktvs[1].tv_sec; + tv[1].tv_nsec = 1000 * ktvs[1].tv_usec; } - return do_utimes(AT_FDCWD, filename, tvs ? ktvs : NULL); + return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0); } #define MAX_SELECT_SECONDS \ diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S index 2697e9210e92..0772678ceecf 100644 --- a/arch/i386/kernel/syscall_table.S +++ b/arch/i386/kernel/syscall_table.S @@ -319,3 +319,4 @@ ENTRY(sys_call_table) .long sys_move_pages .long sys_getcpu .long sys_epoll_pwait + .long sys_utimensat /* 320 */ diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index 7876a0226285..692e46a6b8da 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c @@ -775,15 +775,25 @@ asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, asmlinkage long sys32_utimes(char __user *filename, struct compat_timeval __user *tvs) { - struct timeval ktvs[2]; + struct timespec tv[2]; if (tvs) { + struct timeval ktvs[2]; if (get_tv32(&ktvs[0], tvs) || get_tv32(&ktvs[1], 1+tvs)) return -EFAULT; + + if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 || + ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000) + return -EINVAL; + + tv[0].tv_sec = ktvs[0].tv_sec; + tv[0].tv_nsec = 1000 * ktvs[0].tv_usec; + tv[1].tv_sec = ktvs[1].tv_sec; + tv[1].tv_nsec = 1000 * ktvs[1].tv_usec; } - return do_utimes(AT_FDCWD, filename, (tvs ? &ktvs[0] : NULL)); + return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL); } /* These are here just in case some old sparc32 binary calls it. 
*/ diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index c48087db6f75..f21068378272 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S @@ -710,9 +710,10 @@ ia32_sys_call_table: .quad compat_sys_get_robust_list .quad sys_splice .quad sys_sync_file_range - .quad sys_tee + .quad sys_tee /* 315 */ .quad compat_sys_vmsplice .quad compat_sys_move_pages .quad sys_getcpu .quad sys_epoll_pwait + .quad compat_sys_utimensat /* 320 */ ia32_syscall_end: diff --git a/fs/compat.c b/fs/compat.c index 9f6248dfd9d9..9cf75df9b2bb 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -77,30 +77,57 @@ int compat_printk(const char *fmt, ...) */ asmlinkage long compat_sys_utime(char __user *filename, struct compat_utimbuf __user *t) { - struct timeval tv[2]; + struct timespec tv[2]; if (t) { if (get_user(tv[0].tv_sec, &t->actime) || get_user(tv[1].tv_sec, &t->modtime)) return -EFAULT; - tv[0].tv_usec = 0; - tv[1].tv_usec = 0; + tv[0].tv_nsec = 0; + tv[1].tv_nsec = 0; } - return do_utimes(AT_FDCWD, filename, t ? tv : NULL); + return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0); +} + +asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename, struct compat_timespec __user *t, int flags) +{ + struct timespec tv[2]; + + if (t) { + if (get_compat_timespec(&tv[0], &t[0]) || + get_compat_timespec(&tv[1], &t[1])) + return -EFAULT; + + if ((tv[0].tv_nsec == UTIME_OMIT || tv[0].tv_nsec == UTIME_NOW) + && tv[0].tv_sec != 0) + return -EINVAL; + if ((tv[1].tv_nsec == UTIME_OMIT || tv[1].tv_nsec == UTIME_NOW) + && tv[1].tv_sec != 0) + return -EINVAL; + + if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT) + return 0; + } + return do_utimes(dfd, filename, t ? tv : NULL, flags); } asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename, struct compat_timeval __user *t) { - struct timeval tv[2]; + struct timespec tv[2]; if (t) { if (get_user(tv[0].tv_sec, &t[0].tv_sec) || - get_user(tv[0].tv_usec, &t[0].tv_usec) || + get_user(tv[0].tv_nsec, &t[0].tv_usec) || get_user(tv[1].tv_sec, &t[1].tv_sec) || - get_user(tv[1].tv_usec, &t[1].tv_usec)) + get_user(tv[1].tv_nsec, &t[1].tv_usec)) return -EFAULT; + if (tv[0].tv_nsec >= 1000000 || tv[0].tv_nsec < 0 || + tv[1].tv_nsec >= 1000000 || tv[1].tv_nsec < 0) + return -EINVAL; + tv[0].tv_nsec *= 1000; + tv[1].tv_nsec *= 1000; } - return do_utimes(dfd, filename, t ? tv : NULL); + return do_utimes(dfd, filename, t ? tv : NULL, 0); } asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval __user *t) diff --git a/fs/utimes.c b/fs/utimes.c index 99cf2cb11fec..480f7c8c29da 100644 --- a/fs/utimes.c +++ b/fs/utimes.c @@ -1,8 +1,10 @@ #include +#include #include #include #include #include +#include #include #include #include @@ -20,54 +22,18 @@ * must be owner or have write permission. * Else, update from *times, must be owner or super user. 
*/ -asmlinkage long sys_utime(char __user * filename, struct utimbuf __user * times) +asmlinkage long sys_utime(char __user *filename, struct utimbuf __user *times) { - int error; - struct nameidata nd; - struct inode * inode; - struct iattr newattrs; + struct timespec tv[2]; - error = user_path_walk(filename, &nd); - if (error) - goto out; - inode = nd.dentry->d_inode; - - error = -EROFS; - if (IS_RDONLY(inode)) - goto dput_and_out; - - /* Don't worry, the checks are done in inode_change_ok() */ - newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME; if (times) { - error = -EPERM; - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) - goto dput_and_out; - - error = get_user(newattrs.ia_atime.tv_sec, ×->actime); - newattrs.ia_atime.tv_nsec = 0; - if (!error) - error = get_user(newattrs.ia_mtime.tv_sec, ×->modtime); - newattrs.ia_mtime.tv_nsec = 0; - if (error) - goto dput_and_out; - - newattrs.ia_valid |= ATTR_ATIME_SET | ATTR_MTIME_SET; - } else { - error = -EACCES; - if (IS_IMMUTABLE(inode)) - goto dput_and_out; - - if (current->fsuid != inode->i_uid && - (error = vfs_permission(&nd, MAY_WRITE)) != 0) - goto dput_and_out; + if (get_user(tv[0].tv_sec, ×->actime) || + get_user(tv[1].tv_sec, ×->modtime)) + return -EFAULT; + tv[0].tv_nsec = 0; + tv[1].tv_nsec = 0; } - mutex_lock(&inode->i_mutex); - error = notify_change(nd.dentry, &newattrs); - mutex_unlock(&inode->i_mutex); -dput_and_out: - path_release(&nd); -out: - return error; + return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0); } #endif @@ -76,18 +42,38 @@ out: * must be owner or have write permission. * Else, update from *times, must be owner or super user. */ -long do_utimes(int dfd, char __user *filename, struct timeval *times) +long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags) { int error; struct nameidata nd; - struct inode * inode; + struct dentry *dentry; + struct inode *inode; struct iattr newattrs; + struct file *f = NULL; - error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd); - - if (error) + error = -EINVAL; + if (flags & ~AT_SYMLINK_NOFOLLOW) goto out; - inode = nd.dentry->d_inode; + + if (filename == NULL && dfd != AT_FDCWD) { + error = -EINVAL; + if (flags & AT_SYMLINK_NOFOLLOW) + goto out; + + error = -EBADF; + f = fget(dfd); + if (!f) + goto out; + dentry = f->f_path.dentry; + } else { + error = __user_walk_fd(dfd, filename, (flags & AT_SYMLINK_NOFOLLOW) ? 
0 : LOOKUP_FOLLOW, &nd); + if (error) + goto out; + + dentry = nd.dentry; + } + + inode = dentry->d_inode; error = -EROFS; if (IS_RDONLY(inode)) @@ -100,11 +86,21 @@ long do_utimes(int dfd, char __user *filename, struct timeval *times) if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) goto dput_and_out; - newattrs.ia_atime.tv_sec = times[0].tv_sec; - newattrs.ia_atime.tv_nsec = times[0].tv_usec * 1000; - newattrs.ia_mtime.tv_sec = times[1].tv_sec; - newattrs.ia_mtime.tv_nsec = times[1].tv_usec * 1000; - newattrs.ia_valid |= ATTR_ATIME_SET | ATTR_MTIME_SET; + if (times[0].tv_nsec == UTIME_OMIT) + newattrs.ia_valid &= ~ATTR_ATIME; + else if (times[0].tv_nsec != UTIME_NOW) { + newattrs.ia_atime.tv_sec = times[0].tv_sec; + newattrs.ia_atime.tv_nsec = times[0].tv_nsec; + newattrs.ia_valid |= ATTR_ATIME_SET; + } + + if (times[1].tv_nsec == UTIME_OMIT) + newattrs.ia_valid &= ~ATTR_MTIME; + else if (times[1].tv_nsec != UTIME_NOW) { + newattrs.ia_mtime.tv_sec = times[1].tv_sec; + newattrs.ia_mtime.tv_nsec = times[1].tv_nsec; + newattrs.ia_valid |= ATTR_MTIME_SET; + } } else { error = -EACCES; if (IS_IMMUTABLE(inode)) @@ -115,21 +111,67 @@ long do_utimes(int dfd, char __user *filename, struct timeval *times) goto dput_and_out; } mutex_lock(&inode->i_mutex); - error = notify_change(nd.dentry, &newattrs); + error = notify_change(dentry, &newattrs); mutex_unlock(&inode->i_mutex); dput_and_out: - path_release(&nd); + if (f) + fput(f); + else + path_release(&nd); out: return error; } +asmlinkage long sys_utimensat(int dfd, char __user *filename, struct timespec __user *utimes, int flags) +{ + struct timespec tstimes[2]; + + if (utimes) { + if (copy_from_user(&tstimes, utimes, sizeof(tstimes))) + return -EFAULT; + if ((tstimes[0].tv_nsec == UTIME_OMIT || + tstimes[0].tv_nsec == UTIME_NOW) && + tstimes[0].tv_sec != 0) + return -EINVAL; + if ((tstimes[1].tv_nsec == UTIME_OMIT || + tstimes[1].tv_nsec == UTIME_NOW) && + tstimes[1].tv_sec != 0) + return -EINVAL; + + /* Nothing to do, we must not even check the path. */ + if (tstimes[0].tv_nsec == UTIME_OMIT && + tstimes[1].tv_nsec == UTIME_OMIT) + return 0; + } + + return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags); +} + asmlinkage long sys_futimesat(int dfd, char __user *filename, struct timeval __user *utimes) { struct timeval times[2]; + struct timespec tstimes[2]; + + if (utimes) { + if (copy_from_user(×, utimes, sizeof(times))) + return -EFAULT; + + /* This test is needed to catch all invalid values. If we + would test only in do_utimes we would miss those invalid + values truncated by the multiplication with 1000. Note + that we also catch UTIME_{NOW,OMIT} here which are only + valid for utimensat. */ + if (times[0].tv_usec >= 1000000 || times[0].tv_usec < 0 || + times[1].tv_usec >= 1000000 || times[1].tv_usec < 0) + return -EINVAL; + + tstimes[0].tv_sec = times[0].tv_sec; + tstimes[0].tv_nsec = 1000 * times[0].tv_usec; + tstimes[1].tv_sec = times[1].tv_sec; + tstimes[1].tv_nsec = 1000 * times[1].tv_usec; + } - if (utimes && copy_from_user(×, utimes, sizeof(times))) - return -EFAULT; - return do_utimes(dfd, filename, utimes ? times : NULL); + return do_utimes(dfd, filename, utimes ? 
tstimes : NULL, 0); } asmlinkage long sys_utimes(char __user *filename, struct timeval __user *utimes) diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index 833fa1704ff9..bd21e795197c 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h @@ -325,10 +325,11 @@ #define __NR_move_pages 317 #define __NR_getcpu 318 #define __NR_epoll_pwait 319 +#define __NR_utimensat 320 #ifdef __KERNEL__ -#define NR_syscalls 320 +#define NR_syscalls 321 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 26e23e01c54a..595703949df3 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -619,6 +619,8 @@ __SYSCALL(__NR_sync_file_range, sys_sync_file_range) __SYSCALL(__NR_vmsplice, sys_vmsplice) #define __NR_move_pages 279 __SYSCALL(__NR_move_pages, sys_move_pages) +#define __NR_utimensat 280 +__SYSCALL(__NR_utimensat, sys_utimensat) #ifndef __NO_STUBS #define __ARCH_WANT_OLD_READDIR diff --git a/include/linux/stat.h b/include/linux/stat.h index 679ef0d70b6b..611c398dab72 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -53,6 +53,9 @@ #define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH) #define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH) +#define UTIME_NOW ((1l << 30) - 1l) +#define UTIME_OMIT ((1l << 30) - 2l) + #include #include diff --git a/include/linux/time.h b/include/linux/time.h index 50e31b1659a7..dda9be685ab6 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -109,7 +109,7 @@ extern void do_gettimeofday(struct timeval *tv); extern int do_settimeofday(struct timespec *tv); extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) -extern long do_utimes(int dfd, char __user *filename, struct timeval *times); +extern long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags); struct itimerval; extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue); -- cgit v1.2.3 From 5726fb2012f0d96153113ddb7f988a0daea587ce Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 8 May 2007 00:33:27 -0700 Subject: rtc: remove /sys/class/rtc-dev/* This simplifies the /dev support by removing a superfluous class_device (the /sys/class/rtc-dev stuff) and the class_interface that hooks it into the rtc core. Accordingly, if it's configured then /dev support is now part of the RTC core, and is never a separate module. It's another step towards being able to remove "struct class_device". [bunk@stusta.de: drivers/rtc/rtc-dev.c should #include "rtc-core.h"] Signed-off-by: David Brownell Acked-by: Greg Kroah-Hartman Acked-By: Alessandro Zummo Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/rtc/Kconfig | 2 +- drivers/rtc/Makefile | 3 +- drivers/rtc/class.c | 8 ++++ drivers/rtc/rtc-core.h | 15 +++++++ drivers/rtc/rtc-dev.c | 106 ++++++++++--------------------------------------- include/linux/rtc.h | 1 - 6 files changed, 46 insertions(+), 89 deletions(-) create mode 100644 drivers/rtc/rtc-core.h (limited to 'include/linux') diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 1b853c4ff86d..48eca80bbfc4 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -71,7 +71,7 @@ config RTC_INTF_PROC will be called rtc-proc. 
config RTC_INTF_DEV - tristate "dev" + boolean "dev" depends on RTC_CLASS default RTC_CLASS help diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index f9a3e0130f4e..4820dcc6a8fa 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -11,9 +11,10 @@ obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o obj-$(CONFIG_RTC_CLASS) += rtc-core.o rtc-core-y := class.o interface.o +rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o + obj-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o obj-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o -obj-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 04aaa6347234..786406c2cf78 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -16,6 +16,9 @@ #include #include +#include "rtc-core.h" + + static DEFINE_IDR(rtc_idr); static DEFINE_MUTEX(idr_lock); struct class *rtc_class; @@ -85,6 +88,8 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, if (err) goto exit_kfree; + rtc_dev_add_device(rtc); + dev_info(dev, "rtc core: registered %s as %s\n", rtc->name, rtc->class_dev.class_id); @@ -118,6 +123,7 @@ void rtc_device_unregister(struct rtc_device *rtc) /* remove innards of this RTC, then disable it, before * letting any rtc_class_open() users access it again */ + rtc_dev_del_device(rtc); class_device_unregister(&rtc->class_dev); rtc->ops = NULL; mutex_unlock(&rtc->ops_lock); @@ -140,11 +146,13 @@ static int __init rtc_init(void) printk(KERN_ERR "%s: couldn't create class\n", __FILE__); return PTR_ERR(rtc_class); } + rtc_dev_init(); return 0; } static void __exit rtc_exit(void) { + rtc_dev_exit(); class_destroy(rtc_class); } diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h new file mode 100644 index 000000000000..c3c495ed1869 --- /dev/null +++ b/drivers/rtc/rtc-core.h @@ -0,0 +1,15 @@ +#ifdef CONFIG_RTC_INTF_DEV + +extern void __init rtc_dev_init(void); +extern void __exit rtc_dev_exit(void); +extern void rtc_dev_add_device(struct rtc_device *rtc); +extern void rtc_dev_del_device(struct rtc_device *rtc); + +#else + +#define rtc_dev_init() do{}while(0) +#define rtc_dev_exit() do{}while(0) +#define rtc_dev_add_device(r) do{}while(0) +#define rtc_dev_del_device(r) do{}while(0) + +#endif diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 137330b8636b..3fa6138523b5 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -13,8 +13,8 @@ #include #include +#include "rtc-core.h" -static struct class *rtc_dev_class; static dev_t rtc_devt; #define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... 
*/ @@ -397,17 +397,18 @@ static const struct file_operations rtc_dev_fops = { /* insertion/removal hooks */ -static int rtc_dev_add_device(struct class_device *class_dev, - struct class_interface *class_intf) +void rtc_dev_add_device(struct rtc_device *rtc) { - int err = 0; - struct rtc_device *rtc = to_rtc_device(class_dev); + if (!rtc_devt) + return; if (rtc->id >= RTC_DEV_MAX) { - dev_err(class_dev->dev, "too many RTCs\n"); - return -EINVAL; + pr_debug("%s: too many RTC devices\n", rtc->name); + return; } + rtc->class_dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); + mutex_init(&rtc->char_lock); spin_lock_init(&rtc->irq_lock); init_waitqueue_head(&rtc->irq_queue); @@ -419,99 +420,32 @@ static int rtc_dev_add_device(struct class_device *class_dev, cdev_init(&rtc->char_dev, &rtc_dev_fops); rtc->char_dev.owner = rtc->owner; - if (cdev_add(&rtc->char_dev, MKDEV(MAJOR(rtc_devt), rtc->id), 1)) { - dev_err(class_dev->dev, - "failed to add char device %d:%d\n", + if (cdev_add(&rtc->char_dev, rtc->class_dev.devt, 1)) + printk(KERN_WARNING "%s: failed to add char device %d:%d\n", + rtc->name, MAJOR(rtc_devt), rtc->id); + else + pr_debug("%s: dev (%d:%d)\n", rtc->name, MAJOR(rtc_devt), rtc->id); - return -ENODEV; - } - - rtc->rtc_dev = class_device_create(rtc_dev_class, NULL, - MKDEV(MAJOR(rtc_devt), rtc->id), - class_dev->dev, "rtc%d", rtc->id); - if (IS_ERR(rtc->rtc_dev)) { - dev_err(class_dev->dev, "cannot create rtc_dev device\n"); - err = PTR_ERR(rtc->rtc_dev); - goto err_cdev_del; - } - - dev_dbg(class_dev->dev, "rtc intf: dev (%d:%d)\n", - MAJOR(rtc->rtc_dev->devt), - MINOR(rtc->rtc_dev->devt)); - - return 0; - -err_cdev_del: - - cdev_del(&rtc->char_dev); - return err; } -static void rtc_dev_remove_device(struct class_device *class_dev, - struct class_interface *class_intf) +void rtc_dev_del_device(struct rtc_device *rtc) { - struct rtc_device *rtc = to_rtc_device(class_dev); - - if (rtc->rtc_dev) { - dev_dbg(class_dev->dev, "removing char %d:%d\n", - MAJOR(rtc->rtc_dev->devt), - MINOR(rtc->rtc_dev->devt)); - - class_device_unregister(rtc->rtc_dev); + if (rtc->class_dev.devt) cdev_del(&rtc->char_dev); - } } -/* interface registration */ - -static struct class_interface rtc_dev_interface = { - .add = &rtc_dev_add_device, - .remove = &rtc_dev_remove_device, -}; - -static int __init rtc_dev_init(void) +void __init rtc_dev_init(void) { int err; - rtc_dev_class = class_create(THIS_MODULE, "rtc-dev"); - if (IS_ERR(rtc_dev_class)) - return PTR_ERR(rtc_dev_class); - err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc"); - if (err < 0) { + if (err < 0) printk(KERN_ERR "%s: failed to allocate char dev region\n", __FILE__); - goto err_destroy_class; - } - - err = rtc_interface_register(&rtc_dev_interface); - if (err < 0) { - printk(KERN_ERR "%s: failed to register the interface\n", - __FILE__); - goto err_unregister_chrdev; - } - - return 0; - -err_unregister_chrdev: - unregister_chrdev_region(rtc_devt, RTC_DEV_MAX); - -err_destroy_class: - class_destroy(rtc_dev_class); - - return err; } -static void __exit rtc_dev_exit(void) +void __exit rtc_dev_exit(void) { - class_interface_unregister(&rtc_dev_interface); - class_destroy(rtc_dev_class); - unregister_chrdev_region(rtc_devt, RTC_DEV_MAX); + if (rtc_devt) + unregister_chrdev_region(rtc_devt, RTC_DEV_MAX); } - -subsys_initcall(rtc_dev_init); -module_exit(rtc_dev_exit); - -MODULE_AUTHOR("Alessandro Zummo "); -MODULE_DESCRIPTION("RTC class dev interface"); -MODULE_LICENSE("GPL"); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 
5e22d4510d11..f662ae1aa621 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -145,7 +145,6 @@ struct rtc_device const struct rtc_class_ops *ops; struct mutex ops_lock; - struct class_device *rtc_dev; struct cdev char_dev; struct mutex char_lock; -- cgit v1.2.3 From ab6a2d70d18edc7a716ef3127b9e13382faec98c Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 8 May 2007 00:33:30 -0700 Subject: rtc: rtc interfaces don't use class_device This patch removes class_device from the programming interface that the RTC framework exposes to the rest of the kernel. Now an rtc_device is passed, which is more type-safe and streamlines all the relevant code. Signed-off-by: David Brownell Acked-by: Greg Kroah-Hartman Acked-By: Alessandro Zummo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/rtc/hctosys.c | 14 ++++---- drivers/rtc/interface.c | 78 ++++++++++++++++++++------------------------ drivers/rtc/rtc-at91rm9200.c | 2 +- drivers/rtc/rtc-cmos.c | 12 +++---- drivers/rtc/rtc-core.h | 2 ++ drivers/rtc/rtc-dev.c | 27 ++++++++------- drivers/rtc/rtc-ds1553.c | 2 +- drivers/rtc/rtc-omap.c | 4 +-- drivers/rtc/rtc-pl031.c | 2 +- drivers/rtc/rtc-proc.c | 19 ++++++----- drivers/rtc/rtc-s3c.c | 4 +-- drivers/rtc/rtc-sa1100.c | 4 +-- drivers/rtc/rtc-sh.c | 6 ++-- drivers/rtc/rtc-sysfs.c | 18 ++++++---- drivers/rtc/rtc-test.c | 6 ++-- drivers/rtc/rtc-vr41xx.c | 4 +-- include/linux/rtc.h | 33 +++++++++---------- 17 files changed, 119 insertions(+), 118 deletions(-) (limited to 'include/linux') diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c index d02fe9a0001f..f48a8aed5b0f 100644 --- a/drivers/rtc/hctosys.c +++ b/drivers/rtc/hctosys.c @@ -26,15 +26,15 @@ static int __init rtc_hctosys(void) { int err; struct rtc_time tm; - struct class_device *class_dev = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); + struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); - if (class_dev == NULL) { + if (rtc == NULL) { printk("%s: unable to open rtc device (%s)\n", __FILE__, CONFIG_RTC_HCTOSYS_DEVICE); return -ENODEV; } - err = rtc_read_time(class_dev, &tm); + err = rtc_read_time(rtc, &tm); if (err == 0) { err = rtc_valid_tm(&tm); if (err == 0) { @@ -46,7 +46,7 @@ static int __init rtc_hctosys(void) do_settimeofday(&tv); - dev_info(class_dev->dev, + dev_info(rtc->class_dev.dev, "setting the system clock to " "%d-%02d-%02d %02d:%02d:%02d (%u)\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, @@ -54,14 +54,14 @@ static int __init rtc_hctosys(void) (unsigned int) tv.tv_sec); } else - dev_err(class_dev->dev, + dev_err(rtc->class_dev.dev, "hctosys: invalid date/time\n"); } else - dev_err(class_dev->dev, + dev_err(rtc->class_dev.dev, "hctosys: unable to read the hardware clock\n"); - rtc_class_close(class_dev); + rtc_class_close(rtc); return 0; } diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index ef40df0f169d..d9d326ff6253 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -13,10 +13,9 @@ #include -int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm) +int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) { int err; - struct rtc_device *rtc = to_rtc_device(class_dev); err = mutex_lock_interruptible(&rtc->ops_lock); if (err) @@ -28,7 +27,7 @@ int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm) err = -EINVAL; else { memset(tm, 0, sizeof(struct rtc_time)); - err = rtc->ops->read_time(class_dev->dev, tm); + err = rtc->ops->read_time(rtc->class_dev.dev, tm); } mutex_unlock(&rtc->ops_lock); @@ -36,10 
+35,9 @@ int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm) } EXPORT_SYMBOL_GPL(rtc_read_time); -int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm) +int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) { int err; - struct rtc_device *rtc = to_rtc_device(class_dev); err = rtc_valid_tm(tm); if (err != 0) @@ -54,17 +52,16 @@ int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm) else if (!rtc->ops->set_time) err = -EINVAL; else - err = rtc->ops->set_time(class_dev->dev, tm); + err = rtc->ops->set_time(rtc->class_dev.dev, tm); mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_set_time); -int rtc_set_mmss(struct class_device *class_dev, unsigned long secs) +int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs) { int err; - struct rtc_device *rtc = to_rtc_device(class_dev); err = mutex_lock_interruptible(&rtc->ops_lock); if (err) @@ -73,11 +70,11 @@ int rtc_set_mmss(struct class_device *class_dev, unsigned long secs) if (!rtc->ops) err = -ENODEV; else if (rtc->ops->set_mmss) - err = rtc->ops->set_mmss(class_dev->dev, secs); + err = rtc->ops->set_mmss(rtc->class_dev.dev, secs); else if (rtc->ops->read_time && rtc->ops->set_time) { struct rtc_time new, old; - err = rtc->ops->read_time(class_dev->dev, &old); + err = rtc->ops->read_time(rtc->class_dev.dev, &old); if (err == 0) { rtc_time_to_tm(secs, &new); @@ -89,7 +86,8 @@ int rtc_set_mmss(struct class_device *class_dev, unsigned long secs) */ if (!((old.tm_hour == 23 && old.tm_min == 59) || (new.tm_hour == 23 && new.tm_min == 59))) - err = rtc->ops->set_time(class_dev->dev, &new); + err = rtc->ops->set_time(rtc->class_dev.dev, + &new); } } else @@ -101,10 +99,9 @@ int rtc_set_mmss(struct class_device *class_dev, unsigned long secs) } EXPORT_SYMBOL_GPL(rtc_set_mmss); -int rtc_read_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm) +int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; - struct rtc_device *rtc = to_rtc_device(class_dev); err = mutex_lock_interruptible(&rtc->ops_lock); if (err) @@ -116,7 +113,7 @@ int rtc_read_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm) err = -EINVAL; else { memset(alarm, 0, sizeof(struct rtc_wkalrm)); - err = rtc->ops->read_alarm(class_dev->dev, alarm); + err = rtc->ops->read_alarm(rtc->class_dev.dev, alarm); } mutex_unlock(&rtc->ops_lock); @@ -124,10 +121,9 @@ int rtc_read_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm) } EXPORT_SYMBOL_GPL(rtc_read_alarm); -int rtc_set_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm) +int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; - struct rtc_device *rtc = to_rtc_device(class_dev); err = mutex_lock_interruptible(&rtc->ops_lock); if (err) @@ -138,7 +134,7 @@ int rtc_set_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm) else if (!rtc->ops->set_alarm) err = -EINVAL; else - err = rtc->ops->set_alarm(class_dev->dev, alarm); + err = rtc->ops->set_alarm(rtc->class_dev.dev, alarm); mutex_unlock(&rtc->ops_lock); return err; @@ -147,16 +143,14 @@ EXPORT_SYMBOL_GPL(rtc_set_alarm); /** * rtc_update_irq - report RTC periodic, alarm, and/or update irqs - * @class_dev: the rtc's class device + * @rtc: the rtc device * @num: how many irqs are being reported (usually one) * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF * Context: in_interrupt(), irqs blocked */ -void rtc_update_irq(struct class_device *class_dev, +void rtc_update_irq(struct rtc_device *rtc, 
unsigned long num, unsigned long events) { - struct rtc_device *rtc = to_rtc_device(class_dev); - spin_lock(&rtc->irq_lock); rtc->irq_data = (rtc->irq_data + (num << 8)) | events; spin_unlock(&rtc->irq_lock); @@ -171,40 +165,43 @@ void rtc_update_irq(struct class_device *class_dev, } EXPORT_SYMBOL_GPL(rtc_update_irq); -struct class_device *rtc_class_open(char *name) +struct rtc_device *rtc_class_open(char *name) { - struct class_device *class_dev = NULL, - *class_dev_tmp; + struct class_device *class_dev_tmp; + struct rtc_device *rtc = NULL; down(&rtc_class->sem); list_for_each_entry(class_dev_tmp, &rtc_class->children, node) { if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) { - class_dev = class_device_get(class_dev_tmp); + class_dev_tmp = class_device_get(class_dev_tmp); + if (class_dev_tmp) + rtc = to_rtc_device(class_dev_tmp); break; } } - if (class_dev) { - if (!try_module_get(to_rtc_device(class_dev)->owner)) - class_dev = NULL; + if (rtc) { + if (!try_module_get(rtc->owner)) { + class_device_put(class_dev_tmp); + rtc = NULL; + } } up(&rtc_class->sem); - return class_dev; + return rtc; } EXPORT_SYMBOL_GPL(rtc_class_open); -void rtc_class_close(struct class_device *class_dev) +void rtc_class_close(struct rtc_device *rtc) { - module_put(to_rtc_device(class_dev)->owner); - class_device_put(class_dev); + module_put(rtc->owner); + class_device_put(&rtc->class_dev); } EXPORT_SYMBOL_GPL(rtc_class_close); -int rtc_irq_register(struct class_device *class_dev, struct rtc_task *task) +int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task) { int retval = -EBUSY; - struct rtc_device *rtc = to_rtc_device(class_dev); if (task == NULL || task->func == NULL) return -EINVAL; @@ -220,9 +217,8 @@ int rtc_irq_register(struct class_device *class_dev, struct rtc_task *task) } EXPORT_SYMBOL_GPL(rtc_irq_register); -void rtc_irq_unregister(struct class_device *class_dev, struct rtc_task *task) +void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task) { - struct rtc_device *rtc = to_rtc_device(class_dev); spin_lock_irq(&rtc->irq_task_lock); if (rtc->irq_task == task) @@ -231,11 +227,10 @@ void rtc_irq_unregister(struct class_device *class_dev, struct rtc_task *task) } EXPORT_SYMBOL_GPL(rtc_irq_unregister); -int rtc_irq_set_state(struct class_device *class_dev, struct rtc_task *task, int enabled) +int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled) { int err = 0; unsigned long flags; - struct rtc_device *rtc = to_rtc_device(class_dev); if (rtc->ops->irq_set_state == NULL) return -ENXIO; @@ -246,17 +241,16 @@ int rtc_irq_set_state(struct class_device *class_dev, struct rtc_task *task, int spin_unlock_irqrestore(&rtc->irq_task_lock, flags); if (err == 0) - err = rtc->ops->irq_set_state(class_dev->dev, enabled); + err = rtc->ops->irq_set_state(rtc->class_dev.dev, enabled); return err; } EXPORT_SYMBOL_GPL(rtc_irq_set_state); -int rtc_irq_set_freq(struct class_device *class_dev, struct rtc_task *task, int freq) +int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq) { int err = 0; unsigned long flags; - struct rtc_device *rtc = to_rtc_device(class_dev); if (rtc->ops->irq_set_freq == NULL) return -ENXIO; @@ -267,7 +261,7 @@ int rtc_irq_set_freq(struct class_device *class_dev, struct rtc_task *task, int spin_unlock_irqrestore(&rtc->irq_task_lock, flags); if (err == 0) { - err = rtc->ops->irq_set_freq(class_dev->dev, freq); + err = rtc->ops->irq_set_freq(rtc->class_dev.dev, freq); if (err == 0) rtc->irq_freq = freq; } diff 
--git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index ac0e68e2f025..2f06b5f9fb7b 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -263,7 +263,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) at91_sys_write(AT91_RTC_SCCR, rtsr); /* clear status reg */ - rtc_update_irq(&rtc->class_dev, 1, events); + rtc_update_irq(rtc, 1, events); pr_debug("%s(): num=%ld, events=0x%02lx\n", __FUNCTION__, events >> 8, events & 0x000000FF); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 7c0d60910077..4b586ebb5814 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -203,7 +203,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) rtc_intr = CMOS_READ(RTC_INTR_FLAGS); rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; if (is_intr(rtc_intr)) - rtc_update_irq(&cmos->rtc->class_dev, 1, rtc_intr); + rtc_update_irq(cmos->rtc, 1, rtc_intr); /* update alarm */ CMOS_WRITE(hrs, RTC_HOURS_ALARM); @@ -223,7 +223,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) rtc_intr = CMOS_READ(RTC_INTR_FLAGS); rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; if (is_intr(rtc_intr)) - rtc_update_irq(&cmos->rtc->class_dev, 1, rtc_intr); + rtc_update_irq(cmos->rtc, 1, rtc_intr); } spin_unlock_irq(&rtc_lock); @@ -304,7 +304,7 @@ cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) rtc_intr = CMOS_READ(RTC_INTR_FLAGS); rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; if (is_intr(rtc_intr)) - rtc_update_irq(&cmos->rtc->class_dev, 1, rtc_intr); + rtc_update_irq(cmos->rtc, 1, rtc_intr); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } @@ -471,7 +471,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) if (is_valid_irq(rtc_irq)) retval = request_irq(rtc_irq, cmos_interrupt, IRQF_DISABLED, cmos_rtc.rtc->class_dev.class_id, - &cmos_rtc.rtc->class_dev); + cmos_rtc.rtc); if (retval < 0) { dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq); goto cleanup1; @@ -555,7 +555,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg) irqstat = CMOS_READ(RTC_INTR_FLAGS); irqstat &= (tmp & RTC_IRQMASK) | RTC_IRQF; if (is_intr(irqstat)) - rtc_update_irq(&cmos->rtc->class_dev, 1, irqstat); + rtc_update_irq(cmos->rtc, 1, irqstat); } spin_unlock_irq(&rtc_lock); @@ -590,7 +590,7 @@ static int cmos_resume(struct device *dev) tmp = CMOS_READ(RTC_INTR_FLAGS); tmp &= (cmos->suspend_ctrl & RTC_IRQMASK) | RTC_IRQF; if (is_intr(tmp)) - rtc_update_irq(&cmos->rtc->class_dev, 1, tmp); + rtc_update_irq(cmos->rtc, 1, tmp); spin_unlock_irq(&rtc_lock); } diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h index c3c495ed1869..c9891423a468 100644 --- a/drivers/rtc/rtc-core.h +++ b/drivers/rtc/rtc-core.h @@ -1,3 +1,5 @@ +extern int rtc_interface_register(struct class_interface *intf); + #ifdef CONFIG_RTC_INTF_DEV extern void __init rtc_dev_init(void); diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 3fa6138523b5..623cb8d06b02 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -32,7 +32,7 @@ static int rtc_dev_open(struct inode *inode, struct file *file) if (!(mutex_trylock(&rtc->char_lock))) return -EBUSY; - file->private_data = &rtc->class_dev; + file->private_data = rtc; err = ops->open ? 
ops->open(rtc->class_dev.dev) : 0; if (err == 0) { @@ -61,7 +61,7 @@ static void rtc_uie_task(struct work_struct *work) int num = 0; int err; - err = rtc_read_time(&rtc->class_dev, &tm); + err = rtc_read_time(rtc, &tm); local_irq_disable(); spin_lock(&rtc->irq_lock); @@ -79,7 +79,7 @@ static void rtc_uie_task(struct work_struct *work) } spin_unlock(&rtc->irq_lock); if (num) - rtc_update_irq(&rtc->class_dev, num, RTC_UF | RTC_IRQF); + rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF); local_irq_enable(); } static void rtc_uie_timer(unsigned long data) @@ -121,7 +121,7 @@ static int set_uie(struct rtc_device *rtc) struct rtc_time tm; int err; - err = rtc_read_time(&rtc->class_dev, &tm); + err = rtc_read_time(rtc, &tm); if (err) return err; spin_lock_irq(&rtc->irq_lock); @@ -210,8 +210,7 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { int err = 0; - struct class_device *class_dev = file->private_data; - struct rtc_device *rtc = to_rtc_device(class_dev); + struct rtc_device *rtc = file->private_data; const struct rtc_class_ops *ops = rtc->ops; struct rtc_time tm; struct rtc_wkalrm alarm; @@ -252,7 +251,7 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, /* try the driver's ioctl interface */ if (ops->ioctl) { - err = ops->ioctl(class_dev->dev, cmd, arg); + err = ops->ioctl(rtc->class_dev.dev, cmd, arg); if (err != -ENOIOCTLCMD) return err; } @@ -264,7 +263,7 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, switch (cmd) { case RTC_ALM_READ: - err = rtc_read_alarm(class_dev, &alarm); + err = rtc_read_alarm(rtc, &alarm); if (err < 0) return err; @@ -284,11 +283,11 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, alarm.time.tm_wday = -1; alarm.time.tm_yday = -1; alarm.time.tm_isdst = -1; - err = rtc_set_alarm(class_dev, &alarm); + err = rtc_set_alarm(rtc, &alarm); break; case RTC_RD_TIME: - err = rtc_read_time(class_dev, &tm); + err = rtc_read_time(rtc, &tm); if (err < 0) return err; @@ -300,7 +299,7 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, if (copy_from_user(&tm, uarg, sizeof(tm))) return -EFAULT; - err = rtc_set_time(class_dev, &tm); + err = rtc_set_time(rtc, &tm); break; case RTC_IRQP_READ: @@ -310,7 +309,7 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, case RTC_IRQP_SET: if (ops->irq_set_freq) - err = rtc_irq_set_freq(class_dev, rtc->irq_task, arg); + err = rtc_irq_set_freq(rtc, rtc->irq_task, arg); break; #if 0 @@ -336,11 +335,11 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, if (copy_from_user(&alarm, uarg, sizeof(alarm))) return -EFAULT; - err = rtc_set_alarm(class_dev, &alarm); + err = rtc_set_alarm(rtc, &alarm); break; case RTC_WKALM_RD: - err = rtc_read_alarm(class_dev, &alarm); + err = rtc_read_alarm(rtc, &alarm); if (err < 0) return err; diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c index e27176c0e18f..afa64c7fa2e2 100644 --- a/drivers/rtc/rtc-ds1553.c +++ b/drivers/rtc/rtc-ds1553.c @@ -203,7 +203,7 @@ static irqreturn_t ds1553_rtc_interrupt(int irq, void *dev_id) events |= RTC_UF; else events |= RTC_AF; - rtc_update_irq(&pdata->rtc->class_dev, 1, events); + rtc_update_irq(pdata->rtc, 1, events); return IRQ_HANDLED; } diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index 9de8d67f4f8d..4ca3a5182cdb 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -124,7 +124,7 @@ static void rtc_wait_not_busy(void) /* now we have ~15 usec to read/write various registers */ } 
-static irqreturn_t rtc_irq(int irq, void *class_dev) +static irqreturn_t rtc_irq(int irq, void *rtc) { unsigned long events = 0; u8 irq_data; @@ -141,7 +141,7 @@ static irqreturn_t rtc_irq(int irq, void *class_dev) if (irq_data & OMAP_RTC_STATUS_1S_EVENT) events |= RTC_IRQF | RTC_UF; - rtc_update_irq(class_dev, 1, events); + rtc_update_irq(rtc, 1, events); return IRQ_HANDLED; } diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index f13daa9fecaa..e4bf68ca96f7 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -51,7 +51,7 @@ static irqreturn_t pl031_interrupt(int irq, void *dev_id) { struct rtc_device *rtc = dev_id; - rtc_update_irq(&rtc->class_dev, 1, RTC_AF); + rtc_update_irq(rtc, 1, RTC_AF); return IRQ_HANDLED; } diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c index 1bd624fc685c..195be6587b30 100644 --- a/drivers/rtc/rtc-proc.c +++ b/drivers/rtc/rtc-proc.c @@ -16,18 +16,21 @@ #include #include +#include "rtc-core.h" + + static struct class_device *rtc_dev = NULL; static DEFINE_MUTEX(rtc_lock); static int rtc_proc_show(struct seq_file *seq, void *offset) { int err; - struct class_device *class_dev = seq->private; - const struct rtc_class_ops *ops = to_rtc_device(class_dev)->ops; + struct rtc_device *rtc = seq->private; + const struct rtc_class_ops *ops = rtc->ops; struct rtc_wkalrm alrm; struct rtc_time tm; - err = rtc_read_time(class_dev, &tm); + err = rtc_read_time(rtc, &tm); if (err == 0) { seq_printf(seq, "rtc_time\t: %02d:%02d:%02d\n" @@ -36,7 +39,7 @@ static int rtc_proc_show(struct seq_file *seq, void *offset) tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday); } - err = rtc_read_alarm(class_dev, &alrm); + err = rtc_read_alarm(rtc, &alrm); if (err == 0) { seq_printf(seq, "alrm_time\t: "); if ((unsigned int)alrm.time.tm_hour <= 24) @@ -74,19 +77,19 @@ static int rtc_proc_show(struct seq_file *seq, void *offset) seq_printf(seq, "24hr\t\t: yes\n"); if (ops->proc) - ops->proc(class_dev->dev, seq); + ops->proc(rtc->class_dev.dev, seq); return 0; } static int rtc_proc_open(struct inode *inode, struct file *file) { - struct class_device *class_dev = PDE(inode)->data; + struct rtc_device *rtc = PDE(inode)->data; if (!try_module_get(THIS_MODULE)) return -ENODEV; - return single_open(file, rtc_proc_show, class_dev); + return single_open(file, rtc_proc_show, rtc); } static int rtc_proc_release(struct inode *inode, struct file *file) @@ -118,7 +121,7 @@ static int rtc_proc_add_device(struct class_device *class_dev, ent->proc_fops = &rtc_proc_fops; ent->owner = rtc->owner; - ent->data = class_dev; + ent->data = rtc; dev_dbg(class_dev->dev, "rtc intf: proc\n"); } diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 9a79a24a7487..3617c970caaa 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -50,7 +50,7 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) { struct rtc_device *rdev = id; - rtc_update_irq(&rdev->class_dev, 1, RTC_AF | RTC_IRQF); + rtc_update_irq(rdev, 1, RTC_AF | RTC_IRQF); return IRQ_HANDLED; } @@ -58,7 +58,7 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id) { struct rtc_device *rdev = id; - rtc_update_irq(&rdev->class_dev, tick_count++, RTC_PF | RTC_IRQF); + rtc_update_irq(rdev, tick_count++, RTC_PF | RTC_IRQF); return IRQ_HANDLED; } diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 677bae820dc3..0918b787c4dd 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -93,7 +93,7 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id) if (rtsr & RTSR_HZ) 
events |= RTC_UF | RTC_IRQF; - rtc_update_irq(&rtc->class_dev, 1, events); + rtc_update_irq(rtc, 1, events); if (rtsr & RTSR_AL && rtc_periodic_alarm(&rtc_alarm)) rtc_update_alarm(&rtc_alarm); @@ -119,7 +119,7 @@ static irqreturn_t timer1_interrupt(int irq, void *dev_id) */ OSSR = OSSR_M1; /* clear match on timer1 */ - rtc_update_irq(&rtc->class_dev, rtc_timer1_count, RTC_PF | RTC_IRQF); + rtc_update_irq(rtc, rtc_timer1_count, RTC_PF | RTC_IRQF); if (rtc_timer1_count == 1) rtc_timer1_count = (rtc_freq * ((1<<30)/(TIMER_FREQ>>2))); diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 198b9f22fbff..6abf4811958c 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -104,7 +104,7 @@ static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id) writeb(tmp, rtc->regbase + RCR1); - rtc_update_irq(&rtc->rtc_dev->class_dev, 1, events); + rtc_update_irq(&rtc->rtc_dev, 1, events); spin_unlock(&rtc->lock); @@ -139,7 +139,7 @@ static irqreturn_t sh_rtc_alarm(int irq, void *dev_id) rtc->rearm_aie = 1; - rtc_update_irq(&rtc->rtc_dev->class_dev, 1, events); + rtc_update_irq(&rtc->rtc_dev, 1, events); } spin_unlock(&rtc->lock); @@ -153,7 +153,7 @@ static irqreturn_t sh_rtc_periodic(int irq, void *dev_id) spin_lock(&rtc->lock); - rtc_update_irq(&rtc->rtc_dev->class_dev, 1, RTC_PF | RTC_IRQF); + rtc_update_irq(&rtc->rtc_dev, 1, RTC_PF | RTC_IRQF); spin_unlock(&rtc->lock); diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c index 899ab8c514fa..97444b45cc26 100644 --- a/drivers/rtc/rtc-sysfs.c +++ b/drivers/rtc/rtc-sysfs.c @@ -12,6 +12,9 @@ #include #include +#include "rtc-core.h" + + /* device attributes */ static ssize_t rtc_sysfs_show_name(struct class_device *dev, char *buf) @@ -25,7 +28,7 @@ static ssize_t rtc_sysfs_show_date(struct class_device *dev, char *buf) ssize_t retval; struct rtc_time tm; - retval = rtc_read_time(dev, &tm); + retval = rtc_read_time(to_rtc_device(dev), &tm); if (retval == 0) { retval = sprintf(buf, "%04d-%02d-%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday); @@ -40,7 +43,7 @@ static ssize_t rtc_sysfs_show_time(struct class_device *dev, char *buf) ssize_t retval; struct rtc_time tm; - retval = rtc_read_time(dev, &tm); + retval = rtc_read_time(to_rtc_device(dev), &tm); if (retval == 0) { retval = sprintf(buf, "%02d:%02d:%02d\n", tm.tm_hour, tm.tm_min, tm.tm_sec); @@ -55,7 +58,7 @@ static ssize_t rtc_sysfs_show_since_epoch(struct class_device *dev, char *buf) ssize_t retval; struct rtc_time tm; - retval = rtc_read_time(dev, &tm); + retval = rtc_read_time(to_rtc_device(dev), &tm); if (retval == 0) { unsigned long time; rtc_tm_to_time(&tm, &time); @@ -94,7 +97,7 @@ rtc_sysfs_show_wakealarm(struct class_device *dev, char *buf) * REVISIT maybe we should require RTC implementations to * disable the RTC alarm after it triggers, for uniformity. */ - retval = rtc_read_alarm(dev, &alm); + retval = rtc_read_alarm(to_rtc_device(dev), &alm); if (retval == 0 && alm.enabled) { rtc_tm_to_time(&alm.time, &alarm); retval = sprintf(buf, "%lu\n", alarm); @@ -109,11 +112,12 @@ rtc_sysfs_set_wakealarm(struct class_device *dev, const char *buf, size_t n) ssize_t retval; unsigned long now, alarm; struct rtc_wkalrm alm; + struct rtc_device *rtc = to_rtc_device(dev); /* Only request alarms that trigger in the future. Disable them * by writing another time, e.g. 0 meaning Jan 1 1970 UTC. 
*/ - retval = rtc_read_time(dev, &alm.time); + retval = rtc_read_time(rtc, &alm.time); if (retval < 0) return retval; rtc_tm_to_time(&alm.time, &now); @@ -124,7 +128,7 @@ rtc_sysfs_set_wakealarm(struct class_device *dev, const char *buf, size_t n) * entirely prevent that here, without even the minimal * locking from the /dev/rtcN api. */ - retval = rtc_read_alarm(dev, &alm); + retval = rtc_read_alarm(rtc, &alm); if (retval < 0) return retval; if (alm.enabled) @@ -141,7 +145,7 @@ rtc_sysfs_set_wakealarm(struct class_device *dev, const char *buf, size_t n) } rtc_time_to_tm(alarm, &alm.time); - retval = rtc_set_alarm(dev, &alm); + retval = rtc_set_alarm(rtc, &alm); return (retval < 0) ? retval : n; } static const CLASS_DEVICE_ATTR(wakealarm, S_IRUGO | S_IWUSR, diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c index f50a1b8e1607..254c9fce27da 100644 --- a/drivers/rtc/rtc-test.c +++ b/drivers/rtc/rtc-test.c @@ -101,11 +101,11 @@ static ssize_t test_irq_store(struct device *dev, retval = count; local_irq_disable(); if (strncmp(buf, "tick", 4) == 0) - rtc_update_irq(&rtc->class_dev, 1, RTC_PF | RTC_IRQF); + rtc_update_irq(rtc, 1, RTC_PF | RTC_IRQF); else if (strncmp(buf, "alarm", 5) == 0) - rtc_update_irq(&rtc->class_dev, 1, RTC_AF | RTC_IRQF); + rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF); else if (strncmp(buf, "update", 6) == 0) - rtc_update_irq(&rtc->class_dev, 1, RTC_UF | RTC_IRQF); + rtc_update_irq(rtc, 1, RTC_UF | RTC_IRQF); else retval = -EINVAL; local_irq_enable(); diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index e40322b71938..e9f9c5489468 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c @@ -275,7 +275,7 @@ static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id) rtc2_write(RTCINTREG, ELAPSEDTIME_INT); - rtc_update_irq(&rtc->class_dev, 1, RTC_AF); + rtc_update_irq(rtc, 1, RTC_AF); return IRQ_HANDLED; } @@ -291,7 +291,7 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id) rtc1_write(RTCL1LREG, count); rtc1_write(RTCL1HREG, count >> 16); - rtc_update_irq(&rtc->class_dev, 1, RTC_PF); + rtc_update_irq(rtc, 1, RTC_PF); return IRQ_HANDLED; } diff --git a/include/linux/rtc.h b/include/linux/rtc.h index f662ae1aa621..73ccd8fcc0c7 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -4,7 +4,7 @@ * service. It is used with both the legacy mc146818 and also EFI * Struct rtc_time and first 12 ioctl by Paul Gortmaker, 1996 - separated out * from to this file for 2.4 kernels. - * + * * Copyright (C) 1999 Hewlett-Packard Co. * Copyright (C) 1999 Stephane Eranian */ @@ -13,7 +13,7 @@ /* * The struct used to pass data via the following ioctl. Similar to the - * struct tm in , but it needs to be here so that the kernel + * struct tm in , but it needs to be here so that the kernel * source is self contained, allowing cross-compiles, etc. etc. 
*/ @@ -50,7 +50,7 @@ struct rtc_wkalrm { * pll_value*pll_posmult/pll_clock * -ve pll_value means clock will run slower by * pll_value*pll_negmult/pll_clock - */ + */ struct rtc_pll_info { int pll_ctrl; /* placeholder for fancier control */ @@ -174,29 +174,28 @@ extern struct rtc_device *rtc_device_register(const char *name, struct device *dev, const struct rtc_class_ops *ops, struct module *owner); -extern void rtc_device_unregister(struct rtc_device *rdev); -extern int rtc_interface_register(struct class_interface *intf); +extern void rtc_device_unregister(struct rtc_device *rtc); -extern int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm); -extern int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm); -extern int rtc_set_mmss(struct class_device *class_dev, unsigned long secs); -extern int rtc_read_alarm(struct class_device *class_dev, +extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); +extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); +extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs); +extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); -extern int rtc_set_alarm(struct class_device *class_dev, +extern int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); -extern void rtc_update_irq(struct class_device *class_dev, +extern void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events); -extern struct class_device *rtc_class_open(char *name); -extern void rtc_class_close(struct class_device *class_dev); +extern struct rtc_device *rtc_class_open(char *name); +extern void rtc_class_close(struct rtc_device *rtc); -extern int rtc_irq_register(struct class_device *class_dev, +extern int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task); -extern void rtc_irq_unregister(struct class_device *class_dev, +extern void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task); -extern int rtc_irq_set_state(struct class_device *class_dev, +extern int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled); -extern int rtc_irq_set_freq(struct class_device *class_dev, +extern int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq); typedef struct rtc_task { -- cgit v1.2.3 From cd9662094edf4173e87f0452e57e4eacc228f8ff Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 8 May 2007 00:33:40 -0700 Subject: rtc: remove rest of class_device Finish converting the RTC framework so it no longer uses class_device. 
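For illustration, the conversion follows the usual embedded-device pattern: struct rtc_device embeds a struct device, and container_of() recovers the wrapper from the embedded device. A minimal sketch with abbreviated fields (not the exact kernel definitions):

struct rtc_device {
	struct device dev;		/* embedded; replaces struct class_device */
	const struct rtc_class_ops *ops;
	/* ... */
};

#define to_rtc_device(d)	container_of(d, struct rtc_device, dev)

static void rtc_device_release(struct device *dev)
{
	struct rtc_device *rtc = to_rtc_device(dev);	/* back to the wrapper */

	kfree(rtc);		/* the wrapper owns the embedded device */
}

Registration then goes through device_register(&rtc->dev) and teardown through put_device(&rtc->dev), as in the hunks below.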
Signed-off-by: David Brownell Acked-by: Greg Kroah-Hartman Acked-By: Alessandro Zummo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/rtc/class.c | 22 +++++++++++----------- drivers/rtc/hctosys.c | 6 +++--- drivers/rtc/interface.c | 34 +++++++++++++++++----------------- drivers/rtc/rtc-cmos.c | 12 ++++++------ drivers/rtc/rtc-dev.c | 14 +++++++------- drivers/rtc/rtc-omap.c | 8 ++++---- drivers/rtc/rtc-proc.c | 2 +- drivers/rtc/rtc-sysfs.c | 38 +++++++++++++++++++++++--------------- include/linux/rtc.h | 4 ++-- 9 files changed, 74 insertions(+), 66 deletions(-) (limited to 'include/linux') diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 9230001bd591..d58d74cf570e 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -23,9 +23,9 @@ static DEFINE_IDR(rtc_idr); static DEFINE_MUTEX(idr_lock); struct class *rtc_class; -static void rtc_device_release(struct class_device *class_dev) +static void rtc_device_release(struct device *dev) { - struct rtc_device *rtc = to_rtc_device(class_dev); + struct rtc_device *rtc = to_rtc_device(dev); mutex_lock(&idr_lock); idr_remove(&rtc_idr, rtc->id); mutex_unlock(&idr_lock); @@ -73,18 +73,18 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, rtc->ops = ops; rtc->owner = owner; rtc->max_user_freq = 64; - rtc->class_dev.dev = dev; - rtc->class_dev.class = rtc_class; - rtc->class_dev.release = rtc_device_release; + rtc->dev.parent = dev; + rtc->dev.class = rtc_class; + rtc->dev.release = rtc_device_release; mutex_init(&rtc->ops_lock); spin_lock_init(&rtc->irq_lock); spin_lock_init(&rtc->irq_task_lock); strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); - snprintf(rtc->class_dev.class_id, BUS_ID_SIZE, "rtc%d", id); + snprintf(rtc->dev.bus_id, BUS_ID_SIZE, "rtc%d", id); - err = class_device_register(&rtc->class_dev); + err = device_register(&rtc->dev); if (err) goto exit_kfree; @@ -93,7 +93,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, rtc_proc_add_device(rtc); dev_info(dev, "rtc core: registered %s as %s\n", - rtc->name, rtc->class_dev.class_id); + rtc->name, rtc->dev.bus_id); return rtc; @@ -120,7 +120,7 @@ EXPORT_SYMBOL_GPL(rtc_device_register); */ void rtc_device_unregister(struct rtc_device *rtc) { - if (class_device_get(&rtc->class_dev) != NULL) { + if (get_device(&rtc->dev) != NULL) { mutex_lock(&rtc->ops_lock); /* remove innards of this RTC, then disable it, before * letting any rtc_class_open() users access it again @@ -128,10 +128,10 @@ void rtc_device_unregister(struct rtc_device *rtc) rtc_sysfs_del_device(rtc); rtc_dev_del_device(rtc); rtc_proc_del_device(rtc); - class_device_unregister(&rtc->class_dev); + device_unregister(&rtc->dev); rtc->ops = NULL; mutex_unlock(&rtc->ops_lock); - class_device_put(&rtc->class_dev); + put_device(&rtc->dev); } } EXPORT_SYMBOL_GPL(rtc_device_unregister); diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c index f48a8aed5b0f..178527252c6a 100644 --- a/drivers/rtc/hctosys.c +++ b/drivers/rtc/hctosys.c @@ -46,7 +46,7 @@ static int __init rtc_hctosys(void) do_settimeofday(&tv); - dev_info(rtc->class_dev.dev, + dev_info(rtc->dev.parent, "setting the system clock to " "%d-%02d-%02d %02d:%02d:%02d (%u)\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, @@ -54,11 +54,11 @@ static int __init rtc_hctosys(void) (unsigned int) tv.tv_sec); } else - dev_err(rtc->class_dev.dev, + dev_err(rtc->dev.parent, "hctosys: invalid date/time\n"); } else - dev_err(rtc->class_dev.dev, + dev_err(rtc->dev.parent, "hctosys: unable to read 
the hardware clock\n"); rtc_class_close(rtc); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index d9d326ff6253..b5cc5f82436d 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -27,7 +27,7 @@ int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) err = -EINVAL; else { memset(tm, 0, sizeof(struct rtc_time)); - err = rtc->ops->read_time(rtc->class_dev.dev, tm); + err = rtc->ops->read_time(rtc->dev.parent, tm); } mutex_unlock(&rtc->ops_lock); @@ -52,7 +52,7 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) else if (!rtc->ops->set_time) err = -EINVAL; else - err = rtc->ops->set_time(rtc->class_dev.dev, tm); + err = rtc->ops->set_time(rtc->dev.parent, tm); mutex_unlock(&rtc->ops_lock); return err; @@ -70,11 +70,11 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs) if (!rtc->ops) err = -ENODEV; else if (rtc->ops->set_mmss) - err = rtc->ops->set_mmss(rtc->class_dev.dev, secs); + err = rtc->ops->set_mmss(rtc->dev.parent, secs); else if (rtc->ops->read_time && rtc->ops->set_time) { struct rtc_time new, old; - err = rtc->ops->read_time(rtc->class_dev.dev, &old); + err = rtc->ops->read_time(rtc->dev.parent, &old); if (err == 0) { rtc_time_to_tm(secs, &new); @@ -86,7 +86,7 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs) */ if (!((old.tm_hour == 23 && old.tm_min == 59) || (new.tm_hour == 23 && new.tm_min == 59))) - err = rtc->ops->set_time(rtc->class_dev.dev, + err = rtc->ops->set_time(rtc->dev.parent, &new); } } @@ -113,7 +113,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) err = -EINVAL; else { memset(alarm, 0, sizeof(struct rtc_wkalrm)); - err = rtc->ops->read_alarm(rtc->class_dev.dev, alarm); + err = rtc->ops->read_alarm(rtc->dev.parent, alarm); } mutex_unlock(&rtc->ops_lock); @@ -134,7 +134,7 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) else if (!rtc->ops->set_alarm) err = -EINVAL; else - err = rtc->ops->set_alarm(rtc->class_dev.dev, alarm); + err = rtc->ops->set_alarm(rtc->dev.parent, alarm); mutex_unlock(&rtc->ops_lock); return err; @@ -167,22 +167,22 @@ EXPORT_SYMBOL_GPL(rtc_update_irq); struct rtc_device *rtc_class_open(char *name) { - struct class_device *class_dev_tmp; + struct device *dev; struct rtc_device *rtc = NULL; down(&rtc_class->sem); - list_for_each_entry(class_dev_tmp, &rtc_class->children, node) { - if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) { - class_dev_tmp = class_device_get(class_dev_tmp); - if (class_dev_tmp) - rtc = to_rtc_device(class_dev_tmp); + list_for_each_entry(dev, &rtc_class->devices, node) { + if (strncmp(dev->bus_id, name, BUS_ID_SIZE) == 0) { + dev = get_device(dev); + if (dev) + rtc = to_rtc_device(dev); break; } } if (rtc) { if (!try_module_get(rtc->owner)) { - class_device_put(class_dev_tmp); + put_device(dev); rtc = NULL; } } @@ -195,7 +195,7 @@ EXPORT_SYMBOL_GPL(rtc_class_open); void rtc_class_close(struct rtc_device *rtc) { module_put(rtc->owner); - class_device_put(&rtc->class_dev); + put_device(&rtc->dev); } EXPORT_SYMBOL_GPL(rtc_class_close); @@ -241,7 +241,7 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled spin_unlock_irqrestore(&rtc->irq_task_lock, flags); if (err == 0) - err = rtc->ops->irq_set_state(rtc->class_dev.dev, enabled); + err = rtc->ops->irq_set_state(rtc->dev.parent, enabled); return err; } @@ -261,7 +261,7 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq) spin_unlock_irqrestore(&rtc->irq_task_lock, flags); if (err == 0) 
{ - err = rtc->ops->irq_set_freq(rtc->class_dev.dev, freq); + err = rtc->ops->irq_set_freq(rtc->dev.parent, freq); if (err == 0) rtc->irq_freq = freq; } diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 4b586ebb5814..4e8c373f78e3 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -434,7 +434,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) goto cleanup0; } } - rename_region(ports, cmos_rtc.rtc->class_dev.class_id); + rename_region(ports, cmos_rtc.rtc->dev.bus_id); spin_lock_irq(&rtc_lock); @@ -470,7 +470,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) if (is_valid_irq(rtc_irq)) retval = request_irq(rtc_irq, cmos_interrupt, IRQF_DISABLED, - cmos_rtc.rtc->class_dev.class_id, + cmos_rtc.rtc->dev.bus_id, cmos_rtc.rtc); if (retval < 0) { dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq); @@ -483,7 +483,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) */ pr_info("%s: alarms up to one %s%s\n", - cmos_rtc.rtc->class_dev.class_id, + cmos_rtc.rtc->dev.bus_id, is_valid_irq(rtc_irq) ? (cmos_rtc.mon_alrm ? "year" @@ -525,7 +525,7 @@ static void __exit cmos_do_remove(struct device *dev) rename_region(cmos->iomem, NULL); if (is_valid_irq(cmos->irq)) - free_irq(cmos->irq, &cmos_rtc.rtc->class_dev); + free_irq(cmos->irq, cmos_rtc.rtc); rtc_device_unregister(cmos_rtc.rtc); @@ -564,7 +564,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg) */ pr_debug("%s: suspend%s, ctrl %02x\n", - cmos_rtc.rtc->class_dev.class_id, + cmos_rtc.rtc->dev.bus_id, (tmp & RTC_AIE) ? ", alarm may wake" : "", tmp); @@ -595,7 +595,7 @@ static int cmos_resume(struct device *dev) } pr_debug("%s: resume, ctrl %02x\n", - cmos_rtc.rtc->class_dev.class_id, + cmos_rtc.rtc->dev.bus_id, cmos->suspend_ctrl); diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 623cb8d06b02..2c13433089a0 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -34,7 +34,7 @@ static int rtc_dev_open(struct inode *inode, struct file *file) file->private_data = rtc; - err = ops->open ? ops->open(rtc->class_dev.dev) : 0; + err = ops->open ? 
ops->open(rtc->dev.parent) : 0; if (err == 0) { spin_lock_irq(&rtc->irq_lock); rtc->irq_data = 0; @@ -180,7 +180,7 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) if (ret == 0) { /* Check for any data updates */ if (rtc->ops->read_callback) - data = rtc->ops->read_callback(rtc->class_dev.dev, + data = rtc->ops->read_callback(rtc->dev.parent, data); if (sizeof(int) != sizeof(long) && @@ -251,7 +251,7 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, /* try the driver's ioctl interface */ if (ops->ioctl) { - err = ops->ioctl(rtc->class_dev.dev, cmd, arg); + err = ops->ioctl(rtc->dev.parent, cmd, arg); if (err != -ENOIOCTLCMD) return err; } @@ -371,7 +371,7 @@ static int rtc_dev_release(struct inode *inode, struct file *file) clear_uie(rtc); #endif if (rtc->ops->release) - rtc->ops->release(rtc->class_dev.dev); + rtc->ops->release(rtc->dev.parent); mutex_unlock(&rtc->char_lock); return 0; @@ -406,7 +406,7 @@ void rtc_dev_add_device(struct rtc_device *rtc) return; } - rtc->class_dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); + rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); mutex_init(&rtc->char_lock); spin_lock_init(&rtc->irq_lock); @@ -419,7 +419,7 @@ void rtc_dev_add_device(struct rtc_device *rtc) cdev_init(&rtc->char_dev, &rtc_dev_fops); rtc->char_dev.owner = rtc->owner; - if (cdev_add(&rtc->char_dev, rtc->class_dev.devt, 1)) + if (cdev_add(&rtc->char_dev, rtc->dev.devt, 1)) printk(KERN_WARNING "%s: failed to add char device %d:%d\n", rtc->name, MAJOR(rtc_devt), rtc->id); else @@ -429,7 +429,7 @@ void rtc_dev_add_device(struct rtc_device *rtc) void rtc_dev_del_device(struct rtc_device *rtc) { - if (rtc->class_dev.devt) + if (rtc->dev.devt) cdev_del(&rtc->char_dev); } diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index 4ca3a5182cdb..ded35fd13ba0 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -399,7 +399,7 @@ static int __devinit omap_rtc_probe(struct platform_device *pdev) goto fail; } platform_set_drvdata(pdev, rtc); - class_set_devdata(&rtc->class_dev, mem); + dev_set_devdata(&rtc->dev, mem); /* clear pending irqs, and set 1/second periodic, * which we'll use instead of update irqs @@ -418,13 +418,13 @@ static int __devinit omap_rtc_probe(struct platform_device *pdev) /* handle periodic and alarm irqs */ if (request_irq(omap_rtc_timer, rtc_irq, IRQF_DISABLED, - rtc->class_dev.class_id, &rtc->class_dev)) { + rtc->dev.bus_id, rtc)) { pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n", pdev->name, omap_rtc_timer); goto fail0; } if (request_irq(omap_rtc_alarm, rtc_irq, IRQF_DISABLED, - rtc->class_dev.class_id, &rtc->class_dev)) { + rtc->dev.bus_id, rtc)) { pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n", pdev->name, omap_rtc_alarm); goto fail1; @@ -481,7 +481,7 @@ static int __devexit omap_rtc_remove(struct platform_device *pdev) free_irq(omap_rtc_timer, rtc); free_irq(omap_rtc_alarm, rtc); - release_resource(class_get_devdata(&rtc->class_dev)); + release_resource(dev_get_devdata(&rtc->dev)); rtc_device_unregister(rtc); return 0; } diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c index 3d7f4547c7d4..8d300e6d0d9e 100644 --- a/drivers/rtc/rtc-proc.c +++ b/drivers/rtc/rtc-proc.c @@ -74,7 +74,7 @@ static int rtc_proc_show(struct seq_file *seq, void *offset) seq_printf(seq, "24hr\t\t: yes\n"); if (ops->proc) - ops->proc(rtc->class_dev.dev, seq); + ops->proc(rtc->dev.parent, seq); return 0; } diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c index 
1c2fa0cc9cbb..69df94b44841 100644 --- a/drivers/rtc/rtc-sysfs.c +++ b/drivers/rtc/rtc-sysfs.c @@ -17,12 +17,16 @@ /* device attributes */ -static ssize_t rtc_sysfs_show_name(struct class_device *dev, char *buf) +static ssize_t +rtc_sysfs_show_name(struct device *dev, struct device_attribute *attr, + char *buf) { return sprintf(buf, "%s\n", to_rtc_device(dev)->name); } -static ssize_t rtc_sysfs_show_date(struct class_device *dev, char *buf) +static ssize_t +rtc_sysfs_show_date(struct device *dev, struct device_attribute *attr, + char *buf) { ssize_t retval; struct rtc_time tm; @@ -36,7 +40,9 @@ static ssize_t rtc_sysfs_show_date(struct class_device *dev, char *buf) return retval; } -static ssize_t rtc_sysfs_show_time(struct class_device *dev, char *buf) +static ssize_t +rtc_sysfs_show_time(struct device *dev, struct device_attribute *attr, + char *buf) { ssize_t retval; struct rtc_time tm; @@ -50,7 +56,9 @@ static ssize_t rtc_sysfs_show_time(struct class_device *dev, char *buf) return retval; } -static ssize_t rtc_sysfs_show_since_epoch(struct class_device *dev, char *buf) +static ssize_t +rtc_sysfs_show_since_epoch(struct device *dev, struct device_attribute *attr, + char *buf) { ssize_t retval; struct rtc_time tm; @@ -65,7 +73,7 @@ static ssize_t rtc_sysfs_show_since_epoch(struct class_device *dev, char *buf) return retval; } -static struct class_device_attribute rtc_attrs[] = { +static struct device_attribute rtc_attrs[] = { __ATTR(name, S_IRUGO, rtc_sysfs_show_name, NULL), __ATTR(date, S_IRUGO, rtc_sysfs_show_date, NULL), __ATTR(time, S_IRUGO, rtc_sysfs_show_time, NULL), @@ -74,7 +82,8 @@ static struct class_device_attribute rtc_attrs[] = { }; static ssize_t -rtc_sysfs_show_wakealarm(struct class_device *dev, char *buf) +rtc_sysfs_show_wakealarm(struct device *dev, struct device_attribute *attr, + char *buf) { ssize_t retval; unsigned long alarm; @@ -98,7 +107,8 @@ rtc_sysfs_show_wakealarm(struct class_device *dev, char *buf) } static ssize_t -rtc_sysfs_set_wakealarm(struct class_device *dev, const char *buf, size_t n) +rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) { ssize_t retval; unsigned long now, alarm; @@ -139,7 +149,7 @@ rtc_sysfs_set_wakealarm(struct class_device *dev, const char *buf, size_t n) retval = rtc_set_alarm(rtc, &alm); return (retval < 0) ? retval : n; } -static const CLASS_DEVICE_ATTR(wakealarm, S_IRUGO | S_IWUSR, +static DEVICE_ATTR(wakealarm, S_IRUGO | S_IWUSR, rtc_sysfs_show_wakealarm, rtc_sysfs_set_wakealarm); @@ -150,7 +160,7 @@ static const CLASS_DEVICE_ATTR(wakealarm, S_IRUGO | S_IWUSR, */ static inline int rtc_does_wakealarm(struct rtc_device *rtc) { - if (!device_can_wakeup(rtc->class_dev.dev)) + if (!device_can_wakeup(rtc->dev.parent)) return 0; return rtc->ops->set_alarm != NULL; } @@ -164,10 +174,9 @@ void rtc_sysfs_add_device(struct rtc_device *rtc) if (!rtc_does_wakealarm(rtc)) return; - err = class_device_create_file(&rtc->class_dev, - &class_device_attr_wakealarm); + err = device_create_file(&rtc->dev, &dev_attr_wakealarm); if (err) - dev_err(rtc->class_dev.dev, "failed to create " + dev_err(rtc->dev.parent, "failed to create " "alarm attribute, %d", err); } @@ -176,11 +185,10 @@ void rtc_sysfs_del_device(struct rtc_device *rtc) { /* REVISIT did we add it successfully? 
*/ if (rtc_does_wakealarm(rtc)) - class_device_remove_file(&rtc->class_dev, - &class_device_attr_wakealarm); + device_remove_file(&rtc->dev, &dev_attr_wakealarm); } void __init rtc_sysfs_init(struct class *rtc_class) { - rtc_class->class_dev_attrs = rtc_attrs; + rtc_class->dev_attrs = rtc_attrs; } diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 73ccd8fcc0c7..07ecd4ca8ca7 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -136,7 +136,7 @@ struct rtc_task; struct rtc_device { - struct class_device class_dev; + struct device dev; struct module *owner; int id; @@ -168,7 +168,7 @@ struct rtc_device unsigned int uie_timer_active:1; #endif }; -#define to_rtc_device(d) container_of(d, struct rtc_device, class_dev) +#define to_rtc_device(d) container_of(d, struct rtc_device, dev) extern struct rtc_device *rtc_device_register(const char *name, struct device *dev, -- cgit v1.2.3 From 87ac84f42a7a580d0dd72ae31d6a5eb4bfe04c6d Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 8 May 2007 00:34:00 -0700 Subject: rtc-cmos wakeup interface I finally got around to testing the updated wakeup event hooks for rtc-cmos, and they follow in two patches: - Interface update ... when a simple enable_irq_wake() doesn't suffice, the platform data can hold suspend/resume callback hooks. - ACPI implementation ... provides callback hooks to do ACPI magic, and eliminate the legacy /proc/acpi/alarm file. The interface update could go into 2.6.21, but that's not essential; they will be NOPs on most PCs, without the ACPI stuff. I suspect the ACPI folk may have opinions about how to merge that second patch, and how to obsolete that legacy procfs file. I'd like to see that merge into 2.6.22 if possible... As for how to kick it in ... two ways: - The appended "rtcwake" program; updated since the last time it was posted, it deals much better with timezones and DST. - Write the /sys/class/rtc/.../wakealarm file, then go to sleep. For some reason RTC wake from "swsusp" stopped working on a system where it previously worked; the alarm setting appears to get clobbered. But on the bright side, RTC wake from "standby" worked on a system that had never been able to resume from that state before ... IDEACPI is my guess as to why it finally started to work. It's the old "two steps forward, one step back" dance, I guess. - Dave /* gcc -Wall -Os -o rtcwake rtcwake.c */ #include #include #include #include #include #include #include #include #include #include #include #include /* constants from legacy PC/AT hardware */ #define RTC_PF 0x40 #define RTC_AF 0x20 #define RTC_UF 0x10 /* * rtcwake -- enter a system sleep state until specified wakeup time. * * This uses cross-platform Linux interfaces to enter a system sleep state, * and leave it no later than a specified time. It uses any RTC framework * driver that supports standard driver model wakeup flags. * * This is normally used like the old "apmsleep" utility, to wake from a * suspend state like ACPI S1 (standby) or S3 (suspend-to-RAM). Most * platforms can implement those without analogues of BIOS, APM, or ACPI. * * On some systems, this can also be used like "nvram-wakeup", waking * from states like ACPI S4 (suspend to disk). Not all systems have * persistent media that are appropriate for such suspend modes. * * The best way to set the system's RTC is so that it holds the current * time in UTC. Use the "-l" flag to tell this program that the system * RTC uses a local timezone instead (maybe you dual-boot MS-Windows). 
*/ static char *progname; #ifdef DEBUG #define VERSION "1.0 dev (" __DATE__ " " __TIME__ ")" #else #define VERSION "0.9" #endif static unsigned verbose; static int rtc_is_utc = -1; static int may_wakeup(const char *devname) { char buf[128], *s; FILE *f; snprintf(buf, sizeof buf, "/sys/class/rtc/%s/device/power/wakeup", devname); f = fopen(buf, "r"); if (!f) { perror(buf); return 0; } fgets(buf, sizeof buf, f); fclose(f); s = strchr(buf, '\n'); if (!s) return 0; *s = 0; /* wakeup events could be disabled or not supported */ return strcmp(buf, "enabled") == 0; } /* all times should be in UTC */ static time_t sys_time; static time_t rtc_time; static int get_basetimes(int fd) { struct tm tm; struct rtc_time rtc; /* this process works in RTC time, except when working * with the system clock (which always uses UTC). */ if (rtc_is_utc) setenv("TZ", "UTC", 1); tzset(); /* read rtc and system clocks "at the same time", or as * precisely (+/- a second) as we can read them. */ if (ioctl(fd, RTC_RD_TIME, &rtc) < 0) { perror("read rtc time"); return 0; } sys_time = time(0); if (sys_time == (time_t)-1) { perror("read system time"); return 0; } /* convert rtc_time to normal arithmetic-friendly form, * updating tm.tm_wday as used by asctime(). */ memset(&tm, 0, sizeof tm); tm.tm_sec = rtc.tm_sec; tm.tm_min = rtc.tm_min; tm.tm_hour = rtc.tm_hour; tm.tm_mday = rtc.tm_mday; tm.tm_mon = rtc.tm_mon; tm.tm_year = rtc.tm_year; tm.tm_isdst = rtc.tm_isdst; /* stays unspecified? */ rtc_time = mktime(&tm); if (rtc_time == (time_t)-1) { perror("convert rtc time"); return 0; } if (verbose) { if (!rtc_is_utc) { printf("\ttzone = %ld\n", timezone); printf("\ttzname = %s\n", tzname[daylight]); gmtime_r(&rtc_time, &tm); } printf("\tsystime = %ld, (UTC) %s", (long) sys_time, asctime(gmtime(&sys_time))); printf("\trtctime = %ld, (UTC) %s", (long) rtc_time, asctime(&tm)); } return 1; } static int setup_alarm(int fd, time_t *wakeup) { struct tm *tm; struct rtc_wkalrm wake; tm = gmtime(wakeup); wake.time.tm_sec = tm->tm_sec; wake.time.tm_min = tm->tm_min; wake.time.tm_hour = tm->tm_hour; wake.time.tm_mday = tm->tm_mday; wake.time.tm_mon = tm->tm_mon; wake.time.tm_year = tm->tm_year; wake.time.tm_wday = tm->tm_wday; wake.time.tm_yday = tm->tm_yday; wake.time.tm_isdst = tm->tm_isdst; /* many rtc alarms only support up to 24 hours from 'now' ... */ if ((rtc_time + (24 * 60 * 60)) > *wakeup) { if (ioctl(fd, RTC_ALM_SET, &wake.time) < 0) { perror("set rtc alarm"); return 0; } if (ioctl(fd, RTC_AIE_ON, 0) < 0) { perror("enable rtc alarm"); return 0; } /* ... so use the "more than 24 hours" request only if we must */ } else { /* avoid an extra AIE_ON call */ wake.enabled = 1; if (ioctl(fd, RTC_WKALM_SET, &wake) < 0) { perror("set rtc wake alarm"); return 0; } } return 1; } static void suspend_system(const char *suspend) { FILE *f = fopen("/sys/power/state", "w"); if (!f) { perror("/sys/power/state"); return; } fprintf(f, "%s\n", suspend); fflush(f); /* this executes after wake from suspend */ fclose(f); } int main(int argc, char **argv) { static char *devname = "rtc0"; static unsigned seconds = 0; static char *suspend = "standby"; int t; int fd; time_t alarm = 0; progname = strrchr(argv[0], '/'); if (progname) progname++; else progname = argv[0]; if (chdir("/dev/") < 0) { perror("chdir /dev"); return 1; } while ((t = getopt(argc, argv, "d:lm:s:t:uVv")) != EOF) { switch (t) { case 'd': devname = optarg; break; case 'l': rtc_is_utc = 0; break; /* what system power mode to use? 
for now handle only * standardized mode names; eventually when systems define * their own state names, parse /sys/power/state. * * "on" is used just to test the RTC alarm mechanism, * bypassing all the wakeup-from-sleep infrastructure. */ case 'm': if (strcmp(optarg, "standby") == 0 || strcmp(optarg, "mem") == 0 || strcmp(optarg, "disk") == 0 || strcmp(optarg, "on") == 0 ) { suspend = optarg; break; } printf("%s: unrecognized suspend state '%s'\n", progname, optarg); goto usage; /* alarm time, seconds-to-sleep (relative) */ case 's': t = atoi(optarg); if (t < 0) { printf("%s: illegal interval %s seconds\n", progname, optarg); goto usage; } seconds = t; break; /* alarm time, time_t (absolute, seconds since 1/1 1970 UTC) */ case 't': t = atoi(optarg); if (t < 0) { printf("%s: illegal time_t value %s\n", progname, optarg); goto usage; } alarm = t; break; case 'u': rtc_is_utc = 1; break; case 'v': verbose++; break; case 'V': printf("%s: version %s\n", progname, VERSION); break; default: usage: printf("usage: %s [options]" "\n\t" "-d rtc0|rtc1|...\t(select rtc)" "\n\t" "-l\t\t\t(RTC uses local timezone)" "\n\t" "-m standby|mem|...\t(sleep mode)" "\n\t" "-s seconds\t\t(seconds to sleep)" "\n\t" "-t time_t\t\t(time to wake)" "\n\t" "-u\t\t\t(RTC uses UTC)" "\n\t" "-v\t\t\t(verbose messages)" "\n\t" "-V\t\t\t(show version)" "\n", progname); return 1; } } if (!alarm && !seconds) { printf("%s: must provide wake time\n", progname); goto usage; } /* REVISIT: if /etc/adjtime exists, read it to see what * the util-linux version of hwclock assumes. */ if (rtc_is_utc == -1) { printf("%s: assuming RTC uses UTC ...\n", progname); rtc_is_utc = 1; } /* this RTC must exist and (if we'll sleep) be wakeup-enabled */ fd = open(devname, O_RDONLY); if (fd < 0) { perror(devname); return 1; } if (strcmp(suspend, "on") != 0 && !may_wakeup(devname)) { printf("%s: %s not enabled for wakeup events\n", progname, devname); return 1; } /* relative or absolute alarm time, normalized to time_t */ if (!get_basetimes(fd)) return 1; if (verbose) printf("alarm %ld, sys_time %ld, rtc_time %ld, seconds %u\n", alarm, sys_time, rtc_time, seconds); if (alarm) { if (alarm < sys_time) { printf("%s: time doesn't go backward to %s", progname, ctime(&alarm)); return 1; } alarm += sys_time - rtc_time; } else alarm = rtc_time + seconds + 1; if (setup_alarm(fd, &alarm) < 0) return 1; sync(); printf("%s: wakeup from \"%s\" using %s at %s", progname, suspend, devname, ctime(&alarm)); fflush(stdout); usleep(10 * 1000); if (strcmp(suspend, "on") != 0) suspend_system(suspend); else { unsigned long data; do { t = read(fd, &data, sizeof data); if (t < 0) { perror("rtc read"); break; } if (verbose) printf("... %s: %03lx\n", devname, data); } while (!(data & RTC_AF)); } if (ioctl(fd, RTC_AIE_OFF, 0) < 0) perror("disable rtc alarm interrupt"); close(fd); return 0; } This patch: Make rtc-cmos do the relevant magic so this RTC can wake the system from a sleep state. That magic comes in two basic flavors: - Straightforward: enable_irq_wake(), the way it'd work on most SOC chips; or generally with system sleep states which don't disable core IRQ logic. - Roundabout, using non-IRQ platform hooks. This is needed with ACPI and one almost-clone chip which uses a special wakeup-only alarm. (That's the RTC used on Footbridge boards, FWIW, which don't do PM in Linux.) A separate patch implements those hooks for ACPI platforms, so that rtc_cmos can issue system wakeup events (and its sysfs "wakealarm" attribute works on at least some systems). 
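For illustration only (not part of the patch): a minimal sketch of how board setup code might hand the new hooks to rtc-cmos through its platform data. The two board_rtc_wake_* helpers are hypothetical placeholders for whatever firmware-specific magic a given platform needs; the ACPI patch mentioned above is the real user.

/*
 * Hypothetical sketch: supplying wake_on()/wake_off() hooks via
 * cmos_rtc_board_info.  Nothing here comes from the patch itself.
 */
#include <linux/device.h>
#include <linux/mc146818rtc.h>

static void board_rtc_wake_on(struct device *dev)
{
	/* arm the platform's RTC wakeup source (e.g. an ACPI event) */
}

static void board_rtc_wake_off(struct device *dev)
{
	/* disarm it again on resume */
}

static struct cmos_rtc_board_info board_rtc_info = {
	.wake_on	= board_rtc_wake_on,
	.wake_off	= board_rtc_wake_off,
	/* rtc_day_alarm/rtc_mon_alarm/rtc_century stay 0 when not extended */
};

With both hooks supplied, cmos_suspend() calls wake_on() instead of enable_irq_wake(), and cmos_resume() undoes it via wake_off(), as the diff below shows.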
Signed-off-by: David Brownell Cc: Alessandro Zummo Cc: Len Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/rtc/rtc-cmos.c | 37 +++++++++++++++++++++++++++---------- include/linux/mc146818rtc.h | 7 +++++++ 2 files changed, 34 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 4e8c373f78e3..3fbfdd16191d 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -46,6 +46,10 @@ struct cmos_rtc { int irq; struct resource *iomem; + void (*wake_on)(struct device *); + void (*wake_off)(struct device *); + + u8 enabled_wake; u8 suspend_ctrl; /* newer hardware extends the original register set */ @@ -405,13 +409,20 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) cmos_rtc.irq = rtc_irq; cmos_rtc.iomem = ports; - /* For ACPI systems the info comes from the FADT. On others, - * board specific setup provides it as appropriate. + /* For ACPI systems extension info comes from the FADT. On others, + * board specific setup provides it as appropriate. Systems where + * the alarm IRQ isn't automatically a wakeup IRQ (like ACPI, and + * some almost-clones) can provide hooks to make that behave. */ if (info) { cmos_rtc.day_alrm = info->rtc_day_alarm; cmos_rtc.mon_alrm = info->rtc_mon_alarm; cmos_rtc.century = info->rtc_century; + + if (info->wake_on && info->wake_off) { + cmos_rtc.wake_on = info->wake_on; + cmos_rtc.wake_off = info->wake_off; + } } cmos_rtc.rtc = rtc_device_register(driver_name, dev, @@ -559,9 +570,13 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg) } spin_unlock_irq(&rtc_lock); - /* ACPI HOOK: enable ACPI_EVENT_RTC when (tmp & RTC_AIE) - * ... it'd be best if we could do that under rtc_lock. - */ + if (tmp & RTC_AIE) { + cmos->enabled_wake = 1; + if (cmos->wake_on) + cmos->wake_on(dev); + else + enable_irq_wake(cmos->irq); + } pr_debug("%s: suspend%s, ctrl %02x\n", cmos_rtc.rtc->dev.bus_id, @@ -576,14 +591,16 @@ static int cmos_resume(struct device *dev) struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char tmp = cmos->suspend_ctrl; - /* REVISIT: a mechanism to resync the system clock (jiffies) - * on resume should be portable between platforms ... - */ - /* re-enable any irqs previously active */ if (tmp & (RTC_PIE|RTC_AIE|RTC_UIE)) { - /* ACPI HOOK: disable ACPI_EVENT_RTC when (tmp & RTC_AIE) */ + if (cmos->enabled_wake) { + if (cmos->wake_off) + cmos->wake_off(dev); + else + disable_irq_wake(cmos->irq); + cmos->enabled_wake = 0; + } spin_lock_irq(&rtc_lock); CMOS_WRITE(tmp, RTC_CONTROL); diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index bdc01127dced..580b3f4956ee 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -22,8 +22,15 @@ extern spinlock_t rtc_lock; /* serialize CMOS RAM access */ /* Some RTCs extend the mc146818 register set to support alarms of more * than 24 hours in the future; or dates that include a century code. * This platform_data structure can pass this information to the driver. + * + * Also, some platforms need suspend()/resume() hooks to kick in special + * handling of wake alarms, e.g. activating ACPI BIOS hooks or setting up + * a separate wakeup alarm used by some almost-clone chips. 
*/ struct cmos_rtc_board_info { + void (*wake_on)(struct device *dev); + void (*wake_off)(struct device *dev); + u8 rtc_day_alarm; /* zero, or register index */ u8 rtc_mon_alarm; /* zero, or register index */ u8 rtc_century; /* zero, or register index */ -- cgit v1.2.3 From 416ce32e704d778c283f2f86cadd836cd5d3696c Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 8 May 2007 00:34:05 -0700 Subject: revert "rtc: Add rtc_merge_alarm()" David says "884b4aaaa242a2db8c8252796f0118164a680ab5 should be reverted. It added an rtc_merge_alarm() call to the 2.6.20 kernel, which hasn't yet been used by any in-tree driver; this patch obviates the need for that call, and uses a more robust approach." Cc: Scott Wood Cc: Alessandro Zummo Cc: David Brownell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/rtc/rtc-lib.c | 81 --------------------------------------------------- include/linux/rtc.h | 1 - 2 files changed, 82 deletions(-) (limited to 'include/linux') diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c index 7bbc26a34bd2..ba795a4db1e9 100644 --- a/drivers/rtc/rtc-lib.c +++ b/drivers/rtc/rtc-lib.c @@ -117,85 +117,4 @@ int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) } EXPORT_SYMBOL(rtc_tm_to_time); - -/* Merge the valid (i.e. non-negative) fields of alarm into the current - * time. If the valid alarm fields are earlier than the equivalent - * fields in the time, carry one into the least significant invalid - * field, so that the alarm expiry is in the future. It assumes that the - * least significant invalid field is more significant than the most - * significant valid field, and that the seconds field is valid. - * - * This is used by alarms that take relative (rather than absolute) - * times, and/or have a simple binary second counter instead of - * day/hour/minute/sec registers. - */ -void rtc_merge_alarm(struct rtc_time *now, struct rtc_time *alarm) -{ - int *alarmp = &alarm->tm_sec; - int *timep = &now->tm_sec; - int carry_into, i; - - /* Ignore everything past the 6th element (tm_year). */ - for (i = 5; i > 0; i--) { - if (alarmp[i] < 0) - alarmp[i] = timep[i]; - else - break; - } - - /* No carry needed if all fields are valid. 
*/ - if (i == 5) - return; - - for (carry_into = i + 1; i >= 0; i--) { - if (alarmp[i] < timep[i]) - break; - - if (alarmp[i] > timep[i]) - return; - } - - switch (carry_into) { - case 1: - alarm->tm_min++; - - if (alarm->tm_min < 60) - return; - - alarm->tm_min = 0; - /* fall-through */ - - case 2: - alarm->tm_hour++; - - if (alarm->tm_hour < 60) - return; - - alarm->tm_hour = 0; - /* fall-through */ - - case 3: - alarm->tm_mday++; - - if (alarm->tm_mday <= rtc_days_in_month[alarm->tm_mon]) - return; - - alarm->tm_mday = 1; - /* fall-through */ - - case 4: - alarm->tm_mon++; - - if (alarm->tm_mon <= 12) - return; - - alarm->tm_mon = 1; - /* fall-through */ - - case 5: - alarm->tm_year++; - } -} -EXPORT_SYMBOL(rtc_merge_alarm); - MODULE_LICENSE("GPL"); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 07ecd4ca8ca7..6d5e4a46781e 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -106,7 +106,6 @@ extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year extern int rtc_valid_tm(struct rtc_time *tm); extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); -extern void rtc_merge_alarm(struct rtc_time *now, struct rtc_time *alarm); #include #include -- cgit v1.2.3 From 4c4308cb93450989846ac49faeb6dab943e7657e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 8 May 2007 00:34:14 -0700 Subject: kprobes: kretprobes simplifications - consolidate duplicate code in all arch_prepare_kretprobe instances into common code - replace various odd helpers that use hlist_for_each_entry to get the first elemenet of a list with either a hlist_for_each_entry_save or an opencoded access to the first element in the caller - inline add_rp_inst into it's only remaining caller - use kretprobe_inst_table_head instead of opencoding it Signed-off-by: Christoph Hellwig Cc: Prasanna S Panchamukhi Acked-by: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/kernel/kprobes.c | 17 +++--------- arch/ia64/kernel/kprobes.c | 18 +++--------- arch/powerpc/kernel/kprobes.c | 19 ++++--------- arch/s390/kernel/kprobes.c | 18 +++--------- arch/x86_64/kernel/kprobes.c | 17 +++--------- include/linux/kprobes.h | 5 ++-- kernel/kprobes.c | 64 ++++++++++++++----------------------------- 7 files changed, 44 insertions(+), 114 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index 3fbef288c376..b6a9d64c2251 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c @@ -226,24 +226,15 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) } /* Called with kretprobe_lock held */ -void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { unsigned long *sara = (unsigned long *)®s->esp; - struct kretprobe_instance *ri; + ri->ret_addr = (kprobe_opcode_t *) *sara; - if ((ri = get_free_rp_inst(rp)) != NULL) { - ri->rp = rp; - ri->task = current; - ri->ret_addr = (kprobe_opcode_t *) *sara; - - /* Replace the return addr with trampoline addr */ - *sara = (unsigned long) &kretprobe_trampoline; - add_rp_inst(ri); - } else { - rp->nmissed++; - } + /* Replace the return addr with trampoline addr */ + *sara = (unsigned long) &kretprobe_trampoline; } /* diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 353689edebd5..0b72f0f94192 
100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -465,23 +465,13 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) } /* Called with kretprobe_lock held */ -void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { - struct kretprobe_instance *ri; + ri->ret_addr = (kprobe_opcode_t *)regs->b0; - if ((ri = get_free_rp_inst(rp)) != NULL) { - ri->rp = rp; - ri->task = current; - ri->ret_addr = (kprobe_opcode_t *)regs->b0; - - /* Replace the return addr with trampoline addr */ - regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; - - add_rp_inst(ri); - } else { - rp->nmissed++; - } + /* Replace the return addr with trampoline addr */ + regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; } int __kprobes arch_prepare_kprobe(struct kprobe *p) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 3d54ad7dd1f9..aed58e1cb91f 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -126,22 +126,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, } /* Called with kretprobe_lock held */ -void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { - struct kretprobe_instance *ri; - - if ((ri = get_free_rp_inst(rp)) != NULL) { - ri->rp = rp; - ri->task = current; - ri->ret_addr = (kprobe_opcode_t *)regs->link; - - /* Replace the return addr with trampoline addr */ - regs->link = (unsigned long)kretprobe_trampoline; - add_rp_inst(ri); - } else { - rp->nmissed++; - } + ri->ret_addr = (kprobe_opcode_t *)regs->link; + + /* Replace the return addr with trampoline addr */ + regs->link = (unsigned long)kretprobe_trampoline; } static int __kprobes kprobe_handler(struct pt_regs *regs) diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 8516a94d8163..9d0f0d09d473 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -271,23 +271,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, } /* Called with kretprobe_lock held */ -void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { - struct kretprobe_instance *ri; + ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; - if ((ri = get_free_rp_inst(rp)) != NULL) { - ri->rp = rp; - ri->task = current; - ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; - - /* Replace the return addr with trampoline addr */ - regs->gprs[14] = (unsigned long)&kretprobe_trampoline; - - add_rp_inst(ri); - } else { - rp->nmissed++; - } + /* Replace the return addr with trampoline addr */ + regs->gprs[14] = (unsigned long)&kretprobe_trampoline; } static int __kprobes kprobe_handler(struct pt_regs *regs) diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 5841ba5f479b..f995bea6e2c1 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c @@ -266,23 +266,14 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) } /* Called with kretprobe_lock held */ -void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { unsigned long *sara = (unsigned long *)regs->rsp; - struct kretprobe_instance *ri; - if ((ri = get_free_rp_inst(rp)) 
!= NULL) { - ri->rp = rp; - ri->task = current; - ri->ret_addr = (kprobe_opcode_t *) *sara; - - /* Replace the return addr with trampoline addr */ - *sara = (unsigned long) &kretprobe_trampoline; - add_rp_inst(ri); - } else { - rp->nmissed++; - } + ri->ret_addr = (kprobe_opcode_t *) *sara; + /* Replace the return addr with trampoline addr */ + *sara = (unsigned long) &kretprobe_trampoline; } int __kprobes kprobe_handler(struct pt_regs *regs) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 850ee871e353..6fc623e41fd8 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -123,7 +123,8 @@ DECLARE_PER_CPU(struct kprobe *, current_kprobe); DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); #ifdef ARCH_SUPPORTS_KRETPROBES -extern void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs); +extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs); #else /* ARCH_SUPPORTS_KRETPROBES */ static inline void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) @@ -209,8 +210,6 @@ void jprobe_return(void); int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); -struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp); -void add_rp_inst(struct kretprobe_instance *ri); void kprobe_flush_task(struct task_struct *tk); void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); #else /* CONFIG_KPROBES */ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 22857003a65b..f58f171bd65f 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -357,46 +357,6 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p) return; } -/* Called with kretprobe_lock held */ -struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp) -{ - struct hlist_node *node; - struct kretprobe_instance *ri; - hlist_for_each_entry(ri, node, &rp->free_instances, uflist) - return ri; - return NULL; -} - -/* Called with kretprobe_lock held */ -static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe - *rp) -{ - struct hlist_node *node; - struct kretprobe_instance *ri; - hlist_for_each_entry(ri, node, &rp->used_instances, uflist) - return ri; - return NULL; -} - -/* Called with kretprobe_lock held */ -void __kprobes add_rp_inst(struct kretprobe_instance *ri) -{ - /* - * Remove rp inst off the free list - - * Add it back when probed function returns - */ - hlist_del(&ri->uflist); - - /* Add rp inst onto table */ - INIT_HLIST_NODE(&ri->hlist); - hlist_add_head(&ri->hlist, - &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]); - - /* Also add this rp inst to the used list. 
*/ - INIT_HLIST_NODE(&ri->uflist); - hlist_add_head(&ri->uflist, &ri->rp->used_instances); -} - /* Called with kretprobe_lock held */ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head) @@ -450,7 +410,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk) static inline void free_rp_inst(struct kretprobe *rp) { struct kretprobe_instance *ri; - while ((ri = get_free_rp_inst(rp)) != NULL) { + struct hlist_node *pos, *next; + + hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) { hlist_del(&ri->uflist); kfree(ri); } @@ -732,7 +694,21 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, /*TODO: consider to only swap the RA after the last pre_handler fired */ spin_lock_irqsave(&kretprobe_lock, flags); - arch_prepare_kretprobe(rp, regs); + if (!hlist_empty(&rp->free_instances)) { + struct kretprobe_instance *ri; + + ri = hlist_entry(rp->free_instances.first, + struct kretprobe_instance, uflist); + ri->rp = rp; + ri->task = current; + arch_prepare_kretprobe(ri, regs); + + /* XXX(hch): why is there no hlist_move_head? */ + hlist_del(&ri->uflist); + hlist_add_head(&ri->uflist, &ri->rp->used_instances); + hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task)); + } else + rp->nmissed++; spin_unlock_irqrestore(&kretprobe_lock, flags); return 0; } @@ -795,11 +771,13 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp) { unsigned long flags; struct kretprobe_instance *ri; + struct hlist_node *pos, *next; unregister_kprobe(&rp->kp); + /* No race here */ spin_lock_irqsave(&kretprobe_lock, flags); - while ((ri = get_used_rp_inst(rp)) != NULL) { + hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) { ri->rp = NULL; hlist_del(&ri->uflist); } -- cgit v1.2.3 From bf8f6e5b3e51ee0c64c2d1350c70198ddc8ad3f7 Mon Sep 17 00:00:00 2001 From: Ananth N Mavinakayanahalli Date: Tue, 8 May 2007 00:34:16 -0700 Subject: Kprobes: The ON/OFF knob thru debugfs This patch provides a debugfs knob to turn kprobes on/off o A new file /debug/kprobes/enabled indicates if kprobes is enabled or not (default enabled) o Echoing 0 to this file will disarm all installed probes o Any new probe registration when disabled will register the probe but not arm it. A message will be printed out in such a case. o When a value 1 is echoed to the file, all probes (including ones registered in the intervening period) will be enabled o Unregistration will happen irrespective of whether probes are globally enabled or not. o Update Documentation/kprobes.txt to reflect these changes. While there also update the doc to make it current. We are also looking at providing sysrq key support to tie to the disabling feature provided by this patch. [akpm@linux-foundation.org: Use bool like a bool!] 
[akpm@linux-foundation.org: add printk facility levels] [cornelia.huck@de.ibm.com: Add the missing arch_trampoline_kprobe() for s390] Signed-off-by: Ananth N Mavinakayanahalli Signed-off-by: Srinivasa DS Signed-off-by: Cornelia Huck Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kprobes.txt | 34 ++++++++- arch/i386/kernel/kprobes.c | 5 ++ arch/ia64/kernel/kprobes.c | 9 +++ arch/powerpc/kernel/kprobes.c | 8 +++ arch/s390/kernel/kprobes.c | 7 ++ arch/x86_64/kernel/kprobes.c | 8 +++ include/linux/kprobes.h | 5 ++ kernel/kprobes.c | 156 ++++++++++++++++++++++++++++++++++++++++-- 8 files changed, 222 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index d71fafffce90..da5404ab7569 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -14,6 +14,7 @@ CONTENTS 8. Kprobes Example 9. Jprobes Example 10. Kretprobes Example +Appendix A: The kprobes debugfs interface 1. Concepts: Kprobes, Jprobes, Return Probes @@ -349,9 +350,12 @@ for instrumentation and error reporting.) If the number of times a function is called does not match the number of times it returns, registering a return probe on that function may -produce undesirable results. We have the do_exit() case covered. -do_execve() and do_fork() are not an issue. We're unaware of other -specific cases where this could be a problem. +produce undesirable results. In such a case, a line: +kretprobe BUG!: Processing kretprobe d000000000041aa8 @ c00000000004f48c +gets printed. With this information, one will be able to correlate the +exact instance of the kretprobe that caused the problem. We have the +do_exit() case covered. do_execve() and do_fork() are not an issue. +We're unaware of other specific cases where this could be a problem. If, upon entry to or exit from a function, the CPU is running on a stack other than that of the current task, registering a return @@ -614,3 +618,27 @@ http://www-106.ibm.com/developerworks/library/l-kprobes.html?ca=dgr-lnxw42Kprobe http://www.redhat.com/magazine/005mar05/features/kprobes/ http://www-users.cs.umn.edu/~boutcher/kprobes/ http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115) + + +Appendix A: The kprobes debugfs interface + +With recent kernels (> 2.6.20) the list of registered kprobes is visible +under the /debug/kprobes/ directory (assuming debugfs is mounted at /debug). + +/debug/kprobes/list: Lists all registered probes on the system + +c015d71a k vfs_read+0x0 +c011a316 j do_fork+0x0 +c03dedc5 r tcp_v4_rcv+0x0 + +The first column provides the kernel address where the probe is inserted. +The second column identifies the type of probe (k - kprobe, r - kretprobe +and j - jprobe), while the third column specifies the symbol+offset of +the probe. If the probed function belongs to a module, the module name +is also specified. + +/debug/kprobes/enabled: Turn kprobes ON/OFF + +Provides a knob to globally turn registered kprobes ON or OFF. By default, +all kprobes are enabled. By echoing "0" to this file, all registered probes +will be disarmed, till such time a "1" is echoed to this file. 
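Not part of the patch, but for completeness: the new knob is an ordinary text file, so user space can flip it with a couple of system calls. A hedged sketch, assuming debugfs is mounted at /debug as the documentation above assumes:

/*
 * Toggle the global kprobes switch from user space.
 * write_enabled_file_bool() in the patch accepts '1'/'y'/'Y' to enable
 * and '0'/'n'/'N' to disable.
 */
#include <fcntl.h>
#include <unistd.h>

static int kprobes_set_enabled(int enable)
{
	int fd = open("/debug/kprobes/enabled", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, enable ? "1\n" : "0\n", 2);
	close(fd);
	return n == 2 ? 0 : -1;
}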
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index b6a9d64c2251..dde828a333c3 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c @@ -743,6 +743,11 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} + int __init arch_init_kprobes(void) { return 0; diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 0b72f0f94192..4f5fd0960ba7 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -1012,3 +1012,12 @@ int __init arch_init_kprobes(void) (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip; return register_kprobe(&trampoline_p); } + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == + (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip) + return 1; + + return 0; +} diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index aed58e1cb91f..088b8c6defa0 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -550,3 +550,11 @@ int __init arch_init_kprobes(void) { return register_kprobe(&trampoline_p); } + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) + return 1; + + return 0; +} diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 9d0f0d09d473..e39333ae0fcf 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -661,3 +661,10 @@ int __init arch_init_kprobes(void) { return register_kprobe(&trampoline_p); } + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == (kprobe_opcode_t *) & kretprobe_trampoline) + return 1; + return 0; +} diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index f995bea6e2c1..d4a0d0ac9935 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c @@ -743,3 +743,11 @@ int __init arch_init_kprobes(void) { return register_kprobe(&trampoline_p); } + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) + return 1; + + return 0; +} diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 6fc623e41fd8..23adf6075ae4 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -125,11 +125,16 @@ DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); #ifdef ARCH_SUPPORTS_KRETPROBES extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs); +extern int arch_trampoline_kprobe(struct kprobe *p); #else /* ARCH_SUPPORTS_KRETPROBES */ static inline void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) { } +static inline int arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} #endif /* ARCH_SUPPORTS_KRETPROBES */ /* * Function-return probe - diff --git a/kernel/kprobes.c b/kernel/kprobes.c index f58f171bd65f..9e47d8c493f3 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -43,9 +43,11 @@ #include #include #include + #include #include #include +#include #define KPROBE_HASH_BITS 6 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) @@ -64,6 +66,9 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; static atomic_t kprobe_count; +/* NOTE: change this value only with kprobe_mutex held */ +static bool kprobe_enabled; + DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ DEFINE_SPINLOCK(kretprobe_lock); /* Protects 
kretprobe_inst_table */ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; @@ -564,12 +569,13 @@ static int __kprobes __register_kprobe(struct kprobe *p, hlist_add_head_rcu(&p->hlist, &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); - if (atomic_add_return(1, &kprobe_count) == \ + if (kprobe_enabled) { + if (atomic_add_return(1, &kprobe_count) == \ (ARCH_INACTIVE_KPROBE_COUNT + 1)) - register_page_fault_notifier(&kprobe_page_fault_nb); - - arch_arm_kprobe(p); + register_page_fault_notifier(&kprobe_page_fault_nb); + arch_arm_kprobe(p); + } out: mutex_unlock(&kprobe_mutex); @@ -607,8 +613,13 @@ valid_p: if (old_p == p || (old_p->pre_handler == aggr_pre_handler && p->list.next == &old_p->list && p->list.prev == &old_p->list)) { - /* Only probe on the hash list */ - arch_disarm_kprobe(p); + /* + * Only probe on the hash list. Disarm only if kprobes are + * enabled - otherwise, the breakpoint would already have + * been removed. We save on flushing icache. + */ + if (kprobe_enabled) + arch_disarm_kprobe(p); hlist_del_rcu(&old_p->hlist); cleanup_p = 1; } else { @@ -797,6 +808,9 @@ static int __init init_kprobes(void) } atomic_set(&kprobe_count, 0); + /* By default, kprobes are enabled */ + kprobe_enabled = true; + err = arch_init_kprobes(); if (!err) err = register_die_notifier(&kprobe_exceptions_nb); @@ -806,7 +820,7 @@ static int __init init_kprobes(void) #ifdef CONFIG_DEBUG_FS static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, - const char *sym, int offset,char *modname) + const char *sym, int offset,char *modname) { char *kprobe_type; @@ -885,9 +899,130 @@ static struct file_operations debugfs_kprobes_operations = { .release = seq_release, }; +static void __kprobes enable_all_kprobes(void) +{ + struct hlist_head *head; + struct hlist_node *node; + struct kprobe *p; + unsigned int i; + + mutex_lock(&kprobe_mutex); + + /* If kprobes are already enabled, just return */ + if (kprobe_enabled) + goto already_enabled; + + /* + * Re-register the page fault notifier only if there are any + * active probes at the time of enabling kprobes globally + */ + if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT) + register_page_fault_notifier(&kprobe_page_fault_nb); + + for (i = 0; i < KPROBE_TABLE_SIZE; i++) { + head = &kprobe_table[i]; + hlist_for_each_entry_rcu(p, node, head, hlist) + arch_arm_kprobe(p); + } + + kprobe_enabled = true; + printk(KERN_INFO "Kprobes globally enabled\n"); + +already_enabled: + mutex_unlock(&kprobe_mutex); + return; +} + +static void __kprobes disable_all_kprobes(void) +{ + struct hlist_head *head; + struct hlist_node *node; + struct kprobe *p; + unsigned int i; + + mutex_lock(&kprobe_mutex); + + /* If kprobes are already disabled, just return */ + if (!kprobe_enabled) + goto already_disabled; + + kprobe_enabled = false; + printk(KERN_INFO "Kprobes globally disabled\n"); + for (i = 0; i < KPROBE_TABLE_SIZE; i++) { + head = &kprobe_table[i]; + hlist_for_each_entry_rcu(p, node, head, hlist) { + if (!arch_trampoline_kprobe(p)) + arch_disarm_kprobe(p); + } + } + + mutex_unlock(&kprobe_mutex); + /* Allow all currently running kprobes to complete */ + synchronize_sched(); + + mutex_lock(&kprobe_mutex); + /* Unconditionally unregister the page_fault notifier */ + unregister_page_fault_notifier(&kprobe_page_fault_nb); + +already_disabled: + mutex_unlock(&kprobe_mutex); + return; +} + +/* + * XXX: The debugfs bool file interface doesn't allow for callbacks + * when the bool state is switched. 
We can reuse that facility when + * available + */ +static ssize_t read_enabled_file_bool(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char buf[3]; + + if (kprobe_enabled) + buf[0] = '1'; + else + buf[0] = '0'; + buf[1] = '\n'; + buf[2] = 0x00; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t write_enabled_file_bool(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + char buf[32]; + int buf_size; + + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + switch (buf[0]) { + case 'y': + case 'Y': + case '1': + enable_all_kprobes(); + break; + case 'n': + case 'N': + case '0': + disable_all_kprobes(); + break; + } + + return count; +} + +static struct file_operations fops_kp = { + .read = read_enabled_file_bool, + .write = write_enabled_file_bool, +}; + static int __kprobes debugfs_kprobe_init(void) { struct dentry *dir, *file; + unsigned int value = 1; dir = debugfs_create_dir("kprobes", NULL); if (!dir) @@ -900,6 +1035,13 @@ static int __kprobes debugfs_kprobe_init(void) return -ENOMEM; } + file = debugfs_create_file("enabled", 0600, dir, + &value, &fops_kp); + if (!file) { + debugfs_remove(dir); + return -ENOMEM; + } + return 0; } -- cgit v1.2.3 From 277866a0e3a4f97e859f7a621f5b4f5359c9526c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 8 May 2007 00:35:12 -0700 Subject: nfs: fix congestion control: use atomic_longs Change the atomic_t in struct nfs_server to atomic_long_t in anticipation of machines that can handle 8+TB of (4K) pages under writeback. However I suspect other things in NFS will start going *bang* by then. Signed-off-by: Peter Zijlstra Cc: Trond Myklebust Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nfs/write.c | 4 ++-- include/linux/nfs_fs_sb.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 522cd413906e..de92b9509d94 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -224,7 +224,7 @@ static int nfs_set_page_writeback(struct page *page) struct inode *inode = page->mapping->host; struct nfs_server *nfss = NFS_SERVER(inode); - if (atomic_inc_return(&nfss->writeback) > + if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH) set_bdi_congested(&nfss->backing_dev_info, WRITE); } @@ -237,7 +237,7 @@ static void nfs_end_page_writeback(struct page *page) struct nfs_server *nfss = NFS_SERVER(inode); end_page_writeback(page); - if (atomic_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) { + if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) { clear_bdi_congested(&nfss->backing_dev_info, WRITE); congestion_end(WRITE); } diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index c95d5e642548..52b4378311c8 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -82,7 +82,7 @@ struct nfs_server { struct rpc_clnt * client_acl; /* ACL RPC client handle */ struct nfs_iostats * io_stats; /* I/O statistics */ struct backing_dev_info backing_dev_info; - atomic_t writeback; /* number of writeback pages */ + atomic_long_t writeback; /* number of writeback pages */ int flags; /* various flags */ unsigned int caps; /* server capabilities */ unsigned int rsize; /* read size */ -- cgit v1.2.3 From d85a60d85ea5b7c597508c1510c88e657773d378 Mon Sep 17 00:00:00 2001 From: Bernhard Walle Date: Tue, 8 May 2007 00:35:24 -0700 Subject: Add IRQF_IRQPOLL flag 
(common code) irqpoll is broken on some architectures that don't use IRQ 0 for the timer interrupt, like IA64. This patch adds an IRQF_IRQPOLL flag. Each architecture is handled in a separate patch. As I left irq == 0 as a condition, this should not break existing architectures that use timer_irq == 0 and that I didn't address with that patch (because I don't know). This patch: This patch adds an IRQF_IRQPOLL flag that the interrupt registration code could use for the interrupt it wants to use for IRQ polling. Because this must not be the timer interrupt, an additional flag was added instead of re-using the IRQF_TIMER constant. Until all architectures have an IRQF_IRQPOLL interrupt, irq == 0 will stay as an alternative, as it should not break anything. Also, note_interrupt() is called on CPU-specific interrupts to be used as interrupt source for IRQ polling. Signed-off-by: Bernhard Walle Cc: Russell King Cc: Kyle McMartin Cc: Matthew Wilcox Cc: Grant Grundler Cc: Paul Mundt Cc: "Luck, Tony" Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/interrupt.h | 4 ++++ kernel/irq/handle.c | 2 ++ kernel/irq/spurious.c | 4 +++- 3 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 80e63d8e9b15..f7b01b9a35b3 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -44,6 +44,9 @@ * IRQF_TIMER - Flag to mark this interrupt as timer interrupt * IRQF_PERCPU - Interrupt is per cpu * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing + * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is + * registered first in an shared interrupt is considered for + * performance reasons) */ #define IRQF_DISABLED 0x00000020 #define IRQF_SAMPLE_RANDOM 0x00000040 @@ -52,6 +55,7 @@ #define IRQF_TIMER 0x00000200 #define IRQF_PERCPU 0x00000400 #define IRQF_NOBALANCING 0x00000800 +#define IRQF_IRQPOLL 0x00001000 /* * Migration helpers.
Scheduled for removal in 9/2007 diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 515ad40bde15..32e1ab1477d1 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -180,6 +180,8 @@ fastcall unsigned int __do_IRQ(unsigned int irq) if (desc->chip->ack) desc->chip->ack(irq); action_ret = handle_IRQ_event(irq, desc->action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); desc->chip->end(irq); return 1; } diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 9d8c79b48823..b0d81aae472f 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -146,7 +146,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, if (unlikely(irqfixup)) { /* Don't punish working computers */ - if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) { + if ((irqfixup == 2 && ((irq == 0) || + (desc->action->flags & IRQF_IRQPOLL))) || + action_ret == IRQ_NONE) { int ok = misrouted_irq(irq); if (action_ret == IRQ_NONE) desc->irqs_unhandled -= ok; -- cgit v1.2.3 From db05c3b1ddaa06e658548f3d99e31a188b0b3bcc Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 8 May 2007 00:35:46 -0700 Subject: Char: cyclades, cy_readX/writeX cleanup cyclades, cy_readX/writeX cleanup - cy_readX are placeholders for readX, remove it - move cy_writeX macros into do {} while(0) to be safe Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/cyclades.c | 315 ++++++++++++++++++++++------------------------- include/linux/cyclades.h | 10 +- 2 files changed, 153 insertions(+), 172 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 3b62962b3512..9bf0fb2fa0d4 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -675,13 +675,13 @@ static void cy_send_xchar(struct tty_struct *tty, char ch); #define IS_CYC_Z(card) ((card).num_chips == -1) #define Z_FPGA_CHECK(card) \ - ((cy_readl(&((struct RUNTIME_9060 __iomem *) \ + ((readl(&((struct RUNTIME_9060 __iomem *) \ ((card).ctl_addr))->init_ctrl) & (1<<17)) != 0) -#define ISZLOADED(card) (((ZO_V1==cy_readl(&((struct RUNTIME_9060 __iomem *) \ +#define ISZLOADED(card) (((ZO_V1==readl(&((struct RUNTIME_9060 __iomem *) \ ((card).ctl_addr))->mail_box_0)) || \ Z_FPGA_CHECK(card)) && \ - (ZFIRM_ID==cy_readl(&((struct FIRM_ID __iomem *) \ + (ZFIRM_ID==readl(&((struct FIRM_ID __iomem *) \ ((card).base_addr+ID_ADDRESS))->signature))) #ifndef SERIAL_XMIT_SIZE @@ -972,7 +972,7 @@ static int cyy_issue_cmd(void __iomem * base_addr, u_char cmd, int index) /* Check to see that the previous command has completed */ for (i = 0; i < 100; i++) { - if (cy_readb(base_addr + (CyCCR << index)) == 0) { + if (readb(base_addr + (CyCCR << index)) == 0) { break; } udelay(10L); @@ -1015,7 +1015,7 @@ static unsigned detect_isa_irq(void __iomem * address) cy_writeb(address + (CyCAR << index), 0); cy_writeb(address + (CySRER << index), - cy_readb(address + (CySRER << index)) | CyTxRdy); + readb(address + (CySRER << index)) | CyTxRdy); local_irq_restore(flags); /* Wait ... 
*/ @@ -1025,11 +1025,11 @@ static unsigned detect_isa_irq(void __iomem * address) irq = probe_irq_off(irqs); /* Clean up */ - save_xir = (u_char) cy_readb(address + (CyTIR << index)); - save_car = cy_readb(address + (CyCAR << index)); + save_xir = (u_char) readb(address + (CyTIR << index)); + save_car = readb(address + (CyCAR << index)); cy_writeb(address + (CyCAR << index), (save_xir & 0x3)); cy_writeb(address + (CySRER << index), - cy_readb(address + (CySRER << index)) & ~CyTxRdy); + readb(address + (CySRER << index)) & ~CyTxRdy); cy_writeb(address + (CyTIR << index), (save_xir & 0x3f)); cy_writeb(address + (CyCAR << index), (save_car)); cy_writeb(address + (Cy_ClrIntr << index), 0); @@ -1055,34 +1055,34 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, #endif /* determine the channel & change to that context */ spin_lock(&cinfo->card_lock); - save_xir = (u_char) cy_readb(base_addr + (CyRIR << index)); + save_xir = (u_char) readb(base_addr + (CyRIR << index)); channel = (u_short) (save_xir & CyIRChannel); i = channel + chip * 4 + cinfo->first_line; info = &cy_port[i]; info->last_active = jiffies; - save_car = cy_readb(base_addr + (CyCAR << index)); + save_car = readb(base_addr + (CyCAR << index)); cy_writeb(base_addr + (CyCAR << index), save_xir); /* if there is nowhere to put the data, discard it */ if (info->tty == 0) { - j = (cy_readb(base_addr + (CyRIVR << index)) & + j = (readb(base_addr + (CyRIVR << index)) & CyIVRMask); if (j == CyIVRRxEx) { /* exception */ - data = cy_readb(base_addr + (CyRDSR << index)); + data = readb(base_addr + (CyRDSR << index)); } else { /* normal character reception */ - char_count = cy_readb(base_addr + + char_count = readb(base_addr + (CyRDCR << index)); while (char_count--) { - data = cy_readb(base_addr + + data = readb(base_addr + (CyRDSR << index)); } } } else { /* there is an open port for this data */ tty = info->tty; - j = (cy_readb(base_addr + (CyRIVR << index)) & + j = (readb(base_addr + (CyRIVR << index)) & CyIVRMask); if (j == CyIVRRxEx) { /* exception */ - data = cy_readb(base_addr + (CyRDSR << index)); + data = readb(base_addr + (CyRDSR << index)); /* For statistics only */ if (data & CyBREAK) @@ -1103,7 +1103,7 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, if (data & CyBREAK) { tty_insert_flip_char( tty, - cy_readb( + readb( base_addr + (CyRDSR << index)), @@ -1116,7 +1116,7 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, } else if (data & CyFRAME) { tty_insert_flip_char( tty, - cy_readb( + readb( base_addr + (CyRDSR << index)), @@ -1128,7 +1128,7 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, /* Pieces of seven... 
*/ tty_insert_flip_char( tty, - cy_readb( + readb( base_addr + (CyRDSR << index)), @@ -1147,7 +1147,7 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, */ tty_insert_flip_char( tty, - cy_readb( + readb( base_addr + (CyRDSR << index)), @@ -1179,7 +1179,7 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, } } else { /* normal character reception */ /* load # chars available from the chip */ - char_count = cy_readb(base_addr + + char_count = readb(base_addr + (CyRDCR << index)); #ifdef CY_ENABLE_MONITORING @@ -1191,7 +1191,7 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, #endif len = tty_buffer_request_room(tty, char_count); while (len--) { - data = cy_readb(base_addr + + data = readb(base_addr + (CyRDSR << index)); tty_insert_flip_char(tty, data, TTY_NORMAL); @@ -1221,16 +1221,16 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, /* determine the channel & change to that context */ spin_lock(&cinfo->card_lock); - save_xir = (u_char) cy_readb(base_addr + (CyTIR << index)); + save_xir = (u_char) readb(base_addr + (CyTIR << index)); channel = (u_short) (save_xir & CyIRChannel); i = channel + chip * 4 + cinfo->first_line; - save_car = cy_readb(base_addr + (CyCAR << index)); + save_car = readb(base_addr + (CyCAR << index)); cy_writeb(base_addr + (CyCAR << index), save_xir); /* validate the port# (as configured and open) */ if ((i < 0) || (NR_PORTS <= i)) { cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + (CySRER << index)) & + readb(base_addr + (CySRER << index)) & ~CyTxRdy); goto txend; } @@ -1238,7 +1238,7 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, info->last_active = jiffies; if (info->tty == 0) { cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + (CySRER << index)) & + readb(base_addr + (CySRER << index)) & ~CyTxRdy); goto txdone; } @@ -1271,15 +1271,15 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, while (char_count-- > 0) { if (!info->xmit_cnt) { - if (cy_readb(base_addr + (CySRER << index)) & + if (readb(base_addr + (CySRER << index)) & CyTxMpty) { cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + + readb(base_addr + (CySRER << index)) & ~CyTxMpty); } else { cy_writeb(base_addr + (CySRER << index), - (cy_readb(base_addr + + (readb(base_addr + (CySRER << index)) & ~CyTxRdy) | CyTxMpty); } @@ -1287,13 +1287,13 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, } if (info->xmit_buf == 0) { cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + (CySRER << index))& + readb(base_addr + (CySRER << index)) & ~CyTxRdy); goto txdone; } if (info->tty->stopped || info->tty->hw_stopped) { cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + (CySRER << index))& + readb(base_addr + (CySRER << index)) & ~CyTxRdy); goto txdone; } @@ -1346,15 +1346,15 @@ txend: /* determine the channel & change to that context */ spin_lock(&cinfo->card_lock); - save_xir = (u_char) cy_readb(base_addr + (CyMIR << index)); + save_xir = (u_char) readb(base_addr + (CyMIR << index)); channel = (u_short) (save_xir & CyIRChannel); info = &cy_port[channel + chip * 4 + cinfo->first_line]; info->last_active = jiffies; - save_car = cy_readb(base_addr + (CyCAR << index)); + save_car = readb(base_addr + (CyCAR << index)); cy_writeb(base_addr + (CyCAR << index), save_xir); - mdm_change = cy_readb(base_addr + (CyMISR << index)); - mdm_status = cy_readb(base_addr + (CyMSVR1 << index)); + mdm_change = readb(base_addr + (CyMISR << index)); + 
mdm_status = readb(base_addr + (CyMSVR1 << index)); if (info->tty == 0) { /* no place for data, ignore it */ ; @@ -1391,7 +1391,7 @@ txend: info->tty->hw_stopped = 0; cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + + readb(base_addr + (CySRER << index))| CyTxRdy); @@ -1405,7 +1405,7 @@ txend: info->tty->hw_stopped = 1; cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + + readb(base_addr + (CySRER << index)) & ~CyTxRdy); @@ -1459,7 +1459,7 @@ static irqreturn_t cyy_interrupt(int irq, void *dev_id) base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index); too_many = 0; - while ((status = cy_readb(base_addr + + while ((status = readb(base_addr + (CySVRR << index))) != 0x00) { had_work++; /* The purpose of the following test is to ensure that @@ -1502,16 +1502,15 @@ cyz_fetch_msg(struct cyclades_card *cinfo, if (!ISZLOADED(*cinfo)) { return -1; } - zfw_ctrl = cinfo->base_addr + (cy_readl(&firm_id->zfwctrl_addr) & - 0xfffff); + zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; - loc_doorbell = cy_readl(&((struct RUNTIME_9060 __iomem *) + loc_doorbell = readl(&((struct RUNTIME_9060 __iomem *) (cinfo->ctl_addr))->loc_doorbell); if (loc_doorbell) { *cmd = (char)(0xff & loc_doorbell); - *channel = cy_readl(&board_ctrl->fwcmd_channel); - *param = (__u32) cy_readl(&board_ctrl->fwcmd_param); + *channel = readl(&board_ctrl->fwcmd_channel); + *param = (__u32) readl(&board_ctrl->fwcmd_param); cy_writel(&((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))-> loc_doorbell, 0xffffffff); return 1; @@ -1533,16 +1532,15 @@ cyz_issue_cmd(struct cyclades_card *cinfo, if (!ISZLOADED(*cinfo)) { return -1; } - zfw_ctrl = cinfo->base_addr + (cy_readl(&firm_id->zfwctrl_addr) & - 0xfffff); + zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; index = 0; pci_doorbell = &((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->pci_doorbell; - while ((cy_readl(pci_doorbell) & 0xff) != 0) { + while ((readl(pci_doorbell) & 0xff) != 0) { if (index++ == 1000) { - return (int)(cy_readl(pci_doorbell) & 0xff); + return (int)(readl(pci_doorbell) & 0xff); } udelay(50L); } @@ -1569,10 +1567,10 @@ cyz_handle_rx(struct cyclades_port *info, #endif volatile __u32 rx_put, rx_get, new_rx_get, rx_bufsize, rx_bufaddr; - rx_get = new_rx_get = cy_readl(&buf_ctrl->rx_get); - rx_put = cy_readl(&buf_ctrl->rx_put); - rx_bufsize = cy_readl(&buf_ctrl->rx_bufsize); - rx_bufaddr = cy_readl(&buf_ctrl->rx_bufaddr); + rx_get = new_rx_get = readl(&buf_ctrl->rx_get); + rx_put = readl(&buf_ctrl->rx_put); + rx_bufsize = readl(&buf_ctrl->rx_bufsize); + rx_bufaddr = readl(&buf_ctrl->rx_bufaddr); if (rx_put >= rx_get) char_count = rx_put - rx_get; else @@ -1622,7 +1620,7 @@ cyz_handle_rx(struct cyclades_port *info, #else len = tty_buffer_request_room(tty, char_count); while (len--) { - data = cy_readb(cinfo->base_addr + rx_bufaddr + + data = readb(cinfo->base_addr + rx_bufaddr + new_rx_get); new_rx_get = (new_rx_get + 1)& (rx_bufsize - 1); tty_insert_flip_char(tty, data, TTY_NORMAL); @@ -1633,13 +1631,12 @@ cyz_handle_rx(struct cyclades_port *info, #ifdef CONFIG_CYZ_INTR /* Recalculate the number of chars in the RX buffer and issue a cmd in case it's higher than the RX high water mark */ - rx_put = cy_readl(&buf_ctrl->rx_put); + rx_put = readl(&buf_ctrl->rx_put); if (rx_put >= rx_get) char_count = rx_put - rx_get; else char_count = rx_put - rx_get + rx_bufsize; - if (char_count >= (int)cy_readl(&buf_ctrl-> - 
rx_threshold)) { + if (char_count >= (int)readl(&buf_ctrl->rx_threshold)) { cy_sched_event(info, Cy_EVENT_Z_RX_FULL); } #endif @@ -1668,10 +1665,10 @@ cyz_handle_tx(struct cyclades_port *info, if (info->xmit_cnt <= 0) /* Nothing to transmit */ return; - tx_get = cy_readl(&buf_ctrl->tx_get); - tx_put = cy_readl(&buf_ctrl->tx_put); - tx_bufsize = cy_readl(&buf_ctrl->tx_bufsize); - tx_bufaddr = cy_readl(&buf_ctrl->tx_bufaddr); + tx_get = readl(&buf_ctrl->tx_get); + tx_put = readl(&buf_ctrl->tx_put); + tx_bufsize = readl(&buf_ctrl->tx_bufsize); + tx_bufaddr = readl(&buf_ctrl->tx_bufaddr); if (tx_put >= tx_get) char_count = tx_get - tx_put - 1 + tx_bufsize; else @@ -1756,11 +1753,10 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) int delta_count; firm_id = cinfo->base_addr + ID_ADDRESS; - zfw_ctrl = cinfo->base_addr + (cy_readl(&firm_id->zfwctrl_addr) & - 0xfffff); + zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; - fw_ver = cy_readl(&board_ctrl->fw_version); - hw_ver = cy_readl(&((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))-> + fw_ver = readl(&board_ctrl->fw_version); + hw_ver = readl(&((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))-> mail_box_0); while (cyz_fetch_msg(cinfo, &channel, &cmd, ¶m) == 1) { @@ -1794,7 +1790,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) delta_count++; if (info->flags & ASYNC_CHECK_CD) { if ((fw_ver > 241 ? ((u_long) param) : - cy_readl(&ch_ctrl->rs_status)) & + readl(&ch_ctrl->rs_status)) & C_RS_DCD) { cy_sched_event(info, Cy_EVENT_OPEN_WAKEUP); @@ -1923,12 +1919,12 @@ static void cyz_poll(unsigned long arg) firm_id = cinfo->base_addr + ID_ADDRESS; zfw_ctrl = cinfo->base_addr + - (cy_readl(&firm_id->zfwctrl_addr) & 0xfffff); + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &(zfw_ctrl->board_ctrl); /* Skip first polling cycle to avoid racing conditions with the FW */ if (!cinfo->intr_enabled) { - cinfo->nports = (int)cy_readl(&board_ctrl->n_channel); + cinfo->nports = (int)readl(&board_ctrl->n_channel); cinfo->intr_enabled = 1; continue; } @@ -2029,12 +2025,12 @@ static int startup(struct cyclades_port *info) #ifdef CY_DEBUG_DTR printk("cyc:startup raising DTR\n"); printk(" status: 0x%x, 0x%x\n", - cy_readb(base_addr + (CyMSVR1 << index)), - cy_readb(base_addr + (CyMSVR2 << index))); + readb(base_addr + (CyMSVR1 << index)), + readb(base_addr + (CyMSVR2 << index))); #endif cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + (CySRER << index)) | CyRxData); + readb(base_addr + (CySRER << index)) | CyRxData); info->flags |= ASYNC_INITIALIZED; if (info->tty) { @@ -2064,7 +2060,7 @@ static int startup(struct cyclades_port *info) } zfw_ctrl = cy_card[card].base_addr + - (cy_readl(&firm_id->zfwctrl_addr) & 0xfffff); + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = zfw_ctrl->ch_ctrl; @@ -2112,7 +2108,7 @@ static int startup(struct cyclades_port *info) /* set timeout !!! */ /* set RTS and DTR !!! 
*/ cy_writel(&ch_ctrl[channel].rs_control, - cy_readl(&ch_ctrl[channel].rs_control) | C_RS_RTS | + readl(&ch_ctrl[channel].rs_control) | C_RS_RTS | C_RS_DTR); retval = cyz_issue_cmd(&cy_card[info->card], channel, C_CM_IOCTLM, 0L); @@ -2168,7 +2164,7 @@ static void start_xmit(struct cyclades_port *info) CY_LOCK(info, flags); cy_writeb(base_addr + (CyCAR << index), channel); cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + (CySRER << index)) | CyTxRdy); + readb(base_addr + (CySRER << index)) | CyTxRdy); CY_UNLOCK(info, flags); } else { #ifdef CONFIG_CYZ_INTR @@ -2235,8 +2231,8 @@ static void shutdown(struct cyclades_port *info) #ifdef CY_DEBUG_DTR printk("cyc shutdown dropping DTR\n"); printk(" status: 0x%x, 0x%x\n", - cy_readb(base_addr + (CyMSVR1 << index)), - cy_readb(base_addr + (CyMSVR2 << index))); + readb(base_addr + (CyMSVR1 << index)), + readb(base_addr + (CyMSVR2 << index))); #endif } cyy_issue_cmd(base_addr, CyCHAN_CTL | CyDIS_RCVR, index); @@ -2267,7 +2263,7 @@ static void shutdown(struct cyclades_port *info) } zfw_ctrl = cy_card[card].base_addr + - (cy_readl(&firm_id->zfwctrl_addr) & 0xfffff); + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = zfw_ctrl->ch_ctrl; @@ -2282,7 +2278,7 @@ static void shutdown(struct cyclades_port *info) if (!info->tty || (info->tty->termios->c_cflag & HUPCL)) { cy_writel(&ch_ctrl[channel].rs_control, - (__u32)(cy_readl(&ch_ctrl[channel].rs_control)& + (__u32)(readl(&ch_ctrl[channel].rs_control) & ~(C_RS_RTS | C_RS_DTR))); retval = cyz_issue_cmd(&cy_card[info->card], channel, C_CM_IOCTLM, 0L); @@ -2390,10 +2386,8 @@ block_til_ready(struct tty_struct *tty, struct file *filp, #ifdef CY_DEBUG_DTR printk("cyc:block_til_ready raising DTR\n"); printk(" status: 0x%x, 0x%x\n", - cy_readb(base_addr + - (CyMSVR1 << index)), - cy_readb(base_addr + - (CyMSVR2 << index))); + readb(base_addr + (CyMSVR1 << index)), + readb(base_addr + (CyMSVR2 << index))); #endif } CY_UNLOCK(info, flags); @@ -2410,7 +2404,7 @@ block_til_ready(struct tty_struct *tty, struct file *filp, cy_writeb(base_addr + (CyCAR << index), (u_char) channel); if (!(info->flags & ASYNC_CLOSING) && (C_CLOCAL(tty) || - (cy_readb(base_addr + + (readb(base_addr + (CyMSVR1 << index)) & CyDCD))) { CY_UNLOCK(info, flags); break; @@ -2444,19 +2438,17 @@ block_til_ready(struct tty_struct *tty, struct file *filp, return -EINVAL; } - zfw_ctrl = base_addr + (cy_readl(&firm_id->zfwctrl_addr) & - 0xfffff); + zfw_ctrl = base_addr + (readl(&firm_id->zfwctrl_addr)& 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = zfw_ctrl->ch_ctrl; while (1) { if ((tty->termios->c_cflag & CBAUD)) { cy_writel(&ch_ctrl[channel].rs_control, - cy_readl(&ch_ctrl[channel]. 
- rs_control) | (C_RS_RTS | - C_RS_DTR)); + readl(&ch_ctrl[channel].rs_control) | + C_RS_RTS | C_RS_DTR); retval = cyz_issue_cmd(&cy_card[info->card], - channel, C_CM_IOCTLM, 0L); + channel, C_CM_IOCTLM, 0L); if (retval != 0) { printk("cyc:block_til_ready retval on " "ttyC%d was %x\n", @@ -2475,7 +2467,7 @@ block_til_ready(struct tty_struct *tty, struct file *filp, break; } if (!(info->flags & ASYNC_CLOSING) && (C_CLOCAL(tty) || - (cy_readl(&ch_ctrl[channel].rs_status) & + (readl(&ch_ctrl[channel].rs_status) & C_RS_DCD))) { break; } @@ -2540,11 +2532,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp) struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS; if (!ISZLOADED(*cinfo)) { - if (((ZE_V1 == cy_readl( - &((struct RUNTIME_9060 __iomem *) + if (((ZE_V1 == readl(&((struct RUNTIME_9060 __iomem *) (cinfo->ctl_addr))->mail_box_0)) && Z_FPGA_CHECK(*cinfo)) && - (ZFIRM_HLT == cy_readl( + (ZFIRM_HLT == readl( &firm_id->signature))) { printk("cyc:Cyclades-Z Error: you need an " "external power supply for this number " @@ -2565,15 +2556,14 @@ static int cy_open(struct tty_struct *tty, struct file *filp) struct BOARD_CTRL __iomem *board_ctrl; zfw_ctrl = cinfo->base_addr + - (cy_readl(&firm_id->zfwctrl_addr) & - 0xfffff); + (readl(&firm_id->zfwctrl_addr) & + 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; /* Enable interrupts on the PLX chip */ cy_writew(cinfo->ctl_addr + 0x68, - cy_readw(cinfo->ctl_addr + - 0x68) | 0x0900); + readw(cinfo->ctl_addr + 0x68) | 0x0900); /* Enable interrupts on the FW */ retval = cyz_issue_cmd(cinfo, 0, C_CM_IRQ_ENBL, 0L); @@ -2582,7 +2572,7 @@ static int cy_open(struct tty_struct *tty, struct file *filp) retval); } cinfo->nports = - (int)cy_readl(&board_ctrl->n_channel); + (int)readl(&board_ctrl->n_channel); cinfo->intr_enabled = 1; } } @@ -2701,7 +2691,7 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout) index = cy_card[card].bus_index; base_addr = cy_card[card].base_addr + (cy_chip_offset[chip] << index); - while (cy_readb(base_addr + (CySRER << index)) & CyTxRdy) { + while (readb(base_addr + (CySRER << index)) & CyTxRdy) { #ifdef CY_DEBUG_WAIT_UNTIL_SENT printk("Not clean (jiff=%lu)...", jiffies); #endif @@ -2794,7 +2784,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp) channel &= 0x03; cy_writeb(base_addr + (CyCAR << index), (u_char) channel); cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + (CySRER << index)) & ~CyRxData); + readb(base_addr + (CySRER << index)) & ~CyRxData); if (info->flags & ASYNC_INITIALIZED) { /* Waiting for on-board buffers to be empty before closing the port */ @@ -2808,12 +2798,12 @@ static void cy_close(struct tty_struct *tty, struct file *filp) void __iomem *base_addr = cy_card[info->card].base_addr; struct FIRM_ID __iomem *firm_id = base_addr + ID_ADDRESS; struct ZFW_CTRL __iomem *zfw_ctrl = - base_addr + (cy_readl(&firm_id->zfwctrl_addr) & 0xfffff); + base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); struct CH_CTRL __iomem *ch_ctrl = zfw_ctrl->ch_ctrl; int channel = info->line - cy_card[info->card].first_line; int retval; - if (cy_readl(&ch_ctrl[channel].flow_status) != C_FS_TXIDLE) { + if (readl(&ch_ctrl[channel].flow_status) != C_FS_TXIDLE) { retval = cyz_issue_cmd(&cy_card[info->card], channel, C_CM_IOCTLW, 0L); if (retval != 0) { @@ -3023,13 +3013,13 @@ static int cy_chars_in_buffer(struct tty_struct *tty) firm_id = cy_card[card].base_addr + ID_ADDRESS; zfw_ctrl = cy_card[card].base_addr + - (cy_readl(&firm_id->zfwctrl_addr) & 0xfffff); + 
(readl(&firm_id->zfwctrl_addr) & 0xfffff); ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); buf_ctrl = &(zfw_ctrl->buf_ctrl[channel]); - tx_get = cy_readl(&buf_ctrl->tx_get); - tx_put = cy_readl(&buf_ctrl->tx_put); - tx_bufsize = cy_readl(&buf_ctrl->tx_bufsize); + tx_get = readl(&buf_ctrl->tx_get); + tx_put = readl(&buf_ctrl->tx_put); + tx_bufsize = readl(&buf_ctrl->tx_bufsize); if (tx_put >= tx_get) char_count = tx_put - tx_get; else @@ -3269,8 +3259,7 @@ static void set_line_char(struct cyclades_port *info) if (C_CLOCAL(info->tty)) { /* without modem intr */ cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + - (CySRER << index)) | CyMdmCh); + readb(base_addr + (CySRER << index)) | CyMdmCh); /* act on 1->0 modem transitions */ if ((cflag & CRTSCTS) && info->rflow) { cy_writeb(base_addr + (CyMCOR1 << index), @@ -3284,7 +3273,7 @@ static void set_line_char(struct cyclades_port *info) } else { /* without modem intr */ cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + + readb(base_addr + (CySRER << index)) | CyMdmCh); /* act on 1->0 modem transitions */ if ((cflag & CRTSCTS) && info->rflow) { @@ -3311,8 +3300,8 @@ static void set_line_char(struct cyclades_port *info) #ifdef CY_DEBUG_DTR printk("cyc:set_line_char dropping DTR\n"); printk(" status: 0x%x, 0x%x\n", - cy_readb(base_addr + (CyMSVR1 << index)), - cy_readb(base_addr + (CyMSVR2 << index))); + readb(base_addr + (CyMSVR1 << index)), + readb(base_addr + (CyMSVR2 << index))); #endif } else { if (info->rtsdtr_inv) { @@ -3325,8 +3314,8 @@ static void set_line_char(struct cyclades_port *info) #ifdef CY_DEBUG_DTR printk("cyc:set_line_char raising DTR\n"); printk(" status: 0x%x, 0x%x\n", - cy_readb(base_addr + (CyMSVR1 << index)), - cy_readb(base_addr + (CyMSVR2 << index))); + readb(base_addr + (CyMSVR1 << index)), + readb(base_addr + (CyMSVR2 << index))); #endif } @@ -3350,7 +3339,7 @@ static void set_line_char(struct cyclades_port *info) } zfw_ctrl = cy_card[card].base_addr + - (cy_readl(&firm_id->zfwctrl_addr) & 0xfffff); + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); buf_ctrl = &zfw_ctrl->buf_ctrl[channel]; @@ -3401,10 +3390,10 @@ static void set_line_char(struct cyclades_port *info) } if (cflag & CSTOPB) { cy_writel(&ch_ctrl->comm_data_l, - cy_readl(&ch_ctrl->comm_data_l) | C_DL_2STOP); + readl(&ch_ctrl->comm_data_l) | C_DL_2STOP); } else { cy_writel(&ch_ctrl->comm_data_l, - cy_readl(&ch_ctrl->comm_data_l) | C_DL_1STOP); + readl(&ch_ctrl->comm_data_l) | C_DL_1STOP); } if (cflag & PARENB) { if (cflag & PARODD) { @@ -3419,12 +3408,10 @@ static void set_line_char(struct cyclades_port *info) /* CTS flow control flag */ if (cflag & CRTSCTS) { cy_writel(&ch_ctrl->hw_flow, - cy_readl(&ch_ctrl-> - hw_flow) | C_RS_CTS | C_RS_RTS); + readl(&ch_ctrl->hw_flow) | C_RS_CTS | C_RS_RTS); } else { - cy_writel(&ch_ctrl->hw_flow, - cy_readl(&ch_ctrl-> - hw_flow) & ~(C_RS_CTS | C_RS_RTS)); + cy_writel(&ch_ctrl->hw_flow, readl(&ch_ctrl->hw_flow) & + ~(C_RS_CTS | C_RS_RTS)); } /* As the HW flow control is done in firmware, the driver doesn't need to care about it */ @@ -3454,13 +3441,13 @@ static void set_line_char(struct cyclades_port *info) if (baud == 0) { /* baud rate is zero, turn off line */ cy_writel(&ch_ctrl->rs_control, - cy_readl(&ch_ctrl->rs_control) & ~C_RS_DTR); + readl(&ch_ctrl->rs_control) & ~C_RS_DTR); #ifdef CY_DEBUG_DTR printk("cyc:set_line_char dropping Z DTR\n"); #endif } else { cy_writel(&ch_ctrl->rs_control, - cy_readl(&ch_ctrl->rs_control) | 
C_RS_DTR); + readl(&ch_ctrl->rs_control) | C_RS_DTR); #ifdef CY_DEBUG_DTR printk("cyc:set_line_char raising Z DTR\n"); #endif @@ -3575,7 +3562,7 @@ static int get_lsr_info(struct cyclades_port *info, unsigned int __user * value) cy_card[card].base_addr + (cy_chip_offset[chip] << index); CY_LOCK(info, flags); - status = cy_readb(base_addr + (CySRER << index)) & + status = readb(base_addr + (CySRER << index)) & (CyTxRdy | CyTxMpty); CY_UNLOCK(info, flags); result = (status ? 0 : TIOCSER_TEMT); @@ -3614,8 +3601,8 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file) CY_LOCK(info, flags); cy_writeb(base_addr + (CyCAR << index), (u_char) channel); - status = cy_readb(base_addr + (CyMSVR1 << index)); - status |= cy_readb(base_addr + (CyMSVR2 << index)); + status = readb(base_addr + (CyMSVR1 << index)); + status |= readb(base_addr + (CyMSVR2 << index)); CY_UNLOCK(info, flags); if (info->rtsdtr_inv) { @@ -3639,10 +3626,10 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file) firm_id = cy_card[card].base_addr + ID_ADDRESS; if (ISZLOADED(cy_card[card])) { zfw_ctrl = cy_card[card].base_addr + - (cy_readl(&firm_id->zfwctrl_addr) & 0xfffff); + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = zfw_ctrl->ch_ctrl; - lstatus = cy_readl(&ch_ctrl[channel].rs_status); + lstatus = readl(&ch_ctrl[channel].rs_status); result = ((lstatus & C_RS_RTS) ? TIOCM_RTS : 0) | ((lstatus & C_RS_DTR) ? TIOCM_DTR : 0) | ((lstatus & C_RS_DCD) ? TIOCM_CAR : 0) | @@ -3724,8 +3711,8 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, #ifdef CY_DEBUG_DTR printk("cyc:set_modem_info raising DTR\n"); printk(" status: 0x%x, 0x%x\n", - cy_readb(base_addr + (CyMSVR1 << index)), - cy_readb(base_addr + (CyMSVR2 << index))); + readb(base_addr + (CyMSVR1 << index)), + readb(base_addr + (CyMSVR2 << index))); #endif CY_UNLOCK(info, flags); } @@ -3744,8 +3731,8 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, #ifdef CY_DEBUG_DTR printk("cyc:set_modem_info dropping DTR\n"); printk(" status: 0x%x, 0x%x\n", - cy_readb(base_addr + (CyMSVR1 << index)), - cy_readb(base_addr + (CyMSVR2 << index))); + readb(base_addr + (CyMSVR1 << index)), + readb(base_addr + (CyMSVR2 << index))); #endif CY_UNLOCK(info, flags); } @@ -3755,29 +3742,29 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, firm_id = cy_card[card].base_addr + ID_ADDRESS; if (ISZLOADED(cy_card[card])) { zfw_ctrl = cy_card[card].base_addr + - (cy_readl(&firm_id->zfwctrl_addr) & 0xfffff); + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = zfw_ctrl->ch_ctrl; if (set & TIOCM_RTS) { CY_LOCK(info, flags); cy_writel(&ch_ctrl[channel].rs_control, - cy_readl(&ch_ctrl[channel]. - rs_control) | C_RS_RTS); + readl(&ch_ctrl[channel].rs_control) | + C_RS_RTS); CY_UNLOCK(info, flags); } if (clear & TIOCM_RTS) { CY_LOCK(info, flags); cy_writel(&ch_ctrl[channel].rs_control, - cy_readl(&ch_ctrl[channel]. - rs_control) & ~C_RS_RTS); + readl(&ch_ctrl[channel].rs_control) & + ~C_RS_RTS); CY_UNLOCK(info, flags); } if (set & TIOCM_DTR) { CY_LOCK(info, flags); cy_writel(&ch_ctrl[channel].rs_control, - cy_readl(&ch_ctrl[channel]. - rs_control) | C_RS_DTR); + readl(&ch_ctrl[channel].rs_control) | + C_RS_DTR); #ifdef CY_DEBUG_DTR printk("cyc:set_modem_info raising Z DTR\n"); #endif @@ -3786,8 +3773,8 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, if (clear & TIOCM_DTR) { CY_LOCK(info, flags); cy_writel(&ch_ctrl[channel].rs_control, - cy_readl(&ch_ctrl[channel]. 
- rs_control) & ~C_RS_DTR); + readl(&ch_ctrl[channel].rs_control) & + ~C_RS_DTR); #ifdef CY_DEBUG_DTR printk("cyc:set_modem_info clearing Z DTR\n"); #endif @@ -3924,7 +3911,7 @@ get_threshold(struct cyclades_port *info, unsigned long __user * value) base_addr = cy_card[card].base_addr + (cy_chip_offset[chip] << index); - tmp = cy_readb(base_addr + (CyCOR3 << index)) & CyREC_FIFO; + tmp = readb(base_addr + (CyCOR3 << index)) & CyREC_FIFO; return put_user(tmp, value); } else { /* Nothing to do! */ @@ -3984,7 +3971,7 @@ static int get_timeout(struct cyclades_port *info, unsigned long __user * value) base_addr = cy_card[card].base_addr + (cy_chip_offset[chip] << index); - tmp = cy_readb(base_addr + (CyRTPR << index)); + tmp = readb(base_addr + (CyRTPR << index)); return put_user(tmp, value); } else { /* Nothing to do! */ @@ -4410,7 +4397,7 @@ static void cy_stop(struct tty_struct *tty) cy_writeb(base_addr + (CyCAR << index), (u_char)(channel & 0x0003)); /* index channel */ cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + (CySRER << index)) & ~CyTxRdy); + readb(base_addr + (CySRER << index)) & ~CyTxRdy); CY_UNLOCK(info, flags); } else { /* Nothing to do! */ @@ -4444,7 +4431,7 @@ static void cy_start(struct tty_struct *tty) CY_LOCK(info, flags); cy_writeb(base_addr + (CyCAR << index), (u_char) (channel & 0x0003)); /* index channel */ cy_writeb(base_addr + (CySRER << index), - cy_readb(base_addr + (CySRER << index)) | CyTxRdy); + readb(base_addr + (CySRER << index)) | CyTxRdy); CY_UNLOCK(info, flags); } else { /* Nothing to do! */ @@ -4537,7 +4524,7 @@ cyy_init_card(void __iomem * true_base_addr, int index) base_addr = true_base_addr + (cy_chip_offset[chip_number] << index); mdelay(1); - if (cy_readb(base_addr + (CyCCR << index)) != 0x00) { + if (readb(base_addr + (CyCCR << index)) != 0x00) { /************* printk(" chip #%d at %#6lx is never idle (CCR != 0)\n", chip_number, (unsigned long)base_addr); @@ -4554,7 +4541,7 @@ cyy_init_card(void __iomem * true_base_addr, int index) chip 4 GFRCR register appears at chip 0, there is no chip 4 and this must be a Cyclom-16Y, not a Cyclom-32Ye. */ - if (chip_number == 4 && cy_readb(true_base_addr + + if (chip_number == 4 && readb(true_base_addr + (cy_chip_offset[0] << index) + (CyGFRCR << index)) == 0) { return chip_number; @@ -4563,7 +4550,7 @@ cyy_init_card(void __iomem * true_base_addr, int index) cy_writeb(base_addr + (CyCCR << index), CyCHIP_RESET); mdelay(1); - if (cy_readb(base_addr + (CyGFRCR << index)) == 0x00) { + if (readb(base_addr + (CyGFRCR << index)) == 0x00) { /* printk(" chip #%d at %#6lx is not responding ", chip_number, (unsigned long)base_addr); @@ -4571,7 +4558,7 @@ cyy_init_card(void __iomem * true_base_addr, int index) */ return chip_number; } - if ((0xf0 & (cy_readb(base_addr + (CyGFRCR << index)))) != + if ((0xf0 & (readb(base_addr + (CyGFRCR << index)))) != 0x40) { /* printk(" chip #%d at %#6lx is not valid (GFRCR == " @@ -4582,7 +4569,7 @@ cyy_init_card(void __iomem * true_base_addr, int index) return chip_number; } cy_writeb(base_addr + (CyGCR << index), CyCH0_SERIAL); - if (cy_readb(base_addr + (CyGFRCR << index)) >= CD1400_REV_J) { + if (readb(base_addr + (CyGFRCR << index)) >= CD1400_REV_J) { /* It is a CD1400 rev. J or later */ /* Impossible to reach 5ms with this chip. Changed to 2ms instead (f = 500 Hz). 
*/ @@ -4595,7 +4582,7 @@ cyy_init_card(void __iomem * true_base_addr, int index) /* printk(" chip #%d at %#6lx is rev 0x%2x\n", chip_number, (unsigned long)base_addr, - cy_readb(base_addr+(CyGFRCR<mail_box_0); + mailbox = (__u32)readl(&((struct RUNTIME_9060 __iomem *) + cy_pci_addr0)->mail_box_0); if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) { printk(" Warning: PCI I/O bit incorrectly " @@ -5005,10 +4990,10 @@ static int __init cy_detect_pci(void) WIN_CREG); printk("Cyclades-8Zo/PCI: FPGA id %lx, ver " "%lx\n", (ulong) (0xff & - cy_readl(&((struct CUSTOM_REG *) + readl(&((struct CUSTOM_REG *) (cy_pci_addr2))->fpga_id)), (ulong)(0xff & - cy_readl(&((struct CUSTOM_REG *) + readl(&((struct CUSTOM_REG *) (cy_pci_addr2))-> fpga_version))); cy_writel(&((struct RUNTIME_9060 *) @@ -5113,7 +5098,7 @@ static int __init cy_detect_pci(void) Ze_pdev[j] = Ze_pdev[j + 1]; } ZeIndex--; - mailbox = (__u32)cy_readl(&((struct RUNTIME_9060 __iomem *) + mailbox = (__u32)readl(&((struct RUNTIME_9060 __iomem *) cy_pci_addr0)->mail_box_0); #ifdef CY_PCI_DEBUG printk("Cyclades-Z/PCI: relocate winaddr=0x%lx ctladdr=0x%lx\n", @@ -5382,7 +5367,7 @@ static int __init cy_init(void) cinfo = &cy_card[board]; if (cinfo->num_chips == -1) { /* Cyclades-Z */ number_z_boards++; - mailbox = cy_readl(&((struct RUNTIME_9060 __iomem *) + mailbox = readl(&((struct RUNTIME_9060 __iomem *) cy_card[board].ctl_addr)-> mail_box_0); nports = (mailbox == ZE_V1) ? ZE_V1_NPORTS : 8; @@ -5478,7 +5463,7 @@ static int __init cy_init(void) info->icount.overrun = info->icount.brk = 0; chip_number = (port - cinfo->first_line) / 4; if ((info->chip_rev = - cy_readb(cinfo->base_addr + + readb(cinfo->base_addr + (cy_chip_offset[chip_number] << index) + (CyGFRCR << index))) >= CD1400_REV_J) { diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index 4c5b4763f5b8..f0ad61f69792 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -539,13 +539,9 @@ struct cyclades_chip { * (required to support Alpha systems) * ***************************************/ -#define cy_writeb(port,val) {writeb((val),(port)); mb();} -#define cy_writew(port,val) {writew((val),(port)); mb();} -#define cy_writel(port,val) {writel((val),(port)); mb();} - -#define cy_readb(port) readb(port) -#define cy_readw(port) readw(port) -#define cy_readl(port) readl(port) +#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0) +#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0) +#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0) /* * Statistics counters -- cgit v1.2.3 From 8f81dd149806bc53c68c92f34d61f88427079039 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 8 May 2007 00:35:54 -0700 Subject: PNP: notice whether we have PNP devices (PNPBIOS or PNPACPI) This series converts i386 and x86_64 legacy serial ports to be platform devices and prevents probing for them if we have PNP. This prevents double discovery, where a device was found both by the legacy probe and by 8250_pnp. This also prevents the serial driver from claiming IRDA devices (unless they have a UART PNP ID). The serial legacy probe sometimes assumed the wrong IRQ, so the user had to use "setserial" to fix it. Removing the need for setserial to make IRDA devices work seems good, but it does break some things. In particular, you may need to keep setserial from poking legacy UART stuff back in by doing something like "dpkg-reconfigure setserial" with the "kernel" option. 
Otherwise, the setserial-discovered "UART" will claim resources and prevent the IRDA driver from loading. This patch: If we can discover devices using PNP, we can skip some legacy probes. This flag ("pnp_platform_devices") indicates that PNPBIOS or PNPACPI is enabled and should tell us about builtin devices. Signed-off-by: Bjorn Helgaas Cc: Keith Owens Cc: Len Brown Cc: Adam Belay Cc: Matthieu CASTET Cc: Jean Tourrilhes Cc: Matthew Garrett Cc: Ville Syrjala Cc: Russell King Cc: Samuel Ortiz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/pnp/core.c | 8 ++++++++ drivers/pnp/pnpacpi/core.c | 1 + drivers/pnp/pnpbios/core.c | 1 + include/linux/pnp.h | 2 ++ 4 files changed, 12 insertions(+) (limited to 'include/linux') diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c index d8d75541552c..3e20b1cc7778 100644 --- a/drivers/pnp/core.c +++ b/drivers/pnp/core.c @@ -23,6 +23,14 @@ static LIST_HEAD(pnp_protocols); LIST_HEAD(pnp_global); DEFINE_SPINLOCK(pnp_lock); +/* + * ACPI or PNPBIOS should tell us about all platform devices, so we can + * skip some blind probes. ISAPNP typically enumerates only plug-in ISA + * devices, not built-in things like COM ports. + */ +int pnp_platform_devices; +EXPORT_SYMBOL(pnp_platform_devices); + void *pnp_alloc(long size) { void *result; diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 7eb8275185b6..a00548799e98 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -285,6 +285,7 @@ static int __init pnpacpi_init(void) acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL); pnp_info("PnP ACPI: found %d devices", num); unregister_acpi_bus_type(&acpi_pnp_bus); + pnp_platform_devices = 1; return 0; } subsys_initcall(pnpacpi_init); diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index b71aff21b3fc..3a201b77b963 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c @@ -570,6 +570,7 @@ static int __init pnpbios_init(void) /* scan for pnpbios devices */ build_devlist(); + pnp_platform_devices = 1; return 0; } diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 00dae5ba128a..2a1897e6f937 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -364,6 +364,7 @@ int pnp_add_device(struct pnp_dev *dev); int pnp_device_attach(struct pnp_dev *pnp_dev); void pnp_device_detach(struct pnp_dev *pnp_dev); extern struct list_head pnp_global; +extern int pnp_platform_devices; /* multidevice card support */ int pnp_add_card(struct pnp_card *card); @@ -411,6 +412,7 @@ static inline int pnp_init_device(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_add_device(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_device_attach(struct pnp_dev *pnp_dev) { return -ENODEV; } static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { ; } +#define pnp_platform_devices 0 /* multidevice card support */ static inline int pnp_add_card(struct pnp_card *card) { return -ENODEV; } -- cgit v1.2.3 From b81cc310f1309f6090a5655af1fe5831ded53233 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 8 May 2007 00:36:38 -0700 Subject: Char: cyclades, unexport struct cyclades_card Do not export internal card data to userspace. cytune doesn't use this anyway. 
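For context on the pnp_platform_devices flag introduced in the PNP patch above: it is only an indicator, and the point of exporting it is that legacy probe code can test it before blindly poking I/O ports. A minimal sketch of a consumer, assuming a hypothetical legacy_serial_probe() (the actual conversion of the i386/x86_64 legacy serial ports is done later in this series):

#include <linux/init.h>
#include <linux/pnp.h>	/* pnp_platform_devices; defined as 0 when CONFIG_PNP=n */

/* Hypothetical legacy probe: skip the blind scan when PNPBIOS or PNPACPI
 * has already told us about the built-in devices. */
static int __init legacy_serial_probe(void)
{
	if (pnp_platform_devices)
		return 0;	/* 8250_pnp will register these ports instead */

	/* ... blind probe of the historical 0x3f8/0x2f8 addresses ... */
	return 0;
}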
Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/cyclades.c | 8 -------- include/linux/cyclades.h | 15 ++------------- 2 files changed, 2 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 249f443115f2..1b4ff138b8ff 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -4049,14 +4049,6 @@ cy_ioctl(struct tty_struct *tty, struct file *file, case CYGETRTSDTR_INV: ret_val = info->rtsdtr_inv; break; - case CYGETCARDINFO: - if (copy_to_user(argp, &cy_card[info->card], - sizeof(struct cyclades_card))) { - ret_val = -EFAULT; - break; - } - ret_val = 0; - break; case CYGETCD1400VER: ret_val = info->chip_rev; break; diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index f0ad61f69792..e76f486550c6 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -110,7 +110,6 @@ struct cyclades_idle_stats { #define CYZSETPOLLCYCLE 0x43590e #define CYZGETPOLLCYCLE 0x43590f #define CYGETCD1400VER 0x435910 -#define CYGETCARDINFO 0x435911 #define CYSETWAIT 0x435912 #define CYGETWAIT 0x435913 @@ -506,8 +505,9 @@ struct ZFW_CTRL { /****************** ****************** *******************/ #endif +#ifdef __KERNEL__ + /* Per card data structure */ -struct resource; struct cyclades_card { unsigned long base_phys; unsigned long ctl_phys; @@ -520,20 +520,9 @@ struct cyclades_card { int bus_index; /* address shift - 0 for ISA, 1 for PCI */ int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ struct pci_dev *pdev; -#ifdef __KERNEL__ spinlock_t card_lock; -#else - unsigned long filler; -#endif }; -struct cyclades_chip { - int filler; -}; - - -#ifdef __KERNEL__ - /*************************************** * Memory access functions/macros * * (required to support Alpha systems) * -- cgit v1.2.3 From 46039f8a64cd50bbf70ce54fefe148e75598391b Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 8 May 2007 00:36:40 -0700 Subject: Char: cyclades, remove useless fileds from cyclades_card pde, ctl_phys and base_phys are useless -- they are never used. 
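For the cyclades.h hunk in the unexport patch above, the underlying pattern is the usual split for headers shared with userspace: ioctl numbers and on-the-wire structures stay unconditional, while kernel-internal data moves behind #ifdef __KERNEL__. A trimmed before/after sketch of the idea (field list shortened, struct name suffixes added here only for illustration):

/* Before: the struct leaked into userspace, so the kernel-only member
 * needed a dummy stand-in to keep the size stable for user builds. */
struct cyclades_card_before {
	unsigned long base_phys;
#ifdef __KERNEL__
	spinlock_t card_lock;
#else
	unsigned long filler;
#endif
};

/* After: the whole definition sits behind __KERNEL__, and with the
 * CYGETCARDINFO ioctl gone the layout is no longer part of the ABI. */
#ifdef __KERNEL__
struct cyclades_card_after {
	unsigned long base_phys;
	spinlock_t card_lock;
};
#endif /* __KERNEL__ */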
Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/cyclades.c | 9 --------- include/linux/cyclades.h | 3 --- 2 files changed, 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 1b4ff138b8ff..a79a4b213bba 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -4914,15 +4914,12 @@ static int __devinit cy_init_Ze(unsigned long cy_pci_phys0, #endif /* CONFIG_CYZ_INTR */ /* set cy_card */ - cy_card[j].base_phys = cy_pci_phys2; - cy_card[j].ctl_phys = cy_pci_phys0; cy_card[j].base_addr = cy_pci_addr2; cy_card[j].ctl_addr = cy_pci_addr0; cy_card[j].irq = (int)cy_pci_irq; cy_card[j].bus_index = 1; cy_card[j].first_line = cy_next_channel; cy_card[j].num_chips = -1; - cy_card[j].pdev = pdev; cy_init_card(&cy_card[j], j); pci_set_drvdata(pdev, &cy_card[j]); @@ -5066,15 +5063,12 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev, } /* set cy_card */ - cy_card[j].base_phys = (ulong) cy_pci_phys2; - cy_card[j].ctl_phys = (ulong) cy_pci_phys0; cy_card[j].base_addr = cy_pci_addr2; cy_card[j].ctl_addr = cy_pci_addr0; cy_card[j].irq = (int)cy_pci_irq; cy_card[j].bus_index = 1; cy_card[j].first_line = cy_next_channel; cy_card[j].num_chips = cy_pci_nchan / 4; - cy_card[j].pdev = pdev; cy_init_card(&cy_card[j], j); pci_set_drvdata(pdev, &cy_card[j]); @@ -5253,15 +5247,12 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev, #endif /* CONFIG_CYZ_INTR */ /* set cy_card */ - cy_card[j].base_phys = cy_pci_phys2; - cy_card[j].ctl_phys = cy_pci_phys0; cy_card[j].base_addr = cy_pci_addr2; cy_card[j].ctl_addr = cy_pci_addr0; cy_card[j].irq = (int)cy_pci_irq; cy_card[j].bus_index = 1; cy_card[j].first_line = cy_next_channel; cy_card[j].num_chips = -1; - cy_card[j].pdev = pdev; cy_init_card(&cy_card[j], j); pci_set_drvdata(pdev, &cy_card[j]); diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index e76f486550c6..e06cd470ddd2 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -509,8 +509,6 @@ struct ZFW_CTRL { /* Per card data structure */ struct cyclades_card { - unsigned long base_phys; - unsigned long ctl_phys; void __iomem *base_addr; void __iomem *ctl_addr; int irq; @@ -519,7 +517,6 @@ struct cyclades_card { int nports; /* Number of ports in the card */ int bus_index; /* address shift - 0 for ISA, 1 for PCI */ int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ - struct pci_dev *pdev; spinlock_t card_lock; }; -- cgit v1.2.3 From 875b206b5f4971cc990a12e891f5519f0f6772a9 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 8 May 2007 00:36:49 -0700 Subject: Char: cyclades, make info->card a pointer Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/cyclades.c | 323 ++++++++++++++++++++++++----------------------- include/linux/cyclades.h | 2 +- 2 files changed, 163 insertions(+), 162 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 01c835a466bc..9bec4ef876f2 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -655,12 +655,12 @@ #define CY_LOCK(info,flags) \ do { \ - spin_lock_irqsave(&cy_card[info->card].card_lock, flags); \ + spin_lock_irqsave(&info->card->card_lock, flags); \ } while (0) #define CY_UNLOCK(info,flags) \ do { \ - spin_unlock_irqrestore(&cy_card[info->card].card_lock, flags); \ + spin_unlock_irqrestore(&info->card->card_lock, flags); \ } while (0) #include @@ -1555,7 +1555,7 @@ static 
void cyz_handle_rx(struct cyclades_port *info, struct CH_CTRL __iomem *ch_ctrl, struct BUF_CTRL __iomem *buf_ctrl) { - struct cyclades_card *cinfo = &cy_card[info->card]; + struct cyclades_card *cinfo = info->card; struct tty_struct *tty = info->tty; int char_count; int len; @@ -1651,7 +1651,7 @@ static void cyz_handle_tx(struct cyclades_port *info, struct CH_CTRL __iomem *ch_ctrl, struct BUF_CTRL __iomem *buf_ctrl) { - struct cyclades_card *cinfo = &cy_card[info->card]; + struct cyclades_card *cinfo = info->card; struct tty_struct *tty = info->tty; char data; int char_count; @@ -1878,13 +1878,13 @@ static irqreturn_t cyz_interrupt(int irq, void *dev_id) static void cyz_rx_restart(unsigned long arg) { struct cyclades_port *info = (struct cyclades_port *)arg; + struct cyclades_card *card = info->card; int retval; - int card = info->card; - __u32 channel = (info->line) - (cy_card[card].first_line); + __u32 channel = info->line - card->first_line; unsigned long flags; CY_LOCK(info, flags); - retval = cyz_issue_cmd(&cy_card[card], channel, C_CM_INTBACK2, 0L); + retval = cyz_issue_cmd(card, channel, C_CM_INTBACK2, 0L); if (retval != 0) { printk(KERN_ERR "cyc:cyz_rx_restart retval on ttyC%d was %x\n", info->line, retval); @@ -1956,14 +1956,15 @@ static void cyz_poll(unsigned long arg) */ static int startup(struct cyclades_port *info) { + struct cyclades_card *card; unsigned long flags; int retval = 0; void __iomem *base_addr; - int card, chip, channel, index; + int chip, channel, index; unsigned long page; card = info->card; - channel = (info->line) - (cy_card[card].first_line); + channel = info->line - card->first_line; page = get_zeroed_page(GFP_KERNEL); if (!page) @@ -1993,12 +1994,11 @@ static int startup(struct cyclades_port *info) set_line_char(info); - if (!IS_CYC_Z(cy_card[card])) { + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = cy_card[card].base_addr + - (cy_chip_offset[chip] << index); + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc startup card %d, chip %d, channel %d, " @@ -2050,14 +2050,14 @@ static int startup(struct cyclades_port *info) struct CH_CTRL __iomem *ch_ctrl; int retval; - base_addr = cy_card[card].base_addr; + base_addr = card->base_addr; firm_id = base_addr + ID_ADDRESS; - if (!ISZLOADED(cy_card[card])) { + if (!ISZLOADED(*card)) { return -ENODEV; } - zfw_ctrl = cy_card[card].base_addr + + zfw_ctrl = card->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = zfw_ctrl->ch_ctrl; @@ -2088,15 +2088,14 @@ static int startup(struct cyclades_port *info) #endif /* CONFIG_CYZ_INTR */ #endif /* Z_WAKE */ - retval = cyz_issue_cmd(&cy_card[card], channel, C_CM_IOCTL, 0L); + retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L); if (retval != 0) { printk(KERN_ERR "cyc:startup(1) retval on ttyC%d was " "%x\n", info->line, retval); } /* Flush RX buffers before raising DTR and RTS */ - retval = cyz_issue_cmd(&cy_card[card], channel, C_CM_FLUSH_RX, - 0L); + retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_RX, 0L); if (retval != 0) { printk(KERN_ERR "cyc:startup(2) retval on ttyC%d was " "%x\n", info->line, retval); @@ -2107,7 +2106,7 @@ static int startup(struct cyclades_port *info) cy_writel(&ch_ctrl[channel].rs_control, readl(&ch_ctrl[channel].rs_control) | C_RS_RTS | C_RS_DTR); - retval = cyz_issue_cmd(&cy_card[info->card], channel, + retval = cyz_issue_cmd(info->card, channel, 
C_CM_IOCTLM, 0L); if (retval != 0) { printk(KERN_ERR "cyc:startup(3) retval on ttyC%d was " @@ -2145,18 +2144,18 @@ errout: static void start_xmit(struct cyclades_port *info) { + struct cyclades_card *card; unsigned long flags; void __iomem *base_addr; - int card, chip, channel, index; + int chip, channel, index; card = info->card; - channel = (info->line) - (cy_card[card].first_line); - if (!IS_CYC_Z(cy_card[card])) { + channel = info->line - card->first_line; + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = cy_card[card].base_addr + - (cy_chip_offset[chip] << index); + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); CY_LOCK(info, flags); cy_writeb(base_addr + (CyCAR << index), channel); @@ -2168,8 +2167,7 @@ static void start_xmit(struct cyclades_port *info) int retval; CY_LOCK(info, flags); - retval = cyz_issue_cmd(&cy_card[card], channel, C_CM_INTBACK, - 0L); + retval = cyz_issue_cmd(card, channel, C_CM_INTBACK, 0L); if (retval != 0) { printk(KERN_ERR "cyc:start_xmit retval on ttyC%d was " "%x\n", info->line, retval); @@ -2187,22 +2185,22 @@ static void start_xmit(struct cyclades_port *info) */ static void shutdown(struct cyclades_port *info) { + struct cyclades_card *card; unsigned long flags; void __iomem *base_addr; - int card, chip, channel, index; + int chip, channel, index; if (!(info->flags & ASYNC_INITIALIZED)) { return; } card = info->card; - channel = info->line - cy_card[card].first_line; - if (!IS_CYC_Z(cy_card[card])) { + channel = info->line - card->first_line; + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = cy_card[card].base_addr + - (cy_chip_offset[chip] << index); + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc shutdown Y card %d, chip %d, " @@ -2248,18 +2246,18 @@ static void shutdown(struct cyclades_port *info) struct CH_CTRL __iomem *ch_ctrl; int retval; - base_addr = cy_card[card].base_addr; + base_addr = card->base_addr; #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc shutdown Z card %d, channel %d, " "base_addr %p\n", card, channel, base_addr); #endif firm_id = base_addr + ID_ADDRESS; - if (!ISZLOADED(cy_card[card])) { + if (!ISZLOADED(*card)) { return; } - zfw_ctrl = cy_card[card].base_addr + + zfw_ctrl = card->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = zfw_ctrl->ch_ctrl; @@ -2277,7 +2275,7 @@ static void shutdown(struct cyclades_port *info) cy_writel(&ch_ctrl[channel].rs_control, (__u32)(readl(&ch_ctrl[channel].rs_control) & ~(C_RS_RTS | C_RS_DTR))); - retval = cyz_issue_cmd(&cy_card[info->card], channel, + retval = cyz_issue_cmd(info->card, channel, C_CM_IOCTLM, 0L); if (retval != 0) { printk(KERN_ERR"cyc:shutdown retval on ttyC%d " @@ -2318,7 +2316,7 @@ block_til_ready(struct tty_struct *tty, struct file *filp, int retval; void __iomem *base_addr; - cinfo = &cy_card[info->card]; + cinfo = info->card; channel = info->line - cinfo->first_line; /* @@ -2443,7 +2441,7 @@ block_til_ready(struct tty_struct *tty, struct file *filp, cy_writel(&ch_ctrl[channel].rs_control, readl(&ch_ctrl[channel].rs_control) | C_RS_RTS | C_RS_DTR); - retval = cyz_issue_cmd(&cy_card[info->card], + retval = cyz_issue_cmd(info->card, channel, C_CM_IOCTLM, 0L); if (retval != 0) { printk(KERN_ERR "cyc:block_til_ready " @@ -2522,8 +2520,8 @@ static int cy_open(struct tty_struct *tty, 
struct file *filp) treat it as absent from the system. This will make the user pay attention. */ - if (IS_CYC_Z(cy_card[info->card])) { - struct cyclades_card *cinfo = &cy_card[info->card]; + if (IS_CYC_Z(*info->card)) { + struct cyclades_card *cinfo = info->card; struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS; if (!ISZLOADED(*cinfo)) { @@ -2634,9 +2632,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp) */ static void cy_wait_until_sent(struct tty_struct *tty, int timeout) { + struct cyclades_card *card; struct cyclades_port *info = tty->driver_data; void __iomem *base_addr; - int card, chip, channel, index; + int chip, channel, index; unsigned long orig_jiffies; int char_time; @@ -2679,13 +2678,12 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout) timeout, char_time, jiffies); #endif card = info->card; - channel = (info->line) - (cy_card[card].first_line); - if (!IS_CYC_Z(cy_card[card])) { + channel = (info->line) - (card->first_line); + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = - cy_card[card].base_addr + (cy_chip_offset[chip] << index); + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); while (readb(base_addr + (CySRER << index)) & CyTxRdy) { #ifdef CY_DEBUG_WAIT_UNTIL_SENT printk(KERN_DEBUG "Not clean (jiff=%lu)...", jiffies); @@ -2771,10 +2769,10 @@ static void cy_close(struct tty_struct *tty, struct file *filp) } CY_LOCK(info, flags); - if (!IS_CYC_Z(cy_card[info->card])) { - int channel = info->line - cy_card[info->card].first_line; - int index = cy_card[info->card].bus_index; - void __iomem *base_addr = cy_card[info->card].base_addr + + if (!IS_CYC_Z(*info->card)) { + int channel = info->line - info->card->first_line; + int index = info->card->bus_index; + void __iomem *base_addr = info->card->base_addr + (cy_chip_offset[channel >> 2] << index); /* Stop accepting input */ channel &= 0x03; @@ -2791,16 +2789,16 @@ static void cy_close(struct tty_struct *tty, struct file *filp) } else { #ifdef Z_WAKE /* Waiting for on-board buffers to be empty before closing the port */ - void __iomem *base_addr = cy_card[info->card].base_addr; + void __iomem *base_addr = info->card->base_addr; struct FIRM_ID __iomem *firm_id = base_addr + ID_ADDRESS; struct ZFW_CTRL __iomem *zfw_ctrl = base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); struct CH_CTRL __iomem *ch_ctrl = zfw_ctrl->ch_ctrl; - int channel = info->line - cy_card[info->card].first_line; + int channel = info->line - info->card->first_line; int retval; if (readl(&ch_ctrl[channel].flow_status) != C_FS_TXIDLE) { - retval = cyz_issue_cmd(&cy_card[info->card], channel, + retval = cyz_issue_cmd(info->card, channel, C_CM_IOCTLW, 0L); if (retval != 0) { printk(KERN_DEBUG "cyc:cy_close retval on " @@ -2982,14 +2980,15 @@ static int cy_write_room(struct tty_struct *tty) static int cy_chars_in_buffer(struct tty_struct *tty) { + struct cyclades_card *card; struct cyclades_port *info = tty->driver_data; - int card, channel; + int channel; if (serial_paranoia_check(info, tty->name, "cy_chars_in_buffer")) return 0; card = info->card; - channel = (info->line) - (cy_card[card].first_line); + channel = (info->line) - (card->first_line); #ifdef Z_EXT_CHARS_IN_BUFFER if (!IS_CYC_Z(cy_card[card])) { @@ -3008,8 +3007,8 @@ static int cy_chars_in_buffer(struct tty_struct *tty) int char_count; __u32 tx_put, tx_get, tx_bufsize; - firm_id = cy_card[card].base_addr + ID_ADDRESS; - zfw_ctrl = 
cy_card[card].base_addr + + firm_id = card->base_addr + ID_ADDRESS; + zfw_ctrl = card->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); buf_ctrl = &(zfw_ctrl->buf_ctrl[channel]); @@ -3067,9 +3066,10 @@ static void cyy_baud_calc(struct cyclades_port *info, __u32 baud) */ static void set_line_char(struct cyclades_port *info) { + struct cyclades_card *card; unsigned long flags; void __iomem *base_addr; - int card, chip, channel, index; + int chip, channel, index; unsigned cflag, iflag; unsigned short chip_number; int baud, baud_rate = 0; @@ -3099,12 +3099,12 @@ static void set_line_char(struct cyclades_port *info) } card = info->card; - channel = (info->line) - (cy_card[card].first_line); + channel = info->line - card->first_line; chip_number = channel / 4; - if (!IS_CYC_Z(cy_card[card])) { + if (!IS_CYC_Z(*card)) { - index = cy_card[card].bus_index; + index = card->bus_index; /* baud rate */ baud = tty_get_baud_rate(info->tty); @@ -3222,8 +3222,7 @@ static void set_line_char(struct cyclades_port *info) chip = channel >> 2; channel &= 0x03; - base_addr = cy_card[card].base_addr + - (cy_chip_offset[chip] << index); + base_addr = card->base_addr + (cy_chip_offset[chip] << index); CY_LOCK(info, flags); cy_writeb(base_addr + (CyCAR << index), (u_char) channel); @@ -3331,12 +3330,12 @@ static void set_line_char(struct cyclades_port *info) __u32 sw_flow; int retval; - firm_id = cy_card[card].base_addr + ID_ADDRESS; - if (!ISZLOADED(cy_card[card])) { + firm_id = card->base_addr + ID_ADDRESS; + if (!ISZLOADED(*card)) { return; } - zfw_ctrl = cy_card[card].base_addr + + zfw_ctrl = card->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); @@ -3424,7 +3423,7 @@ static void set_line_char(struct cyclades_port *info) } cy_writel(&ch_ctrl->sw_flow, sw_flow); - retval = cyz_issue_cmd(&cy_card[card], channel, C_CM_IOCTL, 0L); + retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L); if (retval != 0) { printk(KERN_ERR "cyc:set_line_char retval on ttyC%d " "was %x\n", info->line, retval); @@ -3451,7 +3450,7 @@ static void set_line_char(struct cyclades_port *info) #endif } - retval = cyz_issue_cmd(&cy_card[card], channel, C_CM_IOCTLM,0L); + retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM,0L); if (retval != 0) { printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d " "was %x\n", info->line, retval); @@ -3468,14 +3467,15 @@ get_serial_info(struct cyclades_port *info, struct serial_struct __user * retinfo) { struct serial_struct tmp; - struct cyclades_card *cinfo = &cy_card[info->card]; + struct cyclades_card *cinfo = info->card; if (!retinfo) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.type = info->type; tmp.line = info->line; - tmp.port = info->card * 0x100 + info->line - cinfo->first_line; + tmp.port = (info->card - cy_card) * 0x100 + info->line - + cinfo->first_line; tmp.irq = cinfo->irq; tmp.flags = info->flags; tmp.close_delay = info->close_delay; @@ -3544,20 +3544,20 @@ check_and_exit: */ static int get_lsr_info(struct cyclades_port *info, unsigned int __user * value) { - int card, chip, channel, index; + struct cyclades_card *card; + int chip, channel, index; unsigned char status; unsigned int result; unsigned long flags; void __iomem *base_addr; card = info->card; - channel = (info->line) - (cy_card[card].first_line); - if (!IS_CYC_Z(cy_card[card])) { + channel = (info->line) - (card->first_line); + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = 
cy_card[card].bus_index; - base_addr = - cy_card[card].base_addr + (cy_chip_offset[chip] << index); + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); CY_LOCK(info, flags); status = readb(base_addr + (CySRER << index)) & @@ -3574,7 +3574,8 @@ static int get_lsr_info(struct cyclades_port *info, unsigned int __user * value) static int cy_tiocmget(struct tty_struct *tty, struct file *file) { struct cyclades_port *info = tty->driver_data; - int card, chip, channel, index; + struct cyclades_card *card; + int chip, channel, index; void __iomem *base_addr; unsigned long flags; unsigned char status; @@ -3589,13 +3590,12 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file) return -ENODEV; card = info->card; - channel = (info->line) - (cy_card[card].first_line); - if (!IS_CYC_Z(cy_card[card])) { + channel = info->line - card->first_line; + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = - cy_card[card].base_addr + (cy_chip_offset[chip] << index); + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); CY_LOCK(info, flags); cy_writeb(base_addr + (CyCAR << index), (u_char) channel); @@ -3615,10 +3615,10 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file) ((status & CyDSR) ? TIOCM_DSR : 0) | ((status & CyCTS) ? TIOCM_CTS : 0); } else { - base_addr = cy_card[card].base_addr; - firm_id = cy_card[card].base_addr + ID_ADDRESS; - if (ISZLOADED(cy_card[card])) { - zfw_ctrl = cy_card[card].base_addr + + base_addr = card->base_addr; + firm_id = card->base_addr + ID_ADDRESS; + if (ISZLOADED(*card)) { + zfw_ctrl = card->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = zfw_ctrl->ch_ctrl; @@ -3643,7 +3643,8 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear) { struct cyclades_port *info = tty->driver_data; - int card, chip, channel, index; + struct cyclades_card *card; + int chip, channel, index; void __iomem *base_addr; unsigned long flags; struct FIRM_ID __iomem *firm_id; @@ -3656,13 +3657,12 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, return -ENODEV; card = info->card; - channel = (info->line) - (cy_card[card].first_line); - if (!IS_CYC_Z(cy_card[card])) { + channel = (info->line) - (card->first_line); + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = - cy_card[card].base_addr + (cy_chip_offset[chip] << index); + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); if (set & TIOCM_RTS) { CY_LOCK(info, flags); @@ -3730,11 +3730,11 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, CY_UNLOCK(info, flags); } } else { - base_addr = cy_card[card].base_addr; + base_addr = card->base_addr; - firm_id = cy_card[card].base_addr + ID_ADDRESS; - if (ISZLOADED(cy_card[card])) { - zfw_ctrl = cy_card[card].base_addr + + firm_id = card->base_addr + ID_ADDRESS; + if (ISZLOADED(*card)) { + zfw_ctrl = card->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); board_ctrl = &zfw_ctrl->board_ctrl; ch_ctrl = zfw_ctrl->ch_ctrl; @@ -3779,7 +3779,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, return -ENODEV; } CY_LOCK(info, flags); - retval = cyz_issue_cmd(&cy_card[info->card], + retval = cyz_issue_cmd(info->card, channel, C_CM_IOCTLM, 0L); if (retval != 0) { printk(KERN_ERR "cyc:set_modem_info retval on ttyC%d " @@ -3802,7 +3802,7 @@ 
static void cy_break(struct tty_struct *tty, int break_state) return; CY_LOCK(info, flags); - if (!IS_CYC_Z(cy_card[info->card])) { + if (!IS_CYC_Z(*info->card)) { /* Let the transmit ISR take care of this (since it requires stuffing characters into the output stream). */ @@ -3829,16 +3829,16 @@ static void cy_break(struct tty_struct *tty, int break_state) int retval; if (break_state == -1) { - retval = cyz_issue_cmd(&cy_card[info->card], - info->line - cy_card[info->card].first_line, + retval = cyz_issue_cmd(info->card, + info->line - info->card->first_line, C_CM_SET_BREAK, 0L); if (retval != 0) { printk(KERN_ERR "cyc:cy_break (set) retval on " "ttyC%d was %x\n", info->line, retval); } } else { - retval = cyz_issue_cmd(&cy_card[info->card], - info->line - cy_card[info->card].first_line, + retval = cyz_issue_cmd(info->card, + info->line - info->card->first_line, C_CM_CLR_BREAK, 0L); if (retval != 0) { printk(KERN_DEBUG "cyc:cy_break (clr) retval " @@ -3865,18 +3865,19 @@ get_mon_info(struct cyclades_port *info, struct cyclades_monitor __user * mon) static int set_threshold(struct cyclades_port *info, unsigned long value) { + struct cyclades_card *card; void __iomem *base_addr; - int card, channel, chip, index; + int channel, chip, index; unsigned long flags; card = info->card; - channel = info->line - cy_card[card].first_line; - if (!IS_CYC_Z(cy_card[card])) { + channel = info->line - card->first_line; + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; + index = card->bus_index; base_addr = - cy_card[card].base_addr + (cy_chip_offset[chip] << index); + card->base_addr + (cy_chip_offset[chip] << index); info->cor3 &= ~CyREC_FIFO; info->cor3 |= value & CyREC_FIFO; @@ -3894,18 +3895,18 @@ static int set_threshold(struct cyclades_port *info, unsigned long value) static int get_threshold(struct cyclades_port *info, unsigned long __user * value) { + struct cyclades_card *card; void __iomem *base_addr; - int card, channel, chip, index; + int channel, chip, index; unsigned long tmp; card = info->card; - channel = info->line - cy_card[card].first_line; - if (!IS_CYC_Z(cy_card[card])) { + channel = info->line - card->first_line; + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = - cy_card[card].base_addr + (cy_chip_offset[chip] << index); + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); tmp = readb(base_addr + (CyCOR3 << index)) & CyREC_FIFO; return put_user(tmp, value); @@ -3930,18 +3931,18 @@ get_default_threshold(struct cyclades_port *info, unsigned long __user * value) static int set_timeout(struct cyclades_port *info, unsigned long value) { + struct cyclades_card *card; void __iomem *base_addr; - int card, channel, chip, index; + int channel, chip, index; unsigned long flags; card = info->card; - channel = info->line - cy_card[card].first_line; - if (!IS_CYC_Z(cy_card[card])) { + channel = info->line - card->first_line; + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = - cy_card[card].base_addr + (cy_chip_offset[chip] << index); + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); CY_LOCK(info, flags); cy_writeb(base_addr + (CyRTPR << index), value & 0xff); @@ -3954,18 +3955,18 @@ static int set_timeout(struct cyclades_port *info, unsigned long value) static int get_timeout(struct cyclades_port *info, unsigned long __user * value) { + struct 
cyclades_card *card; void __iomem *base_addr; - int card, channel, chip, index; + int channel, chip, index; unsigned long tmp; card = info->card; - channel = info->line - cy_card[card].first_line; - if (!IS_CYC_Z(cy_card[card])) { + channel = info->line - card->first_line; + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = - cy_card[card].base_addr + (cy_chip_offset[chip] << index); + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); tmp = readb(base_addr + (CyRTPR << index)); return put_user(tmp, value); @@ -4218,7 +4219,8 @@ static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios) static void cy_send_xchar(struct tty_struct *tty, char ch) { struct cyclades_port *info = tty->driver_data; - int card, channel; + struct cyclades_card *card; + int channel; if (serial_paranoia_check(info, tty->name, "cy_send_xchar")) return; @@ -4229,15 +4231,13 @@ static void cy_send_xchar(struct tty_struct *tty, char ch) cy_start(tty); card = info->card; - channel = info->line - cy_card[card].first_line; + channel = info->line - card->first_line; - if (IS_CYC_Z(cy_card[card])) { + if (IS_CYC_Z(*card)) { if (ch == STOP_CHAR(tty)) - cyz_issue_cmd(&cy_card[card], channel, C_CM_SENDXOFF, - 0L); + cyz_issue_cmd(card, channel, C_CM_SENDXOFF, 0L); else if (ch == START_CHAR(tty)) - cyz_issue_cmd(&cy_card[card], channel, C_CM_SENDXON, - 0L); + cyz_issue_cmd(card, channel, C_CM_SENDXON, 0L); } } @@ -4248,9 +4248,10 @@ static void cy_send_xchar(struct tty_struct *tty, char ch) static void cy_throttle(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; + struct cyclades_card *card; unsigned long flags; void __iomem *base_addr; - int card, chip, channel, index; + int chip, channel, index; #ifdef CY_DEBUG_THROTTLE char buf[64]; @@ -4266,19 +4267,19 @@ static void cy_throttle(struct tty_struct *tty) card = info->card; if (I_IXOFF(tty)) { - if (!IS_CYC_Z(cy_card[card])) + if (!IS_CYC_Z(*card)) cy_send_xchar(tty, STOP_CHAR(tty)); else info->throttle = 1; } if (tty->termios->c_cflag & CRTSCTS) { - channel = info->line - cy_card[card].first_line; - if (!IS_CYC_Z(cy_card[card])) { + channel = info->line - card->first_line; + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = cy_card[card].base_addr + + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); CY_LOCK(info, flags); @@ -4306,9 +4307,10 @@ static void cy_throttle(struct tty_struct *tty) static void cy_unthrottle(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; + struct cyclades_card *card; unsigned long flags; void __iomem *base_addr; - int card, chip, channel, index; + int chip, channel, index; #ifdef CY_DEBUG_THROTTLE char buf[64]; @@ -4330,12 +4332,12 @@ static void cy_unthrottle(struct tty_struct *tty) if (tty->termios->c_cflag & CRTSCTS) { card = info->card; - channel = info->line - cy_card[card].first_line; - if (!IS_CYC_Z(cy_card[card])) { + channel = info->line - card->first_line; + if (!IS_CYC_Z(*card)) { chip = channel >> 2; channel &= 0x03; - index = cy_card[card].bus_index; - base_addr = cy_card[card].base_addr + + index = card->bus_index; + base_addr = card->base_addr + (cy_chip_offset[chip] << index); CY_LOCK(info, flags); @@ -4373,13 +4375,13 @@ static void cy_stop(struct tty_struct *tty) if (serial_paranoia_check(info, tty->name, "cy_stop")) return; - cinfo = &cy_card[info->card]; + cinfo = 
info->card; channel = info->line - cinfo->first_line; if (!IS_CYC_Z(*cinfo)) { index = cinfo->bus_index; chip = channel >> 2; channel &= 0x03; - base_addr = cy_card[info->card].base_addr + + base_addr = info->card->base_addr + (cy_chip_offset[chip] << index); CY_LOCK(info, flags); @@ -4408,13 +4410,13 @@ static void cy_start(struct tty_struct *tty) if (serial_paranoia_check(info, tty->name, "cy_start")) return; - cinfo = &cy_card[info->card]; + cinfo = info->card; channel = info->line - cinfo->first_line; index = cinfo->bus_index; if (!IS_CYC_Z(*cinfo)) { chip = channel >> 2; channel &= 0x03; - base_addr = cy_card[info->card].base_addr + + base_addr = info->card->base_addr + (cy_chip_offset[chip] << index); CY_LOCK(info, flags); @@ -4430,7 +4432,8 @@ static void cy_start(struct tty_struct *tty) static void cy_flush_buffer(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; - int card, channel, retval; + struct cyclades_card *card; + int channel, retval; unsigned long flags; #ifdef CY_DEBUG_IO @@ -4441,17 +4444,16 @@ static void cy_flush_buffer(struct tty_struct *tty) return; card = info->card; - channel = (info->line) - (cy_card[card].first_line); + channel = info->line - card->first_line; CY_LOCK(info, flags); info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; CY_UNLOCK(info, flags); - if (IS_CYC_Z(cy_card[card])) { /* If it is a Z card, flush the on-board + if (IS_CYC_Z(*card)) { /* If it is a Z card, flush the on-board buffers as well */ CY_LOCK(info, flags); - retval = - cyz_issue_cmd(&cy_card[card], channel, C_CM_FLUSH_TX, 0L); + retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L); if (retval != 0) { printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d " "was %x\n", info->line, retval); @@ -4496,8 +4498,7 @@ static void cy_hangup(struct tty_struct *tty) * --------------------------------------------------------------------- */ -static void __devinit cy_init_card(struct cyclades_card *cinfo, - const unsigned int board) +static void __devinit cy_init_card(struct cyclades_card *cinfo) { struct cyclades_port *info; u32 mailbox; @@ -4524,7 +4525,7 @@ static void __devinit cy_init_card(struct cyclades_card *cinfo, info = &cy_port[port]; memset(info, 0, sizeof(*info)); info->magic = CYCLADES_MAGIC; - info->card = board; + info->card = cinfo; info->line = port; info->flags = STD_COM_FLAGS; info->closing_wait = CLOSING_WAIT_DELAY; @@ -4763,7 +4764,7 @@ static int __init cy_detect_isa(void) cy_card[j].bus_index = 0; cy_card[j].first_line = cy_next_channel; cy_card[j].num_chips = cy_isa_nchan / 4; - cy_init_card(&cy_card[j], j); + cy_init_card(&cy_card[j]); nboard++; printk(KERN_INFO "Cyclom-Y/ISA #%d: 0x%lx-0x%lx, IRQ%d found: " @@ -4849,7 +4850,7 @@ static int __devinit cy_init_Ze(struct RUNTIME_9060 __iomem *cy_pci_addr0, cy_card[j].bus_index = 1; cy_card[j].first_line = cy_next_channel; cy_card[j].num_chips = -1; - cy_init_card(&cy_card[j], j); + cy_init_card(&cy_card[j]); pci_set_drvdata(pdev, &cy_card[j]); dev_info(&pdev->dev, "Cyclades-Ze/PCI #%d found: %d channels starting " @@ -4956,7 +4957,7 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev, cy_card[j].bus_index = 1; cy_card[j].first_line = cy_next_channel; cy_card[j].num_chips = cy_pci_nchan / 4; - cy_init_card(&cy_card[j], j); + cy_init_card(&cy_card[j]); pci_set_drvdata(pdev, &cy_card[j]); /* enable interrupts in the PCI interface */ @@ -5110,7 +5111,7 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev, cy_card[j].bus_index = 1; cy_card[j].first_line = cy_next_channel; 
cy_card[j].num_chips = -1; - cy_init_card(&cy_card[j], j); + cy_init_card(&cy_card[j]); pci_set_drvdata(pdev, &cy_card[j]); dev_info(&pdev->dev, "Cyclades-8Zo/PCI #%d found: %d channels " diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index e06cd470ddd2..121d64c3247b 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -549,7 +549,7 @@ struct cyclades_icount { struct cyclades_port { int magic; - int card; + struct cyclades_card *card; int line; int flags; /* defined in tty.h */ int type; /* UART type */ -- cgit v1.2.3 From 2c7fea992104b5ca2b510d585a27b3ba018b795f Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 8 May 2007 00:36:51 -0700 Subject: Char: cyclades, remove sleep_on convert to wait_* and completion Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/cyclades.c | 45 ++++++++++++++++----------------------------- include/linux/cyclades.h | 2 +- 2 files changed, 17 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 9bec4ef876f2..c236e9f7d8e7 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -947,7 +947,7 @@ do_softint(struct work_struct *work) tty_wakeup(tty); #ifdef Z_WAKE if (test_and_clear_bit(Cy_EVENT_SHUTDOWN_WAKEUP, &info->event)) - wake_up_interruptible(&info->shutdown_wait); + complete(&info->shutdown_wait); #endif } /* do_softint */ @@ -2324,9 +2324,8 @@ block_til_ready(struct tty_struct *tty, struct file *filp, * until it's done, and then try again. */ if (tty_hung_up_p(filp) || (info->flags & ASYNC_CLOSING)) { - if (info->flags & ASYNC_CLOSING) { - interruptible_sleep_on(&info->close_wait); - } + wait_event_interruptible(info->close_wait, + !(info->flags & ASYNC_CLOSING)); return (info->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS; } @@ -2597,8 +2596,8 @@ static int cy_open(struct tty_struct *tty, struct file *filp) * If the port is the middle of closing, bail out now */ if (tty_hung_up_p(filp) || (info->flags & ASYNC_CLOSING)) { - if (info->flags & ASYNC_CLOSING) - interruptible_sleep_on(&info->close_wait); + wait_event_interruptible(info->close_wait, + !(info->flags & ASYNC_CLOSING)); return (info->flags & ASYNC_HUP_NOTIFY) ? 
-EAGAIN: -ERESTARTSYS; } @@ -2805,7 +2804,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp) "ttyC%d was %x\n", info->line, retval); } CY_UNLOCK(info, flags); - interruptible_sleep_on(&info->shutdown_wait); + wait_for_completion_interruptible(&info->shutdown_wait); CY_LOCK(info, flags); } #endif @@ -4091,32 +4090,20 @@ cy_ioctl(struct tty_struct *tty, struct file *file, case TIOCMIWAIT: CY_LOCK(info, flags); /* note the counters on entry */ - cprev = info->icount; + cnow = info->icount; CY_UNLOCK(info, flags); - while (1) { - interruptible_sleep_on(&info->delta_msr_wait); - /* see if a signal did it */ - if (signal_pending(current)) { - return -ERESTARTSYS; - } - + ret_val = wait_event_interruptible(info->delta_msr_wait, ({ + cprev = cnow; CY_LOCK(info, flags); cnow = info->icount; /* atomic copy */ CY_UNLOCK(info, flags); - if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && - cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { - return -EIO; /* no change => error */ - } - if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || - ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || - ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || - ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { - return 0; - } - cprev = cnow; - } - /* NOTREACHED */ + ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || + ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || + ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || + ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)); + })); + break; /* * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) @@ -4534,7 +4521,7 @@ static void __devinit cy_init_card(struct cyclades_card *cinfo) INIT_WORK(&info->tqueue, do_softint); init_waitqueue_head(&info->open_wait); init_waitqueue_head(&info->close_wait); - init_waitqueue_head(&info->shutdown_wait); + init_completion(&info->shutdown_wait); init_waitqueue_head(&info->delta_msr_wait); if (IS_CYC_Z(*cinfo)) { diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index 121d64c3247b..cdd77398030c 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -588,7 +588,7 @@ struct cyclades_port { struct work_struct tqueue; wait_queue_head_t open_wait; wait_queue_head_t close_wait; - wait_queue_head_t shutdown_wait; + struct completion shutdown_wait; wait_queue_head_t delta_msr_wait; int throttle; }; -- cgit v1.2.3 From 6a0aa67b177a3d84f789f7f19c523d8c5e8bb5f8 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 8 May 2007 00:36:55 -0700 Subject: Char: cyclades, remove unused timestamps Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/cyclades.c | 12 ------------ include/linux/cyclades.h | 2 -- 2 files changed, 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 14652c1ed024..ab9ad66ceefa 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -1049,7 +1049,6 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, channel = (u_short) (save_xir & CyIRChannel); i = channel + chip * 4 + cinfo->first_line; info = &cy_port[i]; - info->last_active = jiffies; save_car = readb(base_addr + (CyCAR << index)); cy_writeb(base_addr + (CyCAR << index), save_xir); @@ -1225,7 +1224,6 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, goto txend; } info = &cy_port[i]; - info->last_active = jiffies; if (info->tty == 0) { cy_writeb(base_addr + (CySRER << index), readb(base_addr + (CySRER << index)) & @@ -1339,7 +1337,6 @@ txend: save_xir = (u_char) 
readb(base_addr + (CyMIR << index)); channel = (u_short) (save_xir & CyIRChannel); info = &cy_port[channel + chip * 4 + cinfo->first_line]; - info->last_active = jiffies; save_car = readb(base_addr + (CyCAR << index)); cy_writeb(base_addr + (CyCAR << index), save_xir); @@ -1570,9 +1567,6 @@ cyz_handle_rx(struct cyclades_port *info, struct CH_CTRL __iomem *ch_ctrl, char_count = rx_put - rx_get + rx_bufsize; if (char_count) { - info->last_active = jiffies; - info->jiffies[1] = jiffies; - #ifdef CY_ENABLE_MONITORING info->mon.int_count++; info->mon.char_count += char_count; @@ -1678,8 +1672,6 @@ cyz_handle_tx(struct cyclades_port *info, struct CH_CTRL __iomem *ch_ctrl, info->x_char = 0; char_count--; info->icount.tx++; - info->last_active = jiffies; - info->jiffies[2] = jiffies; } #ifdef BLOCKMOVE while (0 < (small_count = min_t(unsigned int, @@ -1699,8 +1691,6 @@ cyz_handle_tx(struct cyclades_port *info, struct CH_CTRL __iomem *ch_ctrl, info->xmit_cnt -= small_count; info->xmit_tail = (info->xmit_tail + small_count) & (SERIAL_XMIT_SIZE - 1); - info->last_active = jiffies; - info->jiffies[2] = jiffies; } #else while (info->xmit_cnt && char_count) { @@ -1713,8 +1703,6 @@ cyz_handle_tx(struct cyclades_port *info, struct CH_CTRL __iomem *ch_ctrl, tx_put = (tx_put + 1) & (tx_bufsize - 1); char_count--; info->icount.tx++; - info->last_active = jiffies; - info->jiffies[2] = jiffies; } #endif ztxdone: diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index cdd77398030c..07791f026766 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -569,7 +569,6 @@ struct cyclades_port { int close_delay; unsigned short closing_wait; unsigned long event; - unsigned long last_active; int count; /* # of fd on device */ int breakon; int breakoff; @@ -580,7 +579,6 @@ struct cyclades_port { int xmit_cnt; int default_threshold; int default_timeout; - unsigned long jiffies[3]; unsigned long rflush_count; struct cyclades_monitor mon; struct cyclades_idle_stats idle_stats; -- cgit v1.2.3 From dd025c0c7a047b1be7dfaa4061368b4783d89ebb Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 8 May 2007 00:37:02 -0700 Subject: Char: cyclades, dynamic ports and save thus approx. 160k of .bss Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/cyclades.c | 142 +++++++++++++++++++++++------------------------ include/linux/cyclades.h | 1 + 2 files changed, 70 insertions(+), 73 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index c6a12bad70f5..7fe4bb60f690 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -722,11 +722,6 @@ module_param_array(irq, int, NULL, 0); */ static struct cyclades_card cy_card[NR_CARDS]; -/* This is the per-channel data structure containing pointers, flags - and variables for the port. This driver supports a maximum of NR_PORTS. 
-*/ -static struct cyclades_port cy_port[NR_PORTS]; - static int cy_next_channel; /* next minor available */ /* @@ -855,13 +850,6 @@ static inline int serial_paranoia_check(struct cyclades_port *info, return 1; } - if ((long)info < (long)(&cy_port[0]) || - (long)(&cy_port[NR_PORTS]) < (long)info) { - printk(KERN_WARNING "cyc Warning: cyclades_port out of range " - "for (%s) in %s\n", name, routine); - return 1; - } - if (info->magic != CYCLADES_MAGIC) { printk(KERN_WARNING "cyc Warning: bad magic number for serial " "struct (%s) in %s\n", name, routine); @@ -1025,7 +1013,7 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, struct cyclades_port *info; struct tty_struct *tty; int char_count; - int i, j, len, mdm_change, mdm_status, outch; + int j, len, mdm_change, mdm_status, outch; int save_xir, channel, save_car; char data; @@ -1037,8 +1025,7 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, spin_lock(&cinfo->card_lock); save_xir = (u_char) readb(base_addr + (CyRIR << index)); channel = (u_short) (save_xir & CyIRChannel); - i = channel + chip * 4 + cinfo->first_line; - info = &cy_port[i]; + info = &cinfo->ports[channel + chip * 4]; save_car = readb(base_addr + (CyCAR << index)); cy_writeb(base_addr + (CyCAR << index), save_xir); @@ -1202,18 +1189,17 @@ static void cyy_intr_chip(struct cyclades_card *cinfo, int chip, spin_lock(&cinfo->card_lock); save_xir = (u_char) readb(base_addr + (CyTIR << index)); channel = (u_short) (save_xir & CyIRChannel); - i = channel + chip * 4 + cinfo->first_line; save_car = readb(base_addr + (CyCAR << index)); cy_writeb(base_addr + (CyCAR << index), save_xir); /* validate the port# (as configured and open) */ - if ((i < 0) || (NR_PORTS <= i)) { + if (channel + chip * 4 >= cinfo->nports) { cy_writeb(base_addr + (CySRER << index), readb(base_addr + (CySRER << index)) & ~CyTxRdy); goto txend; } - info = &cy_port[i]; + info = &cinfo->ports[channel + chip * 4]; if (info->tty == NULL) { cy_writeb(base_addr + (CySRER << index), readb(base_addr + (CySRER << index)) & @@ -1325,7 +1311,7 @@ txend: spin_lock(&cinfo->card_lock); save_xir = (u_char) readb(base_addr + (CyMIR << index)); channel = (u_short) (save_xir & CyIRChannel); - info = &cy_port[channel + chip * 4 + cinfo->first_line]; + info = &cinfo->ports[channel + chip * 4]; save_car = readb(base_addr + (CyCAR << index)); cy_writeb(base_addr + (CyCAR << index), save_xir); @@ -1726,7 +1712,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) while (cyz_fetch_msg(cinfo, &channel, &cmd, ¶m) == 1) { special_count = 0; delta_count = 0; - info = &cy_port[channel + cinfo->first_line]; + info = &cinfo->ports[channel]; if ((tty = info->tty) == NULL) continue; @@ -1896,7 +1882,7 @@ static void cyz_poll(unsigned long arg) cyz_handle_cmd(cinfo); for (port = 0; port < cinfo->nports; port++) { - info = &cy_port[port + cinfo->first_line]; + info = &cinfo->ports[port]; tty = info->tty; ch_ctrl = &(zfw_ctrl->ch_ctrl[port]); buf_ctrl = &(zfw_ctrl->buf_ctrl[port]); @@ -2468,13 +2454,20 @@ block_til_ready(struct tty_struct *tty, struct file *filp, static int cy_open(struct tty_struct *tty, struct file *filp) { struct cyclades_port *info; + unsigned int i; int retval, line; line = tty->index; if ((line < 0) || (NR_PORTS <= line)) { return -ENODEV; } - info = &cy_port[line]; + for (i = 0; i < NR_CARDS; i++) + if (line < cy_card[i].first_line + cy_card[i].nports && + line >= cy_card[i].first_line) + break; + if (i >= NR_CARDS) + return -ENODEV; + info = &cy_card[i].ports[line - 
cy_card[i].first_line]; if (info->line < 0) { return -ENODEV; } @@ -4437,7 +4430,7 @@ static void cy_hangup(struct tty_struct *tty) * --------------------------------------------------------------------- */ -static void __devinit cy_init_card(struct cyclades_card *cinfo) +static int __devinit cy_init_card(struct cyclades_card *cinfo) { struct cyclades_port *info; u32 mailbox; @@ -4459,10 +4452,15 @@ static void __devinit cy_init_card(struct cyclades_card *cinfo) nports = cinfo->nports = CyPORTS_PER_CHIP * cinfo->num_chips; } + cinfo->ports = kzalloc(sizeof(*cinfo->ports) * nports, GFP_KERNEL); + if (cinfo->ports == NULL) { + printk(KERN_ERR "Cyclades: cannot allocate ports\n"); + return -ENOMEM; + } + for (port = cinfo->first_line; port < cinfo->first_line + nports; port++) { - info = &cy_port[port]; - memset(info, 0, sizeof(*info)); + info = &cinfo->ports[port - cinfo->first_line]; info->magic = CYCLADES_MAGIC; info->card = cinfo; info->line = port; @@ -4525,6 +4523,7 @@ static void __devinit cy_init_card(struct cyclades_card *cinfo) #endif } #endif + return 0; } /* initialize chips on Cyclom-Y card -- return number of valid @@ -5094,13 +5093,11 @@ static void __devexit cy_pci_remove(struct pci_dev *pdev) pci_release_regions(pdev); cinfo->base_addr = NULL; - for (i = cinfo->first_line; i < cinfo->first_line + cinfo->nports; i++){ - cy_port[i].line = -1; - cy_port[i].magic = -1; - } for (i = cinfo->first_line; i < cinfo->first_line + cinfo->nports; i++) tty_unregister_device(cy_serial_driver, i); + cinfo->nports = 0; + kfree(cinfo->ports); } static struct pci_driver cy_pci_driver = { @@ -5116,7 +5113,7 @@ cyclades_get_proc_info(char *buf, char **start, off_t offset, int length, int *eof, void *data) { struct cyclades_port *info; - int i; + unsigned int i, j; int len = 0; off_t begin = 0; off_t pos = 0; @@ -5130,33 +5127,34 @@ cyclades_get_proc_info(char *buf, char **start, off_t offset, int length, len += size; /* Output one line for each known port */ - for (i = 0; i < NR_PORTS && cy_port[i].line >= 0; i++) { - info = &cy_port[i]; - - if (info->count) - size = sprintf(buf + len, "%3d %8lu %10lu %8lu %10lu " - "%8lu %9lu %6ld\n", info->line, - (cur_jifs - info->idle_stats.in_use) / HZ, - info->idle_stats.xmit_bytes, - (cur_jifs - info->idle_stats.xmit_idle) / HZ, - info->idle_stats.recv_bytes, - (cur_jifs - info->idle_stats.recv_idle) / HZ, - info->idle_stats.overruns, - (long)info->tty->ldisc.num); - else - size = sprintf(buf + len, "%3d %8lu %10lu %8lu %10lu " - "%8lu %9lu %6ld\n", - info->line, 0L, 0L, 0L, 0L, 0L, 0L, 0L); - len += size; - pos = begin + len; - - if (pos < offset) { - len = 0; - begin = pos; + for (i = 0; i < NR_CARDS; i++) + for (j = 0; j < cy_card[i].nports; j++) { + info = &cy_card[i].ports[j]; + + if (info->count) + size = sprintf(buf + len, "%3d %8lu %10lu %8lu " + "%10lu %8lu %9lu %6ld\n", info->line, + (cur_jifs - info->idle_stats.in_use) / + HZ, info->idle_stats.xmit_bytes, + (cur_jifs - info->idle_stats.xmit_idle)/ + HZ, info->idle_stats.recv_bytes, + (cur_jifs - info->idle_stats.recv_idle)/ + HZ, info->idle_stats.overruns, + (long)info->tty->ldisc.num); + else + size = sprintf(buf + len, "%3d %8lu %10lu %8lu " + "%10lu %8lu %9lu %6ld\n", + info->line, 0L, 0L, 0L, 0L, 0L, 0L, 0L); + len += size; + pos = begin + len; + + if (pos < offset) { + len = 0; + begin = pos; + } + if (pos > offset + length) + goto done; } - if (pos > offset + length) - goto done; - } *eof = 1; done: *start = buf + (offset - begin); /* Start of wanted data */ @@ -5211,7 +5209,7 @@ static 
const struct tty_operations cy_ops = { static int __init cy_init(void) { - unsigned int i, nboards; + unsigned int nboards; int retval = -ENOMEM; cy_serial_driver = alloc_tty_driver(NR_PORTS); @@ -5242,11 +5240,6 @@ static int __init cy_init(void) goto err_frtty; } - for (i = 0; i < NR_PORTS; i++) { - cy_port[i].line = -1; - cy_port[i].magic = -1; - } - /* the code below is responsible to find the boards. Each different type of board has its own detection routine. If a board is found, the next cy_card structure available is set by the detection @@ -5275,6 +5268,7 @@ err: static void __exit cy_cleanup_module(void) { + struct cyclades_card *card; int i, e1; #ifndef CONFIG_CYZ_INTR @@ -5290,22 +5284,24 @@ static void __exit cy_cleanup_module(void) #endif for (i = 0; i < NR_CARDS; i++) { - if (cy_card[i].base_addr) { + card = &cy_card[i]; + if (card->base_addr) { /* clear interrupt */ - cy_writeb(cy_card[i].base_addr + Cy_ClrIntr, 0); - iounmap(cy_card[i].base_addr); - if (cy_card[i].ctl_addr) - iounmap(cy_card[i].ctl_addr); - if (cy_card[i].irq + cy_writeb(card->base_addr + Cy_ClrIntr, 0); + iounmap(card->base_addr); + if (card->ctl_addr) + iounmap(card->ctl_addr); + if (card->irq #ifndef CONFIG_CYZ_INTR - && !IS_CYC_Z(cy_card[i]) + && !IS_CYC_Z(*card) #endif /* CONFIG_CYZ_INTR */ ) - free_irq(cy_card[i].irq, &cy_card[i]); - for (e1 = cy_card[i].first_line; - e1 < cy_card[i].first_line + - cy_card[i].nports; e1++) + free_irq(card->irq, card); + for (e1 = card->first_line; + e1 < card->first_line + + card->nports; e1++) tty_unregister_device(cy_serial_driver, e1); + kfree(card->ports); } } diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index 07791f026766..72aa00cc4b2d 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -518,6 +518,7 @@ struct cyclades_card { int bus_index; /* address shift - 0 for ISA, 1 for PCI */ int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ spinlock_t card_lock; + struct cyclades_port *ports; }; /*************************************** -- cgit v1.2.3 From 2ee121631b9dd0291502ccac6f897907505faf8c Mon Sep 17 00:00:00 2001 From: James Simmons Date: Tue, 8 May 2007 00:37:15 -0700 Subject: fbdev: display class Add the new display class. This is meant to unite the various solutions to display units ie acpi output device, auxdisplay and the defunct lcd class in the backlight directory. Signed-off-by: James Simmons Cc: "Antonino A. 
Daplas" Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/Kconfig | 1 + drivers/video/Makefile | 2 +- drivers/video/display/Kconfig | 24 ++++ drivers/video/display/Makefile | 6 + drivers/video/display/display-sysfs.c | 217 ++++++++++++++++++++++++++++++++++ include/linux/display.h | 61 ++++++++++ 6 files changed, 310 insertions(+), 1 deletion(-) create mode 100644 drivers/video/display/Kconfig create mode 100644 drivers/video/display/Makefile create mode 100644 drivers/video/display/display-sysfs.c create mode 100644 include/linux/display.h (limited to 'include/linux') diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 344c37595305..8dfa88a68ae9 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -5,6 +5,7 @@ menu "Graphics support" source "drivers/video/backlight/Kconfig" +source "drivers/video/display/Kconfig" config FB tristate "Support for frame buffer devices" diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 558473d040d6..281462414930 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -12,7 +12,7 @@ fb-objs := $(fb-y) obj-$(CONFIG_VT) += console/ obj-$(CONFIG_LOGO) += logo/ -obj-y += backlight/ +obj-y += backlight/ display/ obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o diff --git a/drivers/video/display/Kconfig b/drivers/video/display/Kconfig new file mode 100644 index 000000000000..f99af931d4f8 --- /dev/null +++ b/drivers/video/display/Kconfig @@ -0,0 +1,24 @@ +# +# Display drivers configuration +# + +menu "Display device support" + +config DISPLAY_SUPPORT + tristate "Display panel/monitor support" + ---help--- + This framework adds support for low-level control of a display. + This includes support for power. + + Enable this to be able to choose the drivers for controlling the + physical display panel/monitor on some platforms. This not only + covers LCD displays for PDAs but also other types of displays + such as CRT, TVout etc. + + To have support for your specific display panel you will have to + select the proper drivers which depend on this option. + +comment "Display hardware drivers" + depends on DISPLAY_SUPPORT + +endmenu diff --git a/drivers/video/display/Makefile b/drivers/video/display/Makefile new file mode 100644 index 000000000000..c0ea832bf171 --- /dev/null +++ b/drivers/video/display/Makefile @@ -0,0 +1,6 @@ +# Display drivers + +display-objs := display-sysfs.o + +obj-$(CONFIG_DISPLAY_SUPPORT) += display.o + diff --git a/drivers/video/display/display-sysfs.c b/drivers/video/display/display-sysfs.c new file mode 100644 index 000000000000..39c27b604cc3 --- /dev/null +++ b/drivers/video/display/display-sysfs.c @@ -0,0 +1,217 @@ +/* + * display-sysfs.c - Display output driver sysfs interface + * + * Copyright (C) 2007 James Simmons + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include +#include +#include +#include +#include + +static ssize_t display_show_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct display_device *dsp = dev_get_drvdata(dev); + return snprintf(buf, PAGE_SIZE, "%s\n", dsp->name); +} + +static ssize_t display_show_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct display_device *dsp = dev_get_drvdata(dev); + return snprintf(buf, PAGE_SIZE, "%s\n", dsp->type); +} + +static ssize_t display_show_contrast(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct display_device *dsp = dev_get_drvdata(dev); + ssize_t rc = -ENXIO; + + mutex_lock(&dsp->lock); + if (likely(dsp->driver) && dsp->driver->get_contrast) + rc = sprintf(buf, "%d\n", dsp->driver->get_contrast(dsp)); + mutex_unlock(&dsp->lock); + return rc; +} + +static ssize_t display_store_contrast(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct display_device *dsp = dev_get_drvdata(dev); + ssize_t ret = -EINVAL, size; + int contrast; + char *endp; + + contrast = simple_strtoul(buf, &endp, 0); + size = endp - buf; + + if (*endp && isspace(*endp)) + size++; + + if (size != count) + return ret; + + mutex_lock(&dsp->lock); + if (likely(dsp->driver && dsp->driver->set_contrast)) { + pr_debug("display: set contrast to %d\n", contrast); + dsp->driver->set_contrast(dsp, contrast); + ret = count; + } + mutex_unlock(&dsp->lock); + return ret; +} + +static ssize_t display_show_max_contrast(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct display_device *dsp = dev_get_drvdata(dev); + ssize_t rc = -ENXIO; + + mutex_lock(&dsp->lock); + if (likely(dsp->driver)) + rc = sprintf(buf, "%d\n", dsp->driver->max_contrast); + mutex_unlock(&dsp->lock); + return rc; +} + +static struct device_attribute display_attrs[] = { + __ATTR(name, S_IRUGO, display_show_name, NULL), + __ATTR(type, S_IRUGO, display_show_type, NULL), + __ATTR(contrast, S_IRUGO | S_IWUSR, display_show_contrast, display_store_contrast), + __ATTR(max_contrast, S_IRUGO, display_show_max_contrast, NULL), +}; + +static int display_suspend(struct device *dev, pm_message_t state) +{ + struct display_device *dsp = dev_get_drvdata(dev); + + mutex_lock(&dsp->lock); + if (likely(dsp->driver->suspend)) + dsp->driver->suspend(dsp, state); + mutex_unlock(&dsp->lock); + return 0; +}; + +static int display_resume(struct device *dev) +{ + struct display_device *dsp = dev_get_drvdata(dev); + + mutex_lock(&dsp->lock); + if (likely(dsp->driver->resume)) + dsp->driver->resume(dsp); + mutex_unlock(&dsp->lock); + return 0; +}; + +static struct mutex allocated_dsp_lock; +static DEFINE_IDR(allocated_dsp); +struct class *display_class; + +struct display_device *display_device_register(struct display_driver *driver, + struct device *parent, void *devdata) +{ + struct display_device *new_dev = NULL; + int ret = -EINVAL; + + if (unlikely(!driver)) + return ERR_PTR(ret); + + mutex_lock(&allocated_dsp_lock); + ret = idr_pre_get(&allocated_dsp, GFP_KERNEL); + mutex_unlock(&allocated_dsp_lock); + if (!ret) + return ERR_PTR(ret); + + new_dev = kzalloc(sizeof(struct display_device), GFP_KERNEL); + if (likely(new_dev) && 
unlikely(driver->probe(new_dev, devdata))) { + // Reserve the index for this display + mutex_lock(&allocated_dsp_lock); + ret = idr_get_new(&allocated_dsp, new_dev, &new_dev->idx); + mutex_unlock(&allocated_dsp_lock); + + if (!ret) { + new_dev->dev = device_create(display_class, parent, 0, + "display%d", new_dev->idx); + if (!IS_ERR(new_dev->dev)) { + dev_set_drvdata(new_dev->dev, new_dev); + new_dev->parent = parent; + new_dev->driver = driver; + mutex_init(&new_dev->lock); + return new_dev; + } + mutex_lock(&allocated_dsp_lock); + idr_remove(&allocated_dsp, new_dev->idx); + mutex_unlock(&allocated_dsp_lock); + ret = -EINVAL; + } + } + kfree(new_dev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(display_device_register); + +void display_device_unregister(struct display_device *ddev) +{ + if (!ddev) + return; + // Free device + mutex_lock(&ddev->lock); + device_unregister(ddev->dev); + mutex_unlock(&ddev->lock); + // Mark device index as avaliable + mutex_lock(&allocated_dsp_lock); + idr_remove(&allocated_dsp, ddev->idx); + mutex_unlock(&allocated_dsp_lock); + kfree(ddev); +} +EXPORT_SYMBOL(display_device_unregister); + +static int __init display_class_init(void) +{ + display_class = class_create(THIS_MODULE, "display"); + if (IS_ERR(display_class)) { + printk(KERN_ERR "Failed to create display class\n"); + display_class = NULL; + return -EINVAL; + } + display_class->dev_attrs = display_attrs; + display_class->suspend = display_suspend; + display_class->resume = display_resume; + mutex_init(&allocated_dsp_lock); + return 0; +} + +static void __exit display_class_exit(void) +{ + class_destroy(display_class); +} + +module_init(display_class_init); +module_exit(display_class_exit); + +MODULE_DESCRIPTION("Display Hardware handling"); +MODULE_AUTHOR("James Simmons "); +MODULE_LICENSE("GPL"); + diff --git a/include/linux/display.h b/include/linux/display.h new file mode 100644 index 000000000000..3bf70d639728 --- /dev/null +++ b/include/linux/display.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2006 James Simmons + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef _LINUX_DISPLAY_H +#define _LINUX_DISPLAY_H + +#include + +struct display_device; + +/* This structure defines all the properties of a Display. 
*/ +struct display_driver { + int (*set_contrast)(struct display_device *, unsigned int); + int (*get_contrast)(struct display_device *); + void (*suspend)(struct display_device *, pm_message_t state); + void (*resume)(struct display_device *); + int (*probe)(struct display_device *, void *); + int (*remove)(struct display_device *); + int max_contrast; +}; + +struct display_device { + struct module *owner; /* Owner module */ + struct display_driver *driver; + struct device *parent; /* This is the parent */ + struct device *dev; /* This is this display device */ + struct mutex lock; + void *priv_data; + char type[16]; + char *name; + int idx; +}; + +extern struct display_device *display_device_register(struct display_driver *driver, + struct device *dev, void *devdata); +extern void display_device_unregister(struct display_device *dev); + +extern int probe_edid(struct display_device *dev, void *devdata); + +#define to_display_device(obj) container_of(obj, struct display_device, class_dev) + +#endif -- cgit v1.2.3 From 60b59beafba875aef6d378078bce0baf2287ae14 Mon Sep 17 00:00:00 2001 From: Jaya Kumar Date: Tue, 8 May 2007 00:37:37 -0700 Subject: fbdev: mm: Deferred IO support This implements deferred IO support in fbdev. Deferred IO is a way to delay and repurpose IO. This implementation is done using mm's page_mkwrite and page_mkclean hooks in order to detect, delay and then rewrite IO. This functionality is used by hecubafb. [adaplas] This is useful for graphics hardware with no directly addressable/mappable framebuffer. Implementing this will allow the "framebuffer" to be accessible from user space via mmap(). Signed-off-by: Jaya Kumar Signed-off-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/fb/deferred_io.txt | 75 +++++++++++++++++++++ drivers/video/Kconfig | 5 ++ drivers/video/Makefile | 1 + drivers/video/fb_defio.c | 137 +++++++++++++++++++++++++++++++++++++++ include/linux/fb.h | 20 ++++++ mm/rmap.c | 1 + 6 files changed, 239 insertions(+) create mode 100644 Documentation/fb/deferred_io.txt create mode 100644 drivers/video/fb_defio.c (limited to 'include/linux') diff --git a/Documentation/fb/deferred_io.txt b/Documentation/fb/deferred_io.txt new file mode 100644 index 000000000000..73cf9fb7cf60 --- /dev/null +++ b/Documentation/fb/deferred_io.txt @@ -0,0 +1,75 @@ +Deferred IO +----------- + +Deferred IO is a way to delay and repurpose IO. It uses host memory as a +buffer and the MMU pagefault as a pretrigger for when to perform the device +IO. The following example may be a useful explanation of how one such setup +works: + +- userspace app like Xfbdev mmaps framebuffer +- deferred IO and driver sets up nopage and page_mkwrite handlers +- userspace app tries to write to mmaped vaddress +- we get pagefault and reach nopage handler +- nopage handler finds and returns physical page +- we get page_mkwrite where we add this page to a list +- schedule a workqueue task to be run after a delay +- app continues writing to that page with no additional cost. this is + the key benefit. +- the workqueue task comes in and mkcleans the pages on the list, then + completes the work associated with updating the framebuffer. this is + the real work talking to the device. +- app tries to write to the address (that has now been mkcleaned) +- get pagefault and the above sequence occurs again + +As can be seen from above, one benefit is roughly to allow bursty framebuffer +writes to occur at minimum cost. 
Then after some time when hopefully things +have gone quiet, we go and really update the framebuffer which would be +a relatively more expensive operation. + +For some types of nonvolatile high latency displays, the desired image is +the final image rather than the intermediate stages which is why it's okay +to not update for each write that is occurring. + +It may be the case that this is useful in other scenarios as well. Paul Mundt +has mentioned a case where it is beneficial to use the page count to decide +whether to coalesce and issue SG DMA or to do memory bursts. + +Another one may be if one has a device framebuffer that is in an unusual format, +say diagonally shifting RGB, this may then be a mechanism for you to allow +apps to pretend to have a normal framebuffer but reswizzle for the device +framebuffer at vsync time based on the touched pagelist. + +How to use it: (for applications) +--------------------------------- +No changes needed. mmap the framebuffer like normal and just use it. + +How to use it: (for fbdev drivers) +---------------------------------- +The following example may be helpful. + +1. Setup your structure. Eg: + +static struct fb_deferred_io hecubafb_defio = { + .delay = HZ, + .deferred_io = hecubafb_dpy_deferred_io, +}; + +The delay is the minimum delay between when the page_mkwrite trigger occurs +and when the deferred_io callback is called. The deferred_io callback is +explained below. + +2. Setup your deferred IO callback. Eg: +static void hecubafb_dpy_deferred_io(struct fb_info *info, + struct list_head *pagelist) + +The deferred_io callback is where you would perform all your IO to the display +device. You receive the pagelist which is the list of pages that were written +to during the delay. You must not modify this list. This callback is called +from a workqueue. + +3. Call init + info->fbdefio = &hecubafb_defio; + fb_deferred_io_init(info); + +4. Call cleanup + fb_deferred_io_cleanup(info); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 8dfa88a68ae9..61c0a03d33f2 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -91,6 +91,11 @@ config FB_CFB_IMAGEBLIT blitting. This is used by drivers that don't provide their own (accelerated) version. +config FB_DEFERRED_IO + bool + depends on FB + default y + config FB_SVGALIB tristate depends on FB diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 281462414930..deb5fa55f783 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o obj-$(CONFIG_FB_SVGALIB) += svgalib.o obj-$(CONFIG_FB_MACMODES) += macmodes.o obj-$(CONFIG_FB_DDC) += fb_ddc.o +obj-$(CONFIG_FB_DEFERRED_IO) += fb_defio.o # Hardware specific drivers go first obj-$(CONFIG_FB_AMIGA) += amifb.o c2p.o diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c new file mode 100644 index 000000000000..cc780f4e183d --- /dev/null +++ b/drivers/video/fb_defio.c @@ -0,0 +1,137 @@ +/* + * linux/drivers/video/fb_defio.c + * + * Copyright (C) 2006 Jaya Kumar + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* to support deferred IO */ +#include +#include + +/* this is to find and return the vmalloc-ed fb pages */ +static struct page* fb_deferred_io_nopage(struct vm_area_struct *vma, + unsigned long vaddr, int *type) +{ + unsigned long offset; + struct page *page; + struct fb_info *info = vma->vm_private_data; + + offset = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT); + if (offset >= info->fix.smem_len) + return NOPAGE_SIGBUS; + + page = vmalloc_to_page(info->screen_base + offset); + if (!page) + return NOPAGE_OOM; + + get_page(page); + if (type) + *type = VM_FAULT_MINOR; + return page; +} + +/* vm_ops->page_mkwrite handler */ +int fb_deferred_io_mkwrite(struct vm_area_struct *vma, + struct page *page) +{ + struct fb_info *info = vma->vm_private_data; + struct fb_deferred_io *fbdefio = info->fbdefio; + + /* this is a callback we get when userspace first tries to + write to the page. we schedule a workqueue. that workqueue + will eventually mkclean the touched pages and execute the + deferred framebuffer IO. then if userspace touches a page + again, we repeat the same scheme */ + + /* protect against the workqueue changing the page list */ + mutex_lock(&fbdefio->lock); + list_add(&page->lru, &fbdefio->pagelist); + mutex_unlock(&fbdefio->lock); + + /* come back after delay to process the deferred IO */ + schedule_delayed_work(&info->deferred_work, fbdefio->delay); + return 0; +} + +static struct vm_operations_struct fb_deferred_io_vm_ops = { + .nopage = fb_deferred_io_nopage, + .page_mkwrite = fb_deferred_io_mkwrite, +}; + +static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + vma->vm_ops = &fb_deferred_io_vm_ops; + vma->vm_flags |= ( VM_IO | VM_RESERVED | VM_DONTEXPAND ); + vma->vm_private_data = info; + return 0; +} + +/* workqueue callback */ +static void fb_deferred_io_work(struct work_struct *work) +{ + struct fb_info *info = container_of(work, struct fb_info, + deferred_work.work); + struct list_head *node, *next; + struct page *cur; + struct fb_deferred_io *fbdefio = info->fbdefio; + + /* here we mkclean the pages, then do all deferred IO */ + mutex_lock(&fbdefio->lock); + list_for_each_entry(cur, &fbdefio->pagelist, lru) { + lock_page(cur); + page_mkclean(cur); + unlock_page(cur); + } + + /* driver's callback with pagelist */ + fbdefio->deferred_io(info, &fbdefio->pagelist); + + /* clear the list */ + list_for_each_safe(node, next, &fbdefio->pagelist) { + list_del(node); + } + mutex_unlock(&fbdefio->lock); +} + +void fb_deferred_io_init(struct fb_info *info) +{ + struct fb_deferred_io *fbdefio = info->fbdefio; + + BUG_ON(!fbdefio); + mutex_init(&fbdefio->lock); + info->fbops->fb_mmap = fb_deferred_io_mmap; + INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work); + INIT_LIST_HEAD(&fbdefio->pagelist); + if (fbdefio->delay == 0) /* set a default of 1 s */ + fbdefio->delay = HZ; +} +EXPORT_SYMBOL_GPL(fb_deferred_io_init); + +void fb_deferred_io_cleanup(struct fb_info *info) +{ + struct fb_deferred_io *fbdefio = info->fbdefio; + + BUG_ON(!fbdefio); + cancel_delayed_work(&info->deferred_work); + flush_scheduled_work(); +} +EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); + +MODULE_LICENSE("GPL"); diff --git a/include/linux/fb.h b/include/linux/fb.h index be913ec87169..8a8255b94b62 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -561,6 +561,16 @@ struct fb_pixmap { void (*readio) (struct fb_info *info, void 
*dst, void __iomem *src, unsigned int size); }; +#ifdef CONFIG_FB_DEFERRED_IO +struct fb_deferred_io { + /* delay between mkwrite and deferred handler */ + unsigned long delay; + struct mutex lock; /* mutex that protects the page list */ + struct list_head pagelist; /* list of touched pages */ + /* callback */ + void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); +}; +#endif /* * Frame buffer operations @@ -778,6 +788,10 @@ struct fb_info { struct mutex bl_curve_mutex; u8 bl_curve[FB_BACKLIGHT_LEVELS]; #endif +#ifdef CONFIG_FB_DEFERRED_IO + struct delayed_work deferred_work; + struct fb_deferred_io *fbdefio; +#endif struct fb_ops *fbops; struct device *device; /* This is the parent */ @@ -913,6 +927,12 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, } } +#ifdef CONFIG_FB_DEFERRED_IO +/* drivers/video/fb_defio.c */ +extern void fb_deferred_io_init(struct fb_info *info); +extern void fb_deferred_io_cleanup(struct fb_info *info); +#endif + /* drivers/video/fbsysfs.c */ extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); extern void framebuffer_release(struct fb_info *info); diff --git a/mm/rmap.c b/mm/rmap.c index 75a32be64a21..304f51985c78 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -505,6 +505,7 @@ int page_mkclean(struct page *page) return ret; } +EXPORT_SYMBOL_GPL(page_mkclean); /** * page_set_anon_rmap - setup new anonymous rmap -- cgit v1.2.3 From 5e841b88d23d0ea0a6ee4e76c489899d4d23ce25 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 8 May 2007 00:37:41 -0700 Subject: fb: fsync() method for deferred I/O flush. There are cases when we do not want to wait on the delay for automatically updating the "real" framebuffer, this implements a simple ->fsync() hook for explicitly flushing the deferred I/O work. The ->page_mkwrite() handler will rearm the work queue normally. 
(akpm: nuke unneeded ifdefs, forward-delcare struct dentry) Signed-off-by: Paul Mundt Cc: Jaya Kumar Acked-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/fb_defio.c | 12 ++++++++++++ drivers/video/fbmem.c | 3 +++ include/linux/fb.h | 6 ++++-- 3 files changed, 19 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 65f4e6ee7f94..f963f5f59b73 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c @@ -47,6 +47,18 @@ static struct page* fb_deferred_io_nopage(struct vm_area_struct *vma, return page; } +int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync) +{ + struct fb_info *info = file->private_data; + + /* Kill off the delayed work */ + cancel_rearming_delayed_work(&info->deferred_work); + + /* Run it immediately */ + return schedule_delayed_work(&info->deferred_work, 0); +} +EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); + /* vm_ops->page_mkwrite handler */ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, struct page *page) diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 28225265159a..44cf0e4f52e9 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1266,6 +1266,9 @@ static const struct file_operations fb_fops = { #ifdef HAVE_ARCH_FB_UNMAPPED_AREA .get_unmapped_area = get_fb_unmapped_area, #endif +#ifdef CONFIG_FB_DEFERRED_IO + .fsync = fb_deferred_io_fsync, +#endif }; struct class *fb_class; diff --git a/include/linux/fb.h b/include/linux/fb.h index 8a8255b94b62..0de426026f4a 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -4,6 +4,8 @@ #include #include +struct dentry; + /* Definitions of frame buffers */ #define FB_MAJOR 29 @@ -927,11 +929,11 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, } } -#ifdef CONFIG_FB_DEFERRED_IO /* drivers/video/fb_defio.c */ extern void fb_deferred_io_init(struct fb_info *info); extern void fb_deferred_io_cleanup(struct fb_info *info); -#endif +extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, + int datasync); /* drivers/video/fbsysfs.c */ extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); -- cgit v1.2.3 From fa6ce9ab5fbcb4c276c48861584b70d387e787b3 Mon Sep 17 00:00:00 2001 From: Jan Engelhardt Date: Tue, 8 May 2007 00:38:04 -0700 Subject: vt: add color support to the "underline" and "italic" attributes Add color support to the "underline" and "italic" attributes as in OpenBSD/NetBSD-style (vt220) and xterm. Signed-off-by: Jan Engelhardt Acked-by: "Antonino A. 
Daplas" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/vt.c | 34 ++++++++++++++++++++++++++++------ drivers/video/console/mdacon.c | 3 ++- drivers/video/console/promcon.c | 3 ++- drivers/video/console/sticon.c | 2 +- drivers/video/console/vgacon.c | 12 ++++++++---- include/linux/console.h | 2 +- include/linux/console_struct.h | 3 +++ 7 files changed, 45 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 4ef9a286a230..4818327180ac 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -348,10 +348,12 @@ void update_region(struct vc_data *vc, unsigned long start, int count) /* Structure of attributes is hardware-dependent */ -static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, u8 _underline, u8 _reverse) +static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, + u8 _underline, u8 _reverse, u8 _italic) { if (vc->vc_sw->con_build_attr) - return vc->vc_sw->con_build_attr(vc, _color, _intensity, _blink, _underline, _reverse); + return vc->vc_sw->con_build_attr(vc, _color, _intensity, + _blink, _underline, _reverse, _italic); #ifndef VT_BUF_VRAM_ONLY /* @@ -368,10 +370,13 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, u8 u8 a = vc->vc_color; if (!vc->vc_can_do_color) return _intensity | + (_italic ? 2 : 0) | (_underline ? 4 : 0) | (_reverse ? 8 : 0) | (_blink ? 0x80 : 0); - if (_underline) + if (_italic) + a = (a & 0xF0) | vc->vc_itcolor; + else if (_underline) a = (a & 0xf0) | vc->vc_ulcolor; else if (_intensity == 0) a = (a & 0xf0) | vc->vc_ulcolor; @@ -392,8 +397,10 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, u8 static void update_attr(struct vc_data *vc) { - vc->vc_attr = build_attr(vc, vc->vc_color, vc->vc_intensity, vc->vc_blink, vc->vc_underline, vc->vc_reverse ^ vc->vc_decscnm); - vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm) << 8) | ' '; + vc->vc_attr = build_attr(vc, vc->vc_color, vc->vc_intensity, + vc->vc_blink, vc->vc_underline, + vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic); + vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' '; } /* Note: inverting the screen twice should revert to the original state */ @@ -1136,6 +1143,7 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi static void default_attr(struct vc_data *vc) { vc->vc_intensity = 1; + vc->vc_italic = 0; vc->vc_underline = 0; vc->vc_reverse = 0; vc->vc_blink = 0; @@ -1158,6 +1166,9 @@ static void csi_m(struct vc_data *vc) case 2: vc->vc_intensity = 0; break; + case 3: + vc->vc_italic = 1; + break; case 4: vc->vc_underline = 1; break; @@ -1198,6 +1209,9 @@ static void csi_m(struct vc_data *vc) case 22: vc->vc_intensity = 1; break; + case 23: + vc->vc_italic = 0; + break; case 24: vc->vc_underline = 0; break; @@ -1458,6 +1472,7 @@ static void save_cur(struct vc_data *vc) vc->vc_saved_x = vc->vc_x; vc->vc_saved_y = vc->vc_y; vc->vc_s_intensity = vc->vc_intensity; + vc->vc_s_italic = vc->vc_italic; vc->vc_s_underline = vc->vc_underline; vc->vc_s_blink = vc->vc_blink; vc->vc_s_reverse = vc->vc_reverse; @@ -1472,6 +1487,7 @@ static void restore_cur(struct vc_data *vc) { gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y); vc->vc_intensity = vc->vc_s_intensity; + vc->vc_italic = vc->vc_s_italic; vc->vc_underline = vc->vc_s_underline; vc->vc_blink = vc->vc_s_blink; vc->vc_reverse = 
vc->vc_s_reverse; @@ -2686,6 +2702,11 @@ static void con_close(struct tty_struct *tty, struct file *filp) mutex_unlock(&tty_mutex); } +static int default_italic_color = 2; // green (ASCII) +static int default_underline_color = 3; // cyan (ASCII) +module_param_named(italic, default_italic_color, int, S_IRUGO | S_IWUSR); +module_param_named(underline, default_underline_color, int, S_IRUGO | S_IWUSR); + static void vc_init(struct vc_data *vc, unsigned int rows, unsigned int cols, int do_clear) { @@ -2705,7 +2726,8 @@ static void vc_init(struct vc_data *vc, unsigned int rows, vc->vc_palette[k++] = default_blu[j] ; } vc->vc_def_color = 0x07; /* white */ - vc->vc_ulcolor = 0x0f; /* bold white */ + vc->vc_ulcolor = default_underline_color; + vc->vc_itcolor = default_italic_color; vc->vc_halfcolor = 0x08; /* grey */ init_waitqueue_head(&vc->paste_wait); reset_terminal(vc, do_clear); diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index 124ecbe6f88c..bd8d995fe25d 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c @@ -384,7 +384,7 @@ static inline u16 mda_convert_attr(u16 ch) } static u8 mdacon_build_attr(struct vc_data *c, u8 color, u8 intensity, - u8 blink, u8 underline, u8 reverse) + u8 blink, u8 underline, u8 reverse, u8 italic) { /* The attribute is just a bit vector: * @@ -397,6 +397,7 @@ static u8 mdacon_build_attr(struct vc_data *c, u8 color, u8 intensity, return (intensity & 3) | ((underline & 1) << 2) | ((reverse & 1) << 3) | + (!!italic << 4) | ((blink & 1) << 7); } diff --git a/drivers/video/console/promcon.c b/drivers/video/console/promcon.c index b78eac63459f..ae02e4eb18e7 100644 --- a/drivers/video/console/promcon.c +++ b/drivers/video/console/promcon.c @@ -548,7 +548,8 @@ promcon_scroll(struct vc_data *conp, int t, int b, int dir, int count) } #if !(PROMCON_COLOR) -static u8 promcon_build_attr(struct vc_data *conp, u8 _color, u8 _intensity, u8 _blink, u8 _underline, u8 _reverse) +static u8 promcon_build_attr(struct vc_data *conp, u8 _color, u8 _intensity, + u8 _blink, u8 _underline, u8 _reverse, u8 _italic) { return (_reverse) ? 
0xf : 0x7; } diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 57b21e533036..67a682d6cc7b 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -314,7 +314,7 @@ static unsigned long sticon_getxy(struct vc_data *conp, unsigned long pos, } static u8 sticon_build_attr(struct vc_data *conp, u8 color, u8 intens, - u8 blink, u8 underline, u8 reverse) + u8 blink, u8 underline, u8 reverse, u8 italic) { u8 attr = ((color & 0x70) >> 1) | ((color & 7)); diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 3e67c34df9a5..53c22197b631 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -87,7 +87,7 @@ static void vgacon_save_screen(struct vc_data *c); static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, int lines); static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, - u8 blink, u8 underline, u8 reverse); + u8 blink, u8 underline, u8 reverse, u8); static void vgacon_invert_region(struct vc_data *c, u16 * p, int count); static unsigned long vgacon_uni_pagedir[2]; @@ -578,12 +578,14 @@ static void vgacon_deinit(struct vc_data *c) } static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, - u8 blink, u8 underline, u8 reverse) + u8 blink, u8 underline, u8 reverse, u8 italic) { u8 attr = color; if (vga_can_do_color) { - if (underline) + if (italic) + attr = (attr & 0xF0) | c->vc_itcolor; + else if (underline) attr = (attr & 0xf0) | c->vc_ulcolor; else if (intensity == 0) attr = (attr & 0xf0) | c->vc_halfcolor; @@ -597,7 +599,9 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, if (intensity == 2) attr ^= 0x08; if (!vga_can_do_color) { - if (underline) + if (italic) + attr = (attr & 0xF8) | 0x02; + else if (underline) attr = (attr & 0xf8) | 0x01; else if (intensity == 0) attr = (attr & 0xf0) | 0x08; diff --git a/include/linux/console.h b/include/linux/console.h index fcc18e012e45..62ef6e11d0d2 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -51,7 +51,7 @@ struct consw { int (*con_scrolldelta)(struct vc_data *, int); int (*con_set_origin)(struct vc_data *); void (*con_save_screen)(struct vc_data *); - u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8); + u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8); void (*con_invert_region)(struct vc_data *, u16 *, int); u16 *(*con_screen_pos)(struct vc_data *, int); unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *); diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index a86162b26c0d..a461f76fb004 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -37,6 +37,7 @@ struct vc_data { unsigned char vc_color; /* Foreground & background */ unsigned char vc_s_color; /* Saved foreground & background */ unsigned char vc_ulcolor; /* Color for underline mode */ + unsigned char vc_itcolor; unsigned char vc_halfcolor; /* Color for half intensity mode */ /* cursor */ unsigned int vc_cursor_type; @@ -71,10 +72,12 @@ struct vc_data { unsigned int vc_deccolm : 1; /* 80/132 Column Mode */ /* attribute flags */ unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ + unsigned int vc_italic:1; unsigned int vc_underline : 1; unsigned int vc_blink : 1; unsigned int vc_reverse : 1; unsigned int vc_s_intensity : 2; /* saved rendition */ + unsigned int vc_s_italic:1; unsigned int vc_s_underline : 1; unsigned int vc_s_blink : 1; unsigned int vc_s_reverse : 1; -- cgit 
v1.2.3 From 68648ed1f58d98b8e8d994022e5e25331fbfe42a Mon Sep 17 00:00:00 2001 From: "Antonino A. Daplas" Date: Tue, 8 May 2007 00:38:57 -0700 Subject: fbdev: add drawing functions for framebuffers in system RAM The generic drawing functions (cfbimgblt, cfbcopyarea, cfbfillrect) assume that the framebuffer is in IO memory. However, we have 3 drivers (hecubafb, arcfb, and vfb) where the framebuffer is allocated from system RAM (via vmalloc). Using _raw_read/write and family for these drivers (as used in the cfb* functions) is illegal, especially in other platforms. Create 3 new drawing functions, based almost entirely from the original except that the framebuffer memory is assumed to be in system RAM. These are named as sysimgblt, syscopyarea, and sysfillrect. Signed-off-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/Kconfig | 27 +++ drivers/video/Makefile | 3 + drivers/video/syscopyarea.c | 388 ++++++++++++++++++++++++++++++++++++++++++ drivers/video/sysfillrect.c | 400 ++++++++++++++++++++++++++++++++++++++++++++ drivers/video/sysimgblt.c | 291 ++++++++++++++++++++++++++++++++ include/linux/fb.h | 6 + 6 files changed, 1115 insertions(+) create mode 100644 drivers/video/syscopyarea.c create mode 100644 drivers/video/sysfillrect.c create mode 100644 drivers/video/sysimgblt.c (limited to 'include/linux') diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index d9aed1033442..f0bc5d908e2c 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -95,6 +95,33 @@ config FB_CFB_IMAGEBLIT blitting. This is used by drivers that don't provide their own (accelerated) version. +config FB_SYS_FILLRECT + tristate + depends on FB + default n + ---help--- + Include the sys_fillrect function for generic software rectangle + filling. This is used by drivers that don't provide their own + (accelerated) version and the framebuffer is in system RAM. + +config FB_SYS_COPYAREA + tristate + depends on FB + default n + ---help--- + Include the sys_copyarea function for generic software area copying. + This is used by drivers that don't provide their own (accelerated) + version and the framebuffer is in system RAM. + +config FB_SYS_IMAGEBLIT + tristate + depends on FB + default n + ---help--- + Include the sys_imageblit function for generic software image + blitting. This is used by drivers that don't provide their own + (accelerated) version and the framebuffer is in system RAM. + config FB_DEFERRED_IO bool depends on FB diff --git a/drivers/video/Makefile b/drivers/video/Makefile index cd94375c3fc1..fa025e786848 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -18,6 +18,9 @@ obj-y += backlight/ display/ obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o +obj-$(CONFIG_FB_SYS_FILLRECT) += sysfillrect.o +obj-$(CONFIG_FB_SYS_COPYAREA) += syscopyarea.o +obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o obj-$(CONFIG_FB_SVGALIB) += svgalib.o obj-$(CONFIG_FB_MACMODES) += macmodes.o obj-$(CONFIG_FB_DDC) += fb_ddc.o diff --git a/drivers/video/syscopyarea.c b/drivers/video/syscopyarea.c new file mode 100644 index 000000000000..e3488932c7b3 --- /dev/null +++ b/drivers/video/syscopyarea.c @@ -0,0 +1,388 @@ +/* + * Generic Bit Block Transfer for frame buffers located in system RAM with + * packed pixels of any depth. 
+ * + * Based almost entirely from cfbcopyarea.c (which is based almost entirely + * on Geert Uytterhoeven's copyarea routine) + * + * Copyright (C) 2007 Antonino Daplas + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + * + */ +#include +#include +#include +#include +#include +#include +#include + + /* + * Compose two values, using a bitmask as decision value + * This is equivalent to (a & mask) | (b & ~mask) + */ + +static inline unsigned long +comp(unsigned long a, unsigned long b, unsigned long mask) +{ + return ((a ^ b) & mask) ^ b; +} + + /* + * Generic bitwise copy algorithm + */ + +static void +bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src, + int src_idx, int bits, unsigned n) +{ + unsigned long first, last; + int const shift = dst_idx-src_idx; + int left, right; + + first = FB_SHIFT_HIGH(~0UL, dst_idx); + last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits)); + + if (!shift) { + /* Same alignment for source and dest */ + if (dst_idx+n <= bits) { + /* Single word */ + if (last) + first &= last; + *dst = comp(*src, *dst, first); + } else { + /* Multiple destination words */ + /* Leading bits */ + if (first != ~0UL) { + *dst = comp(*src, *dst, first); + dst++; + src++; + n -= bits - dst_idx; + } + + /* Main chunk */ + n /= bits; + while (n >= 8) { + *dst++ = *src++; + *dst++ = *src++; + *dst++ = *src++; + *dst++ = *src++; + *dst++ = *src++; + *dst++ = *src++; + *dst++ = *src++; + *dst++ = *src++; + n -= 8; + } + while (n--) + *dst++ = *src++; + + /* Trailing bits */ + if (last) + *dst = comp(*src, *dst, last); + } + } else { + unsigned long d0, d1; + int m; + + /* Different alignment for source and dest */ + right = shift & (bits - 1); + left = -shift & (bits - 1); + + if (dst_idx+n <= bits) { + /* Single destination word */ + if (last) + first &= last; + if (shift > 0) { + /* Single source word */ + *dst = comp(*src >> right, *dst, first); + } else if (src_idx+n <= bits) { + /* Single source word */ + *dst = comp(*src << left, *dst, first); + } else { + /* 2 source words */ + d0 = *src++; + d1 = *src; + *dst = comp(d0 << left | d1 >> right, *dst, + first); + } + } else { + /* Multiple destination words */ + /** We must always remember the last value read, + because in case SRC and DST overlap bitwise (e.g. + when moving just one pixel in 1bpp), we always + collect one full long for DST and that might + overlap with the current long from SRC. We store + this value in 'd0'. 
*/ + d0 = *src++; + /* Leading bits */ + if (shift > 0) { + /* Single source word */ + *dst = comp(d0 >> right, *dst, first); + dst++; + n -= bits - dst_idx; + } else { + /* 2 source words */ + d1 = *src++; + *dst = comp(d0 << left | *dst >> right, *dst, first); + d0 = d1; + dst++; + n -= bits - dst_idx; + } + + /* Main chunk */ + m = n % bits; + n /= bits; + while (n >= 4) { + d1 = *src++; + *dst++ = d0 << left | d1 >> right; + d0 = d1; + d1 = *src++; + *dst++ = d0 << left | d1 >> right; + d0 = d1; + d1 = *src++; + *dst++ = d0 << left | d1 >> right; + d0 = d1; + d1 = *src++; + *dst++ = d0 << left | d1 >> right; + d0 = d1; + n -= 4; + } + while (n--) { + d1 = *src++; + *dst++ = d0 << left | d1 >> right; + d0 = d1; + } + + /* Trailing bits */ + if (last) { + if (m <= right) { + /* Single source word */ + *dst = comp(d0 << left, *dst, last); + } else { + /* 2 source words */ + d1 = *src; + *dst = comp(d0 << left | d1 >> right, + *dst, last); + } + } + } + } +} + + /* + * Generic bitwise copy algorithm, operating backward + */ + +static void +bitcpy_rev(unsigned long *dst, int dst_idx, const unsigned long *src, + int src_idx, int bits, unsigned n) +{ + unsigned long first, last; + int shift; + + dst += (n-1)/bits; + src += (n-1)/bits; + if ((n-1) % bits) { + dst_idx += (n-1) % bits; + dst += dst_idx >> (ffs(bits) - 1); + dst_idx &= bits - 1; + src_idx += (n-1) % bits; + src += src_idx >> (ffs(bits) - 1); + src_idx &= bits - 1; + } + + shift = dst_idx-src_idx; + + first = FB_SHIFT_LOW(~0UL, bits - 1 - dst_idx); + last = ~(FB_SHIFT_LOW(~0UL, bits - 1 - ((dst_idx-n) % bits))); + + if (!shift) { + /* Same alignment for source and dest */ + if ((unsigned long)dst_idx+1 >= n) { + /* Single word */ + if (last) + first &= last; + *dst = comp(*src, *dst, first); + } else { + /* Multiple destination words */ + + /* Leading bits */ + if (first != ~0UL) { + *dst = comp(*src, *dst, first); + dst--; + src--; + n -= dst_idx+1; + } + + /* Main chunk */ + n /= bits; + while (n >= 8) { + *dst-- = *src--; + *dst-- = *src--; + *dst-- = *src--; + *dst-- = *src--; + *dst-- = *src--; + *dst-- = *src--; + *dst-- = *src--; + *dst-- = *src--; + n -= 8; + } + while (n--) + *dst-- = *src--; + /* Trailing bits */ + if (last) + *dst = comp(*src, *dst, last); + } + } else { + /* Different alignment for source and dest */ + + int const left = -shift & (bits-1); + int const right = shift & (bits-1); + + if ((unsigned long)dst_idx+1 >= n) { + /* Single destination word */ + if (last) + first &= last; + if (shift < 0) { + /* Single source word */ + *dst = comp(*src << left, *dst, first); + } else if (1+(unsigned long)src_idx >= n) { + /* Single source word */ + *dst = comp(*src >> right, *dst, first); + } else { + /* 2 source words */ + *dst = comp(*src >> right | *(src-1) << left, + *dst, first); + } + } else { + /* Multiple destination words */ + /** We must always remember the last value read, + because in case SRC and DST overlap bitwise (e.g. + when moving just one pixel in 1bpp), we always + collect one full long for DST and that might + overlap with the current long from SRC. We store + this value in 'd0'. 
*/ + unsigned long d0, d1; + int m; + + d0 = *src--; + /* Leading bits */ + if (shift < 0) { + /* Single source word */ + *dst = comp(d0 << left, *dst, first); + } else { + /* 2 source words */ + d1 = *src--; + *dst = comp(d0 >> right | d1 << left, *dst, + first); + d0 = d1; + } + dst--; + n -= dst_idx+1; + + /* Main chunk */ + m = n % bits; + n /= bits; + while (n >= 4) { + d1 = *src--; + *dst-- = d0 >> right | d1 << left; + d0 = d1; + d1 = *src--; + *dst-- = d0 >> right | d1 << left; + d0 = d1; + d1 = *src--; + *dst-- = d0 >> right | d1 << left; + d0 = d1; + d1 = *src--; + *dst-- = d0 >> right | d1 << left; + d0 = d1; + n -= 4; + } + while (n--) { + d1 = *src--; + *dst-- = d0 >> right | d1 << left; + d0 = d1; + } + + /* Trailing bits */ + if (last) { + if (m <= left) { + /* Single source word */ + *dst = comp(d0 >> right, *dst, last); + } else { + /* 2 source words */ + d1 = *src; + *dst = comp(d0 >> right | d1 << left, + *dst, last); + } + } + } + } +} + +void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area) +{ + u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy; + u32 height = area->height, width = area->width; + unsigned long const bits_per_line = p->fix.line_length*8u; + unsigned long *dst = NULL, *src = NULL; + int bits = BITS_PER_LONG, bytes = bits >> 3; + int dst_idx = 0, src_idx = 0, rev_copy = 0; + + if (p->state != FBINFO_STATE_RUNNING) + return; + + /* if the beginning of the target area might overlap with the end of + the source area, be have to copy the area reverse. */ + if ((dy == sy && dx > sx) || (dy > sy)) { + dy += height; + sy += height; + rev_copy = 1; + } + + /* split the base of the framebuffer into a long-aligned address and + the index of the first bit */ + dst = src = (unsigned long *)((unsigned long)p->screen_base & + ~(bytes-1)); + dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1)); + /* add offset of source and target area */ + dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel; + src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel; + + if (p->fbops->fb_sync) + p->fbops->fb_sync(p); + + if (rev_copy) { + while (height--) { + dst_idx -= bits_per_line; + src_idx -= bits_per_line; + dst += dst_idx >> (ffs(bits) - 1); + dst_idx &= (bytes - 1); + src += src_idx >> (ffs(bits) - 1); + src_idx &= (bytes - 1); + bitcpy_rev(dst, dst_idx, src, src_idx, bits, + width*p->var.bits_per_pixel); + } + } else { + while (height--) { + dst += dst_idx >> (ffs(bits) - 1); + dst_idx &= (bytes - 1); + src += src_idx >> (ffs(bits) - 1); + src_idx &= (bytes - 1); + bitcpy(dst, dst_idx, src, src_idx, bits, + width*p->var.bits_per_pixel); + dst_idx += bits_per_line; + src_idx += bits_per_line; + } + } +} + +EXPORT_SYMBOL(sys_copyarea); + +MODULE_AUTHOR("Antonino Daplas "); +MODULE_DESCRIPTION("Generic copyarea (sys-to-sys)"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/video/sysfillrect.c b/drivers/video/sysfillrect.c new file mode 100644 index 000000000000..10de70779a50 --- /dev/null +++ b/drivers/video/sysfillrect.c @@ -0,0 +1,400 @@ +/* + * Generic fillrect for frame buffers in system RAM with packed pixels of + * any depth. + * + * Based almost entirely from cfbfillrect.c (which is based almost entirely + * on Geert Uytterhoeven's fillrect routine) + * + * Copyright (C) 2007 Antonino Daplas + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. 
+ */ +#include +#include +#include +#include + + /* + * Compose two values, using a bitmask as decision value + * This is equivalent to (a & mask) | (b & ~mask) + */ + +static inline unsigned long +comp(unsigned long a, unsigned long b, unsigned long mask) +{ + return ((a ^ b) & mask) ^ b; +} + + /* + * Create a pattern with the given pixel's color + */ + +#if BITS_PER_LONG == 64 +static inline unsigned long +pixel_to_pat( u32 bpp, u32 pixel) +{ + switch (bpp) { + case 1: + return 0xfffffffffffffffful*pixel; + case 2: + return 0x5555555555555555ul*pixel; + case 4: + return 0x1111111111111111ul*pixel; + case 8: + return 0x0101010101010101ul*pixel; + case 12: + return 0x0001001001001001ul*pixel; + case 16: + return 0x0001000100010001ul*pixel; + case 24: + return 0x0000000001000001ul*pixel; + case 32: + return 0x0000000100000001ul*pixel; + default: + panic("pixel_to_pat(): unsupported pixelformat\n"); + } +} +#else +static inline unsigned long +pixel_to_pat( u32 bpp, u32 pixel) +{ + switch (bpp) { + case 1: + return 0xfffffffful*pixel; + case 2: + return 0x55555555ul*pixel; + case 4: + return 0x11111111ul*pixel; + case 8: + return 0x01010101ul*pixel; + case 12: + return 0x00001001ul*pixel; + case 16: + return 0x00010001ul*pixel; + case 24: + return 0x00000001ul*pixel; + case 32: + return 0x00000001ul*pixel; + default: + panic("pixel_to_pat(): unsupported pixelformat\n"); + } +} +#endif + + /* + * Aligned pattern fill using 32/64-bit memory accesses + */ + +static void +bitfill_aligned(unsigned long *dst, int dst_idx, unsigned long pat, + unsigned n, int bits) +{ + unsigned long first, last; + + if (!n) + return; + + first = FB_SHIFT_HIGH(~0UL, dst_idx); + last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits)); + + if (dst_idx+n <= bits) { + /* Single word */ + if (last) + first &= last; + *dst = comp(pat, *dst, first); + } else { + /* Multiple destination words */ + + /* Leading bits */ + if (first!= ~0UL) { + *dst = comp(pat, *dst, first); + dst++; + n -= bits - dst_idx; + } + + /* Main chunk */ + n /= bits; + while (n >= 8) { + *dst++ = pat; + *dst++ = pat; + *dst++ = pat; + *dst++ = pat; + *dst++ = pat; + *dst++ = pat; + *dst++ = pat; + *dst++ = pat; + n -= 8; + } + while (n--) + *dst++ = pat; + /* Trailing bits */ + if (last) + *dst = comp(pat, *dst, last); + } +} + + + /* + * Unaligned generic pattern fill using 32/64-bit memory accesses + * The pattern must have been expanded to a full 32/64-bit value + * Left/right are the appropriate shifts to convert to the pattern to be + * used for the next 32/64-bit word + */ + +static void +bitfill_unaligned(unsigned long *dst, int dst_idx, unsigned long pat, + int left, int right, unsigned n, int bits) +{ + unsigned long first, last; + + if (!n) + return; + + first = FB_SHIFT_HIGH(~0UL, dst_idx); + last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits)); + + if (dst_idx+n <= bits) { + /* Single word */ + if (last) + first &= last; + *dst = comp(pat, *dst, first); + } else { + /* Multiple destination words */ + /* Leading bits */ + if (first) { + *dst = comp(pat, *dst, first); + dst++; + pat = pat << left | pat >> right; + n -= bits - dst_idx; + } + + /* Main chunk */ + n /= bits; + while (n >= 4) { + *dst++ = pat; + pat = pat << left | pat >> right; + *dst++ = pat; + pat = pat << left | pat >> right; + *dst++ = pat; + pat = pat << left | pat >> right; + *dst++ = pat; + pat = pat << left | pat >> right; + n -= 4; + } + while (n--) { + *dst++ = pat; + pat = pat << left | pat >> right; + } + + /* Trailing bits */ + if (last) + *dst = comp(pat, *dst, first); + 
} +} + + /* + * Aligned pattern invert using 32/64-bit memory accesses + */ +static void +bitfill_aligned_rev(unsigned long *dst, int dst_idx, unsigned long pat, + unsigned n, int bits) +{ + unsigned long val = pat; + unsigned long first, last; + + if (!n) + return; + + first = FB_SHIFT_HIGH(~0UL, dst_idx); + last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits)); + + if (dst_idx+n <= bits) { + /* Single word */ + if (last) + first &= last; + *dst = comp(*dst ^ val, *dst, first); + } else { + /* Multiple destination words */ + /* Leading bits */ + if (first!=0UL) { + *dst = comp(*dst ^ val, *dst, first); + dst++; + n -= bits - dst_idx; + } + + /* Main chunk */ + n /= bits; + while (n >= 8) { + *dst++ ^= val; + *dst++ ^= val; + *dst++ ^= val; + *dst++ ^= val; + *dst++ ^= val; + *dst++ ^= val; + *dst++ ^= val; + *dst++ ^= val; + n -= 8; + } + while (n--) + *dst++ ^= val; + /* Trailing bits */ + if (last) + *dst = comp(*dst ^ val, *dst, last); + } +} + + + /* + * Unaligned generic pattern invert using 32/64-bit memory accesses + * The pattern must have been expanded to a full 32/64-bit value + * Left/right are the appropriate shifts to convert to the pattern to be + * used for the next 32/64-bit word + */ + +static void +bitfill_unaligned_rev(unsigned long *dst, int dst_idx, unsigned long pat, + int left, int right, unsigned n, int bits) +{ + unsigned long first, last; + + if (!n) + return; + + first = FB_SHIFT_HIGH(~0UL, dst_idx); + last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits)); + + if (dst_idx+n <= bits) { + /* Single word */ + if (last) + first &= last; + *dst = comp(*dst ^ pat, *dst, first); + } else { + /* Multiple destination words */ + + /* Leading bits */ + if (first != 0UL) { + *dst = comp(*dst ^ pat, *dst, first); + dst++; + pat = pat << left | pat >> right; + n -= bits - dst_idx; + } + + /* Main chunk */ + n /= bits; + while (n >= 4) { + *dst++ ^= pat; + pat = pat << left | pat >> right; + *dst++ ^= pat; + pat = pat << left | pat >> right; + *dst++ ^= pat; + pat = pat << left | pat >> right; + *dst++ ^= pat; + pat = pat << left | pat >> right; + n -= 4; + } + while (n--) { + *dst ^= pat; + pat = pat << left | pat >> right; + } + + /* Trailing bits */ + if (last) + *dst = comp(*dst ^ pat, *dst, last); + } +} + +void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect) +{ + unsigned long pat, fg; + unsigned long width = rect->width, height = rect->height; + int bits = BITS_PER_LONG, bytes = bits >> 3; + u32 bpp = p->var.bits_per_pixel; + unsigned long *dst; + int dst_idx, left; + + if (p->state != FBINFO_STATE_RUNNING) + return; + + if (p->fix.visual == FB_VISUAL_TRUECOLOR || + p->fix.visual == FB_VISUAL_DIRECTCOLOR ) + fg = ((u32 *) (p->pseudo_palette))[rect->color]; + else + fg = rect->color; + + pat = pixel_to_pat( bpp, fg); + + dst = (unsigned long *)((unsigned long)p->screen_base & ~(bytes-1)); + dst_idx = ((unsigned long)p->screen_base & (bytes - 1))*8; + dst_idx += rect->dy*p->fix.line_length*8+rect->dx*bpp; + /* FIXME For now we support 1-32 bpp only */ + left = bits % bpp; + if (p->fbops->fb_sync) + p->fbops->fb_sync(p); + if (!left) { + void (*fill_op32)(unsigned long *dst, int dst_idx, + unsigned long pat, unsigned n, int bits) = + NULL; + + switch (rect->rop) { + case ROP_XOR: + fill_op32 = bitfill_aligned_rev; + break; + case ROP_COPY: + fill_op32 = bitfill_aligned; + break; + default: + printk( KERN_ERR "cfb_fillrect(): unknown rop, " + "defaulting to ROP_COPY\n"); + fill_op32 = bitfill_aligned; + break; + } + while (height--) { + dst += dst_idx >> (ffs(bits) 
- 1); + dst_idx &= (bits - 1); + fill_op32(dst, dst_idx, pat, width*bpp, bits); + dst_idx += p->fix.line_length*8; + } + } else { + int right; + int r; + int rot = (left-dst_idx) % bpp; + void (*fill_op)(unsigned long *dst, int dst_idx, + unsigned long pat, int left, int right, + unsigned n, int bits) = NULL; + + /* rotate pattern to correct start position */ + pat = pat << rot | pat >> (bpp-rot); + + right = bpp-left; + switch (rect->rop) { + case ROP_XOR: + fill_op = bitfill_unaligned_rev; + break; + case ROP_COPY: + fill_op = bitfill_unaligned; + break; + default: + printk(KERN_ERR "cfb_fillrect(): unknown rop, " + "defaulting to ROP_COPY\n"); + fill_op = bitfill_unaligned; + break; + } + while (height--) { + dst += dst_idx >> (ffs(bits) - 1); + dst_idx &= (bits - 1); + fill_op(dst, dst_idx, pat, left, right, + width*bpp, bits); + r = (p->fix.line_length*8) % bpp; + pat = pat << (bpp-r) | pat >> r; + dst_idx += p->fix.line_length*8; + } + } +} + +EXPORT_SYMBOL(sys_fillrect); + +MODULE_AUTHOR("Antonino Daplas "); +MODULE_DESCRIPTION("Generic fill rectangle (sys-to-sys)"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/sysimgblt.c b/drivers/video/sysimgblt.c new file mode 100644 index 000000000000..bd7e7e9d155f --- /dev/null +++ b/drivers/video/sysimgblt.c @@ -0,0 +1,291 @@ +/* + * Generic 1-bit or 8-bit source to 1-32 bit destination expansion + * for frame buffer located in system RAM with packed pixels of any depth. + * + * Based almost entirely on cfbimgblt.c + * + * Copyright (C) April 2007 Antonino Daplas + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ +#include +#include +#include +#include + +#define DEBUG + +#ifdef DEBUG +#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__FUNCTION__,## args) +#else +#define DPRINTK(fmt, args...) +#endif + +static const u32 cfb_tab8[] = { +#if defined(__BIG_ENDIAN) + 0x00000000,0x000000ff,0x0000ff00,0x0000ffff, + 0x00ff0000,0x00ff00ff,0x00ffff00,0x00ffffff, + 0xff000000,0xff0000ff,0xff00ff00,0xff00ffff, + 0xffff0000,0xffff00ff,0xffffff00,0xffffffff +#elif defined(__LITTLE_ENDIAN) + 0x00000000,0xff000000,0x00ff0000,0xffff0000, + 0x0000ff00,0xff00ff00,0x00ffff00,0xffffff00, + 0x000000ff,0xff0000ff,0x00ff00ff,0xffff00ff, + 0x0000ffff,0xff00ffff,0x00ffffff,0xffffffff +#else +#error FIXME: No endianness?? +#endif +}; + +static const u32 cfb_tab16[] = { +#if defined(__BIG_ENDIAN) + 0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff +#elif defined(__LITTLE_ENDIAN) + 0x00000000, 0xffff0000, 0x0000ffff, 0xffffffff +#else +#error FIXME: No endianness?? 
+#endif +}; + +static const u32 cfb_tab32[] = { + 0x00000000, 0xffffffff +}; + +static void color_imageblit(const struct fb_image *image, struct fb_info *p, + void *dst1, u32 start_index, u32 pitch_index) +{ + /* Draw the penguin */ + u32 *dst, *dst2; + u32 color = 0, val, shift; + int i, n, bpp = p->var.bits_per_pixel; + u32 null_bits = 32 - bpp; + u32 *palette = (u32 *) p->pseudo_palette; + const u8 *src = image->data; + + dst2 = dst1; + for (i = image->height; i--; ) { + n = image->width; + dst = dst1; + shift = 0; + val = 0; + + if (start_index) { + u32 start_mask = ~(FB_SHIFT_HIGH(~(u32)0, + start_index)); + val = *dst & start_mask; + shift = start_index; + } + while (n--) { + if (p->fix.visual == FB_VISUAL_TRUECOLOR || + p->fix.visual == FB_VISUAL_DIRECTCOLOR ) + color = palette[*src]; + else + color = *src; + color <<= FB_LEFT_POS(bpp); + val |= FB_SHIFT_HIGH(color, shift); + if (shift >= null_bits) { + *dst++ = val; + + val = (shift == null_bits) ? 0 : + FB_SHIFT_LOW(color, 32 - shift); + } + shift += bpp; + shift &= (32 - 1); + src++; + } + if (shift) { + u32 end_mask = FB_SHIFT_HIGH(~(u32)0, shift); + + *dst &= end_mask; + *dst |= val; + } + dst1 += p->fix.line_length; + if (pitch_index) { + dst2 += p->fix.line_length; + dst1 = (u8 *)((long)dst2 & ~(sizeof(u32) - 1)); + + start_index += pitch_index; + start_index &= 32 - 1; + } + } +} + +static void slow_imageblit(const struct fb_image *image, struct fb_info *p, + void *dst1, u32 fgcolor, u32 bgcolor, + u32 start_index, u32 pitch_index) +{ + u32 shift, color = 0, bpp = p->var.bits_per_pixel; + u32 *dst, *dst2; + u32 val, pitch = p->fix.line_length; + u32 null_bits = 32 - bpp; + u32 spitch = (image->width+7)/8; + const u8 *src = image->data, *s; + u32 i, j, l; + + dst2 = dst1; + fgcolor <<= FB_LEFT_POS(bpp); + bgcolor <<= FB_LEFT_POS(bpp); + + for (i = image->height; i--; ) { + shift = val = 0; + l = 8; + j = image->width; + dst = dst1; + s = src; + + /* write leading bits */ + if (start_index) { + u32 start_mask = ~(FB_SHIFT_HIGH(~(u32)0,start_index)); + val = *dst & start_mask; + shift = start_index; + } + + while (j--) { + l--; + color = (*s & (1 << l)) ? fgcolor : bgcolor; + val |= FB_SHIFT_HIGH(color, shift); + + /* Did the bitshift spill bits to the next long? */ + if (shift >= null_bits) { + *dst++ = val; + val = (shift == null_bits) ? 
0 : + FB_SHIFT_LOW(color,32 - shift); + } + shift += bpp; + shift &= (32 - 1); + if (!l) { l = 8; s++; }; + } + + /* write trailing bits */ + if (shift) { + u32 end_mask = FB_SHIFT_HIGH(~(u32)0, shift); + + *dst &= end_mask; + *dst |= val; + } + + dst1 += pitch; + src += spitch; + if (pitch_index) { + dst2 += pitch; + dst1 = (u8 *)((long)dst2 & ~(sizeof(u32) - 1)); + start_index += pitch_index; + start_index &= 32 - 1; + } + + } +} + +/* + * fast_imageblit - optimized monochrome color expansion + * + * Only if: bits_per_pixel == 8, 16, or 32 + * image->width is divisible by pixel/dword (ppw); + * fix->line_legth is divisible by 4; + * beginning and end of a scanline is dword aligned + */ +static void fast_imageblit(const struct fb_image *image, struct fb_info *p, + void *dst1, u32 fgcolor, u32 bgcolor) +{ + u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel; + u32 ppw = 32/bpp, spitch = (image->width + 7)/8; + u32 bit_mask, end_mask, eorx, shift; + const char *s = image->data, *src; + u32 *dst; + const u32 *tab = NULL; + int i, j, k; + + switch (bpp) { + case 8: + tab = cfb_tab8; + break; + case 16: + tab = cfb_tab16; + break; + case 32: + default: + tab = cfb_tab32; + break; + } + + for (i = ppw-1; i--; ) { + fgx <<= bpp; + bgx <<= bpp; + fgx |= fgcolor; + bgx |= bgcolor; + } + + bit_mask = (1 << ppw) - 1; + eorx = fgx ^ bgx; + k = image->width/ppw; + + for (i = image->height; i--; ) { + dst = dst1; + shift = 8; + src = s; + + for (j = k; j--; ) { + shift -= ppw; + end_mask = tab[(*src >> shift) & bit_mask]; + *dst++ = (end_mask & eorx) ^ bgx; + if (!shift) { + shift = 8; + src++; + } + } + dst1 += p->fix.line_length; + s += spitch; + } +} + +void sys_imageblit(struct fb_info *p, const struct fb_image *image) +{ + u32 fgcolor, bgcolor, start_index, bitstart, pitch_index = 0; + u32 bpl = sizeof(u32), bpp = p->var.bits_per_pixel; + u32 width = image->width; + u32 dx = image->dx, dy = image->dy; + void *dst1; + + if (p->state != FBINFO_STATE_RUNNING) + return; + + bitstart = (dy * p->fix.line_length * 8) + (dx * bpp); + start_index = bitstart & (32 - 1); + pitch_index = (p->fix.line_length & (bpl - 1)) * 8; + + bitstart /= 8; + bitstart &= ~(bpl - 1); + dst1 = (void __force *)p->screen_base + bitstart; + + if (p->fbops->fb_sync) + p->fbops->fb_sync(p); + + if (image->depth == 1) { + if (p->fix.visual == FB_VISUAL_TRUECOLOR || + p->fix.visual == FB_VISUAL_DIRECTCOLOR) { + fgcolor = ((u32*)(p->pseudo_palette))[image->fg_color]; + bgcolor = ((u32*)(p->pseudo_palette))[image->bg_color]; + } else { + fgcolor = image->fg_color; + bgcolor = image->bg_color; + } + + if (32 % bpp == 0 && !start_index && !pitch_index && + ((width & (32/bpp-1)) == 0) && + bpp >= 8 && bpp <= 32) + fast_imageblit(image, p, dst1, fgcolor, bgcolor); + else + slow_imageblit(image, p, dst1, fgcolor, bgcolor, + start_index, pitch_index); + } else + color_imageblit(image, p, dst1, start_index, pitch_index); +} + +EXPORT_SYMBOL(sys_imageblit); + +MODULE_AUTHOR("Antonino Daplas "); +MODULE_DESCRIPTION("1-bit/8-bit to 1-32 bit color expansion (sys-to-sys)"); +MODULE_LICENSE("GPL"); + diff --git a/include/linux/fb.h b/include/linux/fb.h index 0de426026f4a..c4e1b3d2ecf9 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -895,6 +895,12 @@ extern int fb_blank(struct fb_info *info, int blank); extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); extern void cfb_imageblit(struct fb_info *info, const struct 
fb_image *image); +/* + * Drawing operations where framebuffer is in system RAM + */ +extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area); +extern void sys_imageblit(struct fb_info *info, const struct fb_image *image); /* drivers/video/fbmem.c */ extern int register_framebuffer(struct fb_info *fb_info); -- cgit v1.2.3 From 3f9b0880e4a96b02bc0131451f2f6231cd90bd94 Mon Sep 17 00:00:00 2001 From: "Antonino A. Daplas" Date: Tue, 8 May 2007 00:39:02 -0700 Subject: fbdev: pass struct fb_info to fb_read and fb_write It is unnecessary to pass struct file to fb_read() and fb_write() in struct fb_ops. For consistency with the other methods, pass struct fb_info instead. Signed-off-by: Antonino Daplas Acked-by: Paul Mundt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/arcfb.c | 14 ++------------ drivers/video/epson1355fb.c | 18 ++---------------- drivers/video/fbmem.c | 4 ++-- drivers/video/hecubafb.c | 12 +----------- drivers/video/pvr2fb.c | 4 ++-- include/linux/fb.h | 6 ++++-- 6 files changed, 13 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c index 740b83b4bd8c..c668e56c9272 100644 --- a/drivers/video/arcfb.c +++ b/drivers/video/arcfb.c @@ -440,14 +440,11 @@ static int arcfb_ioctl(struct fb_info *info, * the fb. it's inefficient for them to do anything less than 64*8 * writes since we update the lcd in each write() anyway. */ -static ssize_t arcfb_write(struct file *file, const char __user *buf, size_t count, - loff_t *ppos) +static ssize_t arcfb_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) { /* modded from epson 1355 */ - struct inode *inode; - int fbidx; - struct fb_info *info; unsigned long p; int err=-EINVAL; unsigned int fbmemlength,x,y,w,h, bitppos, startpos, endpos, bitcount; @@ -455,13 +452,6 @@ static ssize_t arcfb_write(struct file *file, const char __user *buf, size_t cou unsigned int xres; p = *ppos; - inode = file->f_path.dentry->d_inode; - fbidx = iminor(inode); - info = registered_fb[fbidx]; - - if (!info || !info->screen_base) - return -ENODEV; - par = info->par; xres = info->var.xres; fbmemlength = (xres * info->var.yres)/8; diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c index 37e297d7feb7..ca2c54ce508e 100644 --- a/drivers/video/epson1355fb.c +++ b/drivers/video/epson1355fb.c @@ -403,17 +403,10 @@ static inline unsigned long copy_to_user16(void *to, const void *from, static ssize_t -epson1355fb_read(struct file *file, char *buf, size_t count, loff_t * ppos) +epson1355fb_read(struct fb_info *info, char *buf, size_t count, loff_t * ppos) { - struct inode *inode = file->f_path.dentry->d_inode; - int fbidx = iminor(inode); - struct fb_info *info = registered_fb[fbidx]; unsigned long p = *ppos; - /* from fbmem.c except for our own copy_*_user */ - if (!info || !info->screen_base) - return -ENODEV; - if (p >= info->fix.smem_len) return 0; if (count >= info->fix.smem_len) @@ -434,19 +427,12 @@ epson1355fb_read(struct file *file, char *buf, size_t count, loff_t * ppos) } static ssize_t -epson1355fb_write(struct file *file, const char *buf, +epson1355fb_write(struct fb_info *info, const char *buf, size_t count, loff_t * ppos) { - struct inode *inode = file->f_path.dentry->d_inode; - int fbidx = iminor(inode); - struct fb_info *info = registered_fb[fbidx]; unsigned long p = *ppos; int err; - /* from fbmem.c except for our 
own copy_*_user */ - if (!info || !info->screen_base) - return -ENODEV; - /* from fbmem.c except for our own copy_*_user */ if (p > info->fix.smem_len) return -ENOSPC; diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 08c292d9c401..7b72841222df 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -588,7 +588,7 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) return -EPERM; if (info->fbops->fb_read) - return info->fbops->fb_read(file, buf, count, ppos); + return info->fbops->fb_read(info, buf, count, ppos); total_size = info->screen_size; @@ -663,7 +663,7 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) return -EPERM; if (info->fbops->fb_write) - return info->fbops->fb_write(file, buf, count, ppos); + return info->fbops->fb_write(info, buf, count, ppos); total_size = info->screen_size; diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c index 0ade72590998..3ec0e97f36e4 100644 --- a/drivers/video/hecubafb.c +++ b/drivers/video/hecubafb.c @@ -267,12 +267,9 @@ static void hecubafb_imageblit(struct fb_info *info, * this is the slow path from userspace. they can seek and write to * the fb. it's inefficient to do anything less than a full screen draw */ -static ssize_t hecubafb_write(struct file *file, const char __user *buf, +static ssize_t hecubafb_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos) { - struct inode *inode; - int fbidx; - struct fb_info *info; unsigned long p; int err=-EINVAL; struct hecubafb_par *par; @@ -280,13 +277,6 @@ static ssize_t hecubafb_write(struct file *file, const char __user *buf, unsigned int fbmemlength; p = *ppos; - inode = file->f_dentry->d_inode; - fbidx = iminor(inode); - info = registered_fb[fbidx]; - - if (!info || !info->screen_base) - return -ENODEV; - par = info->par; xres = info->var.xres; fbmemlength = (xres * info->var.yres)/8; diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c index a93618bc9d27..df2909ae704c 100644 --- a/drivers/video/pvr2fb.c +++ b/drivers/video/pvr2fb.c @@ -214,7 +214,7 @@ static int pvr2_init_cable(void); static int pvr2_get_param(const struct pvr2_params *p, const char *s, int val, int size); #ifdef CONFIG_SH_DMA -static ssize_t pvr2fb_write(struct file *file, const char *buf, +static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, size_t count, loff_t *ppos); #endif @@ -674,7 +674,7 @@ static int pvr2_init_cable(void) } #ifdef CONFIG_SH_DMA -static ssize_t pvr2fb_write(struct file *file, const char *buf, +static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, size_t count, loff_t *ppos) { unsigned long dst, start, end, len; diff --git a/include/linux/fb.h b/include/linux/fb.h index c4e1b3d2ecf9..acb6ddb68fa2 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -591,8 +591,10 @@ struct fb_ops { /* For framebuffers with strange non linear layouts or that do not * work with normal memory mapped access */ - ssize_t (*fb_read)(struct file *file, char __user *buf, size_t count, loff_t *ppos); - ssize_t (*fb_write)(struct file *file, const char __user *buf, size_t count, loff_t *ppos); + ssize_t (*fb_read)(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos); + ssize_t (*fb_write)(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos); /* checks var and eventually tweaks it to something supported, * DO NOT MODIFY PAR */ -- cgit v1.2.3 From 09aaf268eb1d22eee690d26a913f660e2081597f Mon Sep 17 00:00:00 2001 From: "Antonino A. 
Daplas" Date: Tue, 8 May 2007 00:39:03 -0700 Subject: fbdev: add fb_read/fb_write functions for framebuffers in system RAM The functions fb_read() and fb_write in fbmem.c assume that the framebuffer is in IO memory. However, we have 3 drivers (hecubafb, arcfb, and vfb) where the framebuffer is allocated from system RAM (via vmalloc). Using __raw_read/__raw_write (fb_readl/fb_writel) for these drivers is illegal, especially in other platforms. Create file read and write methods for these types of drivers. These are named fb_sys_read() and fb_sys_write(). Signed-off-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/Kconfig | 5 +++ drivers/video/Makefile | 1 + drivers/video/fb_sys_fops.c | 104 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/fb.h | 4 ++ 4 files changed, 114 insertions(+) create mode 100644 drivers/video/fb_sys_fops.c (limited to 'include/linux') diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 6d32726e8751..85d0e8707ee3 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -122,6 +122,11 @@ config FB_SYS_IMAGEBLIT blitting. This is used by drivers that don't provide their own (accelerated) version and the framebuffer is in system RAM. +config FB_SYS_FOPS + tristate + depends on FB + default n + config FB_DEFERRED_IO bool depends on FB diff --git a/drivers/video/Makefile b/drivers/video/Makefile index fa025e786848..6c7b26e81fc2 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o obj-$(CONFIG_FB_SYS_FILLRECT) += sysfillrect.o obj-$(CONFIG_FB_SYS_COPYAREA) += syscopyarea.o obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o +obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o obj-$(CONFIG_FB_SVGALIB) += svgalib.o obj-$(CONFIG_FB_MACMODES) += macmodes.o obj-$(CONFIG_FB_DDC) += fb_ddc.o diff --git a/drivers/video/fb_sys_fops.c b/drivers/video/fb_sys_fops.c new file mode 100644 index 000000000000..cf2538d669cd --- /dev/null +++ b/drivers/video/fb_sys_fops.c @@ -0,0 +1,104 @@ +/* + * linux/drivers/video/fb_sys_read.c - Generic file operations where + * framebuffer is in system RAM + * + * Copyright (C) 2007 Antonino Daplas + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + */ +#include +#include +#include + +ssize_t fb_sys_read(struct fb_info *info, char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned long p = *ppos; + void *src; + int err = 0; + unsigned long total_size; + + if (info->state != FBINFO_STATE_RUNNING) + return -EPERM; + + total_size = info->screen_size; + + if (total_size == 0) + total_size = info->fix.smem_len; + + if (p >= total_size) + return 0; + + if (count >= total_size) + count = total_size; + + if (count + p > total_size) + count = total_size - p; + + src = (void __force *)(info->screen_base + p); + + if (info->fbops->fb_sync) + info->fbops->fb_sync(info); + + if (copy_to_user(buf, src, count)) + err = -EFAULT; + + if (!err) + *ppos += count; + + return (err) ? 
err : count; +} +EXPORT_SYMBOL_GPL(fb_sys_read); + +ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long p = *ppos; + void *dst; + int err = 0; + unsigned long total_size; + + if (info->state != FBINFO_STATE_RUNNING) + return -EPERM; + + total_size = info->screen_size; + + if (total_size == 0) + total_size = info->fix.smem_len; + + if (p > total_size) + return -EFBIG; + + if (count > total_size) { + err = -EFBIG; + count = total_size; + } + + if (count + p > total_size) { + if (!err) + err = -ENOSPC; + + count = total_size - p; + } + + dst = (void __force *) (info->screen_base + p); + + if (info->fbops->fb_sync) + info->fbops->fb_sync(info); + + if (copy_from_user(dst, buf, count)) + err = -EFAULT; + + if (!err) + *ppos += count; + + return (err) ? err : count; +} +EXPORT_SYMBOL_GPL(fb_sys_write); + +MODULE_AUTHOR("Antonino Daplas "); +MODULE_DESCRIPTION("Generic file read (fb in system RAM)"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/fb.h b/include/linux/fb.h index acb6ddb68fa2..70d154a02c5c 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -903,6 +903,10 @@ extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image); extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect); extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area); extern void sys_imageblit(struct fb_info *info, const struct fb_image *image); +extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos); +extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos); /* drivers/video/fbmem.c */ extern int register_framebuffer(struct fb_info *fb_info); -- cgit v1.2.3 From bf26ad72a60c0009a99179b449a43daa6bf4b4f6 Mon Sep 17 00:00:00 2001 From: "Antonino A. Daplas" Date: Tue, 8 May 2007 00:39:09 -0700 Subject: fbdev: advertise limitation of drawing engine A few drivers are not capable of blitting rectangles of any dimension. vga16fb can only blit 8-pixel wide rectangles, while s3fb (in tileblitting mode) can only blit 8x16 rectangles. For example, loading a 12x22 font in vga16fb will result in a corrupt display. Advertise this limitation/capability in info->pixmap.blit_x and blit_y. These fields are 32-bit arrays (font max is 32x32 only), ie, if bit 7 is set, then width/height of 7+1 is supported. 
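As an illustration only -- the helper below is made up for this note and is not part of the patch that follows -- a driver whose engine can only blit fixed-size cells would advertise that before calling register_framebuffer(), using the encoding just described (bit n-1 set means a width or height of n pixels is supported):

#include <linux/fb.h>

/* Sketch: an engine limited to 8x16 cells, as s3fb is in tileblitting mode. */
static void example_advertise_blit_caps(struct fb_info *info)
{
	info->pixmap.blit_x = 1U << (8 - 1);	/* widths: 8 pixels only   */
	info->pixmap.blit_y = 1U << (16 - 1);	/* heights: 16 pixels only */
	/*
	 * Leaving either field at 0 lets register_framebuffer() default it
	 * to ~(u32)0, i.e. every dimension from 1 to 32 is supported.
	 */
}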
Signed-off-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/fbmem.c | 6 ++++++ include/linux/fb.h | 4 ++++ 2 files changed, 10 insertions(+) (limited to 'include/linux') diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 7b72841222df..c4ce3e44327e 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1321,6 +1321,12 @@ register_framebuffer(struct fb_info *fb_info) } fb_info->pixmap.offset = 0; + if (!fb_info->pixmap.blit_x) + fb_info->pixmap.blit_x = ~(u32)0; + + if (!fb_info->pixmap.blit_y) + fb_info->pixmap.blit_y = ~(u32)0; + if (!fb_info->modelist.prev || !fb_info->modelist.next) INIT_LIST_HEAD(&fb_info->modelist); diff --git a/include/linux/fb.h b/include/linux/fb.h index 70d154a02c5c..619ba1e40ab9 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -558,6 +558,10 @@ struct fb_pixmap { u32 scan_align; /* alignment per scanline */ u32 access_align; /* alignment per read/write (bits) */ u32 flags; /* see FB_PIXMAP_* */ + u32 blit_x; /* supported bit block dimensions (1-32)*/ + u32 blit_y; /* Format: blit_x = 1 << (width - 1) */ + /* blit_y = 1 << (height - 1) */ + /* if 0, will be set to 0xffffffff (all)*/ /* access methods */ void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size); void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size); -- cgit v1.2.3 From 2d2699d984924890f6dac8cf51c3b6311f56816c Mon Sep 17 00:00:00 2001 From: "Antonino A. Daplas" Date: Tue, 8 May 2007 00:39:11 -0700 Subject: fbcon: font setting should check limitation of driver fbcon_set_font() will now check if the new font dimensions can be drawn by the driver (by checking pixmap.blit_x and blit_y). Similarly, add 2 new parameters to get_default_font(), font_w and font_h, to further aid in the font selection process. 
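For illustration only -- these helpers are made up for this note and are not taken from the patch that follows -- the shape of the new capability check, and of a get_default_font() call that respects the driver's limits, is roughly:

#include <linux/fb.h>
#include <linux/font.h>

/* Bit n-1 set in blit_x/blit_y means the engine can draw a width/height of n pixels. */
static int example_font_fits(struct fb_info *info, int width, int height)
{
	return (info->pixmap.blit_x & (1U << (width - 1))) &&
	       (info->pixmap.blit_y & (1U << (height - 1)));
}

static const struct font_desc *example_pick_font(struct fb_info *info)
{
	/* Passing ~(u32)0 for both masks keeps the old, unrestricted behaviour. */
	return get_default_font(info->var.xres, info->var.yres,
				info->pixmap.blit_x, info->pixmap.blit_y);
}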
Signed-off-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/console/fbcon.c | 17 ++++++++++++++--- drivers/video/console/fonts.c | 10 +++++++++- drivers/video/console/sticore.c | 2 +- include/linux/font.h | 3 ++- 4 files changed, 26 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 19dfdfbd4341..c71a88d531cd 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -988,7 +988,9 @@ static const char *fbcon_startup(void) if (!p->fontdata) { if (!fontname[0] || !(font = find_font(fontname))) font = get_default_font(info->var.xres, - info->var.yres); + info->var.yres, + info->pixmap.blit_x, + info->pixmap.blit_y); vc->vc_font.width = font->width; vc->vc_font.height = font->height; vc->vc_font.data = (void *)(p->fontdata = font->data); @@ -1108,7 +1110,9 @@ static void fbcon_init(struct vc_data *vc, int init) if (!fontname[0] || !(font = find_font(fontname))) font = get_default_font(info->var.xres, - info->var.yres); + info->var.yres, + info->pixmap.blit_x, + info->pixmap.blit_y); vc->vc_font.width = font->width; vc->vc_font.height = font->height; vc->vc_font.data = (void *)(p->fontdata = font->data); @@ -2495,6 +2499,7 @@ static int fbcon_copy_font(struct vc_data *vc, int con) static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigned flags) { + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; unsigned charcount = font->charcount; int w = font->width; int h = font->height; @@ -2508,6 +2513,11 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne if (charcount != 256 && charcount != 512) return -EINVAL; + /* Make sure drawing engine can handle the font */ + if (!(info->pixmap.blit_x & (1 << (font->width - 1))) || + !(info->pixmap.blit_y & (1 << (font->height - 1)))) + return -EINVAL; + size = h * pitch * charcount; new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); @@ -2552,7 +2562,8 @@ static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, cha const struct font_desc *f; if (!name) - f = get_default_font(info->var.xres, info->var.yres); + f = get_default_font(info->var.xres, info->var.yres, + info->pixmap.blit_x, info->pixmap.blit_y); else if (!(f = find_font(name))) return -ENOENT; diff --git a/drivers/video/console/fonts.c b/drivers/video/console/fonts.c index c960728b7e82..a6828d0a4c56 100644 --- a/drivers/video/console/fonts.c +++ b/drivers/video/console/fonts.c @@ -98,6 +98,8 @@ const struct font_desc *find_font(const char *name) * get_default_font - get default font * @xres: screen size of X * @yres: screen size of Y + * @font_w: bit array of supported widths (1 - 32) + * @font_h: bit array of supported heights (1 - 32) * * Get the default font for a specified screen size. * Dimensions are in pixels. 
@@ -107,7 +109,8 @@ const struct font_desc *find_font(const char *name) * */ -const struct font_desc *get_default_font(int xres, int yres) +const struct font_desc *get_default_font(int xres, int yres, u32 font_w, + u32 font_h) { int i, c, cc; const struct font_desc *f, *g; @@ -129,6 +132,11 @@ const struct font_desc *get_default_font(int xres, int yres) #endif if ((yres < 400) == (f->height <= 8)) c += 1000; + + if (!(font_w & (1 << (f->width - 1))) || + !(font_w & (1 << (f->height - 1)))) + c += 1000; + if (c > cc) { cc = c; g = f; diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index 88e7038eab88..717b360d0415 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c @@ -495,7 +495,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name) return NULL; fbfont = find_font(fbfont_name); if (!fbfont) - fbfont = get_default_font(1024,768); + fbfont = get_default_font(1024,768, ~(u32)0, ~(u32)0); if (!fbfont) return NULL; diff --git a/include/linux/font.h b/include/linux/font.h index 53b129f07f6f..40a24ab41b36 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -49,7 +49,8 @@ extern const struct font_desc *find_font(const char *name); /* Get the default font for a specific screen size */ -extern const struct font_desc *get_default_font(int xres, int yres); +extern const struct font_desc *get_default_font(int xres, int yres, + u32 font_w, u32 font_h); /* Max. length for the name of a predefined font */ #define MAX_FONT_NAME 32 -- cgit v1.2.3 From 11f11d522f74965fce421a5a09ff0ab178139d36 Mon Sep 17 00:00:00 2001 From: "Antonino A. Daplas" Date: Tue, 8 May 2007 00:39:16 -0700 Subject: fbdev: add tile operation to get the maximum length of the map Add a tile method, fb_get_tilemax(), that returns the maximum length of the tile map (or font map). This is needed by s3fb which can only handle 256 characters. Signed-off-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/fb.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/fb.h b/include/linux/fb.h index 619ba1e40ab9..a2f382c01cd2 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -655,7 +655,6 @@ struct fb_ops { }; #ifdef CONFIG_FB_TILEBLITTING - #define FB_TILE_CURSOR_NONE 0 #define FB_TILE_CURSOR_UNDERLINE 1 #define FB_TILE_CURSOR_LOWER_THIRD 2 @@ -727,6 +726,8 @@ struct fb_tile_ops { /* cursor */ void (*fb_tilecursor)(struct fb_info *info, struct fb_tilecursor *cursor); + /* get maximum length of the tile map */ + int (*fb_get_tilemax)(struct fb_info *info); }; #endif /* CONFIG_FB_TILEBLITTING */ -- cgit v1.2.3 From 38a3dc51852d8350b156ea909c5aa8767d71b005 Mon Sep 17 00:00:00 2001 From: "Antonino A. Daplas" Date: Tue, 8 May 2007 00:39:37 -0700 Subject: fbdev: fbcon: check if mode can handle new screen Check if the mode can properly display the screen. This will be needed by drivers where the capability is not constant with each mode. The function fb_set_var() will query fbcon the requirement, then it will query the driver (via a new hook fb_get_caps()) its capability. If the driver's capability cannot handle fbcon's requirement, then fb_set_var() will fail. 
For example, if a particular driver supports 2 modes where: mode1 = can only display 8x16 bitmaps mode2 = can display any bitmap then if current mode = mode2 and current font = 12x22 fbset /* mode1 cannot handle 12x22 */ fbset will fail Signed-off-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/console/fbcon.c | 42 ++++++++++++++++++++++++++++++++++++++++++ drivers/video/fbmem.c | 33 +++++++++++++++++++++++++++++++++ include/linux/fb.h | 12 ++++++++++++ 3 files changed, 87 insertions(+) (limited to 'include/linux') diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 6fc3501581d6..34899bd7c9f3 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -3043,6 +3043,43 @@ static void fbcon_new_modelist(struct fb_info *info) } } +static void fbcon_get_requirement(struct fb_info *info, + struct fb_blit_caps *caps) +{ + struct vc_data *vc; + struct display *p; + int charcnt; + + if (caps->flags) { + int i; + + for (i = first_fb_vc; i <= last_fb_vc; i++) { + vc = vc_cons[i].d; + if (vc && vc->vc_mode == KD_TEXT) { + p = &fb_display[i]; + caps->x |= 1 << (vc->vc_font.width - 1); + caps->y |= 1 << (vc->vc_font.height - 1); + charcnt = (p->userfont) ? + FNTCHARCNT(p->fontdata) : 256; + if (caps->len < charcnt) + caps->len = charcnt; + } + } + } else { + vc = vc_cons[fg_console].d; + + if (vc && vc->vc_mode == KD_TEXT) { + p = &fb_display[fg_console]; + caps->x |= 1 << (vc->vc_font.width - 1); + caps->y |= 1 << (vc->vc_font.height - 1); + charcnt = (p->userfont) ? + FNTCHARCNT(p->fontdata) : 256; + if (caps->len < charcnt) + caps->len = charcnt; + } + } +} + static int fbcon_event_notify(struct notifier_block *self, unsigned long action, void *data) { @@ -3050,6 +3087,7 @@ static int fbcon_event_notify(struct notifier_block *self, struct fb_info *info = event->info; struct fb_videomode *mode; struct fb_con2fbmap *con2fb; + struct fb_blit_caps *caps; int ret = 0; /* @@ -3098,6 +3136,10 @@ static int fbcon_event_notify(struct notifier_block *self, case FB_EVENT_NEW_MODELIST: fbcon_new_modelist(info); break; + case FB_EVENT_GET_REQ: + caps = event->data; + fbcon_get_requirement(info, caps); + break; } done: diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index cd1407921af5..354711c84aaa 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -773,6 +773,29 @@ fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var) return 0; } +static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var, + u32 activate) +{ + struct fb_event event; + struct fb_blit_caps caps, fbcaps; + int err = 0; + + memset(&caps, 0, sizeof(caps)); + memset(&fbcaps, 0, sizeof(fbcaps)); + caps.flags = (activate & FB_ACTIVATE_ALL) ? 
1 : 0; + event.info = info; + event.data = &caps; + fb_notifier_call_chain(FB_EVENT_GET_REQ, &event); + info->fbops->fb_get_caps(info, &fbcaps, var); + + if (((fbcaps.x ^ caps.x) & caps.x) || + ((fbcaps.y ^ caps.y) & caps.y) || + (fbcaps.len < caps.len)) + err = -EINVAL; + + return err; +} + int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) { @@ -817,7 +840,15 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) struct fb_videomode mode; int err = 0; + if (info->fbops->fb_get_caps) { + err = fb_check_caps(info, var, activate); + + if (err) + goto done; + } + info->var = *var; + if (info->fbops->fb_set_par) info->fbops->fb_set_par(info); @@ -843,6 +874,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) } } } + + done: return 0; } diff --git a/include/linux/fb.h b/include/linux/fb.h index a2f382c01cd2..dff7a728948c 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -527,12 +527,20 @@ struct fb_cursor_user { #define FB_EVENT_MODE_CHANGE_ALL 0x0B /* A software display blank change occured */ #define FB_EVENT_CONBLANK 0x0C +/* Get drawing requirements */ +#define FB_EVENT_GET_REQ 0x0D struct fb_event { struct fb_info *info; void *data; }; +struct fb_blit_caps { + u32 x; + u32 y; + u32 len; + u32 flags; +}; extern int fb_register_client(struct notifier_block *nb); extern int fb_unregister_client(struct notifier_block *nb); @@ -652,6 +660,10 @@ struct fb_ops { /* restore saved state */ void (*fb_restore_state)(struct fb_info *info); + + /* get capability given var */ + void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps, + struct fb_var_screeninfo *var); }; #ifdef CONFIG_FB_TILEBLITTING -- cgit v1.2.3 From c831c338f0ad299fcd1592c6e4f30657480f39af Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Tue, 8 May 2007 00:39:49 -0700 Subject: use mutex instead of semaphore in virtual console driver The virtual console driver uses a semaphore as mutex. Use the mutex API instead of the (binary) semaphore. Signed-off-by: Matthias Kaehlcke Cc: "Antonino A.
Daplas" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/vc_screen.c | 18 ++++++++++-------- drivers/char/vt.c | 5 +++-- include/linux/vt_kern.h | 3 ++- 3 files changed, 15 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/vc_screen.c b/drivers/char/vc_screen.c index 61fbc9e52eeb..83aeedda200c 100644 --- a/drivers/char/vc_screen.c +++ b/drivers/char/vc_screen.c @@ -28,11 +28,13 @@ #include #include #include +#include #include #include #include #include #include + #include #include #include @@ -69,11 +71,11 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig) { int size; - down(&con_buf_sem); + mutex_lock(&con_buf_mtx); size = vcs_size(file->f_path.dentry->d_inode); switch (orig) { default: - up(&con_buf_sem); + mutex_unlock(&con_buf_mtx); return -EINVAL; case 2: offset += size; @@ -84,11 +86,11 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig) break; } if (offset < 0 || offset > size) { - up(&con_buf_sem); + mutex_unlock(&con_buf_mtx); return -EINVAL; } file->f_pos = offset; - up(&con_buf_sem); + mutex_unlock(&con_buf_mtx); return file->f_pos; } @@ -105,7 +107,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) unsigned short *org = NULL; ssize_t ret; - down(&con_buf_sem); + mutex_lock(&con_buf_mtx); pos = *ppos; @@ -262,7 +264,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) ret = read; unlock_out: release_console_sem(); - up(&con_buf_sem); + mutex_unlock(&con_buf_mtx); return ret; } @@ -279,7 +281,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) u16 *org0 = NULL, *org = NULL; size_t ret; - down(&con_buf_sem); + mutex_lock(&con_buf_mtx); pos = *ppos; @@ -449,7 +451,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) unlock_out: release_console_sem(); - up(&con_buf_sem); + mutex_unlock(&con_buf_mtx); return ret; } diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 0edbbc3fa9d8..bbd9fc412877 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -86,6 +86,7 @@ #include #include #include +#include #include #include #include @@ -1952,7 +1953,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) * kernel memory allocation is available. */ char con_buf[CON_BUF_SIZE]; -DECLARE_MUTEX(con_buf_sem); +DEFINE_MUTEX(con_buf_mtx); /* is_double_width() is based on the wcwidth() implementation by * Markus Kuhn -- 2003-05-20 (Unicode 4.0) @@ -2049,7 +2050,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co /* At this point 'buf' is guaranteed to be a kernel buffer * and therefore no access to userspace (and therefore sleeping) - * will be needed. The con_buf_sem serializes all tty based + * will be needed. The con_buf_mtx serializes all tty based * console rendering and vcs write/read operations. We hold * the console spinlock during the entire write. */ diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index e0db669998f3..d961635d0e61 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -82,7 +83,7 @@ void reset_vc(struct vc_data *vc); #define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 
256 : PAGE_SIZE) extern char con_buf[CON_BUF_SIZE]; -extern struct semaphore con_buf_sem; +extern struct mutex con_buf_mtx; extern char vt_dont_switch; struct vt_spawn_console { -- cgit v1.2.3 From 34ed25f50b347c7e1ff78f9308e025ddd57c2f20 Mon Sep 17 00:00:00 2001 From: Ondrej Zajicek Date: Tue, 8 May 2007 00:40:00 -0700 Subject: s3fb: updates Move s3fb_get_tilemax to svgalib.c as svga_get_tilemax, because it reports limitation of other code from svgalib (svga_settile, svga_tilecopy, ...) Limit font width to 8 pixels in 4 bpp mode. Signed-off-by: Ondrej Zajicek Signed-off-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/s3fb.c | 15 ++++++--------- drivers/video/svgalib.c | 6 ++++++ include/linux/svga.h | 1 + 3 files changed, 13 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 4d0e204dd8ad..756fafb41d78 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c @@ -183,18 +183,13 @@ static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map) } } -static int s3fb_get_tilemax(struct fb_info *info) -{ - return 256; -} - static struct fb_tile_ops s3fb_tile_ops = { .fb_settile = svga_settile, .fb_tilecopy = svga_tilecopy, .fb_tilefill = svga_tilefill, .fb_tileblit = svga_tileblit, .fb_tilecursor = svga_tilecursor, - .fb_get_tilemax = s3fb_get_tilemax, + .fb_get_tilemax = svga_get_tilemax, }; static struct fb_tile_ops s3fb_fast_tile_ops = { @@ -203,7 +198,7 @@ static struct fb_tile_ops s3fb_fast_tile_ops = { .fb_tilefill = svga_tilefill, .fb_tileblit = svga_tileblit, .fb_tilecursor = svga_tilecursor, - .fb_get_tilemax = s3fb_get_tilemax, + .fb_get_tilemax = svga_get_tilemax, }; @@ -459,9 +454,10 @@ static int s3fb_set_par(struct fb_info *info) info->flags &= ~FBINFO_MISC_TILEBLITTING; info->tileops = NULL; - /* supports blit rectangles of any dimension */ - info->pixmap.blit_x = ~(u32)0; + /* in 4bpp supports 8p wide tiles only, any tiles otherwise */ + info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0); info->pixmap.blit_y = ~(u32)0; + offset_value = (info->var.xres_virtual * bpp) / 64; screen_size = info->var.yres_virtual * info->fix.line_length; } else { @@ -470,6 +466,7 @@ static int s3fb_set_par(struct fb_info *info) info->flags |= FBINFO_MISC_TILEBLITTING; info->tileops = fasttext ? 
&s3fb_fast_tile_ops : &s3fb_tile_ops; + /* supports 8x16 tiles only */ info->pixmap.blit_x = 1 << (8 - 1); info->pixmap.blit_y = 1 << (16 - 1); diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c index 7fddc4a9b99b..079cdc911e48 100644 --- a/drivers/video/svgalib.c +++ b/drivers/video/svgalib.c @@ -342,6 +342,11 @@ void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor) vga_wcrt(NULL, 0x0A, cs); /* set cursor start and enable it */ } +int svga_get_tilemax(struct fb_info *info) +{ + return 256; +} + /* ------------------------------------------------------------------------- */ @@ -623,6 +628,7 @@ EXPORT_SYMBOL(svga_tilecopy); EXPORT_SYMBOL(svga_tilefill); EXPORT_SYMBOL(svga_tileblit); EXPORT_SYMBOL(svga_tilecursor); +EXPORT_SYMBOL(svga_get_tilemax); EXPORT_SYMBOL(svga_compute_pll); EXPORT_SYMBOL(svga_check_timings); diff --git a/include/linux/svga.h b/include/linux/svga.h index eadb981bb37c..e1cc552e04fe 100644 --- a/include/linux/svga.h +++ b/include/linux/svga.h @@ -112,6 +112,7 @@ void svga_tilecopy(struct fb_info *info, struct fb_tilearea *area); void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect); void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit); void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor); +int svga_get_tilemax(struct fb_info *info); int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node); int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node); -- cgit v1.2.3 From 055b8224140e7a7515bf8113ce675d58abffedf1 Mon Sep 17 00:00:00 2001 From: Alex Dubov Date: Tue, 1 May 2007 20:14:55 -0700 Subject: disable socket power in adapter driver instead of media one Socket power must be fully controlled by adapter driver. This also prevents unnecessary power-off of the socket when media driver is unloaded, yet media remains in the socket. 
Signed-off-by: Alex Dubov Signed-off-by: Pierre Ossman --- drivers/misc/tifm_7xx1.c | 27 ++++++++++++++++++++++++--- drivers/mmc/host/tifm_sd.c | 13 +------------ include/linux/tifm.h | 1 + 3 files changed, 26 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index 1ba6c085419a..c08ad8f823d2 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -105,7 +105,8 @@ static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr) == TIFM_TYPE_XD) msleep(40); - writel((s_state & 7) | 0x0c00, sock_addr + SOCK_CONTROL); + writel((s_state & TIFM_CTRL_POWER_MASK) | 0x0c00, + sock_addr + SOCK_CONTROL); /* wait for power to stabilize */ msleep(20); for (cnt = 16; cnt <= 256; cnt <<= 1) { @@ -122,6 +123,12 @@ static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr) return (readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7; } +inline static void tifm_7xx1_sock_power_off(char __iomem *sock_addr) +{ + writel((~TIFM_CTRL_POWER_MASK) & readl(sock_addr + SOCK_CONTROL), + sock_addr + SOCK_CONTROL); +} + inline static char __iomem * tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num) { @@ -133,6 +140,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work) struct tifm_adapter *fm = container_of(work, struct tifm_adapter, media_switcher); struct tifm_dev *sock; + char __iomem *sock_addr; unsigned long flags; unsigned char media_id; unsigned int socket_change_set, cnt; @@ -158,11 +166,12 @@ static void tifm_7xx1_switch_media(struct work_struct *work) "%s : demand removing card from socket %u:%u\n", fm->cdev.class_id, fm->id, cnt); fm->sockets[cnt] = NULL; + sock_addr = sock->addr; spin_unlock_irqrestore(&fm->lock, flags); device_unregister(&sock->dev); spin_lock_irqsave(&fm->lock, flags); - writel(0x0e00, tifm_7xx1_sock_addr(fm->addr, cnt) - + SOCK_CONTROL); + tifm_7xx1_sock_power_off(sock_addr); + writel(0x0e00, sock_addr + SOCK_CONTROL); } spin_unlock_irqrestore(&fm->lock, flags); @@ -205,8 +214,16 @@ static void tifm_7xx1_switch_media(struct work_struct *work) static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state) { + struct tifm_adapter *fm = pci_get_drvdata(dev); + int cnt; + dev_dbg(&dev->dev, "suspending host\n"); + for (cnt = 0; cnt < fm->num_sockets; cnt++) { + if (fm->sockets[cnt]) + tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr); + } + pci_save_state(dev); pci_enable_wake(dev, pci_choose_state(dev, state), 0); pci_disable_device(dev); @@ -357,6 +374,7 @@ err_out: static void tifm_7xx1_remove(struct pci_dev *dev) { struct tifm_adapter *fm = pci_get_drvdata(dev); + int cnt; fm->eject = tifm_7xx1_dummy_eject; writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); @@ -365,6 +383,9 @@ static void tifm_7xx1_remove(struct pci_dev *dev) tifm_remove_adapter(fm); + for (cnt = 0; cnt < fm->num_sockets; cnt++) + tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt)); + pci_set_drvdata(dev, NULL); iounmap(fm->addr); diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 7511f961c67b..8b736e968447 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -1021,10 +1021,6 @@ static void tifm_sd_remove(struct tifm_dev *sock) mmc_remove_host(mmc); dev_dbg(&sock->dev, "after remove\n"); - /* The meaning of the bit majority in this constant is unknown. 
*/ - writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), - sock->addr + SOCK_CONTROL); - mmc_free_host(mmc); } @@ -1032,14 +1028,7 @@ static void tifm_sd_remove(struct tifm_dev *sock) static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) { - struct mmc_host *mmc = tifm_get_drvdata(sock); - int rc; - - rc = mmc_suspend_host(mmc, state); - /* The meaning of the bit majority in this constant is unknown. */ - writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), - sock->addr + SOCK_CONTROL); - return rc; + return mmc_suspend_host(tifm_get_drvdata(sock), state); } static int tifm_sd_resume(struct tifm_dev *sock) diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 2a196982601f..6b3a31805c72 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -63,6 +63,7 @@ enum { #define TIFM_CTRL_LED 0x00000040 #define TIFM_CTRL_FAST_CLK 0x00000100 +#define TIFM_CTRL_POWER_MASK 0x00000007 #define TIFM_SOCK_STATE_OCCUPIED 0x00000008 #define TIFM_SOCK_STATE_POWERED 0x00000080 -- cgit v1.2.3 From 66da876962f782a3974b4a957d12f20656584a4d Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Wed, 2 May 2007 11:55:42 +0200 Subject: USB HID: report descriptor of Cypress USB barcode readers needs fixup Certain versions of Cypress USB barcode readers (this problem is known to happen at least with PIDs 0xde61 and 0xde64) have report descriptor which has swapped usage min and usage max tag. This results in HID parser failing for report descriptor of these devices, as it (wrongly) requires allocating more usages than HID_MAX_USAGES. Solve this by walking through the report descriptor for such devices, and swap the usage min and usage max items (and their values) to be in proper order. Reported-by: Bret Towe Signed-off-by: Jiri Kosina --- drivers/hid/usbhid/hid-core.c | 27 +++++++++++++++++++++++++++ drivers/hid/usbhid/hid-quirks.c | 5 +++++ include/linux/hid.h | 1 + 3 files changed, 33 insertions(+) (limited to 'include/linux') diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 91d610358d57..1c0bd481219c 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -692,6 +692,30 @@ static void hid_fixup_logitech_descriptor(unsigned char *rdesc, int rsize) } } +/* + * Some USB barcode readers from cypress have usage min and usage max in + * the wrong order + */ +static void hid_fixup_cypress_descriptor(unsigned char *rdesc, int rsize) +{ + short fixed = 0; + int i; + + for (i = 0; i < rsize - 4; i++) { + if (rdesc[i] == 0x29 && rdesc [i+2] == 0x19) { + unsigned char tmp; + + rdesc[i] = 0x19; rdesc[i+2] = 0x29; + tmp = rdesc[i+3]; + rdesc[i+3] = rdesc[i+1]; + rdesc[i+1] = tmp; + } + } + + if (fixed) + info("Fixing up Cypress report descriptor"); +} + static struct hid_device *usb_hid_configure(struct usb_interface *intf) { struct usb_host_interface *interface = intf->cur_altsetting; @@ -758,6 +782,9 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) if (quirks & HID_QUIRK_LOGITECH_DESCRIPTOR) hid_fixup_logitech_descriptor(rdesc, rsize); + if (quirks & HID_QUIRK_SWAPPED_MIN_MAX) + hid_fixup_cypress_descriptor(rdesc, rsize); + #ifdef CONFIG_HID_DEBUG printk(KERN_DEBUG __FILE__ ": report descriptor (size %u, read %d) = ", rsize, n); for (n = 0; n < rsize; n++) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 17a87555e32f..6216f6049ecd 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -92,6 +92,8 @@ #define USB_DEVICE_ID_CYPRESS_MOUSE 0x0001 #define 
USB_DEVICE_ID_CYPRESS_HIDCOM 0x5500 #define USB_DEVICE_ID_CYPRESS_ULTRAMOUSE 0x7417 +#define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61 +#define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64 #define USB_VENDOR_ID_DELL 0x413c #define USB_DEVICE_ID_DELL_W7658 0x2005 @@ -445,6 +447,9 @@ static const struct hid_blacklist { { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658, HID_QUIRK_RESET_LEDS }, + { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_SWAPPED_MIN_MAX }, + { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_SWAPPED_MIN_MAX }, + { 0, 0 } }; diff --git a/include/linux/hid.h b/include/linux/hid.h index 37076b116ed0..827ee748fd4c 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -275,6 +275,7 @@ struct hid_item { #define HID_QUIRK_LOGITECH_DESCRIPTOR 0x00100000 #define HID_QUIRK_DUPLICATE_USAGES 0x00200000 #define HID_QUIRK_RESET_LEDS 0x00400000 +#define HID_QUIRK_SWAPPED_MIN_MAX 0x00800000 /* * This is the global environment of the parser. This information is -- cgit v1.2.3 From 225c7b1feef1b41170f7037a5b10a65cd8a42c54 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Tue, 8 May 2007 18:00:38 -0700 Subject: IB/mlx4: Add a driver Mellanox ConnectX InfiniBand adapters Add an InfiniBand driver for Mellanox ConnectX adapters. Because these adapters can also be used as ethernet NICs and Fibre Channel HBAs, the driver is split into two modules: mlx4_core: Handles low-level things like device initialization and processing firmware commands. Also controls resource allocation so that the InfiniBand, ethernet and FC functions can share a device without stepping on each other. mlx4_ib: Handles InfiniBand-specific things; plugs into the InfiniBand midlayer. Signed-off-by: Roland Dreier --- drivers/infiniband/Kconfig | 2 + drivers/infiniband/Makefile | 1 + drivers/infiniband/hw/mlx4/Kconfig | 9 + drivers/infiniband/hw/mlx4/Makefile | 3 + drivers/infiniband/hw/mlx4/ah.c | 100 +++ drivers/infiniband/hw/mlx4/cq.c | 525 +++++++++++++ drivers/infiniband/hw/mlx4/doorbell.c | 216 ++++++ drivers/infiniband/hw/mlx4/mad.c | 339 +++++++++ drivers/infiniband/hw/mlx4/main.c | 651 +++++++++++++++++ drivers/infiniband/hw/mlx4/mlx4_ib.h | 285 ++++++++ drivers/infiniband/hw/mlx4/mr.c | 184 +++++ drivers/infiniband/hw/mlx4/qp.c | 1294 +++++++++++++++++++++++++++++++++ drivers/infiniband/hw/mlx4/srq.c | 334 +++++++++ drivers/infiniband/hw/mlx4/user.h | 92 +++ drivers/net/Kconfig | 14 + drivers/net/Makefile | 1 + drivers/net/mlx4/Makefile | 4 + drivers/net/mlx4/alloc.c | 179 +++++ drivers/net/mlx4/catas.c | 70 ++ drivers/net/mlx4/cmd.c | 429 +++++++++++ drivers/net/mlx4/cq.c | 254 +++++++ drivers/net/mlx4/eq.c | 696 ++++++++++++++++++ drivers/net/mlx4/fw.c | 775 ++++++++++++++++++++ drivers/net/mlx4/fw.h | 167 +++++ drivers/net/mlx4/icm.c | 379 ++++++++++ drivers/net/mlx4/icm.h | 135 ++++ drivers/net/mlx4/intf.c | 165 +++++ drivers/net/mlx4/main.c | 936 ++++++++++++++++++++++++ drivers/net/mlx4/mcg.c | 380 ++++++++++ drivers/net/mlx4/mlx4.h | 348 +++++++++ drivers/net/mlx4/mr.c | 479 ++++++++++++ drivers/net/mlx4/pd.c | 102 +++ drivers/net/mlx4/profile.c | 238 ++++++ drivers/net/mlx4/qp.c | 280 +++++++ drivers/net/mlx4/reset.c | 181 +++++ drivers/net/mlx4/srq.c | 227 ++++++ include/linux/mlx4/cmd.h | 178 +++++ include/linux/mlx4/cq.h | 123 ++++ include/linux/mlx4/device.h | 331 +++++++++ include/linux/mlx4/doorbell.h | 97 +++ include/linux/mlx4/driver.h | 59 ++ include/linux/mlx4/qp.h | 288 ++++++++ include/linux/mlx4/srq.h | 42 ++ 43 files changed, 11592 insertions(+) create mode 
100644 drivers/infiniband/hw/mlx4/Kconfig create mode 100644 drivers/infiniband/hw/mlx4/Makefile create mode 100644 drivers/infiniband/hw/mlx4/ah.c create mode 100644 drivers/infiniband/hw/mlx4/cq.c create mode 100644 drivers/infiniband/hw/mlx4/doorbell.c create mode 100644 drivers/infiniband/hw/mlx4/mad.c create mode 100644 drivers/infiniband/hw/mlx4/main.c create mode 100644 drivers/infiniband/hw/mlx4/mlx4_ib.h create mode 100644 drivers/infiniband/hw/mlx4/mr.c create mode 100644 drivers/infiniband/hw/mlx4/qp.c create mode 100644 drivers/infiniband/hw/mlx4/srq.c create mode 100644 drivers/infiniband/hw/mlx4/user.h create mode 100644 drivers/net/mlx4/Makefile create mode 100644 drivers/net/mlx4/alloc.c create mode 100644 drivers/net/mlx4/catas.c create mode 100644 drivers/net/mlx4/cmd.c create mode 100644 drivers/net/mlx4/cq.c create mode 100644 drivers/net/mlx4/eq.c create mode 100644 drivers/net/mlx4/fw.c create mode 100644 drivers/net/mlx4/fw.h create mode 100644 drivers/net/mlx4/icm.c create mode 100644 drivers/net/mlx4/icm.h create mode 100644 drivers/net/mlx4/intf.c create mode 100644 drivers/net/mlx4/main.c create mode 100644 drivers/net/mlx4/mcg.c create mode 100644 drivers/net/mlx4/mlx4.h create mode 100644 drivers/net/mlx4/mr.c create mode 100644 drivers/net/mlx4/pd.c create mode 100644 drivers/net/mlx4/profile.c create mode 100644 drivers/net/mlx4/qp.c create mode 100644 drivers/net/mlx4/reset.c create mode 100644 drivers/net/mlx4/srq.c create mode 100644 include/linux/mlx4/cmd.h create mode 100644 include/linux/mlx4/cq.h create mode 100644 include/linux/mlx4/device.h create mode 100644 include/linux/mlx4/doorbell.h create mode 100644 include/linux/mlx4/driver.h create mode 100644 include/linux/mlx4/qp.h create mode 100644 include/linux/mlx4/srq.h (limited to 'include/linux') diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 82afba5c0bf4..37deaae49190 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -45,6 +45,8 @@ source "drivers/infiniband/hw/ehca/Kconfig" source "drivers/infiniband/hw/amso1100/Kconfig" source "drivers/infiniband/hw/cxgb3/Kconfig" +source "drivers/infiniband/hw/mlx4/Kconfig" + source "drivers/infiniband/ulp/ipoib/Kconfig" source "drivers/infiniband/ulp/srp/Kconfig" diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile index da2066c4f22c..75f325e40b54 100644 --- a/drivers/infiniband/Makefile +++ b/drivers/infiniband/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/ obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/ obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/ +obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/ obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig new file mode 100644 index 000000000000..b8912cdb9663 --- /dev/null +++ b/drivers/infiniband/hw/mlx4/Kconfig @@ -0,0 +1,9 @@ +config MLX4_INFINIBAND + tristate "Mellanox ConnectX HCA support" + depends on INFINIBAND + select MLX4_CORE + ---help--- + This driver provides low-level InfiniBand support for + Mellanox ConnectX PCI Express host channel adapters (HCAs). + This is required to use InfiniBand protocols such as + IP-over-IB or SRP with these devices. 
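The core/protocol split described in the changelog above is what a client module sees through the mlx4_interface callbacks declared in the new include/linux/mlx4/driver.h: mlx4_core owns the PCI device and calls back every registered interface as devices come and go. A rough sketch of such a client, with all example_* names invented for illustration and the callback shapes paraphrased from driver.h rather than copied verbatim:

#include <linux/module.h>
#include <linux/mlx4/driver.h>

static void *example_add(struct mlx4_dev *dev)
{
	/* set up per-HCA state; whatever is returned here is handed back
	 * as "context" in the remove and event callbacks */
	return dev;
}

static void example_remove(struct mlx4_dev *dev, void *context)
{
	/* tear down whatever example_add() created */
}

static void example_event(struct mlx4_dev *dev, void *context,
			  enum mlx4_dev_event event, int subtype)
{
	/* react to port state changes, catastrophic errors, ... */
}

static struct mlx4_interface example_interface = {
	.add	= example_add,
	.remove	= example_remove,
	.event	= example_event,
};

static int __init example_init(void)
{
	/* mlx4_core will call example_add() for each ConnectX device */
	return mlx4_register_interface(&example_interface);
}

static void __exit example_exit(void)
{
	mlx4_unregister_interface(&example_interface);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("Dual BSD/GPL");

mlx4_ib attaches to mlx4_core the same way from main.c, and this registration path is what will let the ethernet and FC functions mentioned above share a single core driver.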
diff --git a/drivers/infiniband/hw/mlx4/Makefile b/drivers/infiniband/hw/mlx4/Makefile new file mode 100644 index 000000000000..70f09c7826da --- /dev/null +++ b/drivers/infiniband/hw/mlx4/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MLX4_INFINIBAND) += mlx4_ib.o + +mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c new file mode 100644 index 000000000000..c75ac9463e20 --- /dev/null +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "mlx4_ib.h" + +struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) +{ + struct mlx4_dev *dev = to_mdev(pd->device)->dev; + struct mlx4_ib_ah *ah; + + ah = kmalloc(sizeof *ah, GFP_ATOMIC); + if (!ah) + return ERR_PTR(-ENOMEM); + + memset(&ah->av, 0, sizeof ah->av); + + ah->av.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); + ah->av.g_slid = ah_attr->src_path_bits; + ah->av.dlid = cpu_to_be16(ah_attr->dlid); + if (ah_attr->static_rate) { + ah->av.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET; + while (ah->av.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && + !(1 << ah->av.stat_rate & dev->caps.stat_rate_support)) + --ah->av.stat_rate; + } + ah->av.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); + if (ah_attr->ah_flags & IB_AH_GRH) { + ah->av.g_slid |= 0x80; + ah->av.gid_index = ah_attr->grh.sgid_index; + ah->av.hop_limit = ah_attr->grh.hop_limit; + ah->av.sl_tclass_flowlabel |= + cpu_to_be32((ah_attr->grh.traffic_class << 20) | + ah_attr->grh.flow_label); + memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16); + } + + return &ah->ibah; +} + +int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) +{ + struct mlx4_ib_ah *ah = to_mah(ibah); + + memset(ah_attr, 0, sizeof *ah_attr); + ah_attr->dlid = be16_to_cpu(ah->av.dlid); + ah_attr->sl = be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28; + ah_attr->port_num = be32_to_cpu(ah->av.port_pd) >> 24; + if (ah->av.stat_rate) + ah_attr->static_rate = ah->av.stat_rate - MLX4_STAT_RATE_OFFSET; + ah_attr->src_path_bits = ah->av.g_slid & 0x7F; + + if (mlx4_ib_ah_grh_present(ah)) { + ah_attr->ah_flags = IB_AH_GRH; + + ah_attr->grh.traffic_class = + be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20; + ah_attr->grh.flow_label = + be32_to_cpu(ah->av.sl_tclass_flowlabel) & 0xfffff; + ah_attr->grh.hop_limit = ah->av.hop_limit; + ah_attr->grh.sgid_index = ah->av.gid_index; + memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, 16); + } + + return 0; +} + +int mlx4_ib_destroy_ah(struct ib_ah *ah) +{ + kfree(to_mah(ah)); + return 0; +} diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c new file mode 100644 index 000000000000..b2a290c6703a --- /dev/null +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -0,0 +1,525 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include "mlx4_ib.h" +#include "user.h" + +static void mlx4_ib_cq_comp(struct mlx4_cq *cq) +{ + struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; + ibcq->comp_handler(ibcq, ibcq->cq_context); +} + +static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) +{ + struct ib_event event; + struct ib_cq *ibcq; + + if (type != MLX4_EVENT_TYPE_CQ_ERROR) { + printk(KERN_WARNING "mlx4_ib: Unexpected event type %d " + "on CQ %06x\n", type, cq->cqn); + return; + } + + ibcq = &to_mibcq(cq)->ibcq; + if (ibcq->event_handler) { + event.device = ibcq->device; + event.event = IB_EVENT_CQ_ERR; + event.element.cq = ibcq; + ibcq->event_handler(&event, ibcq->cq_context); + } +} + +static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n) +{ + int offset = n * sizeof (struct mlx4_cqe); + + if (buf->buf.nbufs == 1) + return buf->buf.u.direct.buf + offset; + else + return buf->buf.u.page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +static void *get_cqe(struct mlx4_ib_cq *cq, int n) +{ + return get_cqe_from_buf(&cq->buf, n); +} + +static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n) +{ + struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); + + return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^ + !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; +} + +static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq) +{ + return get_sw_cqe(cq, cq->mcq.cons_index); +} + +struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct mlx4_ib_dev *dev = to_mdev(ibdev); + struct mlx4_ib_cq *cq; + struct mlx4_uar *uar; + int buf_size; + int err; + + if (entries < 1 || entries > dev->dev->caps.max_cqes) + return ERR_PTR(-EINVAL); + + cq = kmalloc(sizeof *cq, GFP_KERNEL); + if (!cq) + return ERR_PTR(-ENOMEM); + + entries = roundup_pow_of_two(entries + 1); + cq->ibcq.cqe = entries - 1; + buf_size = entries * sizeof (struct mlx4_cqe); + spin_lock_init(&cq->lock); + + if (context) { + struct mlx4_ib_create_cq ucmd; + + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { + err = -EFAULT; + goto err_cq; + } + + cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size, + IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(cq->umem)) { + err = PTR_ERR(cq->umem); + goto err_cq; + } + + err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem), + ilog2(cq->umem->page_size), &cq->buf.mtt); + if (err) + goto err_buf; + + err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem); + if (err) + goto err_mtt; + + err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr, + &cq->db); + if (err) + goto err_mtt; + + uar = &to_mucontext(context)->uar; + } else { + err = mlx4_ib_db_alloc(dev, &cq->db, 1); + if (err) + goto err_cq; + + cq->mcq.set_ci_db = cq->db.db; + cq->mcq.arm_db = cq->db.db + 1; + *cq->mcq.set_ci_db = 0; + *cq->mcq.arm_db = 0; + + if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) { + err = -ENOMEM; + goto err_db; + } + + err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift, + &cq->buf.mtt); + if (err) + goto err_buf; + + err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf); + if (err) + goto err_mtt; + + uar = &dev->priv_uar; + } + + err = mlx4_cq_alloc(dev->dev, entries, 
&cq->buf.mtt, uar, + cq->db.dma, &cq->mcq); + if (err) + goto err_dbmap; + + cq->mcq.comp = mlx4_ib_cq_comp; + cq->mcq.event = mlx4_ib_cq_event; + + if (context) + if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { + err = -EFAULT; + goto err_dbmap; + } + + return &cq->ibcq; + +err_dbmap: + if (context) + mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); + +err_mtt: + mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt); + +err_buf: + if (context) + ib_umem_release(cq->umem); + else + mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe), + &cq->buf.buf); + +err_db: + if (!context) + mlx4_ib_db_free(dev, &cq->db); + +err_cq: + kfree(cq); + + return ERR_PTR(err); +} + +int mlx4_ib_destroy_cq(struct ib_cq *cq) +{ + struct mlx4_ib_dev *dev = to_mdev(cq->device); + struct mlx4_ib_cq *mcq = to_mcq(cq); + + mlx4_cq_free(dev->dev, &mcq->mcq); + mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt); + + if (cq->uobject) { + mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db); + ib_umem_release(mcq->umem); + } else { + mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe), + &mcq->buf.buf); + mlx4_ib_db_free(dev, &mcq->db); + } + + kfree(mcq); + + return 0; +} + +static void dump_cqe(void *cqe) +{ + __be32 *buf = cqe; + + printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n", + be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]), + be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]), + be32_to_cpu(buf[6]), be32_to_cpu(buf[7])); +} + +static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, + struct ib_wc *wc) +{ + if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) { + printk(KERN_DEBUG "local QP operation err " + "(QPN %06x, WQE index %x, vendor syndrome %02x, " + "opcode = %02x)\n", + be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index), + cqe->vendor_err_syndrome, + cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK); + dump_cqe(cqe); + } + + switch (cqe->syndrome) { + case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR: + wc->status = IB_WC_LOC_LEN_ERR; + break; + case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR: + wc->status = IB_WC_LOC_QP_OP_ERR; + break; + case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR: + wc->status = IB_WC_LOC_PROT_ERR; + break; + case MLX4_CQE_SYNDROME_WR_FLUSH_ERR: + wc->status = IB_WC_WR_FLUSH_ERR; + break; + case MLX4_CQE_SYNDROME_MW_BIND_ERR: + wc->status = IB_WC_MW_BIND_ERR; + break; + case MLX4_CQE_SYNDROME_BAD_RESP_ERR: + wc->status = IB_WC_BAD_RESP_ERR; + break; + case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR: + wc->status = IB_WC_LOC_ACCESS_ERR; + break; + case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR: + wc->status = IB_WC_REM_INV_REQ_ERR; + break; + case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR: + wc->status = IB_WC_REM_ACCESS_ERR; + break; + case MLX4_CQE_SYNDROME_REMOTE_OP_ERR: + wc->status = IB_WC_REM_OP_ERR; + break; + case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: + wc->status = IB_WC_RETRY_EXC_ERR; + break; + case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR: + wc->status = IB_WC_RNR_RETRY_EXC_ERR; + break; + case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR: + wc->status = IB_WC_REM_ABORT_ERR; + break; + default: + wc->status = IB_WC_GENERAL_ERR; + break; + } + + wc->vendor_err = cqe->vendor_err_syndrome; +} + +static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, + struct mlx4_ib_qp **cur_qp, + struct ib_wc *wc) +{ + struct mlx4_cqe *cqe; + struct mlx4_qp *mqp; + struct mlx4_ib_wq *wq; + struct mlx4_ib_srq *srq; + int is_send; + int is_error; + u16 wqe_ctr; + + cqe = next_cqe_sw(cq); + if (!cqe) + return -EAGAIN; + + ++cq->mcq.cons_index; + + /* + * 
Make sure we read CQ entry contents after we've checked the + * ownership bit. + */ + rmb(); + + is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK; + is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == + MLX4_CQE_OPCODE_ERROR; + + if (!*cur_qp || + (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) { + /* + * We do not have to take the QP table lock here, + * because CQs will be locked while QPs are removed + * from the table. + */ + mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, + be32_to_cpu(cqe->my_qpn)); + if (unlikely(!mqp)) { + printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n", + cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff); + return -EINVAL; + } + + *cur_qp = to_mibqp(mqp); + } + + wc->qp = &(*cur_qp)->ibqp; + + if (is_send) { + wq = &(*cur_qp)->sq; + wqe_ctr = be16_to_cpu(cqe->wqe_index); + wq->tail += wqe_ctr - (u16) wq->tail; + wc->wr_id = wq->wrid[wq->tail & (wq->max - 1)]; + ++wq->tail; + } else if ((*cur_qp)->ibqp.srq) { + srq = to_msrq((*cur_qp)->ibqp.srq); + wqe_ctr = be16_to_cpu(cqe->wqe_index); + wc->wr_id = srq->wrid[wqe_ctr]; + mlx4_ib_free_srq_wqe(srq, wqe_ctr); + } else { + wq = &(*cur_qp)->rq; + wc->wr_id = wq->wrid[wq->tail & (wq->max - 1)]; + ++wq->tail; + } + + if (unlikely(is_error)) { + mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc); + return 0; + } + + wc->status = IB_WC_SUCCESS; + + if (is_send) { + wc->wc_flags = 0; + switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) { + case MLX4_OPCODE_RDMA_WRITE_IMM: + wc->wc_flags |= IB_WC_WITH_IMM; + case MLX4_OPCODE_RDMA_WRITE: + wc->opcode = IB_WC_RDMA_WRITE; + break; + case MLX4_OPCODE_SEND_IMM: + wc->wc_flags |= IB_WC_WITH_IMM; + case MLX4_OPCODE_SEND: + wc->opcode = IB_WC_SEND; + break; + case MLX4_OPCODE_RDMA_READ: + wc->opcode = IB_WC_SEND; + wc->byte_len = be32_to_cpu(cqe->byte_cnt); + break; + case MLX4_OPCODE_ATOMIC_CS: + wc->opcode = IB_WC_COMP_SWAP; + wc->byte_len = 8; + break; + case MLX4_OPCODE_ATOMIC_FA: + wc->opcode = IB_WC_FETCH_ADD; + wc->byte_len = 8; + break; + case MLX4_OPCODE_BIND_MW: + wc->opcode = IB_WC_BIND_MW; + break; + } + } else { + wc->byte_len = be32_to_cpu(cqe->byte_cnt); + + switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) { + case MLX4_RECV_OPCODE_RDMA_WRITE_IMM: + wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; + wc->wc_flags = IB_WC_WITH_IMM; + wc->imm_data = cqe->immed_rss_invalid; + break; + case MLX4_RECV_OPCODE_SEND: + wc->opcode = IB_WC_RECV; + wc->wc_flags = 0; + break; + case MLX4_RECV_OPCODE_SEND_IMM: + wc->opcode = IB_WC_RECV; + wc->wc_flags = IB_WC_WITH_IMM; + wc->imm_data = cqe->immed_rss_invalid; + break; + } + + wc->slid = be16_to_cpu(cqe->rlid); + wc->sl = cqe->sl >> 4; + wc->src_qp = be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff; + wc->dlid_path_bits = (be32_to_cpu(cqe->g_mlpath_rqpn) >> 24) & 0x7f; + wc->wc_flags |= be32_to_cpu(cqe->g_mlpath_rqpn) & 0x80000000 ? 
+ IB_WC_GRH : 0; + wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) >> 16; + } + + return 0; +} + +int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct mlx4_ib_cq *cq = to_mcq(ibcq); + struct mlx4_ib_qp *cur_qp = NULL; + unsigned long flags; + int npolled; + int err = 0; + + spin_lock_irqsave(&cq->lock, flags); + + for (npolled = 0; npolled < num_entries; ++npolled) { + err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled); + if (err) + break; + } + + if (npolled) + mlx4_cq_set_ci(&cq->mcq); + + spin_unlock_irqrestore(&cq->lock, flags); + + if (err == 0 || err == -EAGAIN) + return npolled; + else + return err; +} + +int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) +{ + mlx4_cq_arm(&to_mcq(ibcq)->mcq, + (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? + MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT, + to_mdev(ibcq->device)->uar_map, + MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock)); + + return 0; +} + +void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) +{ + u32 prod_index; + int nfreed = 0; + struct mlx4_cqe *cqe; + + /* + * First we need to find the current producer index, so we + * know where to start cleaning from. It doesn't matter if HW + * adds new entries after this loop -- the QP we're worried + * about is already in RESET, so the new entries won't come + * from our QP and therefore don't need to be checked. + */ + for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index) + if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) + break; + + /* + * Now sweep backwards through the CQ, removing CQ entries + * that match our QP by copying older entries on top of them. + */ + while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { + cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); + if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) { + if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) + mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index)); + ++nfreed; + } else if (nfreed) + memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe), + cqe, sizeof *cqe); + } + + if (nfreed) { + cq->mcq.cons_index += nfreed; + /* + * Make sure update of buffer contents is done before + * updating consumer index. + */ + wmb(); + mlx4_cq_set_ci(&cq->mcq); + } +} + +void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) +{ + spin_lock_irq(&cq->lock); + __mlx4_ib_cq_clean(cq, qpn, srq); + spin_unlock_irq(&cq->lock); +} diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c new file mode 100644 index 000000000000..1c36087aef14 --- /dev/null +++ b/drivers/infiniband/hw/mlx4/doorbell.c @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include + +#include "mlx4_ib.h" + +struct mlx4_ib_db_pgdir { + struct list_head list; + DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE); + DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2); + unsigned long *bits[2]; + __be32 *db_page; + dma_addr_t db_dma; +}; + +static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev) +{ + struct mlx4_ib_db_pgdir *pgdir; + + pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL); + if (!pgdir) + return NULL; + + bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2); + pgdir->bits[0] = pgdir->order0; + pgdir->bits[1] = pgdir->order1; + pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device, + PAGE_SIZE, &pgdir->db_dma, + GFP_KERNEL); + if (!pgdir->db_page) { + kfree(pgdir); + return NULL; + } + + return pgdir; +} + +static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir, + struct mlx4_ib_db *db, int order) +{ + int o; + int i; + + for (o = order; o <= 1; ++o) { + i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o); + if (i < MLX4_IB_DB_PER_PAGE >> o) + goto found; + } + + return -ENOMEM; + +found: + clear_bit(i, pgdir->bits[o]); + + i <<= o; + + if (o > order) + set_bit(i ^ 1, pgdir->bits[order]); + + db->u.pgdir = pgdir; + db->index = i; + db->db = pgdir->db_page + db->index; + db->dma = pgdir->db_dma + db->index * 4; + db->order = order; + + return 0; +} + +int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order) +{ + struct mlx4_ib_db_pgdir *pgdir; + int ret = 0; + + mutex_lock(&dev->pgdir_mutex); + + list_for_each_entry(pgdir, &dev->pgdir_list, list) + if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order)) + goto out; + + pgdir = mlx4_ib_alloc_db_pgdir(dev); + if (!pgdir) { + ret = -ENOMEM; + goto out; + } + + list_add(&pgdir->list, &dev->pgdir_list); + + /* This should never fail -- we just allocated an empty page: */ + WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order)); + +out: + mutex_unlock(&dev->pgdir_mutex); + + return ret; +} + +void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db) +{ + int o; + int i; + + mutex_lock(&dev->pgdir_mutex); + + o = db->order; + i = db->index; + + if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { + clear_bit(i ^ 1, db->u.pgdir->order0); + ++o; + } + + i >>= o; + set_bit(i, db->u.pgdir->bits[o]); + + if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) { + dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE, + db->u.pgdir->db_page, db->u.pgdir->db_dma); + list_del(&db->u.pgdir->list); + kfree(db->u.pgdir); + } + + mutex_unlock(&dev->pgdir_mutex); +} + +struct mlx4_ib_user_db_page { + struct list_head list; + struct ib_umem *umem; + unsigned long user_virt; + int refcnt; +}; + +int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, + struct mlx4_ib_db *db) +{ + struct mlx4_ib_user_db_page 
*page; + struct ib_umem_chunk *chunk; + int err = 0; + + mutex_lock(&context->db_page_mutex); + + list_for_each_entry(page, &context->db_page_list, list) + if (page->user_virt == (virt & PAGE_MASK)) + goto found; + + page = kmalloc(sizeof *page, GFP_KERNEL); + if (!page) { + err = -ENOMEM; + goto out; + } + + page->user_virt = (virt & PAGE_MASK); + page->refcnt = 0; + page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, + PAGE_SIZE, 0); + if (IS_ERR(page->umem)) { + err = PTR_ERR(page->umem); + kfree(page); + goto out; + } + + list_add(&page->list, &context->db_page_list); + +found: + chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list); + db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK); + db->u.user_page = page; + ++page->refcnt; + +out: + mutex_unlock(&context->db_page_mutex); + + return err; +} + +void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db) +{ + mutex_lock(&context->db_page_mutex); + + if (!--db->u.user_page->refcnt) { + list_del(&db->u.user_page->list); + ib_umem_release(db->u.user_page->umem); + kfree(db->u.user_page); + } + + mutex_unlock(&context->db_page_mutex); +} diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c new file mode 100644 index 000000000000..333091787c5f --- /dev/null +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include + +#include + +#include "mlx4_ib.h" + +enum { + MLX4_IB_VENDOR_CLASS1 = 0x9, + MLX4_IB_VENDOR_CLASS2 = 0xa +}; + +int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + void *in_mad, void *response_mad) +{ + struct mlx4_cmd_mailbox *inmailbox, *outmailbox; + void *inbox; + int err; + u32 in_modifier = port; + u8 op_modifier = 0; + + inmailbox = mlx4_alloc_cmd_mailbox(dev->dev); + if (IS_ERR(inmailbox)) + return PTR_ERR(inmailbox); + inbox = inmailbox->buf; + + outmailbox = mlx4_alloc_cmd_mailbox(dev->dev); + if (IS_ERR(outmailbox)) { + mlx4_free_cmd_mailbox(dev->dev, inmailbox); + return PTR_ERR(outmailbox); + } + + memcpy(inbox, in_mad, 256); + + /* + * Key check traps can't be generated unless we have in_wc to + * tell us where to send the trap. + */ + if (ignore_mkey || !in_wc) + op_modifier |= 0x1; + if (ignore_bkey || !in_wc) + op_modifier |= 0x2; + + if (in_wc) { + struct { + __be32 my_qpn; + u32 reserved1; + __be32 rqpn; + u8 sl; + u8 g_path; + u16 reserved2[2]; + __be16 pkey; + u32 reserved3[11]; + u8 grh[40]; + } *ext_info; + + memset(inbox + 256, 0, 256); + ext_info = inbox + 256; + + ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num); + ext_info->rqpn = cpu_to_be32(in_wc->src_qp); + ext_info->sl = in_wc->sl << 4; + ext_info->g_path = in_wc->dlid_path_bits | + (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0); + ext_info->pkey = cpu_to_be16(in_wc->pkey_index); + + if (in_grh) + memcpy(ext_info->grh, in_grh, 40); + + op_modifier |= 0x4; + + in_modifier |= in_wc->slid << 16; + } + + err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, + in_modifier, op_modifier, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); + + if (!err); + memcpy(response_mad, outmailbox->buf, 256); + + mlx4_free_cmd_mailbox(dev->dev, inmailbox); + mlx4_free_cmd_mailbox(dev->dev, outmailbox); + + return err; +} + +static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) +{ + struct ib_ah *new_ah; + struct ib_ah_attr ah_attr; + + if (!dev->send_agent[port_num - 1][0]) + return; + + memset(&ah_attr, 0, sizeof ah_attr); + ah_attr.dlid = lid; + ah_attr.sl = sl; + ah_attr.port_num = port_num; + + new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, + &ah_attr); + if (IS_ERR(new_ah)) + return; + + spin_lock(&dev->sm_lock); + if (dev->sm_ah[port_num - 1]) + ib_destroy_ah(dev->sm_ah[port_num - 1]); + dev->sm_ah[port_num - 1] = new_ah; + spin_unlock(&dev->sm_lock); +} + +/* + * Snoop SM MADs for port info and P_Key table sets, so we can + * synthesize LID change and P_Key change events. 
+ */ +static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad) +{ + struct ib_event event; + + if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && + mad->mad_hdr.method == IB_MGMT_METHOD_SET) { + if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { + struct ib_port_info *pinfo = + (struct ib_port_info *) ((struct ib_smp *) mad)->data; + + update_sm_ah(to_mdev(ibdev), port_num, + be16_to_cpu(pinfo->sm_lid), + pinfo->neighbormtu_mastersmsl & 0xf); + + event.device = ibdev; + event.element.port_num = port_num; + + if(pinfo->clientrereg_resv_subnetto & 0x80) + event.event = IB_EVENT_CLIENT_REREGISTER; + else + event.event = IB_EVENT_LID_CHANGE; + + ib_dispatch_event(&event); + } + + if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { + event.device = ibdev; + event.event = IB_EVENT_PKEY_CHANGE; + event.element.port_num = port_num; + ib_dispatch_event(&event); + } + } +} + +static void node_desc_override(struct ib_device *dev, + struct ib_mad *mad) +{ + if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && + mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP && + mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) { + spin_lock(&to_mdev(dev)->sm_lock); + memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64); + spin_unlock(&to_mdev(dev)->sm_lock); + } +} + +static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad) +{ + int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; + struct ib_mad_send_buf *send_buf; + struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; + int ret; + + if (agent) { + send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, + IB_MGMT_MAD_DATA, GFP_ATOMIC); + /* + * We rely here on the fact that MLX QPs don't use the + * address handle after the send is posted (this is + * wrong following the IB spec strictly, but we know + * it's OK for our devices). + */ + spin_lock(&dev->sm_lock); + memcpy(send_buf->mad, mad, sizeof *mad); + if ((send_buf->ah = dev->sm_ah[port_num - 1])) + ret = ib_post_send_mad(send_buf, NULL); + else + ret = -EINVAL; + spin_unlock(&dev->sm_lock); + + if (ret) + ib_free_send_mad(send_buf); + } +} + +int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + struct ib_wc *in_wc, struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad) +{ + u16 slid; + int err; + + slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); + + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) { + forward_trap(to_mdev(ibdev), port_num, in_mad); + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + } + + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) + return IB_MAD_RESULT_SUCCESS; + + /* + * Don't process SMInfo queries or vendor-specific + * MADs -- the SMA can't handle them. 
+ */ + if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || + ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == + IB_SMP_ATTR_VENDOR_MASK)) + return IB_MAD_RESULT_SUCCESS; + } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || + in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 || + in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2) { + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) + return IB_MAD_RESULT_SUCCESS; + } else + return IB_MAD_RESULT_SUCCESS; + + err = mlx4_MAD_IFC(to_mdev(ibdev), + mad_flags & IB_MAD_IGNORE_MKEY, + mad_flags & IB_MAD_IGNORE_BKEY, + port_num, in_wc, in_grh, in_mad, out_mad); + if (err) + return IB_MAD_RESULT_FAILURE; + + if (!out_mad->mad_hdr.status) { + smp_snoop(ibdev, port_num, in_mad); + node_desc_override(ibdev, out_mad); + } + + /* set return bit in status of directed route responses */ + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + out_mad->mad_hdr.status |= cpu_to_be16(1 << 15); + + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) + /* no response for trap repress */ + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +} + +static void send_handler(struct ib_mad_agent *agent, + struct ib_mad_send_wc *mad_send_wc) +{ + ib_free_send_mad(mad_send_wc->send_buf); +} + +int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) +{ + struct ib_mad_agent *agent; + int p, q; + int ret; + + for (p = 0; p < dev->dev->caps.num_ports; ++p) + for (q = 0; q <= 1; ++q) { + agent = ib_register_mad_agent(&dev->ib_dev, p + 1, + q ? IB_QPT_GSI : IB_QPT_SMI, + NULL, 0, send_handler, + NULL, NULL); + if (IS_ERR(agent)) { + ret = PTR_ERR(agent); + goto err; + } + dev->send_agent[p][q] = agent; + } + + return 0; + +err: + for (p = 0; p < dev->dev->caps.num_ports; ++p) + for (q = 0; q <= 1; ++q) + if (dev->send_agent[p][q]) + ib_unregister_mad_agent(dev->send_agent[p][q]); + + return ret; +} + +void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev) +{ + struct ib_mad_agent *agent; + int p, q; + + for (p = 0; p < dev->dev->caps.num_ports; ++p) { + for (q = 0; q <= 1; ++q) { + agent = dev->send_agent[p][q]; + dev->send_agent[p][q] = NULL; + ib_unregister_mad_agent(agent); + } + + if (dev->sm_ah[p]) + ib_destroy_ah(dev->sm_ah[p]); + } +} diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c new file mode 100644 index 000000000000..688ecb4c39f3 --- /dev/null +++ b/drivers/infiniband/hw/mlx4/main.c @@ -0,0 +1,651 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include + +#include +#include + +#include +#include + +#include "mlx4_ib.h" +#include "user.h" + +#define DRV_NAME "mlx4_ib" +#define DRV_VERSION "0.01" +#define DRV_RELDATE "May 1, 2006" + +MODULE_AUTHOR("Roland Dreier"); +MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); + +static const char mlx4_ib_version[] __devinitdata = + DRV_NAME ": Mellanox ConnectX InfiniBand driver v" + DRV_VERSION " (" DRV_RELDATE ")\n"; + +static void init_query_mad(struct ib_smp *mad) +{ + mad->base_version = 1; + mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + mad->class_version = 1; + mad->method = IB_MGMT_METHOD_GET; +} + +static int mlx4_ib_query_device(struct ib_device *ibdev, + struct ib_device_attr *props) +{ + struct mlx4_ib_dev *dev = to_mdev(ibdev); + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memset(props, 0, sizeof *props); + + props->fw_ver = dev->dev->caps.fw_ver; + props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | + IB_DEVICE_PORT_ACTIVE_EVENT | + IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_RC_RNR_NAK_GEN; + if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; + if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; + if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM) + props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; + if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT) + props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; + + props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & + 0xffffff; + props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); + props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); + memcpy(&props->sys_image_guid, out_mad->data + 4, 8); + + props->max_mr_size = ~0ull; + props->page_size_cap = dev->dev->caps.page_size_cap; + props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; + props->max_qp_wr = dev->dev->caps.max_wqes; + props->max_sge = min(dev->dev->caps.max_sq_sg, + dev->dev->caps.max_rq_sg); + props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; + props->max_cqe = dev->dev->caps.max_cqes; + props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws; + props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds; + props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma; + props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma; + props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; + props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs; + props->max_srq_wr = 
dev->dev->caps.max_srq_wqes; + props->max_srq_sge = dev->dev->caps.max_srq_sge; + props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; + props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? + IB_ATOMIC_HCA : IB_ATOMIC_NONE; + props->max_pkeys = dev->dev->caps.pkey_table_len; + props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; + props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; + props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * + props->max_mcast_grp; + props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1; + +out: + kfree(in_mad); + kfree(out_mad); + + return err; +} + +static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + memset(props, 0, sizeof *props); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); + props->lmc = out_mad->data[34] & 0x7; + props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); + props->sm_sl = out_mad->data[36] & 0xf; + props->state = out_mad->data[32] & 0xf; + props->phys_state = out_mad->data[33] >> 4; + props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); + props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len; + props->max_msg_sz = 0x80000000; + props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len; + props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); + props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); + props->active_width = out_mad->data[31] & 0xf; + props->active_speed = out_mad->data[35] >> 4; + props->max_mtu = out_mad->data[41] & 0xf; + props->active_mtu = out_mad->data[36] >> 4; + props->subnet_timeout = out_mad->data[51] & 0x1f; + props->max_vl_num = out_mad->data[37] >> 4; + props->init_type_reply = out_mad->data[41] >> 4; + +out: + kfree(in_mad); + kfree(out_mad); + + return err; +} + +static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(gid->raw, out_mad->data + 8, 8); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; + in_mad->attr_mod = cpu_to_be32(index / 8); + + err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || 
!out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; + in_mad->attr_mod = cpu_to_be32(index / 32); + + err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, + struct ib_device_modify *props) +{ + if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) + return -EOPNOTSUPP; + + if (mask & IB_DEVICE_MODIFY_NODE_DESC) { + spin_lock(&to_mdev(ibdev)->sm_lock); + memcpy(ibdev->node_desc, props->node_desc, 64); + spin_unlock(&to_mdev(ibdev)->sm_lock); + } + + return 0; +} + +static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, + u32 cap_mask) +{ + struct mlx4_cmd_mailbox *mailbox; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev->dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memset(mailbox->buf, 0, 256); + *(u8 *) mailbox->buf = !!reset_qkey_viols << 6; + ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask); + + err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B); + + mlx4_free_cmd_mailbox(dev->dev, mailbox); + return err; +} + +static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props) +{ + struct ib_port_attr attr; + u32 cap_mask; + int err; + + mutex_lock(&to_mdev(ibdev)->cap_mask_mutex); + + err = mlx4_ib_query_port(ibdev, port, &attr); + if (err) + goto out; + + cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & + ~props->clr_port_cap_mask; + + err = mlx4_SET_PORT(to_mdev(ibdev), port, + !!(mask & IB_PORT_RESET_QKEY_CNTR), + cap_mask); + +out: + mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); + return err; +} + +static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct mlx4_ib_dev *dev = to_mdev(ibdev); + struct mlx4_ib_ucontext *context; + struct mlx4_ib_alloc_ucontext_resp resp; + int err; + + resp.qp_tab_size = dev->dev->caps.num_qps; + resp.bf_reg_size = dev->dev->caps.bf_reg_size; + resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; + + context = kmalloc(sizeof *context, GFP_KERNEL); + if (!context) + return ERR_PTR(-ENOMEM); + + err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar); + if (err) { + kfree(context); + return ERR_PTR(err); + } + + INIT_LIST_HEAD(&context->db_page_list); + mutex_init(&context->db_page_mutex); + + err = ib_copy_to_udata(udata, &resp, sizeof resp); + if (err) { + mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar); + kfree(context); + return ERR_PTR(-EFAULT); + } + + return &context->ibucontext; +} + +static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) +{ + struct mlx4_ib_ucontext *context = to_mucontext(ibcontext); + + mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar); + kfree(context); + + return 0; +} + +static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + struct mlx4_ib_dev *dev = to_mdev(context->device); + + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + if (vma->vm_pgoff == 0) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (io_remap_pfn_range(vma, vma->vm_start, + to_mucontext(context)->uar.pfn, + PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) { + /* FIXME want pgprot_writecombine() for BlueFlame pages */ + 
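+		/*
+		 * The BlueFlame doorbell page paired with this UAR sits
+		 * dev->dev->caps.num_uars pages beyond the UAR's own pfn,
+		 * which is why the remap below adds that offset.
+		 */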
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (io_remap_pfn_range(vma, vma->vm_start, + to_mucontext(context)->uar.pfn + + dev->dev->caps.num_uars, + PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + } else + return -EINVAL; + + return 0; +} + +static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct mlx4_ib_pd *pd; + int err; + + pd = kmalloc(sizeof *pd, GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn); + if (err) { + kfree(pd); + return ERR_PTR(err); + } + + if (context) + if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) { + mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); + kfree(pd); + return ERR_PTR(-EFAULT); + } + + return &pd->ibpd; +} + +static int mlx4_ib_dealloc_pd(struct ib_pd *pd) +{ + mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn); + kfree(pd); + + return 0; +} + +static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + return mlx4_multicast_attach(to_mdev(ibqp->device)->dev, + &to_mqp(ibqp)->mqp, gid->raw); +} + +static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + return mlx4_multicast_detach(to_mdev(ibqp->device)->dev, + &to_mqp(ibqp)->mqp, gid->raw); +} + +static int init_node_data(struct mlx4_ib_dev *dev) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; + + err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(dev->ib_dev.node_desc, out_mad->data, 64); + + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +static void *mlx4_ib_add(struct mlx4_dev *dev) +{ + struct mlx4_ib_dev *ibdev; + + ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); + if (!ibdev) { + dev_err(&dev->pdev->dev, "Device struct alloc failed\n"); + return NULL; + } + + if (mlx4_pd_alloc(dev, &ibdev->priv_pdn)) + goto err_dealloc; + + if (mlx4_uar_alloc(dev, &ibdev->priv_uar)) + goto err_pd; + + ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); + if (!ibdev->uar_map) + goto err_uar; + + INIT_LIST_HEAD(&ibdev->pgdir_list); + mutex_init(&ibdev->pgdir_mutex); + + ibdev->dev = dev; + + strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); + ibdev->ib_dev.owner = THIS_MODULE; + ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; + ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports; + ibdev->ib_dev.num_comp_vectors = 1; + ibdev->ib_dev.dma_device = &dev->pdev->dev; + + ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; + ibdev->ib_dev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << 
IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); + + ibdev->ib_dev.query_device = mlx4_ib_query_device; + ibdev->ib_dev.query_port = mlx4_ib_query_port; + ibdev->ib_dev.query_gid = mlx4_ib_query_gid; + ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey; + ibdev->ib_dev.modify_device = mlx4_ib_modify_device; + ibdev->ib_dev.modify_port = mlx4_ib_modify_port; + ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext; + ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext; + ibdev->ib_dev.mmap = mlx4_ib_mmap; + ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd; + ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd; + ibdev->ib_dev.create_ah = mlx4_ib_create_ah; + ibdev->ib_dev.query_ah = mlx4_ib_query_ah; + ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah; + ibdev->ib_dev.create_srq = mlx4_ib_create_srq; + ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq; + ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq; + ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv; + ibdev->ib_dev.create_qp = mlx4_ib_create_qp; + ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp; + ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp; + ibdev->ib_dev.post_send = mlx4_ib_post_send; + ibdev->ib_dev.post_recv = mlx4_ib_post_recv; + ibdev->ib_dev.create_cq = mlx4_ib_create_cq; + ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq; + ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq; + ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq; + ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr; + ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr; + ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr; + ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach; + ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach; + ibdev->ib_dev.process_mad = mlx4_ib_process_mad; + + if (init_node_data(ibdev)) + goto err_map; + + spin_lock_init(&ibdev->sm_lock); + mutex_init(&ibdev->cap_mask_mutex); + + if (ib_register_device(&ibdev->ib_dev)) + goto err_map; + + if (mlx4_ib_mad_init(ibdev)) + goto err_reg; + + return ibdev; + +err_reg: + ib_unregister_device(&ibdev->ib_dev); + +err_map: + iounmap(ibdev->uar_map); + +err_uar: + mlx4_uar_free(dev, &ibdev->priv_uar); + +err_pd: + mlx4_pd_free(dev, ibdev->priv_pdn); + +err_dealloc: + ib_dealloc_device(&ibdev->ib_dev); + + return NULL; +} + +static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) +{ + struct mlx4_ib_dev *ibdev = ibdev_ptr; + int p; + + for (p = 1; p <= dev->caps.num_ports; ++p) + mlx4_CLOSE_PORT(dev, p); + + mlx4_ib_mad_cleanup(ibdev); + ib_unregister_device(&ibdev->ib_dev); + iounmap(ibdev->uar_map); + mlx4_uar_free(dev, &ibdev->priv_uar); + mlx4_pd_free(dev, ibdev->priv_pdn); + ib_dealloc_device(&ibdev->ib_dev); +} + +static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, + enum mlx4_dev_event event, int subtype, + int port) +{ + struct ib_event ibev; + + switch (event) { + case MLX4_EVENT_TYPE_PORT_CHANGE: + ibev.event = subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ? 
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + break; + + case MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR: + ibev.event = IB_EVENT_DEVICE_FATAL; + break; + + default: + return; + } + + ibev.device = ibdev_ptr; + ibev.element.port_num = port; + + ib_dispatch_event(&ibev); +} + +static struct mlx4_interface mlx4_ib_interface = { + .add = mlx4_ib_add, + .remove = mlx4_ib_remove, + .event = mlx4_ib_event +}; + +static int __init mlx4_ib_init(void) +{ + return mlx4_register_interface(&mlx4_ib_interface); +} + +static void __exit mlx4_ib_cleanup(void) +{ + mlx4_unregister_interface(&mlx4_ib_interface); +} + +module_init(mlx4_ib_init); +module_exit(mlx4_ib_cleanup); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h new file mode 100644 index 000000000000..93dac71f3230 --- /dev/null +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX4_IB_H +#define MLX4_IB_H + +#include +#include + +#include +#include + +#include +#include + +enum { + MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4 +}; + +struct mlx4_ib_db_pgdir; +struct mlx4_ib_user_db_page; + +struct mlx4_ib_db { + __be32 *db; + union { + struct mlx4_ib_db_pgdir *pgdir; + struct mlx4_ib_user_db_page *user_page; + } u; + dma_addr_t dma; + int index; + int order; +}; + +struct mlx4_ib_ucontext { + struct ib_ucontext ibucontext; + struct mlx4_uar uar; + struct list_head db_page_list; + struct mutex db_page_mutex; +}; + +struct mlx4_ib_pd { + struct ib_pd ibpd; + u32 pdn; +}; + +struct mlx4_ib_cq_buf { + struct mlx4_buf buf; + struct mlx4_mtt mtt; +}; + +struct mlx4_ib_cq { + struct ib_cq ibcq; + struct mlx4_cq mcq; + struct mlx4_ib_cq_buf buf; + struct mlx4_ib_db db; + spinlock_t lock; + struct ib_umem *umem; +}; + +struct mlx4_ib_mr { + struct ib_mr ibmr; + struct mlx4_mr mmr; + struct ib_umem *umem; +}; + +struct mlx4_ib_wq { + u64 *wrid; + spinlock_t lock; + int max; + int max_gs; + int offset; + int wqe_shift; + unsigned head; + unsigned tail; +}; + +struct mlx4_ib_qp { + struct ib_qp ibqp; + struct mlx4_qp mqp; + struct mlx4_buf buf; + + struct mlx4_ib_db db; + struct mlx4_ib_wq rq; + + u32 doorbell_qpn; + __be32 sq_signal_bits; + struct mlx4_ib_wq sq; + + struct ib_umem *umem; + struct mlx4_mtt mtt; + int buf_size; + struct mutex mutex; + u8 port; + u8 alt_port; + u8 atomic_rd_en; + u8 resp_depth; + u8 state; +}; + +struct mlx4_ib_srq { + struct ib_srq ibsrq; + struct mlx4_srq msrq; + struct mlx4_buf buf; + struct mlx4_ib_db db; + u64 *wrid; + spinlock_t lock; + int head; + int tail; + u16 wqe_ctr; + struct ib_umem *umem; + struct mlx4_mtt mtt; + struct mutex mutex; +}; + +struct mlx4_ib_ah { + struct ib_ah ibah; + struct mlx4_av av; +}; + +struct mlx4_ib_dev { + struct ib_device ib_dev; + struct mlx4_dev *dev; + void __iomem *uar_map; + + struct list_head pgdir_list; + struct mutex pgdir_mutex; + + struct mlx4_uar priv_uar; + u32 priv_pdn; + MLX4_DECLARE_DOORBELL_LOCK(uar_lock); + + struct ib_mad_agent *send_agent[MLX4_MAX_PORTS][2]; + struct ib_ah *sm_ah[MLX4_MAX_PORTS]; + spinlock_t sm_lock; + + struct mutex cap_mask_mutex; +}; + +static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct mlx4_ib_dev, ib_dev); +} + +static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext); +} + +static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct mlx4_ib_pd, ibpd); +} + +static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct mlx4_ib_cq, ibcq); +} + +static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq) +{ + return container_of(mcq, struct mlx4_ib_cq, mcq); +} + +static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct mlx4_ib_mr, ibmr); +} + +static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct mlx4_ib_qp, ibqp); +} + +static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp) +{ + return container_of(mqp, struct mlx4_ib_qp, mqp); +} + +static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct mlx4_ib_srq, ibsrq); +} + +static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq) +{ + return container_of(msrq, struct mlx4_ib_srq, msrq); +} + +static inline struct mlx4_ib_ah *to_mah(struct 
ib_ah *ibah) +{ + return container_of(ibah, struct mlx4_ib_ah, ibah); +} + +int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order); +void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db); +int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, + struct mlx4_ib_db *db); +void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db); + +struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc); +int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, + struct ib_umem *umem); +struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata); +int mlx4_ib_dereg_mr(struct ib_mr *mr); + +struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, + struct ib_ucontext *context, + struct ib_udata *udata); +int mlx4_ib_destroy_cq(struct ib_cq *cq); +int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); +int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); +void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq); +void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq); + +struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); +int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr); +int mlx4_ib_destroy_ah(struct ib_ah *ah); + +struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata); +int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); +int mlx4_ib_destroy_srq(struct ib_srq *srq); +void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index); +int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); + +struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +int mlx4_ib_destroy_qp(struct ib_qp *qp); +int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr); +int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); + +int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + void *in_mad, void *response_mad); +int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + struct ib_wc *in_wc, struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad); +int mlx4_ib_mad_init(struct mlx4_ib_dev *dev); +void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev); + +static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah) +{ + return !!(ah->av.g_slid & 0x80); +} + +#endif /* MLX4_IB_H */ diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c new file mode 100644 index 000000000000..85ae906f1d12 --- /dev/null +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "mlx4_ib.h" + +static u32 convert_access(int acc) +{ + return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC : 0) | + (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) | + (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) | + (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) | + MLX4_PERM_LOCAL_READ; +} + +struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct mlx4_ib_mr *mr; + int err; + + mr = kmalloc(sizeof *mr, GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0, + ~0ull, convert_access(acc), 0, 0, &mr->mmr); + if (err) + goto err_free; + + err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr); + if (err) + goto err_mr; + + mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; + mr->umem = NULL; + + return &mr->ibmr; + +err_mr: + mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); + +err_free: + kfree(mr); + + return ERR_PTR(err); +} + +int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, + struct ib_umem *umem) +{ + u64 *pages; + struct ib_umem_chunk *chunk; + int i, j, k; + int n; + int len; + int err = 0; + + pages = (u64 *) __get_free_page(GFP_KERNEL); + if (!pages) + return -ENOMEM; + + i = n = 0; + + list_for_each_entry(chunk, &umem->chunk_list, list) + for (j = 0; j < chunk->nmap; ++j) { + len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift; + for (k = 0; k < len; ++k) { + pages[i++] = sg_dma_address(&chunk->page_list[j]) + + umem->page_size * k; + /* + * Be friendly to WRITE_MTT firmware + * command, and pass it chunks of + * appropriate size. 
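+				 * (One page worth of 64-bit entries is batched
+				 *  up, minus two that are presumably reserved
+				 *  for the mailbox's leading control words --
+				 *  hence the PAGE_SIZE / sizeof (u64) - 2
+				 *  limit in the test below.)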
+ */ + if (i == PAGE_SIZE / sizeof (u64) - 2) { + err = mlx4_write_mtt(dev->dev, mtt, n, + i, pages); + if (err) + goto out; + n += i; + i = 0; + } + } + } + + if (i) + err = mlx4_write_mtt(dev->dev, mtt, n, i, pages); + +out: + free_page((unsigned long) pages); + return err; +} + +struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata) +{ + struct mlx4_ib_dev *dev = to_mdev(pd->device); + struct mlx4_ib_mr *mr; + int shift; + int err; + int n; + + mr = kmalloc(sizeof *mr, GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags); + if (IS_ERR(mr->umem)) { + err = PTR_ERR(mr->umem); + goto err_free; + } + + n = ib_umem_page_count(mr->umem); + shift = ilog2(mr->umem->page_size); + + err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length, + convert_access(access_flags), n, shift, &mr->mmr); + if (err) + goto err_umem; + + err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); + if (err) + goto err_mr; + + err = mlx4_mr_enable(dev->dev, &mr->mmr); + if (err) + goto err_mr; + + mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; + + return &mr->ibmr; + +err_mr: + mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); + +err_umem: + ib_umem_release(mr->umem); + +err_free: + kfree(mr); + + return ERR_PTR(err); +} + +int mlx4_ib_dereg_mr(struct ib_mr *ibmr) +{ + struct mlx4_ib_mr *mr = to_mmr(ibmr); + + mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); + if (mr->umem) + ib_umem_release(mr->umem); + kfree(mr); + + return 0; +} diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c new file mode 100644 index 000000000000..5cd706908450 --- /dev/null +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -0,0 +1,1294 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include + +#include "mlx4_ib.h" +#include "user.h" + +enum { + MLX4_IB_ACK_REQ_FREQ = 8, +}; + +enum { + MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83, + MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f +}; + +enum { + /* + * Largest possible UD header: send with GRH and immediate data. 
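+	 * (LRH 8 + GRH 40 + BTH 12 + DETH 8 + immediate data 4 = 72 bytes,
+	 *  which is where the value below comes from.)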
+ */ + MLX4_IB_UD_HEADER_SIZE = 72 +}; + +struct mlx4_ib_sqp { + struct mlx4_ib_qp qp; + int pkey_index; + u32 qkey; + u32 send_psn; + struct ib_ud_header ud_header; + u8 header_buf[MLX4_IB_UD_HEADER_SIZE]; +}; + +static const __be32 mlx4_ib_opcode[] = { + [IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND), + [IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM), + [IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), + [IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), + [IB_WR_RDMA_READ] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ), + [IB_WR_ATOMIC_CMP_AND_SWP] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), + [IB_WR_ATOMIC_FETCH_AND_ADD] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), +}; + +static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) +{ + return container_of(mqp, struct mlx4_ib_sqp, qp); +} + +static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) +{ + return qp->mqp.qpn >= dev->dev->caps.sqp_start && + qp->mqp.qpn <= dev->dev->caps.sqp_start + 3; +} + +static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) +{ + return qp->mqp.qpn >= dev->dev->caps.sqp_start && + qp->mqp.qpn <= dev->dev->caps.sqp_start + 1; +} + +static void *get_wqe(struct mlx4_ib_qp *qp, int offset) +{ + if (qp->buf.nbufs == 1) + return qp->buf.u.direct.buf + offset; + else + return qp->buf.u.page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) +{ + return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); +} + +static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) +{ + return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); +} + +static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) +{ + struct ib_event event; + struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; + + if (type == MLX4_EVENT_TYPE_PATH_MIG) + to_mibqp(qp)->port = to_mibqp(qp)->alt_port; + + if (ibqp->event_handler) { + event.device = ibqp->device; + event.element.qp = ibqp; + switch (type) { + case MLX4_EVENT_TYPE_PATH_MIG: + event.event = IB_EVENT_PATH_MIG; + break; + case MLX4_EVENT_TYPE_COMM_EST: + event.event = IB_EVENT_COMM_EST; + break; + case MLX4_EVENT_TYPE_SQ_DRAINED: + event.event = IB_EVENT_SQ_DRAINED; + break; + case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: + event.event = IB_EVENT_QP_LAST_WQE_REACHED; + break; + case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: + event.event = IB_EVENT_QP_FATAL; + break; + case MLX4_EVENT_TYPE_PATH_MIG_FAILED: + event.event = IB_EVENT_PATH_MIG_ERR; + break; + case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + event.event = IB_EVENT_QP_REQ_ERR; + break; + case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: + event.event = IB_EVENT_QP_ACCESS_ERR; + break; + default: + printk(KERN_WARNING "mlx4_ib: Unexpected event type %d " + "on QP %06x\n", type, qp->qpn); + return; + } + + ibqp->event_handler(&event, ibqp->qp_context); + } +} + +static int send_wqe_overhead(enum ib_qp_type type) +{ + /* + * UD WQEs must have a datagram segment. + * RC and UC WQEs might have a remote address segment. + * MLX WQEs need two extra inline data segments (for the UD + * header and space for the ICRC). 
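+	 * (For the SMI/GSI case below this means reserving room for the
+	 *  largest possible UD header plus a 4 byte ICRC, each wrapped in
+	 *  an inline segment and padded to a multiple of the 16 byte data
+	 *  segment size.)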
+ */ + switch (type) { + case IB_QPT_UD: + return sizeof (struct mlx4_wqe_ctrl_seg) + + sizeof (struct mlx4_wqe_datagram_seg); + case IB_QPT_UC: + return sizeof (struct mlx4_wqe_ctrl_seg) + + sizeof (struct mlx4_wqe_raddr_seg); + case IB_QPT_RC: + return sizeof (struct mlx4_wqe_ctrl_seg) + + sizeof (struct mlx4_wqe_atomic_seg) + + sizeof (struct mlx4_wqe_raddr_seg); + case IB_QPT_SMI: + case IB_QPT_GSI: + return sizeof (struct mlx4_wqe_ctrl_seg) + + ALIGN(MLX4_IB_UD_HEADER_SIZE + + sizeof (struct mlx4_wqe_inline_seg), + sizeof (struct mlx4_wqe_data_seg)) + + ALIGN(4 + + sizeof (struct mlx4_wqe_inline_seg), + sizeof (struct mlx4_wqe_data_seg)); + default: + return sizeof (struct mlx4_wqe_ctrl_seg); + } +} + +static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, + enum ib_qp_type type, struct mlx4_ib_qp *qp) +{ + /* Sanity check QP size before proceeding */ + if (cap->max_send_wr > dev->dev->caps.max_wqes || + cap->max_recv_wr > dev->dev->caps.max_wqes || + cap->max_send_sge > dev->dev->caps.max_sq_sg || + cap->max_recv_sge > dev->dev->caps.max_rq_sg || + cap->max_inline_data + send_wqe_overhead(type) + + sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) + return -EINVAL; + + /* + * For MLX transport we need 2 extra S/G entries: + * one for the header and one for the checksum at the end + */ + if ((type == IB_QPT_SMI || type == IB_QPT_GSI) && + cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) + return -EINVAL; + + qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0; + qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 0; + + qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge * + sizeof (struct mlx4_wqe_data_seg))); + qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg); + + qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge * + sizeof (struct mlx4_wqe_data_seg), + cap->max_inline_data + + sizeof (struct mlx4_wqe_inline_seg)) + + send_wqe_overhead(type))); + qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) / + sizeof (struct mlx4_wqe_data_seg); + + qp->buf_size = (qp->rq.max << qp->rq.wqe_shift) + + (qp->sq.max << qp->sq.wqe_shift); + if (qp->rq.wqe_shift > qp->sq.wqe_shift) { + qp->rq.offset = 0; + qp->sq.offset = qp->rq.max << qp->rq.wqe_shift; + } else { + qp->rq.offset = qp->sq.max << qp->sq.wqe_shift; + qp->sq.offset = 0; + } + + cap->max_send_wr = qp->sq.max; + cap->max_recv_wr = qp->rq.max; + cap->max_send_sge = qp->sq.max_gs; + cap->max_recv_sge = qp->rq.max_gs; + cap->max_inline_data = (1 << qp->sq.wqe_shift) - send_wqe_overhead(type) - + sizeof (struct mlx4_wqe_inline_seg); + + return 0; +} + +static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) +{ + struct mlx4_wqe_ctrl_seg *ctrl; + int err; + int i; + + mutex_init(&qp->mutex); + spin_lock_init(&qp->sq.lock); + spin_lock_init(&qp->rq.lock); + + qp->state = IB_QPS_RESET; + qp->atomic_rd_en = 0; + qp->resp_depth = 0; + + qp->rq.head = 0; + qp->rq.tail = 0; + qp->sq.head = 0; + qp->sq.tail = 0; + + err = set_qp_size(dev, &init_attr->cap, init_attr->qp_type, qp); + if (err) + goto err; + + if (pd->uobject) { + struct mlx4_ib_create_qp ucmd; + + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { + err = -EFAULT; + goto err; + } + + qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, + qp->buf_size, 0); + if (IS_ERR(qp->umem)) { + err = PTR_ERR(qp->umem); + goto err; + } + 
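+		/*
+		 * Build an MTT covering the user buffer so the HCA can
+		 * translate the QP's work queues, then map the userspace
+		 * doorbell record.
+		 */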
+ err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem), + ilog2(qp->umem->page_size), &qp->mtt); + if (err) + goto err_buf; + + err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); + if (err) + goto err_mtt; + + err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), + ucmd.db_addr, &qp->db); + if (err) + goto err_mtt; + } else { + err = mlx4_ib_db_alloc(dev, &qp->db, 0); + if (err) + goto err; + + *qp->db.db = 0; + + if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) { + err = -ENOMEM; + goto err_db; + } + + err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, + &qp->mtt); + if (err) + goto err_buf; + + err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); + if (err) + goto err_mtt; + + for (i = 0; i < qp->sq.max; ++i) { + ctrl = get_send_wqe(qp, i); + ctrl->owner_opcode = cpu_to_be32(1 << 31); + } + + qp->sq.wrid = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL); + qp->rq.wrid = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL); + + if (!qp->sq.wrid || !qp->rq.wrid) { + err = -ENOMEM; + goto err_wrid; + } + + /* We don't support inline sends for kernel QPs (yet) */ + init_attr->cap.max_inline_data = 0; + } + + err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp); + if (err) + goto err_wrid; + + /* + * Hardware wants QPN written in big-endian order (after + * shifting) for send doorbell. Precompute this value to save + * a little bit when posting sends. + */ + qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) + qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); + else + qp->sq_signal_bits = 0; + + qp->mqp.event = mlx4_ib_qp_event; + + return 0; + +err_wrid: + if (pd->uobject) + mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); + else { + kfree(qp->sq.wrid); + kfree(qp->rq.wrid); + } + +err_mtt: + mlx4_mtt_cleanup(dev->dev, &qp->mtt); + +err_buf: + if (pd->uobject) + ib_umem_release(qp->umem); + else + mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); + +err_db: + if (!pd->uobject) + mlx4_ib_db_free(dev, &qp->db); + +err: + return err; +} + +static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: return MLX4_QP_STATE_RST; + case IB_QPS_INIT: return MLX4_QP_STATE_INIT; + case IB_QPS_RTR: return MLX4_QP_STATE_RTR; + case IB_QPS_RTS: return MLX4_QP_STATE_RTS; + case IB_QPS_SQD: return MLX4_QP_STATE_SQD; + case IB_QPS_SQE: return MLX4_QP_STATE_SQER; + case IB_QPS_ERR: return MLX4_QP_STATE_ERR; + default: return -1; + } +} + +static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) +{ + if (send_cq == recv_cq) + spin_lock_irq(&send_cq->lock); + else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { + spin_lock_irq(&send_cq->lock); + spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); + } else { + spin_lock_irq(&recv_cq->lock); + spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); + } +} + +static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) +{ + if (send_cq == recv_cq) + spin_unlock_irq(&send_cq->lock); + else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else { + spin_unlock(&send_cq->lock); + spin_unlock_irq(&recv_cq->lock); + } +} + +static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, + int is_user) +{ + struct mlx4_ib_cq *send_cq, *recv_cq; + + if (qp->state != IB_QPS_RESET) + if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), + MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) 
+ printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n", + qp->mqp.qpn); + + send_cq = to_mcq(qp->ibqp.send_cq); + recv_cq = to_mcq(qp->ibqp.recv_cq); + + mlx4_ib_lock_cqs(send_cq, recv_cq); + + if (!is_user) { + __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, + qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); + if (send_cq != recv_cq) + __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); + } + + mlx4_qp_remove(dev->dev, &qp->mqp); + + mlx4_ib_unlock_cqs(send_cq, recv_cq); + + mlx4_qp_free(dev->dev, &qp->mqp); + mlx4_mtt_cleanup(dev->dev, &qp->mtt); + + if (is_user) { + mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), + &qp->db); + ib_umem_release(qp->umem); + } else { + kfree(qp->sq.wrid); + kfree(qp->rq.wrid); + mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); + mlx4_ib_db_free(dev, &qp->db); + } +} + +struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mlx4_ib_dev *dev = to_mdev(pd->device); + struct mlx4_ib_sqp *sqp; + struct mlx4_ib_qp *qp; + int err; + + switch (init_attr->qp_type) { + case IB_QPT_RC: + case IB_QPT_UC: + case IB_QPT_UD: + { + qp = kmalloc(sizeof *qp, GFP_KERNEL); + if (!qp) + return ERR_PTR(-ENOMEM); + + err = create_qp_common(dev, pd, init_attr, udata, 0, qp); + if (err) { + kfree(qp); + return ERR_PTR(err); + } + + qp->ibqp.qp_num = qp->mqp.qpn; + + break; + } + case IB_QPT_SMI: + case IB_QPT_GSI: + { + /* Userspace is not allowed to create special QPs: */ + if (pd->uobject) + return ERR_PTR(-EINVAL); + + sqp = kmalloc(sizeof *sqp, GFP_KERNEL); + if (!sqp) + return ERR_PTR(-ENOMEM); + + qp = &sqp->qp; + + err = create_qp_common(dev, pd, init_attr, udata, + dev->dev->caps.sqp_start + + (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) + + init_attr->port_num - 1, + qp); + if (err) { + kfree(sqp); + return ERR_PTR(err); + } + + qp->port = init_attr->port_num; + qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 
0 : 1;
+
+		break;
+	}
+	default:
+		/* Don't support raw QPs */
+		return ERR_PTR(-EINVAL);
+	}
+
+	return &qp->ibqp;
+}
+
+int mlx4_ib_destroy_qp(struct ib_qp *qp)
+{
+	struct mlx4_ib_dev *dev = to_mdev(qp->device);
+	struct mlx4_ib_qp *mqp = to_mqp(qp);
+
+	if (is_qp0(dev, mqp))
+		mlx4_CLOSE_PORT(dev->dev, mqp->port);
+
+	destroy_qp_common(dev, mqp, !!qp->pd->uobject);
+
+	if (is_sqp(dev, mqp))
+		kfree(to_msqp(mqp));
+	else
+		kfree(mqp);
+
+	return 0;
+}
+
+static void init_port(struct mlx4_ib_dev *dev, int port)
+{
+	struct mlx4_init_port_param param;
+	int err;
+
+	memset(&param, 0, sizeof param);
+
+	param.port_width_cap = dev->dev->caps.port_width_cap;
+	param.vl_cap = dev->dev->caps.vl_cap;
+	param.mtu = ib_mtu_enum_to_int(dev->dev->caps.mtu_cap);
+	param.max_gid = dev->dev->caps.gid_table_len;
+	param.max_pkey = dev->dev->caps.pkey_table_len;
+
+	err = mlx4_INIT_PORT(dev->dev, &param, port);
+	if (err)
+		printk(KERN_WARNING "INIT_PORT failed, return code %d.\n", err);
+}
+
+static int to_mlx4_st(enum ib_qp_type type)
+{
+	switch (type) {
+	case IB_QPT_RC:		return MLX4_QP_ST_RC;
+	case IB_QPT_UC:		return MLX4_QP_ST_UC;
+	case IB_QPT_UD:		return MLX4_QP_ST_UD;
+	case IB_QPT_SMI:
+	case IB_QPT_GSI:	return MLX4_QP_ST_MLX;
+	default:		return -1;
+	}
+}
+
+static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, struct ib_qp_attr *attr,
+				   int attr_mask)
+{
+	u8 dest_rd_atomic;
+	u32 access_flags;
+	u32 hw_access_flags = 0;
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		dest_rd_atomic = attr->max_dest_rd_atomic;
+	else
+		dest_rd_atomic = qp->resp_depth;
+
+	if (attr_mask & IB_QP_ACCESS_FLAGS)
+		access_flags = attr->qp_access_flags;
+	else
+		access_flags = qp->atomic_rd_en;
+
+	if (!dest_rd_atomic)
+		access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+	if (access_flags & IB_ACCESS_REMOTE_READ)
+		hw_access_flags |= MLX4_QP_BIT_RRE;
+	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+		hw_access_flags |= MLX4_QP_BIT_RAE;
+	if (access_flags & IB_ACCESS_REMOTE_WRITE)
+		hw_access_flags |= MLX4_QP_BIT_RWE;
+
+	return cpu_to_be32(hw_access_flags);
+}
+
+static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, struct ib_qp_attr *attr,
+			    int attr_mask)
+{
+	if (attr_mask & IB_QP_PKEY_INDEX)
+		sqp->pkey_index = attr->pkey_index;
+	if (attr_mask & IB_QP_QKEY)
+		sqp->qkey = attr->qkey;
+	if (attr_mask & IB_QP_SQ_PSN)
+		sqp->send_psn = attr->sq_psn;
+}
+
+static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
+{
+	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
+}
+
+static int mlx4_set_path(struct mlx4_ib_dev *dev, struct ib_ah_attr *ah,
+			 struct mlx4_qp_path *path, u8 port)
+{
+	path->grh_mylmc = ah->src_path_bits & 0x7f;
+	path->rlid = cpu_to_be16(ah->dlid);
+	if (ah->static_rate) {
+		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
+		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
+			--path->static_rate;
+	} else
+		path->static_rate = 0;
+	path->counter_index = 0xff;
+
+	if (ah->ah_flags & IB_AH_GRH) {
+		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len) {
+			printk(KERN_ERR "sgid_index (%u) too large.
max is %d\n", + ah->grh.sgid_index, dev->dev->caps.gid_table_len - 1); + return -1; + } + + path->grh_mylmc |= 1 << 7; + path->mgid_index = ah->grh.sgid_index; + path->hop_limit = ah->grh.hop_limit; + path->tclass_flowlabel = + cpu_to_be32((ah->grh.traffic_class << 20) | + (ah->grh.flow_label)); + memcpy(path->rgid, ah->grh.dgid.raw, 16); + } + + path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | + ((port - 1) << 6) | ((ah->sl & 0xf) << 2); + + return 0; +} + +int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct mlx4_ib_dev *dev = to_mdev(ibqp->device); + struct mlx4_ib_qp *qp = to_mqp(ibqp); + struct mlx4_qp_context *context; + enum mlx4_qp_optpar optpar = 0; + enum ib_qp_state cur_state, new_state; + int sqd_event; + int err = -EINVAL; + + context = kzalloc(sizeof *context, GFP_KERNEL); + if (!context) + return -ENOMEM; + + mutex_lock(&qp->mutex); + + cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; + new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; + + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) + goto out; + + if ((attr_mask & IB_QP_PKEY_INDEX) && + attr->pkey_index >= dev->dev->caps.pkey_table_len) { + goto out; + } + + if ((attr_mask & IB_QP_PORT) && + (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) { + goto out; + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && + attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { + goto out; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && + attr->max_dest_rd_atomic > 1 << dev->dev->caps.max_qp_dest_rdma) { + goto out; + } + + context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) | + (to_mlx4_st(ibqp->qp_type) << 16)); + context->flags |= cpu_to_be32(1 << 8); /* DE? 
*/ + + if (!(attr_mask & IB_QP_PATH_MIG_STATE)) + context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); + else { + optpar |= MLX4_QP_OPTPAR_PM_STATE; + switch (attr->path_mig_state) { + case IB_MIG_MIGRATED: + context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); + break; + case IB_MIG_REARM: + context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11); + break; + case IB_MIG_ARMED: + context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11); + break; + } + } + + if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI || + ibqp->qp_type == IB_QPT_UD) + context->mtu_msgmax = (IB_MTU_4096 << 5) | 11; + else if (attr_mask & IB_QP_PATH_MTU) { + if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { + printk(KERN_ERR "path MTU (%u) is invalid\n", + attr->path_mtu); + return -EINVAL; + } + context->mtu_msgmax = (attr->path_mtu << 5) | 31; + } + + if (qp->rq.max) + context->rq_size_stride = ilog2(qp->rq.max) << 3; + context->rq_size_stride |= qp->rq.wqe_shift - 4; + + if (qp->sq.max) + context->sq_size_stride = ilog2(qp->sq.max) << 3; + context->sq_size_stride |= qp->sq.wqe_shift - 4; + + if (qp->ibqp.uobject) + context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); + else + context->usr_page = cpu_to_be32(dev->priv_uar.index); + + if (attr_mask & IB_QP_DEST_QPN) + context->remote_qpn = cpu_to_be32(attr->dest_qp_num); + + if (attr_mask & IB_QP_PORT) { + if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD && + !(attr_mask & IB_QP_AV)) { + mlx4_set_sched(&context->pri_path, attr->port_num); + optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE; + } + } + + if (attr_mask & IB_QP_PKEY_INDEX) { + context->pri_path.pkey_index = attr->pkey_index; + optpar |= MLX4_QP_OPTPAR_PKEY_INDEX; + } + + if (attr_mask & IB_QP_RNR_RETRY) { + context->params1 |= cpu_to_be32(attr->rnr_retry << 13); + optpar |= MLX4_QP_OPTPAR_RNR_RETRY; + } + + if (attr_mask & IB_QP_AV) { + if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path, + attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port)) { + err = -EINVAL; + goto out; + } + + optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | + MLX4_QP_OPTPAR_SCHED_QUEUE); + } + + if (attr_mask & IB_QP_TIMEOUT) { + context->pri_path.ackto = attr->timeout << 3; + optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT; + } + + if (attr_mask & IB_QP_ALT_PATH) { + if (attr->alt_pkey_index >= dev->dev->caps.pkey_table_len) + return -EINVAL; + + if (attr->alt_port_num == 0 || + attr->alt_port_num > dev->dev->caps.num_ports) + return -EINVAL; + + if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path, + attr->alt_port_num)) + return -EINVAL; + + context->alt_path.pkey_index = attr->alt_pkey_index; + context->alt_path.ackto = attr->alt_timeout << 3; + optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH; + } + + context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn); + context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); + if (attr_mask & IB_QP_RETRY_CNT) { + context->params1 |= cpu_to_be32(attr->retry_cnt << 16); + optpar |= MLX4_QP_OPTPAR_RETRY_COUNT; + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + if (attr->max_rd_atomic) + context->params1 |= + cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); + optpar |= MLX4_QP_OPTPAR_SRA_MAX; + } + + if (attr_mask & IB_QP_SQ_PSN) + context->next_send_psn = cpu_to_be32(attr->sq_psn); + + context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn); + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + if (attr->max_dest_rd_atomic) + context->params2 |= + cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); + optpar |= MLX4_QP_OPTPAR_RRA_MAX; + } + + if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { + context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); + optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE; + } + + if (ibqp->srq) + context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC); + + if (attr_mask & IB_QP_MIN_RNR_TIMER) { + context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); + optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT; + } + if (attr_mask & IB_QP_RQ_PSN) + context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); + + context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn); + + if (attr_mask & IB_QP_QKEY) { + context->qkey = cpu_to_be32(attr->qkey); + optpar |= MLX4_QP_OPTPAR_Q_KEY; + } + + if (ibqp->srq) + context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn); + + if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) + context->db_rec_addr = cpu_to_be64(qp->db.dma); + + if (cur_state == IB_QPS_INIT && + new_state == IB_QPS_RTR && + (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI || + ibqp->qp_type == IB_QPT_UD)) { + context->pri_path.sched_queue = (qp->port - 1) << 6; + if (is_qp0(dev, qp)) + context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE; + else + context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE; + } + + if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && + attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) + sqd_event = 1; + else + sqd_event = 0; + + err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), + to_mlx4_state(new_state), context, optpar, + sqd_event, &qp->mqp); + if (err) + goto out; + + qp->state = new_state; + + if (attr_mask & IB_QP_ACCESS_FLAGS) + qp->atomic_rd_en = attr->qp_access_flags; + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + qp->resp_depth = attr->max_dest_rd_atomic; + if (attr_mask & IB_QP_PORT) + qp->port = attr->port_num; + if (attr_mask & IB_QP_ALT_PATH) + qp->alt_port = attr->alt_port_num; + + if (is_sqp(dev, qp)) + 
store_sqp_attrs(to_msqp(qp), attr, attr_mask); + + /* + * If we moved QP0 to RTR, bring the IB link up; if we moved + * QP0 to RESET or ERROR, bring the link back down. + */ + if (is_qp0(dev, qp)) { + if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) + init_port(dev, qp->port); + + if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && + (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR)) + mlx4_CLOSE_PORT(dev->dev, qp->port); + } + + /* + * If we moved a kernel QP to RESET, clean up all old CQ + * entries and reinitialize the QP. + */ + if (new_state == IB_QPS_RESET && !ibqp->uobject) { + mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn, + ibqp->srq ? to_msrq(ibqp->srq): NULL); + if (ibqp->send_cq != ibqp->recv_cq) + mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL); + + qp->rq.head = 0; + qp->rq.tail = 0; + qp->sq.head = 0; + qp->sq.tail = 0; + *qp->db.db = 0; + } + +out: + mutex_unlock(&qp->mutex); + kfree(context); + return err; +} + +static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, + void *wqe) +{ + struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev; + struct mlx4_wqe_mlx_seg *mlx = wqe; + struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; + struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); + u16 pkey; + int send_size; + int header_size; + int i; + + send_size = 0; + for (i = 0; i < wr->num_sge; ++i) + send_size += wr->sg_list[i].length; + + ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), &sqp->ud_header); + + sqp->ud_header.lrh.service_level = + be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28; + sqp->ud_header.lrh.destination_lid = ah->av.dlid; + sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.g_slid & 0x7f); + if (mlx4_ib_ah_grh_present(ah)) { + sqp->ud_header.grh.traffic_class = + (be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff; + sqp->ud_header.grh.flow_label = + ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff); + ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24, + ah->av.gid_index, &sqp->ud_header.grh.source_gid); + memcpy(sqp->ud_header.grh.destination_gid.raw, + ah->av.dgid, 16); + } + + mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); + mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | + (sqp->ud_header.lrh.destination_lid == + IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) | + (sqp->ud_header.lrh.service_level << 8)); + mlx->rlid = sqp->ud_header.lrh.destination_lid; + + switch (wr->opcode) { + case IB_WR_SEND: + sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; + sqp->ud_header.immediate_present = 0; + break; + case IB_WR_SEND_WITH_IMM: + sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; + sqp->ud_header.immediate_present = 1; + sqp->ud_header.immediate_data = wr->imm_data; + break; + default: + return -EINVAL; + } + + sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; + if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) + sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; + sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); + if (!sqp->qp.ibqp.qp_num) + ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); + else + ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); + sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); + sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); + sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 
+ sqp->qkey : wr->wr.ud.remote_qkey); + sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); + + header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); + + if (0) { + printk(KERN_ERR "built UD header of size %d:\n", header_size); + for (i = 0; i < header_size / 4; ++i) { + if (i % 8 == 0) + printk(" [%02x] ", i * 4); + printk(" %08x", + be32_to_cpu(((__be32 *) sqp->header_buf)[i])); + if ((i + 1) % 8 == 0) + printk("\n"); + } + printk("\n"); + } + + inl->byte_count = cpu_to_be32(1 << 31 | header_size); + memcpy(inl + 1, sqp->header_buf, header_size); + + return ALIGN(sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); +} + +static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) +{ + unsigned cur; + struct mlx4_ib_cq *cq; + + cur = wq->head - wq->tail; + if (likely(cur + nreq < wq->max)) + return 0; + + cq = to_mcq(ib_cq); + spin_lock(&cq->lock); + cur = wq->head - wq->tail; + spin_unlock(&cq->lock); + + return cur + nreq >= wq->max; +} + +int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct mlx4_ib_qp *qp = to_mqp(ibqp); + void *wqe; + struct mlx4_wqe_ctrl_seg *ctrl; + unsigned long flags; + int nreq; + int err = 0; + int ind; + int size; + int i; + + spin_lock_irqsave(&qp->rq.lock, flags); + + ind = qp->sq.head; + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->num_sge > qp->sq.max_gs)) { + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.max - 1)); + qp->sq.wrid[ind & (qp->sq.max - 1)] = wr->wr_id; + + ctrl->srcrb_flags = + (wr->send_flags & IB_SEND_SIGNALED ? + cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) | + (wr->send_flags & IB_SEND_SOLICITED ? 
+ cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) | + qp->sq_signal_bits; + + if (wr->opcode == IB_WR_SEND_WITH_IMM || + wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) + ctrl->imm = wr->imm_data; + else + ctrl->imm = 0; + + wqe += sizeof *ctrl; + size = sizeof *ctrl / 16; + + switch (ibqp->qp_type) { + case IB_QPT_RC: + case IB_QPT_UC: + switch (wr->opcode) { + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + ((struct mlx4_wqe_raddr_seg *) wqe)->raddr = + cpu_to_be64(wr->wr.atomic.remote_addr); + ((struct mlx4_wqe_raddr_seg *) wqe)->rkey = + cpu_to_be32(wr->wr.atomic.rkey); + ((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0; + + wqe += sizeof (struct mlx4_wqe_raddr_seg); + + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { + ((struct mlx4_wqe_atomic_seg *) wqe)->swap_add = + cpu_to_be64(wr->wr.atomic.swap); + ((struct mlx4_wqe_atomic_seg *) wqe)->compare = + cpu_to_be64(wr->wr.atomic.compare_add); + } else { + ((struct mlx4_wqe_atomic_seg *) wqe)->swap_add = + cpu_to_be64(wr->wr.atomic.compare_add); + ((struct mlx4_wqe_atomic_seg *) wqe)->compare = 0; + } + + wqe += sizeof (struct mlx4_wqe_atomic_seg); + size += (sizeof (struct mlx4_wqe_raddr_seg) + + sizeof (struct mlx4_wqe_atomic_seg)) / 16; + + break; + + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + ((struct mlx4_wqe_raddr_seg *) wqe)->raddr = + cpu_to_be64(wr->wr.rdma.remote_addr); + ((struct mlx4_wqe_raddr_seg *) wqe)->rkey = + cpu_to_be32(wr->wr.rdma.rkey); + ((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0; + + wqe += sizeof (struct mlx4_wqe_raddr_seg); + size += sizeof (struct mlx4_wqe_raddr_seg) / 16; + + break; + + default: + /* No extra segments required for sends */ + break; + } + break; + + case IB_QPT_UD: + memcpy(((struct mlx4_wqe_datagram_seg *) wqe)->av, + &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); + ((struct mlx4_wqe_datagram_seg *) wqe)->dqpn = + cpu_to_be32(wr->wr.ud.remote_qpn); + ((struct mlx4_wqe_datagram_seg *) wqe)->qkey = + cpu_to_be32(wr->wr.ud.remote_qkey); + + wqe += sizeof (struct mlx4_wqe_datagram_seg); + size += sizeof (struct mlx4_wqe_datagram_seg) / 16; + break; + + case IB_QPT_SMI: + case IB_QPT_GSI: + err = build_mlx_header(to_msqp(qp), wr, ctrl); + if (err < 0) { + *bad_wr = wr; + goto out; + } + wqe += err; + size += err / 16; + + err = 0; + break; + + default: + break; + } + + for (i = 0; i < wr->num_sge; ++i) { + ((struct mlx4_wqe_data_seg *) wqe)->byte_count = + cpu_to_be32(wr->sg_list[i].length); + ((struct mlx4_wqe_data_seg *) wqe)->lkey = + cpu_to_be32(wr->sg_list[i].lkey); + ((struct mlx4_wqe_data_seg *) wqe)->addr = + cpu_to_be64(wr->sg_list[i].addr); + + wqe += sizeof (struct mlx4_wqe_data_seg); + size += sizeof (struct mlx4_wqe_data_seg) / 16; + } + + /* Add one more inline data segment for ICRC for MLX sends */ + if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) { + ((struct mlx4_wqe_inline_seg *) wqe)->byte_count = + cpu_to_be32((1 << 31) | 4); + ((u32 *) wqe)[1] = 0; + wqe += sizeof (struct mlx4_wqe_data_seg); + size += sizeof (struct mlx4_wqe_data_seg) / 16; + } + + ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? + MLX4_WQE_CTRL_FENCE : 0) | size; + + /* + * Make sure descriptor is fully written before + * setting ownership bit (because HW can start + * executing as soon as we do). + */ + wmb(); + + if (wr->opcode < 0 || wr->opcode > ARRAY_SIZE(mlx4_ib_opcode)) { + err = -EINVAL; + goto out; + } + + ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | + (ind & qp->sq.max ? 
cpu_to_be32(1 << 31) : 0); + + ++ind; + } + +out: + if (likely(nreq)) { + qp->sq.head += nreq; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + + writel(qp->doorbell_qpn, + to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL); + + /* + * Make sure doorbells don't leak out of SQ spinlock + * and reach the HCA out of order. + */ + mmiowb(); + } + + spin_unlock_irqrestore(&qp->rq.lock, flags); + + return err; +} + +int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mlx4_ib_qp *qp = to_mqp(ibqp); + struct mlx4_wqe_data_seg *scat; + unsigned long flags; + int err = 0; + int nreq; + int ind; + int i; + + spin_lock_irqsave(&qp->rq.lock, flags); + + ind = qp->rq.head & (qp->rq.max - 1); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.send_cq)) { + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->num_sge > qp->rq.max_gs)) { + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + scat = get_recv_wqe(qp, ind); + + for (i = 0; i < wr->num_sge; ++i) { + scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); + scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); + scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); + } + + if (i < qp->rq.max_gs) { + scat[i].byte_count = 0; + scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); + scat[i].addr = 0; + } + + qp->rq.wrid[ind] = wr->wr_id; + + ind = (ind + 1) & (qp->rq.max - 1); + } + +out: + if (likely(nreq)) { + qp->rq.head += nreq; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + + *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); + } + + spin_unlock_irqrestore(&qp->rq.lock, flags); + + return err; +} diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c new file mode 100644 index 000000000000..42ab4a801d6a --- /dev/null +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include + +#include "mlx4_ib.h" +#include "user.h" + +static void *get_wqe(struct mlx4_ib_srq *srq, int n) +{ + int offset = n << srq->msrq.wqe_shift; + + if (srq->buf.nbufs == 1) + return srq->buf.u.direct.buf + offset; + else + return srq->buf.u.page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) +{ + struct ib_event event; + struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; + + if (ibsrq->event_handler) { + event.device = ibsrq->device; + event.element.srq = ibsrq; + switch (type) { + case MLX4_EVENT_TYPE_SRQ_LIMIT: + event.event = IB_EVENT_SRQ_LIMIT_REACHED; + break; + case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: + event.event = IB_EVENT_SRQ_ERR; + break; + default: + printk(KERN_WARNING "mlx4_ib: Unexpected event type %d " + "on SRQ %06x\n", type, srq->srqn); + return; + } + + ibsrq->event_handler(&event, ibsrq->srq_context); + } +} + +struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mlx4_ib_dev *dev = to_mdev(pd->device); + struct mlx4_ib_srq *srq; + struct mlx4_wqe_srq_next_seg *next; + int desc_size; + int buf_size; + int err; + int i; + + /* Sanity check SRQ size before proceeding */ + if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes || + init_attr->attr.max_sge > dev->dev->caps.max_srq_sge) + return ERR_PTR(-EINVAL); + + srq = kmalloc(sizeof *srq, GFP_KERNEL); + if (!srq) + return ERR_PTR(-ENOMEM); + + mutex_init(&srq->mutex); + spin_lock_init(&srq->lock); + srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); + srq->msrq.max_gs = init_attr->attr.max_sge; + + desc_size = max(32UL, + roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) + + srq->msrq.max_gs * + sizeof (struct mlx4_wqe_data_seg))); + srq->msrq.wqe_shift = ilog2(desc_size); + + buf_size = srq->msrq.max * desc_size; + + if (pd->uobject) { + struct mlx4_ib_create_srq ucmd; + + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { + err = -EFAULT; + goto err_srq; + } + + srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, + buf_size, 0); + if (IS_ERR(srq->umem)) { + err = PTR_ERR(srq->umem); + goto err_srq; + } + + err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem), + ilog2(srq->umem->page_size), &srq->mtt); + if (err) + goto err_buf; + + err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); + if (err) + goto err_mtt; + + err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), + ucmd.db_addr, &srq->db); + if (err) + goto err_mtt; + } else { + err = mlx4_ib_db_alloc(dev, &srq->db, 0); + if (err) + goto err_srq; + + *srq->db.db = 0; + + if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) { + err = -ENOMEM; + goto err_db; + } + + srq->head = 0; + srq->tail = srq->msrq.max - 1; + srq->wqe_ctr = 0; + + for (i = 0; i < srq->msrq.max; ++i) { + next = get_wqe(srq, i); + next->next_wqe_index = + cpu_to_be16((i + 1) & (srq->msrq.max - 1)); + } + + err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, + &srq->mtt); + if (err) + goto err_buf; + + err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf); + if (err) + goto err_mtt; + + srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL); + if (!srq->wrid) { + err = -ENOMEM; + goto err_mtt; + } + } + + err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt, + srq->db.dma, &srq->msrq); + if (err) + goto err_wrid; + + srq->msrq.event = mlx4_ib_srq_event; + + if (pd->uobject) + if (ib_copy_to_udata(udata, 
&srq->msrq.srqn, sizeof (__u32))) { + err = -EFAULT; + goto err_wrid; + } + + init_attr->attr.max_wr = srq->msrq.max - 1; + + return &srq->ibsrq; + +err_wrid: + if (pd->uobject) + mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); + else + kfree(srq->wrid); + +err_mtt: + mlx4_mtt_cleanup(dev->dev, &srq->mtt); + +err_buf: + if (pd->uobject) + ib_umem_release(srq->umem); + else + mlx4_buf_free(dev->dev, buf_size, &srq->buf); + +err_db: + if (!pd->uobject) + mlx4_ib_db_free(dev, &srq->db); + +err_srq: + kfree(srq); + + return ERR_PTR(err); +} + +int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) +{ + struct mlx4_ib_dev *dev = to_mdev(ibsrq->device); + struct mlx4_ib_srq *srq = to_msrq(ibsrq); + int ret; + + /* We don't support resizing SRQs (yet?) */ + if (attr_mask & IB_SRQ_MAX_WR) + return -EINVAL; + + if (attr_mask & IB_SRQ_LIMIT) { + if (attr->srq_limit >= srq->msrq.max) + return -EINVAL; + + mutex_lock(&srq->mutex); + ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit); + mutex_unlock(&srq->mutex); + + if (ret) + return ret; + } + + return 0; +} + +int mlx4_ib_destroy_srq(struct ib_srq *srq) +{ + struct mlx4_ib_dev *dev = to_mdev(srq->device); + struct mlx4_ib_srq *msrq = to_msrq(srq); + + mlx4_srq_free(dev->dev, &msrq->msrq); + mlx4_mtt_cleanup(dev->dev, &msrq->mtt); + + if (srq->uobject) { + mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); + ib_umem_release(msrq->umem); + } else { + kfree(msrq->wrid); + mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, + &msrq->buf); + mlx4_ib_db_free(dev, &msrq->db); + } + + kfree(msrq); + + return 0; +} + +void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index) +{ + struct mlx4_wqe_srq_next_seg *next; + + /* always called with interrupts disabled. */ + spin_lock(&srq->lock); + + next = get_wqe(srq, srq->tail); + next->next_wqe_index = cpu_to_be16(wqe_index); + srq->tail = wqe_index; + + spin_unlock(&srq->lock); +} + +int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mlx4_ib_srq *srq = to_msrq(ibsrq); + struct mlx4_wqe_srq_next_seg *next; + struct mlx4_wqe_data_seg *scat; + unsigned long flags; + int err = 0; + int nreq; + int i; + + spin_lock_irqsave(&srq->lock, flags); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (unlikely(wr->num_sge > srq->msrq.max_gs)) { + err = -EINVAL; + *bad_wr = wr; + break; + } + + srq->wrid[srq->head] = wr->wr_id; + + next = get_wqe(srq, srq->head); + srq->head = be16_to_cpu(next->next_wqe_index); + scat = (struct mlx4_wqe_data_seg *) (next + 1); + + for (i = 0; i < wr->num_sge; ++i) { + scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); + scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); + scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); + } + + if (i < srq->msrq.max_gs) { + scat[i].byte_count = 0; + scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); + scat[i].addr = 0; + } + } + + if (likely(nreq)) { + srq->wqe_ctr += nreq; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + + *srq->db.db = cpu_to_be32(srq->wqe_ctr); + } + + spin_unlock_irqrestore(&srq->lock, flags); + + return err; +} diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h new file mode 100644 index 000000000000..5b8eddc9fa83 --- /dev/null +++ b/drivers/infiniband/hw/mlx4/user.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_IB_USER_H +#define MLX4_IB_USER_H + +#include + +/* + * Increment this value if any changes that break userspace ABI + * compatibility are made. + */ +#define MLX4_IB_UVERBS_ABI_VERSION 1 + +/* + * Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). + * In particular do not use pointer types -- pass pointers in __u64 + * instead. + */ + +struct mlx4_ib_alloc_ucontext_resp { + __u32 qp_tab_size; + __u16 bf_reg_size; + __u16 bf_regs_per_page; +}; + +struct mlx4_ib_alloc_pd_resp { + __u32 pdn; + __u32 reserved; +}; + +struct mlx4_ib_create_cq { + __u64 buf_addr; + __u64 db_addr; +}; + +struct mlx4_ib_create_cq_resp { + __u32 cqn; + __u32 reserved; +}; + +struct mlx4_ib_resize_cq { + __u64 buf_addr; +}; + +struct mlx4_ib_create_srq { + __u64 buf_addr; + __u64 db_addr; +}; + +struct mlx4_ib_create_srq_resp { + __u32 srqn; + __u32 reserved; +}; + +struct mlx4_ib_create_qp { + __u64 buf_addr; + __u64 db_addr; +}; + +#endif /* MLX4_IB_USER_H */ diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index b86ccd2ecd5b..6db12d0265da 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2493,6 +2493,20 @@ config PASEMI_MAC This driver supports the on-chip 1/10Gbit Ethernet controller on PA Semi's PWRficient line of chips. +config MLX4_CORE + tristate + depends on PCI + default n + +config MLX4_DEBUG + bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED) + default y + ---help--- + This option causes debugging code to be compiled into the + mlx4_core driver. The output can be turned on via the + debug_level module parameter (which can also be set after + the driver is loaded through sysfs). 
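A minimal sketch of how the debug_level parameter mentioned in the help text above is
conventionally wired up (the actual definition would live in the driver's main.c, which
this excerpt does not show; the identifiers below follow the usual mthca/mlx4 convention
and are assumptions, not lines taken from this patch):

#ifdef CONFIG_MLX4_DEBUG
/* Runtime knob consulted by the driver's debug printout macros; 0 keeps tracing silent. */
int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif /* CONFIG_MLX4_DEBUG */

As the help text notes, the same knob is also reachable after the module is loaded, e.g.
via "echo 1 > /sys/module/mlx4_core/parameters/debug_level".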
+ endmenu source "drivers/net/tokenring/Kconfig" diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 59c0459a037c..7faeeeabc838 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -197,6 +197,7 @@ obj-$(CONFIG_SMC911X) += smc911x.o obj-$(CONFIG_DM9000) += dm9000.o obj-$(CONFIG_FEC_8XX) += fec_8xx/ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o +obj-$(CONFIG_MLX4_CORE) += mlx4/ obj-$(CONFIG_MACB) += macb.o diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile new file mode 100644 index 000000000000..0952a6528f58 --- /dev/null +++ b/drivers/net/mlx4/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_MLX4_CORE) += mlx4_core.o + +mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ + mr.o pd.o profile.o qp.o reset.o srq.o diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c new file mode 100644 index 000000000000..9ffdb9d29da9 --- /dev/null +++ b/drivers/net/mlx4/alloc.c @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include + +#include "mlx4.h" + +u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap) +{ + u32 obj; + + spin_lock(&bitmap->lock); + + obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); + if (obj >= bitmap->max) { + bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask; + obj = find_first_zero_bit(bitmap->table, bitmap->max); + } + + if (obj < bitmap->max) { + set_bit(obj, bitmap->table); + obj |= bitmap->top; + bitmap->last = obj + 1; + } else + obj = -1; + + spin_unlock(&bitmap->lock); + + return obj; +} + +void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) +{ + obj &= bitmap->max - 1; + + spin_lock(&bitmap->lock); + clear_bit(obj, bitmap->table); + bitmap->last = min(bitmap->last, obj); + bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask; + spin_unlock(&bitmap->lock); +} + +int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved) +{ + int i; + + /* num must be a power of 2 */ + if (num != roundup_pow_of_two(num)) + return -EINVAL; + + bitmap->last = 0; + bitmap->top = 0; + bitmap->max = num; + bitmap->mask = mask; + spin_lock_init(&bitmap->lock); + bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL); + if (!bitmap->table) + return -ENOMEM; + + for (i = 0; i < reserved; ++i) + set_bit(i, bitmap->table); + + return 0; +} + +void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap) +{ + kfree(bitmap->table); +} + +/* + * Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ + +int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, + struct mlx4_buf *buf) +{ + dma_addr_t t; + + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); + if (!buf->u.direct.buf) + return -ENOMEM; + + buf->u.direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + + memset(buf->u.direct.buf, 0, size); + } else { + int i; + + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list, + GFP_KERNEL); + if (!buf->u.page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; ++i) { + buf->u.page_list[i].buf = + dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL); + if (!buf->u.page_list[i].buf) + goto err_free; + + buf->u.page_list[i].map = t; + + memset(buf->u.page_list[i].buf, 0, PAGE_SIZE); + } + } + + return 0; + +err_free: + mlx4_buf_free(dev, size, buf); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(mlx4_buf_alloc); + +void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) +{ + int i; + + if (buf->nbufs == 1) + dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf, + buf->u.direct.map); + else { + for (i = 0; i < buf->nbufs; ++i) + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + buf->u.page_list[i].buf, + buf->u.page_list[i].map); + kfree(buf->u.page_list); + } +} +EXPORT_SYMBOL_GPL(mlx4_buf_free); diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c new file mode 100644 index 000000000000..1bb088aeaf71 --- /dev/null +++ b/drivers/net/mlx4/catas.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "mlx4.h" + +void mlx4_handle_catas_err(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + int i; + + mlx4_err(dev, "Catastrophic error detected:\n"); + for (i = 0; i < priv->fw.catas_size; ++i) + mlx4_err(dev, " buf[%02x]: %08x\n", + i, swab32(readl(priv->catas_err.map + i))); + + mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0); +} + +void mlx4_map_catas_buf(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + unsigned long addr; + + addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) + + priv->fw.catas_offset; + + priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); + if (!priv->catas_err.map) + mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n", + addr); + +} + +void mlx4_unmap_catas_buf(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (priv->catas_err.map) + iounmap(priv->catas_err.map); +} diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c new file mode 100644 index 000000000000..c1f81a993f5d --- /dev/null +++ b/drivers/net/mlx4/cmd.c @@ -0,0 +1,429 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include + +#include + +#include + +#include "mlx4.h" + +#define CMD_POLL_TOKEN 0xffff + +enum { + /* command completed successfully: */ + CMD_STAT_OK = 0x00, + /* Internal error (such as a bus error) occurred while processing command: */ + CMD_STAT_INTERNAL_ERR = 0x01, + /* Operation/command not supported or opcode modifier not supported: */ + CMD_STAT_BAD_OP = 0x02, + /* Parameter not supported or parameter out of range: */ + CMD_STAT_BAD_PARAM = 0x03, + /* System not enabled or bad system state: */ + CMD_STAT_BAD_SYS_STATE = 0x04, + /* Attempt to access reserved or unallocaterd resource: */ + CMD_STAT_BAD_RESOURCE = 0x05, + /* Requested resource is currently executing a command, or is otherwise busy: */ + CMD_STAT_RESOURCE_BUSY = 0x06, + /* Required capability exceeds device limits: */ + CMD_STAT_EXCEED_LIM = 0x08, + /* Resource is not in the appropriate state or ownership: */ + CMD_STAT_BAD_RES_STATE = 0x09, + /* Index out of range: */ + CMD_STAT_BAD_INDEX = 0x0a, + /* FW image corrupted: */ + CMD_STAT_BAD_NVMEM = 0x0b, + /* Attempt to modify a QP/EE which is not in the presumed state: */ + CMD_STAT_BAD_QP_STATE = 0x10, + /* Bad segment parameters (Address/Size): */ + CMD_STAT_BAD_SEG_PARAM = 0x20, + /* Memory Region has Memory Windows bound to: */ + CMD_STAT_REG_BOUND = 0x21, + /* HCA local attached memory not present: */ + CMD_STAT_LAM_NOT_PRE = 0x22, + /* Bad management packet (silently discarded): */ + CMD_STAT_BAD_PKT = 0x30, + /* More outstanding CQEs in CQ than new CQ size: */ + CMD_STAT_BAD_SIZE = 0x40 +}; + +enum { + HCR_IN_PARAM_OFFSET = 0x00, + HCR_IN_MODIFIER_OFFSET = 0x08, + HCR_OUT_PARAM_OFFSET = 0x0c, + HCR_TOKEN_OFFSET = 0x14, + HCR_STATUS_OFFSET = 0x18, + + HCR_OPMOD_SHIFT = 12, + HCR_T_BIT = 21, + HCR_E_BIT = 22, + HCR_GO_BIT = 23 +}; + +enum { + GO_BIT_TIMEOUT = 10000 +}; + +struct mlx4_cmd_context { + struct completion done; + int result; + int next; + u64 out_param; + u16 token; +}; + +static int mlx4_status_to_errno(u8 status) { + static const int trans_table[] = { + [CMD_STAT_INTERNAL_ERR] = -EIO, + [CMD_STAT_BAD_OP] = -EPERM, + [CMD_STAT_BAD_PARAM] = -EINVAL, + [CMD_STAT_BAD_SYS_STATE] = -ENXIO, + [CMD_STAT_BAD_RESOURCE] = -EBADF, + [CMD_STAT_RESOURCE_BUSY] = -EBUSY, + [CMD_STAT_EXCEED_LIM] = -ENOMEM, + [CMD_STAT_BAD_RES_STATE] = -EBADF, + [CMD_STAT_BAD_INDEX] = -EBADF, + [CMD_STAT_BAD_NVMEM] = -EFAULT, + [CMD_STAT_BAD_QP_STATE] = -EINVAL, + [CMD_STAT_BAD_SEG_PARAM] = -EFAULT, + [CMD_STAT_REG_BOUND] = -EBUSY, + [CMD_STAT_LAM_NOT_PRE] = -EAGAIN, + [CMD_STAT_BAD_PKT] = -EINVAL, + [CMD_STAT_BAD_SIZE] = -ENOMEM, + }; + + if (status >= ARRAY_SIZE(trans_table) || + (status != CMD_STAT_OK && trans_table[status] == 0)) + return -EIO; + + return trans_table[status]; +} + +static int cmd_pending(struct mlx4_dev *dev) +{ + u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); + + return (status & swab32(1 << HCR_GO_BIT)) || + (mlx4_priv(dev)->cmd.toggle == + !!(status & swab32(1 << HCR_T_BIT))); +} + +static int mlx4_cmd_post(struct mlx4_dev *dev, 
u64 in_param, u64 out_param, + u32 in_modifier, u8 op_modifier, u16 op, u16 token, + int event) +{ + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + u32 __iomem *hcr = cmd->hcr; + int ret = -EAGAIN; + unsigned long end; + + mutex_lock(&cmd->hcr_mutex); + + end = jiffies; + if (event) + end += HZ * 10; + + while (cmd_pending(dev)) { + if (time_after_eq(jiffies, end)) + goto out; + cond_resched(); + } + + /* + * We use writel (instead of something like memcpy_toio) + * because writes of less than 32 bits to the HCR don't work + * (and some architectures such as ia64 implement memcpy_toio + * in terms of writeb). + */ + __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0); + __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1); + __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2); + __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3); + __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4); + __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5); + + /* __raw_writel may not order writes. */ + wmb(); + + __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | + (cmd->toggle << HCR_T_BIT) | + (event ? (1 << HCR_E_BIT) : 0) | + (op_modifier << HCR_OPMOD_SHIFT) | + op), hcr + 6); + cmd->toggle = cmd->toggle ^ 1; + + ret = 0; + +out: + mutex_unlock(&cmd->hcr_mutex); + return ret; +} + +static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + void __iomem *hcr = priv->cmd.hcr; + int err = 0; + unsigned long end; + + down(&priv->cmd.poll_sem); + + err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, + in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); + if (err) + goto out; + + end = msecs_to_jiffies(timeout) + jiffies; + while (cmd_pending(dev) && time_before(jiffies, end)) + cond_resched(); + + if (cmd_pending(dev)) { + err = -ETIMEDOUT; + goto out; + } + + if (out_is_imm) + *out_param = + (u64) be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 | + (u64) be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4)); + + err = mlx4_status_to_errno(be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24); + +out: + up(&priv->cmd.poll_sem); + return err; +} + +void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_context *context = + &priv->cmd.context[token & priv->cmd.token_mask]; + + /* previously timed out command completing at long last */ + if (token != context->token) + return; + + context->result = mlx4_status_to_errno(status); + context->out_param = out_param; + + context->token += priv->cmd.token_mask + 1; + + complete(&context->done); +} + +static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout) +{ + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_context *context; + int err = 0; + + down(&cmd->event_sem); + + spin_lock(&cmd->context_lock); + BUG_ON(cmd->free_head < 0); + context = &cmd->context[cmd->free_head]; + cmd->free_head = context->next; + spin_unlock(&cmd->context_lock); + + init_completion(&context->done); + + mlx4_cmd_post(dev, in_param, out_param ? 
*out_param : 0, + in_modifier, op_modifier, op, context->token, 1); + + if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { + err = -EBUSY; + goto out; + } + + err = context->result; + if (err) + goto out; + + if (out_is_imm) + *out_param = context->out_param; + +out: + spin_lock(&cmd->context_lock); + context->next = cmd->free_head; + cmd->free_head = context - cmd->context; + spin_unlock(&cmd->context_lock); + + up(&cmd->event_sem); + return err; +} + +int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout) +{ + if (mlx4_priv(dev)->cmd.use_events) + return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm, + in_modifier, op_modifier, op, timeout); + else + return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm, + in_modifier, op_modifier, op, timeout); +} +EXPORT_SYMBOL_GPL(__mlx4_cmd); + +int mlx4_cmd_init(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_init(&priv->cmd.hcr_mutex); + sema_init(&priv->cmd.poll_sem, 1); + priv->cmd.use_events = 0; + priv->cmd.toggle = 1; + + priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE, + MLX4_HCR_SIZE); + if (!priv->cmd.hcr) { + mlx4_err(dev, "Couldn't map command register."); + return -ENOMEM; + } + + priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, + MLX4_MAILBOX_SIZE, + MLX4_MAILBOX_SIZE, 0); + if (!priv->cmd.pool) { + iounmap(priv->cmd.hcr); + return -ENOMEM; + } + + return 0; +} + +void mlx4_cmd_cleanup(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + pci_pool_destroy(priv->cmd.pool); + iounmap(priv->cmd.hcr); +} + +/* + * Switch to using events to issue FW commands (can only be called + * after event queue for command events has been initialized). 
+ */ +int mlx4_cmd_use_events(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + priv->cmd.context = kmalloc(priv->cmd.max_cmds * + sizeof (struct mlx4_cmd_context), + GFP_KERNEL); + if (!priv->cmd.context) + return -ENOMEM; + + for (i = 0; i < priv->cmd.max_cmds; ++i) { + priv->cmd.context[i].token = i; + priv->cmd.context[i].next = i + 1; + } + + priv->cmd.context[priv->cmd.max_cmds - 1].next = -1; + priv->cmd.free_head = 0; + + sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds); + spin_lock_init(&priv->cmd.context_lock); + + for (priv->cmd.token_mask = 1; + priv->cmd.token_mask < priv->cmd.max_cmds; + priv->cmd.token_mask <<= 1) + ; /* nothing */ + --priv->cmd.token_mask; + + priv->cmd.use_events = 1; + + down(&priv->cmd.poll_sem); + + return 0; +} + +/* + * Switch back to polling (used when shutting down the device) + */ +void mlx4_cmd_use_polling(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + priv->cmd.use_events = 0; + + for (i = 0; i < priv->cmd.max_cmds; ++i) + down(&priv->cmd.event_sem); + + kfree(priv->cmd.context); + + up(&priv->cmd.poll_sem); +} + +struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) +{ + struct mlx4_cmd_mailbox *mailbox; + + mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL, + &mailbox->dma); + if (!mailbox->buf) { + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + + return mailbox; +} +EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); + +void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox) +{ + if (!mailbox) + return; + + pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} +EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox); diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c new file mode 100644 index 000000000000..437d78ad0912 --- /dev/null +++ b/drivers/net/mlx4/cq.c @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include + +#include "mlx4.h" +#include "icm.h" + +struct mlx4_cq_context { + __be32 flags; + u16 reserved1[3]; + __be16 page_offset; + __be32 logsize_usrpage; + u8 reserved2; + u8 cq_period; + u8 reserved3; + u8 cq_max_count; + u8 reserved4[3]; + u8 comp_eqn; + u8 log_page_size; + u8 reserved5[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_index; + __be32 producer_index; + u8 reserved6[2]; + __be64 db_rec_addr; +}; + +#define MLX4_CQ_STATUS_OK ( 0 << 28) +#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28) +#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28) +#define MLX4_CQ_FLAG_CC ( 1 << 18) +#define MLX4_CQ_FLAG_OI ( 1 << 17) +#define MLX4_CQ_STATE_ARMED ( 9 << 8) +#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8) +#define MLX4_EQ_STATE_FIRED (10 << 8) + +void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) +{ + struct mlx4_cq *cq; + + cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, + cqn & (dev->caps.num_cqs - 1)); + if (!cq) { + mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn); + return; + } + + ++cq->arm_sn; + + cq->comp(cq); +} + +void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) +{ + struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; + struct mlx4_cq *cq; + + spin_lock(&cq_table->lock); + + cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); + if (cq) + atomic_inc(&cq->refcount); + + spin_unlock(&cq_table->lock); + + if (!cq) { + mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int cq_num) +{ + return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ, + MLX4_CMD_TIME_CLASS_A); +} + +static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int cq_num) +{ + return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num, + mailbox ? 
0 : 1, MLX4_CMD_HW2SW_CQ, + MLX4_CMD_TIME_CLASS_A); +} + +int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, + struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_cq_context *cq_context; + u64 mtt_addr; + int err; + + cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap); + if (cq->cqn == -1) + return -ENOMEM; + + err = mlx4_table_get(dev, &cq_table->table, cq->cqn); + if (err) + goto err_out; + + err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn); + if (err) + goto err_put; + + spin_lock_irq(&cq_table->lock); + err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); + spin_unlock_irq(&cq_table->lock); + if (err) + goto err_cmpt_put; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_radix; + } + + cq_context = mailbox->buf; + memset(cq_context, 0, sizeof *cq_context); + + cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); + cq_context->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn; + cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; + + mtt_addr = mlx4_mtt_addr(dev, mtt); + cq_context->mtt_base_addr_h = mtt_addr >> 32; + cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + cq_context->db_rec_addr = cpu_to_be64(db_rec); + + err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) + goto err_radix; + + cq->cons_index = 0; + cq->arm_sn = 1; + cq->uar = uar; + atomic_set(&cq->refcount, 1); + init_completion(&cq->free); + + return 0; + +err_radix: + spin_lock_irq(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); + spin_unlock_irq(&cq_table->lock); + +err_cmpt_put: + mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn); + +err_put: + mlx4_table_put(dev, &cq_table->table, cq->cqn); + +err_out: + mlx4_bitmap_free(&cq_table->bitmap, cq->cqn); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_cq_alloc); + +void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + int err; + + err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn); + if (err) + mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); + + synchronize_irq(priv->eq_table.eq[MLX4_EQ_COMP].irq); + + spin_lock_irq(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); + spin_unlock_irq(&cq_table->lock); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); + wait_for_completion(&cq->free); + + mlx4_table_put(dev, &cq_table->table, cq->cqn); + mlx4_bitmap_free(&cq_table->bitmap, cq->cqn); +} +EXPORT_SYMBOL_GPL(mlx4_cq_free); + +int __devinit mlx4_init_cq_table(struct mlx4_dev *dev) +{ + struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; + int err; + + spin_lock_init(&cq_table->lock); + INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); + + err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, + dev->caps.num_cqs - 1, dev->caps.reserved_cqs); + if (err) + return err; + + return 0; +} + +void mlx4_cleanup_cq_table(struct mlx4_dev *dev) +{ + /* Nothing to do to clean up radix_tree */ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap); +} diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c new file mode 100644 index 000000000000..acf1c801a1b8 --- /dev/null +++ b/drivers/net/mlx4/eq.c @@ -0,0 +1,696 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include + +#include "mlx4.h" +#include "fw.h" + +enum { + MLX4_NUM_ASYNC_EQE = 0x100, + MLX4_NUM_SPARE_EQE = 0x80, + MLX4_EQ_ENTRY_SIZE = 0x20 +}; + +/* + * Must be packed because start is 64 bits but only aligned to 32 bits. + */ +struct mlx4_eq_context { + __be32 flags; + u16 reserved1[3]; + __be16 page_offset; + u8 log_eq_size; + u8 reserved2[4]; + u8 eq_period; + u8 reserved3; + u8 eq_max_count; + u8 reserved4[3]; + u8 intr; + u8 log_page_size; + u8 reserved5[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + u32 reserved6[2]; + __be32 consumer_index; + __be32 producer_index; + u32 reserved7[4]; +}; + +#define MLX4_EQ_STATUS_OK ( 0 << 28) +#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) +#define MLX4_EQ_OWNER_SW ( 0 << 24) +#define MLX4_EQ_OWNER_HW ( 1 << 24) +#define MLX4_EQ_FLAG_EC ( 1 << 18) +#define MLX4_EQ_FLAG_OI ( 1 << 17) +#define MLX4_EQ_STATE_ARMED ( 9 << 8) +#define MLX4_EQ_STATE_FIRED (10 << 8) +#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8) + +#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \ + (1ull << MLX4_EVENT_TYPE_COMM_EST) | \ + (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \ + (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ + (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ + (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ + (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ + (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ + (1ull << MLX4_EVENT_TYPE_CMD)) +#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) + +struct mlx4_eqe { + u8 reserved1; + u8 type; + u8 reserved2; + u8 subtype; + union { + u32 raw[6]; + struct { + __be32 cqn; + } __attribute__((packed)) comp; + struct { + u16 reserved1; + __be16 token; + u32 reserved2; + u8 reserved3[3]; + u8 status; + __be64 out_param; + } __attribute__((packed)) cmd; + struct { + 
__be32 qpn; + } __attribute__((packed)) qp; + struct { + __be32 srqn; + } __attribute__((packed)) srq; + struct { + __be32 cqn; + u32 reserved1; + u8 reserved2[3]; + u8 syndrome; + } __attribute__((packed)) cq_err; + struct { + u32 reserved1[2]; + __be32 port; + } __attribute__((packed)) port_change; + } event; + u8 reserved3[3]; + u8 owner; +} __attribute__((packed)); + +static void eq_set_ci(struct mlx4_eq *eq, int req_not) +{ + __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | + req_not << 31), + eq->doorbell); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry) +{ + unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE; + return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE; +} + +static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq) +{ + struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index); + return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; +} + +static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) +{ + struct mlx4_eqe *eqe; + int cqn; + int eqes_found = 0; + int set_ci = 0; + + while ((eqe = next_eqe_sw(eq))) { + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. + */ + rmb(); + + switch (eqe->type) { + case MLX4_EVENT_TYPE_COMP: + cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; + mlx4_cq_completion(dev, cqn); + break; + + case MLX4_EVENT_TYPE_PATH_MIG: + case MLX4_EVENT_TYPE_COMM_EST: + case MLX4_EVENT_TYPE_SQ_DRAINED: + case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: + case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: + case MLX4_EVENT_TYPE_PATH_MIG_FAILED: + case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: + mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, + eqe->type); + break; + + case MLX4_EVENT_TYPE_SRQ_LIMIT: + case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: + mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff, + eqe->type); + break; + + case MLX4_EVENT_TYPE_CMD: + mlx4_cmd_event(dev, + be16_to_cpu(eqe->event.cmd.token), + eqe->event.cmd.status, + be64_to_cpu(eqe->event.cmd.out_param)); + break; + + case MLX4_EVENT_TYPE_PORT_CHANGE: + mlx4_dispatch_event(dev, eqe->type, eqe->subtype, + be32_to_cpu(eqe->event.port_change.port) >> 28); + break; + + case MLX4_EVENT_TYPE_CQ_ERROR: + mlx4_warn(dev, "CQ %s on CQN %06x\n", + eqe->event.cq_err.syndrome == 1 ? + "overrun" : "access violation", + be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); + mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn), + eqe->type); + break; + + case MLX4_EVENT_TYPE_EQ_OVERFLOW: + mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); + break; + + case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: + case MLX4_EVENT_TYPE_ECC_DETECT: + default: + mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n", + eqe->type, eqe->subtype, eq->eqn, eq->cons_index); + break; + }; + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* + * The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with MLX4_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. + */ + if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { + /* + * Conditional on hca_type is OK here because + * this is a rare case, not the fast path. 
+ */ + eq_set_ci(eq, 0); + set_ci = 0; + } + } + + eq_set_ci(eq, 1); + + return eqes_found; +} + +static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) +{ + struct mlx4_dev *dev = dev_ptr; + struct mlx4_priv *priv = mlx4_priv(dev); + int work = 0; + int i; + + writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); + + for (i = 0; i < MLX4_EQ_CATAS; ++i) + work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); + + return IRQ_RETVAL(work); +} + +static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) +{ + struct mlx4_eq *eq = eq_ptr; + struct mlx4_dev *dev = eq->dev; + + mlx4_eq_int(dev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr) +{ + mlx4_handle_catas_err(dev_ptr); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, + int eq_num) +{ + return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, + 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B); +} + +static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int eq_num) +{ + return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ, + MLX4_CMD_TIME_CLASS_A); +} + +static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int eq_num) +{ + return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ, + MLX4_CMD_TIME_CLASS_A); +} + +static void __devinit __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, + struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int index; + + index = eq->eqn / 4 - dev->caps.reserved_eqs / 4; + + if (!priv->eq_table.uar_map[index]) { + priv->eq_table.uar_map[index] = + ioremap(pci_resource_start(dev->pdev, 2) + + ((eq->eqn / 4) << PAGE_SHIFT), + PAGE_SIZE); + if (!priv->eq_table.uar_map[index]) { + mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", + eq->eqn); + return NULL; + } + } + + return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); +} + +static int __devinit mlx4_create_eq(struct mlx4_dev *dev, int nent, + u8 intr, struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_eq_context *eq_context; + int npages; + u64 *dma_list = NULL; + dma_addr_t t; + u64 mtt_addr; + int err = -ENOMEM; + int i; + + eq->dev = dev; + eq->nent = roundup_pow_of_two(max(nent, 2)); + npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE; + + eq->page_list = kmalloc(npages * sizeof *eq->page_list, + GFP_KERNEL); + if (!eq->page_list) + goto err_out; + + for (i = 0; i < npages; ++i) + eq->page_list[i].buf = NULL; + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + goto err_out_free; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + goto err_out_free; + eq_context = mailbox->buf; + + for (i = 0; i < npages; ++i) { + eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, + PAGE_SIZE, &t, GFP_KERNEL); + if (!eq->page_list[i].buf) + goto err_out_free_pages; + + dma_list[i] = t; + eq->page_list[i].map = t; + + memset(eq->page_list[i].buf, 0, PAGE_SIZE); + } + + eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap); + if (eq->eqn == -1) + goto err_out_free_pages; + + eq->doorbell = mlx4_get_eq_uar(dev, eq); + if (!eq->doorbell) { + err = -ENOMEM; + goto err_out_free_eq; + } + + err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); + if (err) + goto err_out_free_eq; + + err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); + if (err) + 
goto err_out_free_mtt; + + memset(eq_context, 0, sizeof *eq_context); + eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | + MLX4_EQ_STATE_ARMED); + eq_context->log_eq_size = ilog2(eq->nent); + eq_context->intr = intr; + eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT; + + mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); + eq_context->mtt_base_addr_h = mtt_addr >> 32; + eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + + err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn); + if (err) { + mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err); + goto err_out_free_mtt; + } + + kfree(dma_list); + mlx4_free_cmd_mailbox(dev, mailbox); + + eq->cons_index = 0; + + return err; + +err_out_free_mtt: + mlx4_mtt_cleanup(dev, &eq->mtt); + +err_out_free_eq: + mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn); + +err_out_free_pages: + for (i = 0; i < npages; ++i) + if (eq->page_list[i].buf) + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + eq->page_list[i].buf, + eq->page_list[i].map); + + mlx4_free_cmd_mailbox(dev, mailbox); + +err_out_free: + kfree(eq->page_list); + kfree(dma_list); + +err_out: + return err; +} + +static void mlx4_free_eq(struct mlx4_dev *dev, + struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + int err; + int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return; + + err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn); + if (err) + mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); + + if (0) { + mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); + for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) { + if (i % 4 == 0) + printk("[%02x] ", i * 4); + printk(" %08x", be32_to_cpup(mailbox->buf + i * 4)); + if ((i + 1) % 4 == 0) + printk("\n"); + } + } + + mlx4_mtt_cleanup(dev, &eq->mtt); + for (i = 0; i < npages; ++i) + pci_free_consistent(dev->pdev, PAGE_SIZE, + eq->page_list[i].buf, + eq->page_list[i].map); + + kfree(eq->page_list); + mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn); + mlx4_free_cmd_mailbox(dev, mailbox); +} + +static void mlx4_free_irqs(struct mlx4_dev *dev) +{ + struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; + int i; + + if (eq_table->have_irq) + free_irq(dev->pdev->irq, dev); + for (i = 0; i < MLX4_NUM_EQ; ++i) + if (eq_table->eq[i].have_irq) + free_irq(eq_table->eq[i].irq, eq_table->eq + i); +} + +static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + + priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); + if (!priv->clr_base) { + mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n"); + return -ENOMEM; + } + + return 0; +} + +static void mlx4_unmap_clr_int(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + iounmap(priv->clr_base); +} + +int __devinit mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int ret; + + /* + * We assume that mapping one page is enough for the whole EQ + * context table. This is fine with all current HCAs, because + * we only use 32 EQs and each EQ uses 64 bytes of context + * memory, or 1 KB total. 
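+	 * The single page allocated below is DMA-mapped and then made
+	 * visible to the HCA at icm_virt with a one-page MAP_ICM
+	 * command (mlx4_MAP_ICM_page()).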
+ */ + priv->eq_table.icm_virt = icm_virt; + priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER); + if (!priv->eq_table.icm_page) + return -ENOMEM; + priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(priv->eq_table.icm_dma)) { + __free_page(priv->eq_table.icm_page); + return -ENOMEM; + } + + ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt); + if (ret) { + pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->eq_table.icm_page); + } + + return ret; +} + +void mlx4_unmap_eq_icm(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1); + pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->eq_table.icm_page); +} + +int __devinit mlx4_init_eq_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + int i; + + err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, + dev->caps.num_eqs - 1, dev->caps.reserved_eqs); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i) + priv->eq_table.uar_map[i] = NULL; + + err = mlx4_map_clr_int(dev); + if (err) + goto err_out_free; + + priv->eq_table.clr_mask = + swab32(1 << (priv->eq_table.inta_pin & 31)); + priv->eq_table.clr_int = priv->clr_base + + (priv->eq_table.inta_pin < 32 ? 4 : 0); + + err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0, + &priv->eq_table.eq[MLX4_EQ_COMP]); + if (err) + goto err_out_unmap; + + err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? 
MLX4_EQ_ASYNC : 0, + &priv->eq_table.eq[MLX4_EQ_ASYNC]); + if (err) + goto err_out_comp; + + if (dev->flags & MLX4_FLAG_MSI_X) { + static const char *eq_name[] = { + [MLX4_EQ_COMP] = DRV_NAME " (comp)", + [MLX4_EQ_ASYNC] = DRV_NAME " (async)", + [MLX4_EQ_CATAS] = DRV_NAME " (catas)" + }; + + err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS, + &priv->eq_table.eq[MLX4_EQ_CATAS]); + if (err) + goto err_out_async; + + for (i = 0; i < MLX4_EQ_CATAS; ++i) { + err = request_irq(priv->eq_table.eq[i].irq, + mlx4_msi_x_interrupt, + 0, eq_name[i], priv->eq_table.eq + i); + if (err) + goto err_out_catas; + + priv->eq_table.eq[i].have_irq = 1; + } + + err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq, + mlx4_catas_interrupt, 0, + eq_name[MLX4_EQ_CATAS], dev); + if (err) + goto err_out_catas; + + priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1; + } else { + err = request_irq(dev->pdev->irq, mlx4_interrupt, + SA_SHIRQ, DRV_NAME, dev); + if (err) + goto err_out_async; + + priv->eq_table.have_irq = 1; + } + + err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); + if (err) + mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); + + for (i = 0; i < MLX4_EQ_CATAS; ++i) + eq_set_ci(&priv->eq_table.eq[i], 1); + + if (dev->flags & MLX4_FLAG_MSI_X) { + err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0, + priv->eq_table.eq[MLX4_EQ_CATAS].eqn); + if (err) + mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n", + priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err); + } + + return 0; + +err_out_catas: + mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]); + +err_out_async: + mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]); + +err_out_comp: + mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]); + +err_out_unmap: + mlx4_unmap_clr_int(dev); + mlx4_free_irqs(dev); + +err_out_free: + mlx4_bitmap_cleanup(&priv->eq_table.bitmap); + return err; +} + +void mlx4_cleanup_eq_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + if (dev->flags & MLX4_FLAG_MSI_X) + mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1, + priv->eq_table.eq[MLX4_EQ_CATAS].eqn); + + mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); + + mlx4_free_irqs(dev); + + for (i = 0; i < MLX4_EQ_CATAS; ++i) + mlx4_free_eq(dev, &priv->eq_table.eq[i]); + if (dev->flags & MLX4_FLAG_MSI_X) + mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]); + + mlx4_unmap_clr_int(dev); + + for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i) + if (priv->eq_table.uar_map[i]) + iounmap(priv->eq_table.uar_map[i]); + + mlx4_bitmap_cleanup(&priv->eq_table.bitmap); +} diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c new file mode 100644 index 000000000000..c42717313663 --- /dev/null +++ b/drivers/net/mlx4/fw.c @@ -0,0 +1,775 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include + +#include "fw.h" +#include "icm.h" + +extern void __buggy_use_of_MLX4_GET(void); +extern void __buggy_use_of_MLX4_PUT(void); + +#define MLX4_GET(dest, source, offset) \ + do { \ + void *__p = (char *) (source) + (offset); \ + switch (sizeof (dest)) { \ + case 1: (dest) = *(u8 *) __p; break; \ + case 2: (dest) = be16_to_cpup(__p); break; \ + case 4: (dest) = be32_to_cpup(__p); break; \ + case 8: (dest) = be64_to_cpup(__p); break; \ + default: __buggy_use_of_MLX4_GET(); \ + } \ + } while (0) + +#define MLX4_PUT(dest, source, offset) \ + do { \ + void *__d = ((char *) (dest) + (offset)); \ + switch (sizeof(source)) { \ + case 1: *(u8 *) __d = (source); break; \ + case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ + case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ + case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ + default: __buggy_use_of_MLX4_PUT(); \ + } \ + } while (0) + +static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags) +{ + static const char *fname[] = { + [ 0] = "RC transport", + [ 1] = "UC transport", + [ 2] = "UD transport", + [ 3] = "SRC transport", + [ 4] = "reliable multicast", + [ 5] = "FCoIB support", + [ 6] = "SRQ support", + [ 7] = "IPoIB checksum offload", + [ 8] = "P_Key violation counter", + [ 9] = "Q_Key violation counter", + [10] = "VMM", + [16] = "MW support", + [17] = "APM support", + [18] = "Atomic ops support", + [19] = "Raw multicast support", + [20] = "Address vector port checking support", + [21] = "UD multicast support", + [24] = "Demand paging support", + [25] = "Router support" + }; + int i; + + mlx4_dbg(dev, "DEV_CAP flags:\n"); + for (i = 0; i < 32; ++i) + if (fname[i] && (flags & (1 << i))) + mlx4_dbg(dev, " %s\n", fname[i]); +} + +int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field; + u16 size; + u16 stat_rate; + int err; + +#define QUERY_DEV_CAP_OUT_SIZE 0x100 +#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 +#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 +#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 +#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 +#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 +#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 +#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 +#define QUERY_DEV_CAP_MAX_EEC_OFFSET 
0x17 +#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 +#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a +#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b +#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d +#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e +#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f +#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 +#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 +#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 +#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 +#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 +#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 +#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b +#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f +#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 +#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 +#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 +#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 +#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b +#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c +#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f +#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 +#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 +#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 +#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b +#define QUERY_DEV_CAP_BF_OFFSET 0x4c +#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d +#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e +#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f +#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 +#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 +#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 +#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 +#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 +#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 +#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 +#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 +#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 +#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 +#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 +#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 +#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 +#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 +#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a +#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c +#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e +#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 +#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 +#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x97 +#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 +#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A); + + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); + dev_cap->reserved_qps = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); + dev_cap->max_qps = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); + dev_cap->reserved_srqs = 1 << (field >> 4); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); + dev_cap->max_srqs = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); + dev_cap->max_cq_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET); + dev_cap->reserved_cqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); + dev_cap->max_cqs = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); + dev_cap->max_mpts = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); + dev_cap->reserved_eqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); + 
dev_cap->max_eqs = 1 << (field & 0x7); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); + dev_cap->reserved_mtts = 1 << (field >> 4); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET); + dev_cap->max_mrw_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); + dev_cap->reserved_mrws = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); + dev_cap->max_mtt_seg = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); + dev_cap->max_requester_per_qp = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); + dev_cap->max_responder_per_qp = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); + dev_cap->max_rdma_global = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); + dev_cap->local_ca_ack_delay = field & 0x1f; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); + dev_cap->max_mtu = field >> 4; + dev_cap->max_port_width = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + dev_cap->max_vl = field >> 4; + dev_cap->num_ports = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); + dev_cap->max_gids = 1 << (field & 0xf); + MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); + dev_cap->stat_rate_support = stat_rate; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); + dev_cap->max_pkeys = 1 << (field & 0xf); + MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); + dev_cap->reserved_uars = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); + dev_cap->uar_size = 1 << ((field & 0x3f) + 20); + MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); + dev_cap->min_page_sz = 1 << field; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); + if (field & 0x80) { + MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); + dev_cap->bf_reg_size = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); + dev_cap->bf_regs_per_page = 1 << (field & 0x3f); + mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", + dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); + } else { + dev_cap->bf_reg_size = 0; + mlx4_dbg(dev, "BlueFlame not available\n"); + } + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); + dev_cap->max_sq_sg = field; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); + dev_cap->max_sq_desc_sz = size; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); + dev_cap->max_qp_per_mcg = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); + dev_cap->reserved_mgms = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); + dev_cap->max_mcgs = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); + dev_cap->reserved_pds = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); + dev_cap->max_pds = 1 << (field & 0x3f); + + MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); + dev_cap->rdmarc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); + dev_cap->qpc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); + dev_cap->aux_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); + dev_cap->altc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); + dev_cap->eqc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); + 
dev_cap->cqc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); + dev_cap->srq_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); + dev_cap->cmpt_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); + dev_cap->mtt_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); + dev_cap->dmpt_entry_sz = size; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); + dev_cap->max_srq_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); + dev_cap->max_qp_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); + dev_cap->resize_srq = field & 1; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); + dev_cap->max_rq_sg = field; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); + dev_cap->max_rq_desc_sz = size; + + MLX4_GET(dev_cap->bmme_flags, outbox, + QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + MLX4_GET(dev_cap->reserved_lkey, outbox, + QUERY_DEV_CAP_RSVD_LKEY_OFFSET); + MLX4_GET(dev_cap->max_icm_sz, outbox, + QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); + + if (dev_cap->bmme_flags & 1) + mlx4_dbg(dev, "Base MM extensions: yes " + "(flags %d, rsvd L_Key %08x)\n", + dev_cap->bmme_flags, dev_cap->reserved_lkey); + else + mlx4_dbg(dev, "Base MM extensions: no\n"); + + /* + * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then + * we can't use any EQs whose doorbell falls on that page, + * even if the EQ itself isn't reserved. + */ + dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, + dev_cap->reserved_eqs); + + mlx4_dbg(dev, "Max ICM size %lld MB\n", + (unsigned long long) dev_cap->max_icm_sz >> 20); + mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", + dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz); + mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", + dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); + mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", + dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); + mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz); + mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", + dev_cap->reserved_mrws, dev_cap->reserved_mtts); + mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", + dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars); + mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", + dev_cap->max_pds, dev_cap->reserved_mgms); + mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", + dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); + mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", + dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu, + dev_cap->max_port_width); + mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", + dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); + mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", + dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); + + dump_dev_cap_flags(dev, dev_cap->flags); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_icm_iter iter; + __be64 *pages; + int lg; + int nent = 0; + int i; + int err = 0; + int ts = 0, tc = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); 
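+
+	/*
+	 * Each mailbox entry is 16 bytes: a big-endian virtual address
+	 * (left as zero when virt == -1, i.e. for MAP_FA and
+	 * MAP_ICM_AUX) followed by the big-endian physical address with
+	 * log2 of the page size (relative to MLX4_ICM_PAGE_SIZE) in its
+	 * low bits.  The mailbox is flushed with a firmware command
+	 * each time it fills up (MLX4_MAILBOX_SIZE / 16 entries).
+	 */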
+ pages = mailbox->buf; + + for (mlx4_icm_first(icm, &iter); + !mlx4_icm_last(&iter); + mlx4_icm_next(&iter)) { + /* + * We have to pass pages that are aligned to their + * size, so find the least significant 1 in the + * address or size and use that as our log2 size. + */ + lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; + if (lg < MLX4_ICM_PAGE_SHIFT) { + mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n", + MLX4_ICM_PAGE_SIZE, + (unsigned long long) mlx4_icm_addr(&iter), + mlx4_icm_size(&iter)); + err = -EINVAL; + goto out; + } + + for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { + if (virt != -1) { + pages[nent * 2] = cpu_to_be64(virt); + virt += 1 << lg; + } + + pages[nent * 2 + 1] = + cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | + (lg - MLX4_ICM_PAGE_SHIFT)); + ts += 1 << (lg - 10); + ++tc; + + if (++nent == MLX4_MAILBOX_SIZE / 16) { + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, + MLX4_CMD_TIME_CLASS_B); + if (err) + goto out; + nent = 0; + } + } + } + + if (nent) + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B); + if (err) + goto out; + + switch (op) { + case MLX4_CMD_MAP_FA: + mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); + break; + case MLX4_CMD_MAP_ICM_AUX: + mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); + break; + case MLX4_CMD_MAP_ICM: + mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", + tc, ts, (unsigned long long) virt - (ts << 10)); + break; + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) +{ + return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); +} + +int mlx4_UNMAP_FA(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B); +} + + +int mlx4_RUN_FW(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A); +} + +int mlx4_QUERY_FW(struct mlx4_dev *dev) +{ + struct mlx4_fw *fw = &mlx4_priv(dev)->fw; + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + int err = 0; + u64 fw_ver; + u8 lg; + +#define QUERY_FW_OUT_SIZE 0x100 +#define QUERY_FW_VER_OFFSET 0x00 +#define QUERY_FW_MAX_CMD_OFFSET 0x0f +#define QUERY_FW_ERR_START_OFFSET 0x30 +#define QUERY_FW_ERR_SIZE_OFFSET 0x38 +#define QUERY_FW_ERR_BAR_OFFSET 0x3c + +#define QUERY_FW_SIZE_OFFSET 0x00 +#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 +#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, + MLX4_CMD_TIME_CLASS_A); + if (err) + goto out; + + MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); + /* + * FW subminor version is at more signifant bits than minor + * version, so swap here. 
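+	 * After the swap the version is laid out as major (bits 47:32),
+	 * minor (bits 31:16) and subminor (bits 15:0), which is the
+	 * order the debug print below uses.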
+ */ + dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | + ((fw_ver & 0xffff0000ull) >> 16) | + ((fw_ver & 0x0000ffffull) << 16); + + MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); + cmd->max_cmds = 1 << lg; + + mlx4_dbg(dev, "FW version %d.%d.%03d, max commands %d\n", + (int) (dev->caps.fw_ver >> 32), + (int) (dev->caps.fw_ver >> 16) & 0xffff, + (int) dev->caps.fw_ver & 0xffff, + cmd->max_cmds); + + MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); + MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); + MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); + fw->catas_bar = (fw->catas_bar >> 6) * 2; + + mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", + (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); + + MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); + MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); + MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); + fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; + + mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); + + /* + * Round up number of system pages needed in case + * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. + */ + fw->fw_pages = + ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> + (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); + + mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", + (unsigned long long) fw->clr_int_base, fw->clr_int_bar); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +static void get_board_id(void *vsd, char *board_id) +{ + int i; + +#define VSD_OFFSET_SIG1 0x00 +#define VSD_OFFSET_SIG2 0xde +#define VSD_OFFSET_MLX_BOARD_ID 0xd0 +#define VSD_OFFSET_TS_BOARD_ID 0x20 + +#define VSD_SIGNATURE_TOPSPIN 0x5ad + + memset(board_id, 0, MLX4_BOARD_ID_LEN); + + if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && + be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { + strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); + } else { + /* + * The board ID is a string but the firmware byte + * swaps each 4-byte word before passing it back to + * us. Therefore we need to swab it before printing. 
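+		 * Only the 16-byte Mellanox board ID at
+		 * VSD_OFFSET_MLX_BOARD_ID is copied here, one 32-bit
+		 * word at a time.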
+ */ + for (i = 0; i < 4; ++i) + ((u32 *) board_id)[i] = + swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); + } +} + +int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + int err; + +#define QUERY_ADAPTER_OUT_SIZE 0x100 +#define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00 +#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 +#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 +#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 +#define QUERY_ADAPTER_VSD_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, + MLX4_CMD_TIME_CLASS_A); + if (err) + goto out; + + MLX4_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); + MLX4_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); + MLX4_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); + MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); + + get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, + adapter->board_id); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) +{ + struct mlx4_cmd_mailbox *mailbox; + __be32 *inbox; + int err; + +#define INIT_HCA_IN_SIZE 0x200 +#define INIT_HCA_VERSION_OFFSET 0x000 +#define INIT_HCA_VERSION 2 +#define INIT_HCA_FLAGS_OFFSET 0x014 +#define INIT_HCA_QPC_OFFSET 0x020 +#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) +#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) +#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) +#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) +#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) +#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) +#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) +#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) +#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) +#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) +#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) +#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) +#define INIT_HCA_MCAST_OFFSET 0x0c0 +#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) +#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) +#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) +#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) +#define INIT_HCA_TPT_OFFSET 0x0f0 +#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) +#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) +#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) +#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) +#define INIT_HCA_UAR_OFFSET 0x120 +#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) +#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + memset(inbox, 0, INIT_HCA_IN_SIZE); + + *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; + +#if defined(__LITTLE_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); +#elif defined(__BIG_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); +#else +#error Host endianness not defined +#endif + /* Check port for UD address vector: 
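+	 * setting this bit asks the HCA to validate the port number
+	 * carried in UD address vectors.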
*/ + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); + + /* QPC/EEC/CQC/EQC/RDMARC attributes */ + + MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); + MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); + MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); + MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); + MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); + MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); + MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); + + /* multicast attributes */ + + MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + + /* TPT attributes */ + + MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); + MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); + + /* UAR attributes */ + + MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET); + MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 1000); + + if (err) + mlx4_err(dev, "INIT_HCA returns %d\n", err); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int port) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *inbox; + int err; + u32 flags; + +#define INIT_PORT_IN_SIZE 256 +#define INIT_PORT_FLAGS_OFFSET 0x00 +#define INIT_PORT_FLAG_SIG (1 << 18) +#define INIT_PORT_FLAG_NG (1 << 17) +#define INIT_PORT_FLAG_G0 (1 << 16) +#define INIT_PORT_VL_SHIFT 4 +#define INIT_PORT_PORT_WIDTH_SHIFT 8 +#define INIT_PORT_MTU_OFFSET 0x04 +#define INIT_PORT_MAX_GID_OFFSET 0x06 +#define INIT_PORT_MAX_PKEY_OFFSET 0x0a +#define INIT_PORT_GUID0_OFFSET 0x10 +#define INIT_PORT_NODE_GUID_OFFSET 0x18 +#define INIT_PORT_SI_GUID_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + memset(inbox, 0, INIT_PORT_IN_SIZE); + + flags = 0; + flags |= param->set_guid0 ? INIT_PORT_FLAG_G0 : 0; + flags |= param->set_node_guid ? INIT_PORT_FLAG_NG : 0; + flags |= param->set_si_guid ? 
INIT_PORT_FLAG_SIG : 0; + flags |= (param->vl_cap & 0xf) << INIT_PORT_VL_SHIFT; + flags |= (param->port_width_cap & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; + MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); + + MLX4_PUT(inbox, param->mtu, INIT_PORT_MTU_OFFSET); + MLX4_PUT(inbox, param->max_gid, INIT_PORT_MAX_GID_OFFSET); + MLX4_PUT(inbox, param->max_pkey, INIT_PORT_MAX_PKEY_OFFSET); + MLX4_PUT(inbox, param->guid0, INIT_PORT_GUID0_OFFSET); + MLX4_PUT(inbox, param->node_guid, INIT_PORT_NODE_GUID_OFFSET); + MLX4_PUT(inbox, param->si_guid, INIT_PORT_SI_GUID_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A); + + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); + +int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) +{ + return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000); +} +EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); + +int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) +{ + return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000); +} + +int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) +{ + int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, + MLX4_CMD_SET_ICM_SIZE, + MLX4_CMD_TIME_CLASS_A); + if (ret) + return ret; + + /* + * Round up number of system pages needed in case + * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. + */ + *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> + (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); + + return 0; +} + +int mlx4_NOP(struct mlx4_dev *dev) +{ + /* Input modifier of 0x1f means "finish as soon as possible." */ + return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); +} diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h new file mode 100644 index 000000000000..2616fa53d4d0 --- /dev/null +++ b/drivers/net/mlx4/fw.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX4_FW_H +#define MLX4_FW_H + +#include "mlx4.h" +#include "icm.h" + +struct mlx4_dev_cap { + int max_srq_sz; + int max_qp_sz; + int reserved_qps; + int max_qps; + int reserved_srqs; + int max_srqs; + int max_cq_sz; + int reserved_cqs; + int max_cqs; + int max_mpts; + int reserved_eqs; + int max_eqs; + int reserved_mtts; + int max_mrw_sz; + int reserved_mrws; + int max_mtt_seg; + int max_requester_per_qp; + int max_responder_per_qp; + int max_rdma_global; + int local_ca_ack_delay; + int max_mtu; + int max_port_width; + int max_vl; + int num_ports; + int max_gids; + u16 stat_rate_support; + int max_pkeys; + u32 flags; + int reserved_uars; + int uar_size; + int min_page_sz; + int bf_reg_size; + int bf_regs_per_page; + int max_sq_sg; + int max_sq_desc_sz; + int max_rq_sg; + int max_rq_desc_sz; + int max_qp_per_mcg; + int reserved_mgms; + int max_mcgs; + int reserved_pds; + int max_pds; + int qpc_entry_sz; + int rdmarc_entry_sz; + int altc_entry_sz; + int aux_entry_sz; + int srq_entry_sz; + int cqc_entry_sz; + int eqc_entry_sz; + int dmpt_entry_sz; + int cmpt_entry_sz; + int mtt_entry_sz; + int resize_srq; + u8 bmme_flags; + u32 reserved_lkey; + u64 max_icm_sz; +}; + +struct mlx4_adapter { + u32 vendor_id; + u32 device_id; + u32 revision_id; + char board_id[MLX4_BOARD_ID_LEN]; + u8 inta_pin; +}; + +struct mlx4_init_hca_param { + u64 qpc_base; + u64 rdmarc_base; + u64 auxc_base; + u64 altc_base; + u64 srqc_base; + u64 cqc_base; + u64 eqc_base; + u64 mc_base; + u64 dmpt_base; + u64 cmpt_base; + u64 mtt_base; + u16 log_mc_entry_sz; + u16 log_mc_hash_sz; + u8 log_num_qps; + u8 log_num_srqs; + u8 log_num_cqs; + u8 log_num_eqs; + u8 log_rd_per_qp; + u8 log_mc_table_sz; + u8 log_mpt_sz; + u8 log_uar_sz; +}; + +struct mlx4_init_ib_param { + int port_width; + int vl_cap; + int mtu_cap; + u16 gid_cap; + u16 pkey_cap; + int set_guid0; + u64 guid0; + int set_node_guid; + u64 node_guid; + int set_si_guid; + u64 si_guid; +}; + +struct mlx4_set_ib_param { + int set_si_guid; + int reset_qkey_viol; + u64 si_guid; + u32 cap_mask; +}; + +int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); +int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); +int mlx4_UNMAP_FA(struct mlx4_dev *dev); +int mlx4_RUN_FW(struct mlx4_dev *dev); +int mlx4_QUERY_FW(struct mlx4_dev *dev); +int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter); +int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); +int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic); +int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt); +int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages); +int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); +int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); +int mlx4_NOP(struct mlx4_dev *dev); + +#endif /* MLX4_FW_H */ diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c new file mode 100644 index 000000000000..e96feaed6ed4 --- /dev/null +++ b/drivers/net/mlx4/icm.c @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include + +#include "mlx4.h" +#include "icm.h" +#include "fw.h" + +/* + * We allocate in as big chunks as we can, up to a maximum of 256 KB + * per chunk. + */ +enum { + MLX4_ICM_ALLOC_SIZE = 1 << 18, + MLX4_TABLE_CHUNK_SIZE = 1 << 18 +}; + +void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm) +{ + struct mlx4_icm_chunk *chunk, *tmp; + int i; + + list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) { + if (chunk->nsg > 0) + pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + for (i = 0; i < chunk->npages; ++i) + __free_pages(chunk->mem[i].page, + get_order(chunk->mem[i].length)); + + kfree(chunk); + } + + kfree(icm); +} + +struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, + gfp_t gfp_mask) +{ + struct mlx4_icm *icm; + struct mlx4_icm_chunk *chunk = NULL; + int cur_order; + + icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); + if (!icm) + return icm; + + icm->refcount = 0; + INIT_LIST_HEAD(&icm->chunk_list); + + cur_order = get_order(MLX4_ICM_ALLOC_SIZE); + + while (npages > 0) { + if (!chunk) { + chunk = kmalloc(sizeof *chunk, + gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); + if (!chunk) + goto fail; + + chunk->npages = 0; + chunk->nsg = 0; + list_add_tail(&chunk->list, &icm->chunk_list); + } + + while (1 << cur_order > npages) + --cur_order; + + chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order); + if (chunk->mem[chunk->npages].page) { + chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order; + chunk->mem[chunk->npages].offset = 0; + + if (++chunk->npages == MLX4_ICM_CHUNK_LEN) { + chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, + chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + if (chunk->nsg <= 0) + goto fail; + + chunk = NULL; + } + + npages -= 1 << cur_order; + } else { + --cur_order; + if (cur_order < 0) + goto fail; + } + } + + if (chunk) { + chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, + chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + if (chunk->nsg <= 0) + goto fail; + } + + return icm; + +fail: + mlx4_free_icm(dev, icm); + return NULL; +} + +static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt) +{ + return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt); +} + +int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, 
u32 page_count) +{ + return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM, + MLX4_CMD_TIME_CLASS_B); +} + +int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt) +{ + struct mlx4_cmd_mailbox *mailbox; + __be64 *inbox; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + inbox[0] = cpu_to_be64(virt); + inbox[1] = cpu_to_be64(dma_addr); + + err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM, + MLX4_CMD_TIME_CLASS_B); + + mlx4_free_cmd_mailbox(dev, mailbox); + + if (!err) + mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n", + (unsigned long long) dma_addr, (unsigned long long) virt); + + return err; +} + +int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm) +{ + return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1); +} + +int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B); +} + +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) +{ + int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size); + int ret = 0; + + mutex_lock(&table->mutex); + + if (table->icm[i]) { + ++table->icm[i]->refcount; + goto out; + } + + table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, + (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | + __GFP_NOWARN); + if (!table->icm[i]) { + ret = -ENOMEM; + goto out; + } + + if (mlx4_MAP_ICM(dev, table->icm[i], table->virt + + (u64) i * MLX4_TABLE_CHUNK_SIZE)) { + mlx4_free_icm(dev, table->icm[i]); + table->icm[i] = NULL; + ret = -ENOMEM; + goto out; + } + + ++table->icm[i]->refcount; + +out: + mutex_unlock(&table->mutex); + return ret; +} + +void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) +{ + int i; + + i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size); + + mutex_lock(&table->mutex); + + if (--table->icm[i]->refcount == 0) { + mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE, + MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); + mlx4_free_icm(dev, table->icm[i]); + table->icm[i] = NULL; + } + + mutex_unlock(&table->mutex); +} + +void *mlx4_table_find(struct mlx4_icm_table *table, int obj) +{ + int idx, offset, i; + struct mlx4_icm_chunk *chunk; + struct mlx4_icm *icm; + struct page *page = NULL; + + if (!table->lowmem) + return NULL; + + mutex_lock(&table->mutex); + + idx = obj & (table->num_obj - 1); + icm = table->icm[idx / (MLX4_TABLE_CHUNK_SIZE / table->obj_size)]; + offset = idx % (MLX4_TABLE_CHUNK_SIZE / table->obj_size); + + if (!icm) + goto out; + + list_for_each_entry(chunk, &icm->chunk_list, list) { + for (i = 0; i < chunk->npages; ++i) { + if (chunk->mem[i].length > offset) { + page = chunk->mem[i].page; + goto out; + } + offset -= chunk->mem[i].length; + } + } + +out: + mutex_unlock(&table->mutex); + return page ? 
lowmem_page_address(page) + offset : NULL; +} + +int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + int start, int end) +{ + int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size; + int i, err; + + for (i = start; i <= end; i += inc) { + err = mlx4_table_get(dev, table, i); + if (err) + goto fail; + } + + return 0; + +fail: + while (i > start) { + i -= inc; + mlx4_table_put(dev, table, i); + } + + return err; +} + +void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + int start, int end) +{ + int i; + + for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size) + mlx4_table_put(dev, table, i); +} + +int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u64 virt, int obj_size, int nobj, int reserved, + int use_lowmem) +{ + int obj_per_chunk; + int num_icm; + unsigned chunk_size; + int i; + + obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; + num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; + + table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL); + if (!table->icm) + return -ENOMEM; + table->virt = virt; + table->num_icm = num_icm; + table->num_obj = nobj; + table->obj_size = obj_size; + table->lowmem = use_lowmem; + mutex_init(&table->mutex); + + for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { + chunk_size = MLX4_TABLE_CHUNK_SIZE; + if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size) + chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE); + + table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT, + (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) | + __GFP_NOWARN); + if (!table->icm[i]) + goto err; + if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) { + mlx4_free_icm(dev, table->icm[i]); + table->icm[i] = NULL; + goto err; + } + + /* + * Add a reference to this ICM chunk so that it never + * gets freed (since it contains reserved firmware objects). + */ + ++table->icm[i]->refcount; + } + + return 0; + +err: + for (i = 0; i < num_icm; ++i) + if (table->icm[i]) { + mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE, + MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); + mlx4_free_icm(dev, table->icm[i]); + } + + return -ENOMEM; +} + +void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table) +{ + int i; + + for (i = 0; i < table->num_icm; ++i) + if (table->icm[i]) { + mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE, + MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); + mlx4_free_icm(dev, table->icm[i]); + } + + kfree(table->icm); +} diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h new file mode 100644 index 000000000000..bea223d879a5 --- /dev/null +++ b/drivers/net/mlx4/icm.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_ICM_H +#define MLX4_ICM_H + +#include +#include +#include + +#define MLX4_ICM_CHUNK_LEN \ + ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \ + (sizeof (struct scatterlist))) + +enum { + MLX4_ICM_PAGE_SHIFT = 12, + MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, +}; + +struct mlx4_icm_chunk { + struct list_head list; + int npages; + int nsg; + struct scatterlist mem[MLX4_ICM_CHUNK_LEN]; +}; + +struct mlx4_icm { + struct list_head chunk_list; + int refcount; +}; + +struct mlx4_icm_iter { + struct mlx4_icm *icm; + struct mlx4_icm_chunk *chunk; + int page_idx; +}; + +struct mlx4_dev; + +struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask); +void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm); + +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); +void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); +int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + int start, int end); +void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + int start, int end); +int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u64 virt, int obj_size, int nobj, int reserved, + int use_lowmem); +void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); +void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); +void *mlx4_table_find(struct mlx4_icm_table *table, int obj); +int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + int start, int end); +void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + int start, int end); + +static inline void mlx4_icm_first(struct mlx4_icm *icm, + struct mlx4_icm_iter *iter) +{ + iter->icm = icm; + iter->chunk = list_empty(&icm->chunk_list) ? 
+ NULL : list_entry(icm->chunk_list.next, + struct mlx4_icm_chunk, list); + iter->page_idx = 0; +} + +static inline int mlx4_icm_last(struct mlx4_icm_iter *iter) +{ + return !iter->chunk; +} + +static inline void mlx4_icm_next(struct mlx4_icm_iter *iter) +{ + if (++iter->page_idx >= iter->chunk->nsg) { + if (iter->chunk->list.next == &iter->icm->chunk_list) { + iter->chunk = NULL; + return; + } + + iter->chunk = list_entry(iter->chunk->list.next, + struct mlx4_icm_chunk, list); + iter->page_idx = 0; + } +} + +static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) +{ + return sg_dma_address(&iter->chunk->mem[iter->page_idx]); +} + +static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) +{ + return sg_dma_len(&iter->chunk->mem[iter->page_idx]); +} + +int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count); +int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt); +int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); +int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); + +#endif /* MLX4_ICM_H */ diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c new file mode 100644 index 000000000000..65854f9e9c76 --- /dev/null +++ b/drivers/net/mlx4/intf.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include + +#include "mlx4.h" + +struct mlx4_device_context { + struct list_head list; + struct mlx4_interface *intf; + void *context; +}; + +static LIST_HEAD(intf_list); +static LIST_HEAD(dev_list); +static DEFINE_MUTEX(intf_mutex); + +static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv) +{ + struct mlx4_device_context *dev_ctx; + + dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL); + if (!dev_ctx) + return; + + dev_ctx->intf = intf; + dev_ctx->context = intf->add(&priv->dev); + + if (dev_ctx->context) { + spin_lock_irq(&priv->ctx_lock); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irq(&priv->ctx_lock); + } else + kfree(dev_ctx); +} + +static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv) +{ + struct mlx4_device_context *dev_ctx; + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf == intf) { + spin_lock_irq(&priv->ctx_lock); + list_del(&dev_ctx->list); + spin_unlock_irq(&priv->ctx_lock); + + intf->remove(&priv->dev, dev_ctx->context); + kfree(dev_ctx); + return; + } +} + +int mlx4_register_interface(struct mlx4_interface *intf) +{ + struct mlx4_priv *priv; + + if (!intf->add || !intf->remove) + return -EINVAL; + + mutex_lock(&intf_mutex); + + list_add_tail(&intf->list, &intf_list); + list_for_each_entry(priv, &dev_list, dev_list) + mlx4_add_device(intf, priv); + + mutex_unlock(&intf_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_register_interface); + +void mlx4_unregister_interface(struct mlx4_interface *intf) +{ + struct mlx4_priv *priv; + + mutex_lock(&intf_mutex); + + list_for_each_entry(priv, &dev_list, dev_list) + mlx4_remove_device(intf, priv); + + list_del(&intf->list); + + mutex_unlock(&intf_mutex); +} +EXPORT_SYMBOL_GPL(mlx4_unregister_interface); + +void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type, + int subtype, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_device_context *dev_ctx; + unsigned long flags; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf->event) + dev_ctx->intf->event(dev, dev_ctx->context, type, + subtype, port); + + spin_unlock_irqrestore(&priv->ctx_lock, flags); +} + +int mlx4_register_device(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_interface *intf; + + INIT_LIST_HEAD(&priv->ctx_list); + spin_lock_init(&priv->ctx_lock); + + mutex_lock(&intf_mutex); + + list_add_tail(&priv->dev_list, &dev_list); + list_for_each_entry(intf, &intf_list, list) + mlx4_add_device(intf, priv); + + mutex_unlock(&intf_mutex); + + return 0; +} + +void mlx4_unregister_device(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_interface *intf; + + mutex_lock(&intf_mutex); + + list_for_each_entry(intf, &intf_list, list) + mlx4_remove_device(intf, priv); + + list_del(&priv->dev_list); + + mutex_unlock(&intf_mutex); +} diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c new file mode 100644 index 000000000000..4debb024eaf9 --- /dev/null +++ b/drivers/net/mlx4/main.c @@ -0,0 +1,936 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "mlx4.h" +#include "fw.h" +#include "icm.h" + +MODULE_AUTHOR("Roland Dreier"); +MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); + +#ifdef CONFIG_MLX4_DEBUG + +int mlx4_debug_level = 0; +module_param_named(debug_level, mlx4_debug_level, int, 0644); +MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); + +#endif /* CONFIG_MLX4_DEBUG */ + +#ifdef CONFIG_PCI_MSI + +static int msi_x; +module_param(msi_x, int, 0444); +MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); + +#else /* CONFIG_PCI_MSI */ + +#define msi_x (0) + +#endif /* CONFIG_PCI_MSI */ + +static const char mlx4_version[] __devinitdata = + DRV_NAME ": Mellanox ConnectX core driver v" + DRV_VERSION " (" DRV_RELDATE ")\n"; + +static struct mlx4_profile default_profile = { + .num_qp = 1 << 16, + .num_srq = 1 << 16, + .rdmarc_per_qp = 4, + .num_cq = 1 << 16, + .num_mcg = 1 << 13, + .num_mpt = 1 << 17, + .num_mtt = 1 << 20, +}; + +static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + int err; + + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + return err; + } + + if (dev_cap->min_page_sz > PAGE_SIZE) { + mlx4_err(dev, "HCA minimum page size of %d bigger than " + "kernel PAGE_SIZE of %ld, aborting.\n", + dev_cap->min_page_sz, PAGE_SIZE); + return -ENODEV; + } + if (dev_cap->num_ports > MLX4_MAX_PORTS) { + mlx4_err(dev, "HCA has %d ports, but we only support %d, " + "aborting.\n", + dev_cap->num_ports, MLX4_MAX_PORTS); + return -ENODEV; + } + + if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { + mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " + "PCI resource 2 size of 0x%llx, aborting.\n", + dev_cap->uar_size, + (unsigned long long) pci_resource_len(dev->pdev, 2)); + return -ENODEV; + } + + dev->caps.num_ports = dev_cap->num_ports; + dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; + dev->caps.vl_cap = dev_cap->max_vl; + dev->caps.mtu_cap = dev_cap->max_mtu; + dev->caps.gid_table_len = dev_cap->max_gids; + dev->caps.pkey_table_len = dev_cap->max_pkeys; + dev->caps.local_ca_ack_delay = 
dev_cap->local_ca_ack_delay; + dev->caps.bf_reg_size = dev_cap->bf_reg_size; + dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; + dev->caps.max_sq_sg = dev_cap->max_sq_sg; + dev->caps.max_rq_sg = dev_cap->max_rq_sg; + dev->caps.max_wqes = dev_cap->max_qp_sz; + dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; + dev->caps.reserved_qps = dev_cap->reserved_qps; + dev->caps.max_srq_wqes = dev_cap->max_srq_sz; + dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; + dev->caps.reserved_srqs = dev_cap->reserved_srqs; + dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; + dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; + dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM; + /* + * Subtract 1 from the limit because we need to allocate a + * spare CQE so the HCA HW can tell the difference between an + * empty CQ and a full CQ. + */ + dev->caps.max_cqes = dev_cap->max_cq_sz - 1; + dev->caps.reserved_cqs = dev_cap->reserved_cqs; + dev->caps.reserved_eqs = dev_cap->reserved_eqs; + dev->caps.reserved_mtts = dev_cap->reserved_mtts; + dev->caps.reserved_mrws = dev_cap->reserved_mrws; + dev->caps.reserved_uars = dev_cap->reserved_uars; + dev->caps.reserved_pds = dev_cap->reserved_pds; + dev->caps.port_width_cap = dev_cap->max_port_width; + dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; + dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); + dev->caps.flags = dev_cap->flags; + dev->caps.stat_rate_support = dev_cap->stat_rate_support; + + return 0; +} + +static int __devinit mlx4_load_fw(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, + GFP_HIGHUSER | __GFP_NOWARN); + if (!priv->fw.fw_icm) { + mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); + return -ENOMEM; + } + + err = mlx4_MAP_FA(dev, priv->fw.fw_icm); + if (err) { + mlx4_err(dev, "MAP_FA command failed, aborting.\n"); + goto err_free; + } + + err = mlx4_RUN_FW(dev); + if (err) { + mlx4_err(dev, "RUN_FW command failed, aborting.\n"); + goto err_unmap_fa; + } + + return 0; + +err_unmap_fa: + mlx4_UNMAP_FA(dev); + +err_free: + mlx4_free_icm(dev, priv->fw.fw_icm); + return err; +} + +static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, + int cmpt_entry_sz) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_QP * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, dev->caps.num_qps, + dev->caps.reserved_qps, 0); + if (err) + goto err; + + err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_SRQ * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, dev->caps.num_srqs, + dev->caps.reserved_srqs, 0); + if (err) + goto err_qp; + + err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_CQ * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, dev->caps.num_cqs, + dev->caps.reserved_cqs, 0); + if (err) + goto err_srq; + + err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_EQ * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, + roundup_pow_of_two(MLX4_NUM_EQ + + dev->caps.reserved_eqs), + MLX4_NUM_EQ + dev->caps.reserved_eqs, 0); + if (err) + goto err_cq; + + return 0; + +err_cq: + mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); + +err_srq: + mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); + +err_qp: + 
mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); + +err: + return err; +} + +static int __devinit mlx4_init_icm(struct mlx4_dev *dev, + struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *init_hca, + u64 icm_size) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u64 aux_pages; + int err; + + err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); + if (err) { + mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); + return err; + } + + mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", + (unsigned long long) icm_size >> 10, + (unsigned long long) aux_pages << 2); + + priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, + GFP_HIGHUSER | __GFP_NOWARN); + if (!priv->fw.aux_icm) { + mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); + return -ENOMEM; + } + + err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); + if (err) { + mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); + goto err_free_aux; + } + + err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); + if (err) { + mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); + goto err_unmap_aux; + } + + err = mlx4_map_eq_icm(dev, init_hca->eqc_base); + if (err) { + mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); + goto err_unmap_cmpt; + } + + err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, + init_hca->mtt_base, + dev->caps.mtt_entry_sz, + dev->caps.num_mtt_segs, + dev->caps.reserved_mtts, 1); + if (err) { + mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); + goto err_unmap_eq; + } + + err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, + init_hca->dmpt_base, + dev_cap->dmpt_entry_sz, + dev->caps.num_mpts, + dev->caps.reserved_mrws, 1); + if (err) { + mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); + goto err_unmap_mtt; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, + init_hca->qpc_base, + dev_cap->qpc_entry_sz, + dev->caps.num_qps, + dev->caps.reserved_qps, 0); + if (err) { + mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); + goto err_unmap_dmpt; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, + init_hca->auxc_base, + dev_cap->aux_entry_sz, + dev->caps.num_qps, + dev->caps.reserved_qps, 0); + if (err) { + mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); + goto err_unmap_qp; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, + init_hca->altc_base, + dev_cap->altc_entry_sz, + dev->caps.num_qps, + dev->caps.reserved_qps, 0); + if (err) { + mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); + goto err_unmap_auxc; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, + init_hca->rdmarc_base, + dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, + dev->caps.num_qps, + dev->caps.reserved_qps, 0); + if (err) { + mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); + goto err_unmap_altc; + } + + err = mlx4_init_icm_table(dev, &priv->cq_table.table, + init_hca->cqc_base, + dev_cap->cqc_entry_sz, + dev->caps.num_cqs, + dev->caps.reserved_cqs, 0); + if (err) { + mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); + goto err_unmap_rdmarc; + } + + err = mlx4_init_icm_table(dev, &priv->srq_table.table, + init_hca->srqc_base, + dev_cap->srq_entry_sz, + dev->caps.num_srqs, + dev->caps.reserved_srqs, 0); + if (err) { + mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); + goto err_unmap_cq; + } + + /* + * It's not strictly required, but for simplicity just map the + * 
whole multicast group table now. The table isn't very big + * and it's a lot easier than trying to track ref counts. + */ + err = mlx4_init_icm_table(dev, &priv->mcg_table.table, + init_hca->mc_base, MLX4_MGM_ENTRY_SIZE, + dev->caps.num_mgms + dev->caps.num_amgms, + dev->caps.num_mgms + dev->caps.num_amgms, + 0); + if (err) { + mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); + goto err_unmap_srq; + } + + return 0; + +err_unmap_srq: + mlx4_cleanup_icm_table(dev, &priv->srq_table.table); + +err_unmap_cq: + mlx4_cleanup_icm_table(dev, &priv->cq_table.table); + +err_unmap_rdmarc: + mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); + +err_unmap_altc: + mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); + +err_unmap_auxc: + mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); + +err_unmap_qp: + mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); + +err_unmap_dmpt: + mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); + +err_unmap_mtt: + mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); + +err_unmap_eq: + mlx4_unmap_eq_icm(dev); + +err_unmap_cmpt: + mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); + +err_unmap_aux: + mlx4_UNMAP_ICM_AUX(dev); + +err_free_aux: + mlx4_free_icm(dev, priv->fw.aux_icm); + + return err; +} + +static void mlx4_free_icms(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); + mlx4_cleanup_icm_table(dev, &priv->srq_table.table); + mlx4_cleanup_icm_table(dev, &priv->cq_table.table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); + mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); + mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); + mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); + mlx4_unmap_eq_icm(dev); + + mlx4_UNMAP_ICM_AUX(dev); + mlx4_free_icm(dev, priv->fw.aux_icm); +} + +static void mlx4_close_hca(struct mlx4_dev *dev) +{ + mlx4_CLOSE_HCA(dev, 0); + mlx4_free_icms(dev); + mlx4_UNMAP_FA(dev); + mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm); +} + +static int __devinit mlx4_init_hca(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_adapter adapter; + struct mlx4_dev_cap dev_cap; + struct mlx4_profile profile; + struct mlx4_init_hca_param init_hca; + u64 icm_size; + int err; + + err = mlx4_QUERY_FW(dev); + if (err) { + mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); + return err; + } + + err = mlx4_load_fw(dev); + if (err) { + mlx4_err(dev, "Failed to start FW, aborting.\n"); + return err; + } + + err = mlx4_dev_cap(dev, &dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_stop_fw; + } + + profile = default_profile; + + icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca); + if ((long long) icm_size < 0) { + err = icm_size; + goto err_stop_fw; + } + + init_hca.log_uar_sz = ilog2(dev->caps.num_uars); + + err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); + if (err) + goto err_stop_fw; + + 
err = mlx4_INIT_HCA(dev, &init_hca); + if (err) { + mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); + goto err_free_icm; + } + + err = mlx4_QUERY_ADAPTER(dev, &adapter); + if (err) { + mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); + goto err_close; + } + + priv->eq_table.inta_pin = adapter.inta_pin; + priv->rev_id = adapter.revision_id; + memcpy(priv->board_id, adapter.board_id, sizeof priv->board_id); + + return 0; + +err_close: + mlx4_close_hca(dev); + +err_free_icm: + mlx4_free_icms(dev); + +err_stop_fw: + mlx4_UNMAP_FA(dev); + mlx4_free_icm(dev, priv->fw.fw_icm); + + return err; +} + +static int __devinit mlx4_setup_hca(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + MLX4_INIT_DOORBELL_LOCK(&priv->doorbell_lock); + + err = mlx4_init_uar_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "user access region table, aborting.\n"); + return err; + } + + err = mlx4_uar_alloc(dev, &priv->driver_uar); + if (err) { + mlx4_err(dev, "Failed to allocate driver access region, " + "aborting.\n"); + goto err_uar_table_free; + } + + priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); + if (!priv->kar) { + mlx4_err(dev, "Couldn't map kernel access region, " + "aborting.\n"); + err = -ENOMEM; + goto err_uar_free; + } + + err = mlx4_init_pd_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "protection domain table, aborting.\n"); + goto err_kar_unmap; + } + + err = mlx4_init_mr_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "memory region table, aborting.\n"); + goto err_pd_table_free; + } + + mlx4_map_catas_buf(dev); + + err = mlx4_init_eq_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "event queue table, aborting.\n"); + goto err_catas_buf; + } + + err = mlx4_cmd_use_events(dev); + if (err) { + mlx4_err(dev, "Failed to switch to event-driven " + "firmware commands, aborting.\n"); + goto err_eq_table_free; + } + + err = mlx4_NOP(dev); + if (err) { + mlx4_err(dev, "NOP command failed to generate interrupt " + "(IRQ %d), aborting.\n", + priv->eq_table.eq[MLX4_EQ_ASYNC].irq); + if (dev->flags & MLX4_FLAG_MSI_X) + mlx4_err(dev, "Try again with MSI-X disabled.\n"); + else + mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); + + goto err_cmd_poll; + } + + mlx4_dbg(dev, "NOP command IRQ test passed\n"); + + err = mlx4_init_cq_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "completion queue table, aborting.\n"); + goto err_cmd_poll; + } + + err = mlx4_init_srq_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "shared receive queue table, aborting.\n"); + goto err_cq_table_free; + } + + err = mlx4_init_qp_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "queue pair table, aborting.\n"); + goto err_srq_table_free; + } + + err = mlx4_init_mcg_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "multicast group table, aborting.\n"); + goto err_qp_table_free; + } + + return 0; + +err_qp_table_free: + mlx4_cleanup_qp_table(dev); + +err_srq_table_free: + mlx4_cleanup_srq_table(dev); + +err_cq_table_free: + mlx4_cleanup_cq_table(dev); + +err_cmd_poll: + mlx4_cmd_use_polling(dev); + +err_eq_table_free: + mlx4_cleanup_eq_table(dev); + +err_catas_buf: + mlx4_unmap_catas_buf(dev); + mlx4_cleanup_mr_table(dev); + +err_pd_table_free: + mlx4_cleanup_pd_table(dev); + +err_kar_unmap: + iounmap(priv->kar); + +err_uar_free: + mlx4_uar_free(dev, &priv->driver_uar); + +err_uar_table_free: + 
mlx4_cleanup_uar_table(dev); + return err; +} + +static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct msix_entry entries[MLX4_NUM_EQ]; + int err; + int i; + + if (msi_x) { + for (i = 0; i < MLX4_NUM_EQ; ++i) + entries[i].entry = i; + + err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries)); + if (err) { + if (err > 0) + mlx4_info(dev, "Only %d MSI-X vectors available, " + "not using MSI-X\n", err); + goto no_msi; + } + + for (i = 0; i < MLX4_NUM_EQ; ++i) + priv->eq_table.eq[i].irq = entries[i].vector; + + dev->flags |= MLX4_FLAG_MSI_X; + return; + } + +no_msi: + for (i = 0; i < MLX4_NUM_EQ; ++i) + priv->eq_table.eq[i].irq = dev->pdev->irq; +} + +static int __devinit mlx4_init_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static int mlx4_version_printed; + struct mlx4_priv *priv; + struct mlx4_dev *dev; + int err; + + if (!mlx4_version_printed) { + printk(KERN_INFO "%s", mlx4_version); + ++mlx4_version_printed; + } + + printk(KERN_INFO PFX "Initializing %s\n", + pci_name(pdev)); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, " + "aborting.\n"); + return err; + } + + /* + * Check for BARs. We expect 0: 1MB, 2: 8MB, 4: DDR (may not + * be present) + */ + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || + pci_resource_len(pdev, 0) != 1 << 20) { + dev_err(&pdev->dev, "Missing DCS, aborting.\n"); + err = -ENODEV; + goto err_disable_pdev; + } + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing UAR, aborting.\n"); + err = -ENODEV; + goto err_disable_pdev; + } + + err = pci_request_region(pdev, 0, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot request control region, aborting.\n"); + goto err_disable_pdev; + } + + err = pci_request_region(pdev, 2, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n"); + goto err_release_bar0; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); + goto err_release_bar2; + } + } + err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " + "consistent PCI DMA mask.\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " + "aborting.\n"); + goto err_release_bar2; + } + } + + priv = kzalloc(sizeof *priv, GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, "Device struct alloc failed, " + "aborting.\n"); + err = -ENOMEM; + goto err_release_bar2; + } + + dev = &priv->dev; + dev->pdev = pdev; + + /* + * Now reset the HCA before we touch the PCI capabilities or + * attempt a firmware command, since a boot ROM may have left + * the HCA in an undefined state. 
+ */ + err = mlx4_reset(dev); + if (err) { + mlx4_err(dev, "Failed to reset HCA, aborting.\n"); + goto err_free_dev; + } + + mlx4_enable_msi_x(dev); + + if (mlx4_cmd_init(dev)) { + mlx4_err(dev, "Failed to init command interface, aborting.\n"); + goto err_free_dev; + } + + err = mlx4_init_hca(dev); + if (err) + goto err_cmd; + + err = mlx4_setup_hca(dev); + if (err) + goto err_close; + + err = mlx4_register_device(dev); + if (err) + goto err_cleanup; + + pci_set_drvdata(pdev, dev); + + return 0; + +err_cleanup: + mlx4_cleanup_mcg_table(dev); + mlx4_cleanup_qp_table(dev); + mlx4_cleanup_srq_table(dev); + mlx4_cleanup_cq_table(dev); + mlx4_cmd_use_polling(dev); + mlx4_cleanup_eq_table(dev); + + mlx4_unmap_catas_buf(dev); + + mlx4_cleanup_mr_table(dev); + mlx4_cleanup_pd_table(dev); + mlx4_cleanup_uar_table(dev); + +err_close: + mlx4_close_hca(dev); + +err_cmd: + mlx4_cmd_cleanup(dev); + +err_free_dev: + if (dev->flags & MLX4_FLAG_MSI_X) + pci_disable_msix(pdev); + + kfree(priv); + +err_release_bar2: + pci_release_region(pdev, 2); + +err_release_bar0: + pci_release_region(pdev, 0); + +err_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void __devexit mlx4_remove_one(struct pci_dev *pdev) +{ + struct mlx4_dev *dev = pci_get_drvdata(pdev); + struct mlx4_priv *priv = mlx4_priv(dev); + int p; + + if (dev) { + mlx4_unregister_device(dev); + + for (p = 1; p <= dev->caps.num_ports; ++p) + mlx4_CLOSE_PORT(dev, p); + + mlx4_cleanup_mcg_table(dev); + mlx4_cleanup_qp_table(dev); + mlx4_cleanup_srq_table(dev); + mlx4_cleanup_cq_table(dev); + mlx4_cmd_use_polling(dev); + mlx4_cleanup_eq_table(dev); + + mlx4_unmap_catas_buf(dev); + + mlx4_cleanup_mr_table(dev); + mlx4_cleanup_pd_table(dev); + + iounmap(priv->kar); + mlx4_uar_free(dev, &priv->driver_uar); + mlx4_cleanup_uar_table(dev); + mlx4_close_hca(dev); + mlx4_cmd_cleanup(dev); + + if (dev->flags & MLX4_FLAG_MSI_X) + pci_disable_msix(pdev); + + kfree(priv); + pci_release_region(pdev, 2); + pci_release_region(pdev, 0); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } +} + +static struct pci_device_id mlx4_pci_table[] = { + { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ + { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ + { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, mlx4_pci_table); + +static struct pci_driver mlx4_driver = { + .name = DRV_NAME, + .id_table = mlx4_pci_table, + .probe = mlx4_init_one, + .remove = __devexit_p(mlx4_remove_one) +}; + +static int __init mlx4_init(void) +{ + int ret; + + ret = pci_register_driver(&mlx4_driver); + return ret < 0 ? ret : 0; +} + +static void __exit mlx4_cleanup(void) +{ + pci_unregister_driver(&mlx4_driver); +} + +module_init(mlx4_init); +module_exit(mlx4_cleanup); diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c new file mode 100644 index 000000000000..672024a0ee71 --- /dev/null +++ b/drivers/net/mlx4/mcg.c @@ -0,0 +1,380 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include + +#include + +#include "mlx4.h" + +struct mlx4_mgm { + __be32 next_gid_index; + __be32 members_count; + u32 reserved[2]; + u8 gid[16]; + __be32 qp[MLX4_QP_PER_MGM]; +}; + +static const u8 zero_gid[16]; /* automatically initialized to 0 */ + +static int mlx4_READ_MCG(struct mlx4_dev *dev, int index, + struct mlx4_cmd_mailbox *mailbox) +{ + return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, + MLX4_CMD_TIME_CLASS_A); +} + +static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index, + struct mlx4_cmd_mailbox *mailbox) +{ + return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, + MLX4_CMD_TIME_CLASS_A); +} + +static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + u16 *hash) +{ + u64 imm; + int err; + + err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH, + MLX4_CMD_TIME_CLASS_A); + + if (!err) + *hash = imm; + + return err; +} + +/* + * Caller must hold MCG table semaphore. gid and mgm parameters must + * be properly aligned for command interface. + * + * Returns 0 unless a firmware command error occurs. + * + * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 + * and *mgm holds MGM entry. + * + * if GID is found in AMGM, *index = index in AMGM, *prev = index of + * previous entry in hash chain and *mgm holds AMGM entry. + * + * If no AMGM exists for given gid, *index = -1, *prev = index of last + * entry in hash chain and *mgm holds end of hash chain. 
+ */ +static int find_mgm(struct mlx4_dev *dev, + u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox, + u16 *hash, int *prev, int *index) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm = mgm_mailbox->buf; + u8 *mgid; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return -ENOMEM; + mgid = mailbox->buf; + + memcpy(mgid, gid, 16); + + err = mlx4_MGID_HASH(dev, mailbox, hash); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) + return err; + + if (0) + mlx4_dbg(dev, "Hash for %04x:%04x:%04x:%04x:" + "%04x:%04x:%04x:%04x is %04x\n", + be16_to_cpu(((__be16 *) gid)[0]), + be16_to_cpu(((__be16 *) gid)[1]), + be16_to_cpu(((__be16 *) gid)[2]), + be16_to_cpu(((__be16 *) gid)[3]), + be16_to_cpu(((__be16 *) gid)[4]), + be16_to_cpu(((__be16 *) gid)[5]), + be16_to_cpu(((__be16 *) gid)[6]), + be16_to_cpu(((__be16 *) gid)[7]), + *hash); + + *index = *hash; + *prev = -1; + + do { + err = mlx4_READ_MCG(dev, *index, mgm_mailbox); + if (err) + return err; + + if (!memcmp(mgm->gid, zero_gid, 16)) { + if (*index != *hash) { + mlx4_err(dev, "Found zero MGID in AMGM.\n"); + err = -EINVAL; + } + return err; + } + + if (!memcmp(mgm->gid, gid, 16)) + return err; + + *prev = *index; + *index = be32_to_cpu(mgm->next_gid_index) >> 6; + } while (*index); + + *index = -1; + return err; +} + +int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 members_count; + u16 hash; + int index, prev; + int link = 0; + int i; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; + + mutex_lock(&priv->mcg_table.mutex); + + err = find_mgm(dev, gid, mailbox, &hash, &prev, &index); + if (err) + goto out; + + if (index != -1) { + if (!memcmp(mgm->gid, zero_gid, 16)) + memcpy(mgm->gid, gid, 16); + } else { + link = 1; + + index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); + if (index == -1) { + mlx4_err(dev, "No AMGM entries left\n"); + err = -ENOMEM; + goto out; + } + index += dev->caps.num_mgms; + + err = mlx4_READ_MCG(dev, index, mailbox); + if (err) + goto out; + + memset(mgm, 0, sizeof *mgm); + memcpy(mgm->gid, gid, 16); + } + + members_count = be32_to_cpu(mgm->members_count); + if (members_count == MLX4_QP_PER_MGM) { + mlx4_err(dev, "MGM at index %x is full.\n", index); + err = -ENOMEM; + goto out; + } + + for (i = 0; i < members_count; ++i) + if (mgm->qp[i] == cpu_to_be32(qp->qpn)) { + mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); + err = 0; + goto out; + } + + mgm->qp[members_count++] = cpu_to_be32(qp->qpn); + mgm->members_count = cpu_to_be32(members_count); + + err = mlx4_WRITE_MCG(dev, index, mailbox); + if (err) + goto out; + + if (!link) + goto out; + + err = mlx4_READ_MCG(dev, prev, mailbox); + if (err) + goto out; + + mgm->next_gid_index = cpu_to_be32(index << 6); + + err = mlx4_WRITE_MCG(dev, prev, mailbox); + if (err) + goto out; + +out: + if (err && link && index != -1) { + if (index < dev->caps.num_mgms) + mlx4_warn(dev, "Got AMGM index %d < %d", + index, dev->caps.num_mgms); + else + mlx4_bitmap_free(&priv->mcg_table.bitmap, + index - dev->caps.num_mgms); + } + mutex_unlock(&priv->mcg_table.mutex); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_multicast_attach); + +int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox 
*mailbox; + struct mlx4_mgm *mgm; + u32 members_count; + u16 hash; + int prev, index; + int i, loc; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; + + mutex_lock(&priv->mcg_table.mutex); + + err = find_mgm(dev, gid, mailbox, &hash, &prev, &index); + if (err) + goto out; + + if (index == -1) { + mlx4_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " + "not found\n", + be16_to_cpu(((__be16 *) gid)[0]), + be16_to_cpu(((__be16 *) gid)[1]), + be16_to_cpu(((__be16 *) gid)[2]), + be16_to_cpu(((__be16 *) gid)[3]), + be16_to_cpu(((__be16 *) gid)[4]), + be16_to_cpu(((__be16 *) gid)[5]), + be16_to_cpu(((__be16 *) gid)[6]), + be16_to_cpu(((__be16 *) gid)[7])); + err = -EINVAL; + goto out; + } + + members_count = be32_to_cpu(mgm->members_count); + for (loc = -1, i = 0; i < members_count; ++i) + if (mgm->qp[i] == cpu_to_be32(qp->qpn)) + loc = i; + + if (loc == -1) { + mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); + err = -EINVAL; + goto out; + } + + + mgm->members_count = cpu_to_be32(--members_count); + mgm->qp[loc] = mgm->qp[i - 1]; + mgm->qp[i - 1] = 0; + + err = mlx4_WRITE_MCG(dev, index, mailbox); + if (err) + goto out; + + if (i != 1) + goto out; + + if (prev == -1) { + /* Remove entry from MGM */ + int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; + if (amgm_index) { + err = mlx4_READ_MCG(dev, amgm_index, mailbox); + if (err) + goto out; + } else + memset(mgm->gid, 0, 16); + + err = mlx4_WRITE_MCG(dev, index, mailbox); + if (err) + goto out; + + if (amgm_index) { + if (amgm_index < dev->caps.num_mgms) + mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", + index, amgm_index, dev->caps.num_mgms); + else + mlx4_bitmap_free(&priv->mcg_table.bitmap, + amgm_index - dev->caps.num_mgms); + } + } else { + /* Remove entry from AMGM */ + int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; + err = mlx4_READ_MCG(dev, prev, mailbox); + if (err) + goto out; + + mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); + + err = mlx4_WRITE_MCG(dev, prev, mailbox); + if (err) + goto out; + + if (index < dev->caps.num_mgms) + mlx4_warn(dev, "entry %d had next AMGM index %d < %d", + prev, index, dev->caps.num_mgms); + else + mlx4_bitmap_free(&priv->mcg_table.bitmap, + index - dev->caps.num_mgms); + } + +out: + mutex_unlock(&priv->mcg_table.mutex); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_multicast_detach); + +int __devinit mlx4_init_mcg_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + err = mlx4_bitmap_init(&priv->mcg_table.bitmap, + dev->caps.num_amgms, dev->caps.num_amgms - 1, 0); + if (err) + return err; + + mutex_init(&priv->mcg_table.mutex); + + return 0; +} + +void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); +} diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h new file mode 100644 index 000000000000..9befbae3d196 --- /dev/null +++ b/drivers/net/mlx4/mlx4.h @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_H +#define MLX4_H + +#include + +#include +#include + +#define DRV_NAME "mlx4_core" +#define PFX DRV_NAME ": " +#define DRV_VERSION "0.01" +#define DRV_RELDATE "May 1, 2007" + +enum { + MLX4_HCR_BASE = 0x80680, + MLX4_HCR_SIZE = 0x0001c, + MLX4_CLR_INT_SIZE = 0x00008 +}; + +enum { + MLX4_BOARD_ID_LEN = 64 +}; + +enum { + MLX4_MGM_ENTRY_SIZE = 0x40, + MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2), + MLX4_MTT_ENTRY_PER_SEG = 8 +}; + +enum { + MLX4_EQ_ASYNC, + MLX4_EQ_COMP, + MLX4_EQ_CATAS, + MLX4_NUM_EQ +}; + +enum { + MLX4_NUM_PDS = 1 << 15 +}; + +enum { + MLX4_CMPT_TYPE_QP = 0, + MLX4_CMPT_TYPE_SRQ = 1, + MLX4_CMPT_TYPE_CQ = 2, + MLX4_CMPT_TYPE_EQ = 3, + MLX4_CMPT_NUM_TYPE +}; + +enum { + MLX4_CMPT_SHIFT = 24, + MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT +}; + +#ifdef CONFIG_MLX4_DEBUG +extern int mlx4_debug_level; + +#define mlx4_dbg(mdev, format, arg...) \ + do { \ + if (mlx4_debug_level) \ + dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \ + } while (0) + +#else /* CONFIG_MLX4_DEBUG */ + +#define mlx4_dbg(mdev, format, arg...) do { (void) mdev; } while (0) + +#endif /* CONFIG_MLX4_DEBUG */ + +#define mlx4_err(mdev, format, arg...) \ + dev_err(&mdev->pdev->dev, format, ## arg) +#define mlx4_info(mdev, format, arg...) \ + dev_info(&mdev->pdev->dev, format, ## arg) +#define mlx4_warn(mdev, format, arg...) 
\ + dev_warn(&mdev->pdev->dev, format, ## arg) + +struct mlx4_bitmap { + u32 last; + u32 top; + u32 max; + u32 mask; + spinlock_t lock; + unsigned long *table; +}; + +struct mlx4_buddy { + unsigned long **bits; + int max_order; + spinlock_t lock; +}; + +struct mlx4_icm; + +struct mlx4_icm_table { + u64 virt; + int num_icm; + int num_obj; + int obj_size; + int lowmem; + struct mutex mutex; + struct mlx4_icm **icm; +}; + +struct mlx4_eq { + struct mlx4_dev *dev; + void __iomem *doorbell; + int eqn; + u32 cons_index; + u16 irq; + u16 have_irq; + int nent; + struct mlx4_buf_list *page_list; + struct mlx4_mtt mtt; +}; + +struct mlx4_profile { + int num_qp; + int rdmarc_per_qp; + int num_srq; + int num_cq; + int num_mcg; + int num_mpt; + int num_mtt; +}; + +struct mlx4_fw { + u64 clr_int_base; + u64 catas_offset; + struct mlx4_icm *fw_icm; + struct mlx4_icm *aux_icm; + u32 catas_size; + u16 fw_pages; + u8 clr_int_bar; + u8 catas_bar; +}; + +struct mlx4_cmd { + struct pci_pool *pool; + void __iomem *hcr; + struct mutex hcr_mutex; + struct semaphore poll_sem; + struct semaphore event_sem; + int max_cmds; + spinlock_t context_lock; + int free_head; + struct mlx4_cmd_context *context; + u16 token_mask; + u8 use_events; + u8 toggle; +}; + +struct mlx4_uar_table { + struct mlx4_bitmap bitmap; +}; + +struct mlx4_mr_table { + struct mlx4_bitmap mpt_bitmap; + struct mlx4_buddy mtt_buddy; + u64 mtt_base; + u64 mpt_base; + struct mlx4_icm_table mtt_table; + struct mlx4_icm_table dmpt_table; +}; + +struct mlx4_cq_table { + struct mlx4_bitmap bitmap; + spinlock_t lock; + struct radix_tree_root tree; + struct mlx4_icm_table table; + struct mlx4_icm_table cmpt_table; +}; + +struct mlx4_eq_table { + struct mlx4_bitmap bitmap; + void __iomem *clr_int; + void __iomem *uar_map[(MLX4_NUM_EQ + 6) / 4]; + u32 clr_mask; + struct mlx4_eq eq[MLX4_NUM_EQ]; + u64 icm_virt; + struct page *icm_page; + dma_addr_t icm_dma; + struct mlx4_icm_table cmpt_table; + int have_irq; + u8 inta_pin; +}; + +struct mlx4_srq_table { + struct mlx4_bitmap bitmap; + spinlock_t lock; + struct radix_tree_root tree; + struct mlx4_icm_table table; + struct mlx4_icm_table cmpt_table; +}; + +struct mlx4_qp_table { + struct mlx4_bitmap bitmap; + u32 rdmarc_base; + int rdmarc_shift; + spinlock_t lock; + struct mlx4_icm_table qp_table; + struct mlx4_icm_table auxc_table; + struct mlx4_icm_table altc_table; + struct mlx4_icm_table rdmarc_table; + struct mlx4_icm_table cmpt_table; +}; + +struct mlx4_mcg_table { + struct mutex mutex; + struct mlx4_bitmap bitmap; + struct mlx4_icm_table table; +}; + +struct mlx4_catas_err { + u32 __iomem *map; + int size; +}; + +struct mlx4_priv { + struct mlx4_dev dev; + + struct list_head dev_list; + struct list_head ctx_list; + spinlock_t ctx_lock; + + struct mlx4_fw fw; + struct mlx4_cmd cmd; + + struct mlx4_bitmap pd_bitmap; + struct mlx4_uar_table uar_table; + struct mlx4_mr_table mr_table; + struct mlx4_cq_table cq_table; + struct mlx4_eq_table eq_table; + struct mlx4_srq_table srq_table; + struct mlx4_qp_table qp_table; + struct mlx4_mcg_table mcg_table; + + struct mlx4_catas_err catas_err; + + void __iomem *clr_base; + + struct mlx4_uar driver_uar; + void __iomem *kar; + MLX4_DECLARE_DOORBELL_LOCK(doorbell_lock) + + u32 rev_id; + char board_id[MLX4_BOARD_ID_LEN]; +}; + +static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) +{ + return container_of(dev, struct mlx4_priv, dev); +} + +u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); +void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); +int 
mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved); +void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); + +int mlx4_reset(struct mlx4_dev *dev); + +int mlx4_init_pd_table(struct mlx4_dev *dev); +int mlx4_init_uar_table(struct mlx4_dev *dev); +int mlx4_init_mr_table(struct mlx4_dev *dev); +int mlx4_init_eq_table(struct mlx4_dev *dev); +int mlx4_init_cq_table(struct mlx4_dev *dev); +int mlx4_init_qp_table(struct mlx4_dev *dev); +int mlx4_init_srq_table(struct mlx4_dev *dev); +int mlx4_init_mcg_table(struct mlx4_dev *dev); + +void mlx4_cleanup_pd_table(struct mlx4_dev *dev); +void mlx4_cleanup_uar_table(struct mlx4_dev *dev); +void mlx4_cleanup_mr_table(struct mlx4_dev *dev); +void mlx4_cleanup_eq_table(struct mlx4_dev *dev); +void mlx4_cleanup_cq_table(struct mlx4_dev *dev); +void mlx4_cleanup_qp_table(struct mlx4_dev *dev); +void mlx4_cleanup_srq_table(struct mlx4_dev *dev); +void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); + +void mlx4_map_catas_buf(struct mlx4_dev *dev); +void mlx4_unmap_catas_buf(struct mlx4_dev *dev); + +int mlx4_register_device(struct mlx4_dev *dev); +void mlx4_unregister_device(struct mlx4_dev *dev); +void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type, + int subtype, int port); + +struct mlx4_dev_cap; +struct mlx4_init_hca_param; + +u64 mlx4_make_profile(struct mlx4_dev *dev, + struct mlx4_profile *request, + struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *init_hca); + +int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt); +void mlx4_unmap_eq_icm(struct mlx4_dev *dev); + +int mlx4_cmd_init(struct mlx4_dev *dev); +void mlx4_cmd_cleanup(struct mlx4_dev *dev); +void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); +int mlx4_cmd_use_events(struct mlx4_dev *dev); +void mlx4_cmd_use_polling(struct mlx4_dev *dev); + +void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn); +void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type); + +void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type); + +void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); + +void mlx4_handle_catas_err(struct mlx4_dev *dev); + +#endif /* MLX4_H */ diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c new file mode 100644 index 000000000000..b33864dab179 --- /dev/null +++ b/drivers/net/mlx4/mr.c @@ -0,0 +1,479 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include + +#include "mlx4.h" +#include "icm.h" + +/* + * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. + */ +struct mlx4_mpt_entry { + __be32 flags; + __be32 qpn; + __be32 key; + __be32 pd; + __be64 start; + __be64 length; + __be32 lkey; + __be32 win_cnt; + u8 reserved1[3]; + u8 mtt_rep; + __be64 mtt_seg; + __be32 mtt_sz; + __be32 entity_size; + __be32 first_byte_offset; +} __attribute__((packed)); + +#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) +#define MLX4_MPT_FLAG_MIO (1 << 17) +#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) +#define MLX4_MPT_FLAG_PHYSICAL (1 << 9) +#define MLX4_MPT_FLAG_REGION (1 << 8) + +#define MLX4_MTT_FLAG_PRESENT 1 + +static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) +{ + int o; + int m; + u32 seg; + + spin_lock(&buddy->lock); + + for (o = order; o <= buddy->max_order; ++o) { + m = 1 << (buddy->max_order - o); + seg = find_first_bit(buddy->bits[o], m); + if (seg < m) + goto found; + } + + spin_unlock(&buddy->lock); + return -1; + + found: + clear_bit(seg, buddy->bits[o]); + + while (o > order) { + --o; + seg <<= 1; + set_bit(seg ^ 1, buddy->bits[o]); + } + + spin_unlock(&buddy->lock); + + seg <<= order; + + return seg; +} + +static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) +{ + seg >>= order; + + spin_lock(&buddy->lock); + + while (test_bit(seg ^ 1, buddy->bits[order])) { + clear_bit(seg ^ 1, buddy->bits[order]); + seg >>= 1; + ++order; + } + + set_bit(seg, buddy->bits[order]); + + spin_unlock(&buddy->lock); +} + +static int __devinit mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) +{ + int i, s; + + buddy->max_order = max_order; + spin_lock_init(&buddy->lock); + + buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), + GFP_KERNEL); + if (!buddy->bits) + goto err_out; + + for (i = 0; i <= buddy->max_order; ++i) { + s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); + if (!buddy->bits[i]) + goto err_out_free; + bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i)); + } + + set_bit(0, buddy->bits[buddy->max_order]); + + return 0; + +err_out_free: + for (i = 0; i <= buddy->max_order; ++i) + kfree(buddy->bits[i]); + + kfree(buddy->bits); + +err_out: + return -ENOMEM; +} + +static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) +{ + int i; + + for (i = 0; i <= buddy->max_order; ++i) + kfree(buddy->bits[i]); + + kfree(buddy->bits); +} + +static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + u32 seg; + + seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order); + if (seg == -1) + return -1; + + if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg, + seg + (1 << order) - 1)) { + mlx4_buddy_free(&mr_table->mtt_buddy, seg, order); + return -1; + } + + return seg; +} + +int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, + struct mlx4_mtt *mtt) +{ + int i; + + if (!npages) { + mtt->order = -1; + mtt->page_shift = 
MLX4_ICM_PAGE_SHIFT; + return 0; + } else + mtt->page_shift = page_shift; + + for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1) + ++mtt->order; + + mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); + if (mtt->first_seg == -1) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mtt_init); + +void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + + if (mtt->order < 0) + return; + + mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order); + mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg, + mtt->first_seg + (1 << mtt->order) - 1); +} +EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); + +u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) +{ + return (u64) mtt->first_seg * dev->caps.mtt_entry_sz; +} +EXPORT_SYMBOL_GPL(mlx4_mtt_addr); + +static u32 hw_index_to_key(u32 ind) +{ + return (ind >> 24) | (ind << 8); +} + +static u32 key_to_hw_index(u32 key) +{ + return (key << 24) | (key >> 8); +} + +static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int mpt_index) +{ + return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT, + MLX4_CMD_TIME_CLASS_B); +} + +static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int mpt_index) +{ + return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, + !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B); +} + +int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, + int npages, int page_shift, struct mlx4_mr *mr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u32 index; + int err; + + index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); + if (index == -1) { + err = -ENOMEM; + goto err; + } + + mr->iova = iova; + mr->size = size; + mr->pd = pd; + mr->access = access; + mr->enabled = 0; + mr->key = hw_index_to_key(index); + + err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); + if (err) + goto err_index; + + return 0; + +err_index: + mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); + +err: + kfree(mr); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_alloc); + +void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + if (mr->enabled) { + err = mlx4_HW2SW_MPT(dev, NULL, + key_to_hw_index(mr->key) & + (dev->caps.num_mpts - 1)); + if (err) + mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err); + } + + mlx4_mtt_cleanup(dev, &mr->mtt); + mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key)); +} +EXPORT_SYMBOL_GPL(mlx4_mr_free); + +int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mpt_entry *mpt_entry; + int err; + + err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); + if (err) + return err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_table; + } + mpt_entry = mailbox->buf; + + memset(mpt_entry, 0, sizeof *mpt_entry); + + mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS | + MLX4_MPT_FLAG_MIO | + MLX4_MPT_FLAG_REGION | + mr->access); + if (mr->mtt.order < 0) + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); + + mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key)); + mpt_entry->pd = cpu_to_be32(mr->pd); + mpt_entry->start = cpu_to_be64(mr->iova); + mpt_entry->length = cpu_to_be64(mr->size); + mpt_entry->entity_size = 
cpu_to_be32(mr->mtt.page_shift); + mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt)); + + err = mlx4_SW2HW_MPT(dev, mailbox, + key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); + if (err) { + mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); + goto err_cmd; + } + + mr->enabled = 1; + + mlx4_free_cmd_mailbox(dev, mailbox); + + return 0; + +err_cmd: + mlx4_free_cmd_mailbox(dev, mailbox); + +err_table: + mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_enable); + +static int mlx4_WRITE_MTT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int num_mtt) +{ + return mlx4_cmd(dev, mailbox->dma, num_mtt, 0, MLX4_CMD_WRITE_MTT, + MLX4_CMD_TIME_CLASS_B); +} + +int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) +{ + struct mlx4_cmd_mailbox *mailbox; + __be64 *mtt_entry; + int i; + int err = 0; + + if (mtt->order < 0) + return -EINVAL; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + mtt_entry = mailbox->buf; + + while (npages > 0) { + mtt_entry[0] = cpu_to_be64(mlx4_mtt_addr(dev, mtt) + start_index * 8); + mtt_entry[1] = 0; + + for (i = 0; i < npages && i < MLX4_MAILBOX_SIZE / 8 - 2; ++i) + mtt_entry[i + 2] = cpu_to_be64(page_list[i] | + MLX4_MTT_FLAG_PRESENT); + + /* + * If we have an odd number of entries to write, add + * one more dummy entry for firmware efficiency. + */ + if (i & 1) + mtt_entry[i + 2] = 0; + + err = mlx4_WRITE_MTT(dev, mailbox, (i + 1) & ~1); + if (err) + goto out; + + npages -= i; + start_index += i; + page_list += i; + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_write_mtt); + +int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_buf *buf) +{ + u64 *page_list; + int err; + int i; + + page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL); + if (!page_list) + return -ENOMEM; + + for (i = 0; i < buf->npages; ++i) + if (buf->nbufs == 1) + page_list[i] = buf->u.direct.map + (i << buf->page_shift); + else + page_list[i] = buf->u.page_list[i].map; + + err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); + + kfree(page_list); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); + +int __devinit mlx4_init_mr_table(struct mlx4_dev *dev) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + int err; + + err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, + ~0, dev->caps.reserved_mrws); + if (err) + return err; + + err = mlx4_buddy_init(&mr_table->mtt_buddy, + ilog2(dev->caps.num_mtt_segs)); + if (err) + goto err_buddy; + + if (dev->caps.reserved_mtts) { + if (mlx4_alloc_mtt_range(dev, ilog2(dev->caps.reserved_mtts)) == -1) { + mlx4_warn(dev, "MTT table of order %d is too small.\n", + mr_table->mtt_buddy.max_order); + err = -ENOMEM; + goto err_reserve_mtts; + } + } + + return 0; + +err_reserve_mtts: + mlx4_buddy_cleanup(&mr_table->mtt_buddy); + +err_buddy: + mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); + + return err; +} + +void mlx4_cleanup_mr_table(struct mlx4_dev *dev) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + + mlx4_buddy_cleanup(&mr_table->mtt_buddy); + mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); +} diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c new file mode 100644 index 000000000000..23dea1ee7750 --- /dev/null +++ b/drivers/net/mlx4/pd.c @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include + +#include "mlx4.h" +#include "icm.h" + +int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap); + if (*pdn == -1) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_pd_alloc); + +void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn); +} +EXPORT_SYMBOL_GPL(mlx4_pd_free); + +int __devinit mlx4_init_pd_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, + (1 << 24) - 1, dev->caps.reserved_pds); +} + +void mlx4_cleanup_pd_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap); +} + + +int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) +{ + uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap); + if (uar->index == -1) + return -ENOMEM; + + uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_uar_alloc); + +void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index); +} +EXPORT_SYMBOL_GPL(mlx4_uar_free); + +int mlx4_init_uar_table(struct mlx4_dev *dev) +{ + return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, + dev->caps.num_uars, dev->caps.num_uars - 1, + max(128, dev->caps.reserved_uars)); +} + +void mlx4_cleanup_uar_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap); +} diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c new file mode 100644 index 000000000000..9ca42b213d54 --- /dev/null +++ b/drivers/net/mlx4/profile.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include + +#include "mlx4.h" +#include "fw.h" + +enum { + MLX4_RES_QP, + MLX4_RES_RDMARC, + MLX4_RES_ALTC, + MLX4_RES_AUXC, + MLX4_RES_SRQ, + MLX4_RES_CQ, + MLX4_RES_EQ, + MLX4_RES_DMPT, + MLX4_RES_CMPT, + MLX4_RES_MTT, + MLX4_RES_MCG, + MLX4_RES_NUM +}; + +static const char *res_name[] = { + [MLX4_RES_QP] = "QP", + [MLX4_RES_RDMARC] = "RDMARC", + [MLX4_RES_ALTC] = "ALTC", + [MLX4_RES_AUXC] = "AUXC", + [MLX4_RES_SRQ] = "SRQ", + [MLX4_RES_CQ] = "CQ", + [MLX4_RES_EQ] = "EQ", + [MLX4_RES_DMPT] = "DMPT", + [MLX4_RES_CMPT] = "CMPT", + [MLX4_RES_MTT] = "MTT", + [MLX4_RES_MCG] = "MCG", +}; + +u64 mlx4_make_profile(struct mlx4_dev *dev, + struct mlx4_profile *request, + struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *init_hca) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource { + u64 size; + u64 start; + int type; + int num; + int log_num; + }; + + u64 total_size = 0; + struct mlx4_resource *profile; + struct mlx4_resource tmp; + int i, j; + + profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL); + if (!profile) + return -ENOMEM; + + profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; + profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz; + profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz; + profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz; + profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz; + profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz; + profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; + profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; + profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; + profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; + profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE; + + profile[MLX4_RES_QP].num = request->num_qp; + profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp; + profile[MLX4_RES_ALTC].num = request->num_qp; + profile[MLX4_RES_AUXC].num = request->num_qp; + profile[MLX4_RES_SRQ].num = request->num_srq; + profile[MLX4_RES_CQ].num = request->num_cq; + profile[MLX4_RES_EQ].num = MLX4_NUM_EQ + dev_cap->reserved_eqs; + profile[MLX4_RES_DMPT].num = request->num_mpt; + profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; + profile[MLX4_RES_MTT].num = request->num_mtt; + profile[MLX4_RES_MCG].num = request->num_mcg; + + for (i = 0; i < 
MLX4_RES_NUM; ++i) { + profile[i].type = i; + profile[i].num = roundup_pow_of_two(profile[i].num); + profile[i].log_num = ilog2(profile[i].num); + profile[i].size *= profile[i].num; + profile[i].size = max(profile[i].size, (u64) PAGE_SIZE); + } + + /* + * Sort the resources in decreasing order of size. Since they + * all have sizes that are powers of 2, we'll be able to keep + * resources aligned to their size and pack them without gaps + * using the sorted order. + */ + for (i = MLX4_RES_NUM; i > 0; --i) + for (j = 1; j < i; ++j) { + if (profile[j].size > profile[j - 1].size) { + tmp = profile[j]; + profile[j] = profile[j - 1]; + profile[j - 1] = tmp; + } + } + + for (i = 0; i < MLX4_RES_NUM; ++i) { + if (profile[i].size) { + profile[i].start = total_size; + total_size += profile[i].size; + } + + if (total_size > dev_cap->max_icm_sz) { + mlx4_err(dev, "Profile requires 0x%llx bytes; " + "won't fit in 0x%llx bytes of context memory.\n", + (unsigned long long) total_size, + (unsigned long long) dev_cap->max_icm_sz); + kfree(profile); + return -ENOMEM; + } + + if (profile[i].size) + mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, " + "size 0x%10llx\n", + i, res_name[profile[i].type], profile[i].log_num, + (unsigned long long) profile[i].start, + (unsigned long long) profile[i].size); + } + + mlx4_dbg(dev, "HCA context memory: reserving %d KB\n", + (int) (total_size >> 10)); + + for (i = 0; i < MLX4_RES_NUM; ++i) { + switch (profile[i].type) { + case MLX4_RES_QP: + dev->caps.num_qps = profile[i].num; + init_hca->qpc_base = profile[i].start; + init_hca->log_num_qps = profile[i].log_num; + break; + case MLX4_RES_RDMARC: + for (priv->qp_table.rdmarc_shift = 0; + request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num; + ++priv->qp_table.rdmarc_shift) + ; /* nothing */ + dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift; + priv->qp_table.rdmarc_base = (u32) profile[i].start; + init_hca->rdmarc_base = profile[i].start; + init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift; + break; + case MLX4_RES_ALTC: + init_hca->altc_base = profile[i].start; + break; + case MLX4_RES_AUXC: + init_hca->auxc_base = profile[i].start; + break; + case MLX4_RES_SRQ: + dev->caps.num_srqs = profile[i].num; + init_hca->srqc_base = profile[i].start; + init_hca->log_num_srqs = profile[i].log_num; + break; + case MLX4_RES_CQ: + dev->caps.num_cqs = profile[i].num; + init_hca->cqc_base = profile[i].start; + init_hca->log_num_cqs = profile[i].log_num; + break; + case MLX4_RES_EQ: + dev->caps.num_eqs = profile[i].num; + init_hca->eqc_base = profile[i].start; + init_hca->log_num_eqs = profile[i].log_num; + break; + case MLX4_RES_DMPT: + dev->caps.num_mpts = profile[i].num; + priv->mr_table.mpt_base = profile[i].start; + init_hca->dmpt_base = profile[i].start; + init_hca->log_mpt_sz = profile[i].log_num; + break; + case MLX4_RES_CMPT: + init_hca->cmpt_base = profile[i].start; + break; + case MLX4_RES_MTT: + dev->caps.num_mtt_segs = profile[i].num; + priv->mr_table.mtt_base = profile[i].start; + init_hca->mtt_base = profile[i].start; + break; + case MLX4_RES_MCG: + dev->caps.num_mgms = profile[i].num >> 1; + dev->caps.num_amgms = profile[i].num >> 1; + init_hca->mc_base = profile[i].start; + init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE); + init_hca->log_mc_table_sz = profile[i].log_num; + init_hca->log_mc_hash_sz = profile[i].log_num - 1; + break; + default: + break; + } + } + + /* + * PDs don't take any HCA memory, but we assign them as part + * of the HCA profile anyway. 
+ */ + dev->caps.num_pds = MLX4_NUM_PDS; + + kfree(profile); + return total_size; +} diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c new file mode 100644 index 000000000000..7f8b7d55b6e1 --- /dev/null +++ b/drivers/net/mlx4/qp.c @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include + +#include +#include + +#include "mlx4.h" +#include "icm.h" + +void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + struct mlx4_qp *qp; + + spin_lock(&qp_table->lock); + + qp = __mlx4_qp_lookup(dev, qpn); + if (qp) + atomic_inc(&qp->refcount); + + spin_unlock(&qp_table->lock); + + if (!qp) { + mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn); + return; + } + + qp->event(qp, event_type); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); +} + +int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp) +{ + static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = { + [MLX4_QP_STATE_RST] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP, + }, + [MLX4_QP_STATE_INIT] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP, + [MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP, + }, + [MLX4_QP_STATE_RTR] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP, + }, + [MLX4_QP_STATE_RTS] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP, + [MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP, + }, + [MLX4_QP_STATE_SQD] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP, + [MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP, + }, + [MLX4_QP_STATE_SQER] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP, + }, + [MLX4_QP_STATE_ERR] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + } + }; + + struct mlx4_cmd_mailbox *mailbox; + int ret = 0; + + if (cur_state < 0 || cur_state >= MLX4_QP_NUM_STATE || + new_state < 0 || cur_state >= MLX4_QP_NUM_STATE || + !op[cur_state][new_state]) + return -EINVAL; + + if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) + return mlx4_cmd(dev, 0, qp->qpn, 2, + MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A); + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) { + u64 mtt_addr = mlx4_mtt_addr(dev, mtt); + context->mtt_base_addr_h = mtt_addr >> 32; + context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; + } + + *(__be32 *) mailbox->buf = cpu_to_be32(optpar); + memcpy(mailbox->buf + 8, context, sizeof *context); + + ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = + cpu_to_be32(qp->qpn); + + ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31), + new_state == MLX4_QP_STATE_RST ? 
2 : 0, + op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C); + + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_qp_modify); + +int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + int err; + + if (sqpn) + qp->qpn = sqpn; + else { + qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap); + if (qp->qpn == -1) + return -ENOMEM; + } + + err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); + if (err) + goto err_out; + + err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn); + if (err) + goto err_put_qp; + + err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn); + if (err) + goto err_put_auxc; + + err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn); + if (err) + goto err_put_altc; + + err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn); + if (err) + goto err_put_rdmarc; + + spin_lock_irq(&qp_table->lock); + err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp); + spin_unlock_irq(&qp_table->lock); + if (err) + goto err_put_cmpt; + + atomic_set(&qp->refcount, 1); + init_completion(&qp->free); + + return 0; + +err_put_cmpt: + mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn); + +err_put_rdmarc: + mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn); + +err_put_altc: + mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); + +err_put_auxc: + mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); + +err_put_qp: + mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); + +err_out: + if (!sqpn) + mlx4_bitmap_free(&qp_table->bitmap, qp->qpn); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_qp_alloc); + +void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + unsigned long flags; + + spin_lock_irqsave(&qp_table->lock, flags); + radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); + spin_unlock_irqrestore(&qp_table->lock, flags); +} +EXPORT_SYMBOL_GPL(mlx4_qp_remove); + +void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); + wait_for_completion(&qp->free); + + mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn); + mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn); + mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); + mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); + mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); + + mlx4_bitmap_free(&qp_table->bitmap, qp->qpn); +} +EXPORT_SYMBOL_GPL(mlx4_qp_free); + +static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) +{ + return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP, + MLX4_CMD_TIME_CLASS_B); +} + +int __devinit mlx4_init_qp_table(struct mlx4_dev *dev) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + int err; + + spin_lock_init(&qp_table->lock); + INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); + + /* + * We reserve 2 extra QPs per port for the special QPs. The + * block of special QPs must be aligned to a multiple of 8, so + * round up. 
+ */ + dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8); + err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, + (1 << 24) - 1, dev->caps.sqp_start + 8); + if (err) + return err; + + return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start); +} + +void mlx4_cleanup_qp_table(struct mlx4_dev *dev) +{ + mlx4_CONF_SPECIAL_QP(dev, 0); + mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); +} diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c new file mode 100644 index 000000000000..51eef8492e93 --- /dev/null +++ b/drivers/net/mlx4/reset.c @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include "mlx4.h" + +int mlx4_reset(struct mlx4_dev *dev) +{ + void __iomem *reset; + u32 *hca_header = NULL; + int pcie_cap; + u16 devctl; + u16 linkctl; + u16 vendor; + unsigned long end; + u32 sem; + int i; + int err = 0; + +#define MLX4_RESET_BASE 0xf0000 +#define MLX4_RESET_SIZE 0x400 +#define MLX4_SEM_OFFSET 0x3fc +#define MLX4_RESET_OFFSET 0x10 +#define MLX4_RESET_VALUE swab32(1) + +#define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ) +#define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ) + + /* + * Reset the chip. This is somewhat ugly because we have to + * save off the PCI header before reset and then restore it + * after the chip reboots. We skip config space offsets 22 + * and 23 since those have a special meaning. + */ + + /* Do we need to save off the full 4K PCI Express header?? 
*/ + hca_header = kmalloc(256, GFP_KERNEL); + if (!hca_header) { + err = -ENOMEM; + mlx4_err(dev, "Couldn't allocate memory to save HCA " + "PCI header, aborting.\n"); + goto out; + } + + pcie_cap = pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); + + for (i = 0; i < 64; ++i) { + if (i == 22 || i == 23) + continue; + if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { + err = -ENODEV; + mlx4_err(dev, "Couldn't save HCA " + "PCI header, aborting.\n"); + goto out; + } + } + + reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE, + MLX4_RESET_SIZE); + if (!reset) { + err = -ENOMEM; + mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n"); + goto out; + } + + /* grab HW semaphore to lock out flash updates */ + end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES; + do { + sem = readl(reset + MLX4_SEM_OFFSET); + if (!sem) + break; + + msleep(1); + } while (time_before(jiffies, end)); + + if (sem) { + mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n"); + err = -EAGAIN; + iounmap(reset); + goto out; + } + + /* actually hit reset */ + writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET); + iounmap(reset); + + end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES; + do { + if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) && + vendor != 0xffff) + break; + + msleep(1); + } while (time_before(jiffies, end)); + + if (vendor == 0xffff) { + err = -ENODEV; + mlx4_err(dev, "PCI device did not come back after reset, " + "aborting.\n"); + goto out; + } + + /* Now restore the PCI headers */ + if (pcie_cap) { + devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; + if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL, + devctl)) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA PCI Express " + "Device Control register, aborting.\n"); + goto out; + } + linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; + if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL, + linkctl)) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA PCI Express " + "Link control register, aborting.\n"); + goto out; + } + } + + for (i = 0; i < 16; ++i) { + if (i * 4 == PCI_COMMAND) + continue; + + if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA reg %x, " + "aborting.\n", i); + goto out; + } + } + + if (pci_write_config_dword(dev->pdev, PCI_COMMAND, + hca_header[PCI_COMMAND / 4])) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA COMMAND, " + "aborting.\n"); + goto out; + } + +out: + kfree(hca_header); + + return err; +} diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c new file mode 100644 index 000000000000..2134f83aed87 --- /dev/null +++ b/drivers/net/mlx4/srq.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include + +#include + +#include "mlx4.h" +#include "icm.h" + +struct mlx4_srq_context { + __be32 state_logsize_srqn; + u8 logstride; + u8 reserved1[3]; + u8 pg_offset; + u8 reserved2[3]; + u32 reserved3; + u8 log_page_size; + u8 reserved4[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + __be32 pd; + __be16 limit_watermark; + __be16 wqe_cnt; + u16 reserved5; + __be16 wqe_counter; + u32 reserved6; + __be64 db_rec_addr; +}; + +void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_srq *srq; + + spin_lock(&srq_table->lock); + + srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1)); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&srq_table->lock); + + if (!srq) { + mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn); + return; + } + + srq->event(srq, event_type); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); +} + +static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int srq_num) +{ + return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ, + MLX4_CMD_TIME_CLASS_A); +} + +static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int srq_num) +{ + return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num, + mailbox ? 
0 : 1, MLX4_CMD_HW2SW_SRQ, + MLX4_CMD_TIME_CLASS_A); +} + +static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark) +{ + return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ, + MLX4_CMD_TIME_CLASS_B); +} + +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, + u64 db_rec, struct mlx4_srq *srq) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_srq_context *srq_context; + u64 mtt_addr; + int err; + + srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap); + if (srq->srqn == -1) + return -ENOMEM; + + err = mlx4_table_get(dev, &srq_table->table, srq->srqn); + if (err) + goto err_out; + + err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn); + if (err) + goto err_put; + + spin_lock_irq(&srq_table->lock); + err = radix_tree_insert(&srq_table->tree, srq->srqn, srq); + spin_unlock_irq(&srq_table->lock); + if (err) + goto err_cmpt_put; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_radix; + } + + srq_context = mailbox->buf; + memset(srq_context, 0, sizeof *srq_context); + + srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | + srq->srqn); + srq_context->logstride = srq->wqe_shift - 4; + srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; + + mtt_addr = mlx4_mtt_addr(dev, mtt); + srq_context->mtt_base_addr_h = mtt_addr >> 32; + srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + srq_context->pd = cpu_to_be32(pdn); + srq_context->db_rec_addr = cpu_to_be64(db_rec); + + err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) + goto err_radix; + + atomic_set(&srq->refcount, 1); + init_completion(&srq->free); + + return 0; + +err_radix: + spin_lock_irq(&srq_table->lock); + radix_tree_delete(&srq_table->tree, srq->srqn); + spin_unlock_irq(&srq_table->lock); + +err_cmpt_put: + mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn); + +err_put: + mlx4_table_put(dev, &srq_table->table, srq->srqn); + +err_out: + mlx4_bitmap_free(&srq_table->bitmap, srq->srqn); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_srq_alloc); + +void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + int err; + + err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn); + if (err) + mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn); + + spin_lock_irq(&srq_table->lock); + radix_tree_delete(&srq_table->tree, srq->srqn); + spin_unlock_irq(&srq_table->lock); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); + wait_for_completion(&srq->free); + + mlx4_table_put(dev, &srq_table->table, srq->srqn); + mlx4_bitmap_free(&srq_table->bitmap, srq->srqn); +} +EXPORT_SYMBOL_GPL(mlx4_srq_free); + +int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark) +{ + return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark); +} +EXPORT_SYMBOL_GPL(mlx4_srq_arm); + +int __devinit mlx4_init_srq_table(struct mlx4_dev *dev) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + int err; + + spin_lock_init(&srq_table->lock); + INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); + + err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, + dev->caps.num_srqs - 1, dev->caps.reserved_srqs); + if (err) + return err; + + return 0; +} + +void mlx4_cleanup_srq_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap); +} diff --git 
a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h new file mode 100644 index 000000000000..4fb552d12f7a --- /dev/null +++ b/include/linux/mlx4/cmd.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2006 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_CMD_H +#define MLX4_CMD_H + +#include + +enum { + /* initialization and general commands */ + MLX4_CMD_SYS_EN = 0x1, + MLX4_CMD_SYS_DIS = 0x2, + MLX4_CMD_MAP_FA = 0xfff, + MLX4_CMD_UNMAP_FA = 0xffe, + MLX4_CMD_RUN_FW = 0xff6, + MLX4_CMD_MOD_STAT_CFG = 0x34, + MLX4_CMD_QUERY_DEV_CAP = 0x3, + MLX4_CMD_QUERY_FW = 0x4, + MLX4_CMD_ENABLE_LAM = 0xff8, + MLX4_CMD_DISABLE_LAM = 0xff7, + MLX4_CMD_QUERY_DDR = 0x5, + MLX4_CMD_QUERY_ADAPTER = 0x6, + MLX4_CMD_INIT_HCA = 0x7, + MLX4_CMD_CLOSE_HCA = 0x8, + MLX4_CMD_INIT_PORT = 0x9, + MLX4_CMD_CLOSE_PORT = 0xa, + MLX4_CMD_QUERY_HCA = 0xb, + MLX4_CMD_SET_PORT = 0xc, + MLX4_CMD_ACCESS_DDR = 0x2e, + MLX4_CMD_MAP_ICM = 0xffa, + MLX4_CMD_UNMAP_ICM = 0xff9, + MLX4_CMD_MAP_ICM_AUX = 0xffc, + MLX4_CMD_UNMAP_ICM_AUX = 0xffb, + MLX4_CMD_SET_ICM_SIZE = 0xffd, + + /* TPT commands */ + MLX4_CMD_SW2HW_MPT = 0xd, + MLX4_CMD_QUERY_MPT = 0xe, + MLX4_CMD_HW2SW_MPT = 0xf, + MLX4_CMD_READ_MTT = 0x10, + MLX4_CMD_WRITE_MTT = 0x11, + MLX4_CMD_SYNC_TPT = 0x2f, + + /* EQ commands */ + MLX4_CMD_MAP_EQ = 0x12, + MLX4_CMD_SW2HW_EQ = 0x13, + MLX4_CMD_HW2SW_EQ = 0x14, + MLX4_CMD_QUERY_EQ = 0x15, + + /* CQ commands */ + MLX4_CMD_SW2HW_CQ = 0x16, + MLX4_CMD_HW2SW_CQ = 0x17, + MLX4_CMD_QUERY_CQ = 0x18, + MLX4_CMD_RESIZE_CQ = 0x2c, + + /* SRQ commands */ + MLX4_CMD_SW2HW_SRQ = 0x35, + MLX4_CMD_HW2SW_SRQ = 0x36, + MLX4_CMD_QUERY_SRQ = 0x37, + MLX4_CMD_ARM_SRQ = 0x40, + + /* QP/EE commands */ + MLX4_CMD_RST2INIT_QP = 0x19, + MLX4_CMD_INIT2RTR_QP = 0x1a, + MLX4_CMD_RTR2RTS_QP = 0x1b, + MLX4_CMD_RTS2RTS_QP = 0x1c, + MLX4_CMD_SQERR2RTS_QP = 0x1d, + MLX4_CMD_2ERR_QP = 0x1e, + MLX4_CMD_RTS2SQD_QP = 0x1f, + MLX4_CMD_SQD2SQD_QP = 0x38, + MLX4_CMD_SQD2RTS_QP = 0x20, + MLX4_CMD_2RST_QP = 0x21, + MLX4_CMD_QUERY_QP = 0x22, + MLX4_CMD_INIT2INIT_QP = 0x2d, + MLX4_CMD_SUSPEND_QP = 0x32, + MLX4_CMD_UNSUSPEND_QP = 0x33, + /* special QP and management commands */ + MLX4_CMD_CONF_SPECIAL_QP = 0x23, + MLX4_CMD_MAD_IFC = 0x24, + + /* multicast 
commands */ + MLX4_CMD_READ_MCG = 0x25, + MLX4_CMD_WRITE_MCG = 0x26, + MLX4_CMD_MGID_HASH = 0x27, + + /* miscellaneous commands */ + MLX4_CMD_DIAG_RPRT = 0x30, + MLX4_CMD_NOP = 0x31, + + /* debug commands */ + MLX4_CMD_QUERY_DEBUG_MSG = 0x2a, + MLX4_CMD_SET_DEBUG_MSG = 0x2b, +}; + +enum { + MLX4_CMD_TIME_CLASS_A = 10000, + MLX4_CMD_TIME_CLASS_B = 10000, + MLX4_CMD_TIME_CLASS_C = 10000, +}; + +enum { + MLX4_MAILBOX_SIZE = 4096 +}; + +struct mlx4_dev; + +struct mlx4_cmd_mailbox { + void *buf; + dma_addr_t dma; +}; + +int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout); + +/* Invoke a command with no output parameter */ +static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier, + u8 op_modifier, u16 op, unsigned long timeout) +{ + return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier, + op_modifier, op, timeout); +} + +/* Invoke a command with an output mailbox */ +static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param, + u32 in_modifier, u8 op_modifier, u16 op, + unsigned long timeout) +{ + return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier, + op_modifier, op, timeout); +} + +/* + * Invoke a command with an immediate output parameter (and copy the + * output into the caller's out_param pointer after the command + * executes). + */ +static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + u32 in_modifier, u8 op_modifier, u16 op, + unsigned long timeout) +{ + return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier, + op_modifier, op, timeout); +} + +struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); +void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); + +#endif /* MLX4_CMD_H */ diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h new file mode 100644 index 000000000000..0181e0a57cbf --- /dev/null +++ b/include/linux/mlx4/cq.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX4_CQ_H +#define MLX4_CQ_H + +#include + +#include +#include + +struct mlx4_cqe { + __be32 my_qpn; + __be32 immed_rss_invalid; + __be32 g_mlpath_rqpn; + u8 sl; + u8 reserved1; + __be16 rlid; + u32 reserved2; + __be32 byte_cnt; + __be16 wqe_index; + __be16 checksum; + u8 reserved3[3]; + u8 owner_sr_opcode; +}; + +struct mlx4_err_cqe { + __be32 my_qpn; + u32 reserved1[5]; + __be16 wqe_index; + u8 vendor_err_syndrome; + u8 syndrome; + u8 reserved2[3]; + u8 owner_sr_opcode; +}; + +enum { + MLX4_CQE_OWNER_MASK = 0x80, + MLX4_CQE_IS_SEND_MASK = 0x40, + MLX4_CQE_OPCODE_MASK = 0x1f +}; + +enum { + MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01, + MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02, + MLX4_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04, + MLX4_CQE_SYNDROME_WR_FLUSH_ERR = 0x05, + MLX4_CQE_SYNDROME_MW_BIND_ERR = 0x06, + MLX4_CQE_SYNDROME_BAD_RESP_ERR = 0x10, + MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11, + MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, + MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13, + MLX4_CQE_SYNDROME_REMOTE_OP_ERR = 0x14, + MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15, + MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16, + MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, +}; + +static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, + void __iomem *uar_page, + spinlock_t *doorbell_lock) +{ + __be32 doorbell[2]; + u32 sn; + u32 ci; + + sn = cq->arm_sn & 3; + ci = cq->cons_index & 0xffffff; + + *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); + + /* + * Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. + */ + wmb(); + + doorbell[0] = cpu_to_be32(sn << 28 | cmd | cq->cqn); + doorbell[1] = cpu_to_be32(ci); + + mlx4_write64(doorbell, uar_page + MLX4_CQ_DOORBELL, doorbell_lock); +} + +static inline void mlx4_cq_set_ci(struct mlx4_cq *cq) +{ + *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); +} + +enum { + MLX4_CQ_DB_REQ_NOT_SOL = 1 << 24, + MLX4_CQ_DB_REQ_NOT = 2 << 24 +}; + +#endif /* MLX4_CQ_H */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h new file mode 100644 index 000000000000..8c5f8fd86841 --- /dev/null +++ b/include/linux/mlx4/device.h @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_DEVICE_H +#define MLX4_DEVICE_H + +#include +#include +#include + +#include + +enum { + MLX4_FLAG_MSI_X = 1 << 0, +}; + +enum { + MLX4_MAX_PORTS = 2 +}; + +enum { + MLX4_DEV_CAP_FLAG_RC = 1 << 0, + MLX4_DEV_CAP_FLAG_UC = 1 << 1, + MLX4_DEV_CAP_FLAG_UD = 1 << 2, + MLX4_DEV_CAP_FLAG_SRQ = 1 << 6, + MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7, + MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, + MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, + MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, + MLX4_DEV_CAP_FLAG_APM = 1 << 17, + MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, + MLX4_DEV_CAP_FLAG_RAW_MCAST = 1 << 19, + MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1 << 20, + MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21 +}; + +enum mlx4_event { + MLX4_EVENT_TYPE_COMP = 0x00, + MLX4_EVENT_TYPE_PATH_MIG = 0x01, + MLX4_EVENT_TYPE_COMM_EST = 0x02, + MLX4_EVENT_TYPE_SQ_DRAINED = 0x03, + MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13, + MLX4_EVENT_TYPE_SRQ_LIMIT = 0x14, + MLX4_EVENT_TYPE_CQ_ERROR = 0x04, + MLX4_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + MLX4_EVENT_TYPE_EEC_CATAS_ERROR = 0x06, + MLX4_EVENT_TYPE_PATH_MIG_FAILED = 0x07, + MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, + MLX4_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, + MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08, + MLX4_EVENT_TYPE_PORT_CHANGE = 0x09, + MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f, + MLX4_EVENT_TYPE_ECC_DETECT = 0x0e, + MLX4_EVENT_TYPE_CMD = 0x0a +}; + +enum { + MLX4_PORT_CHANGE_SUBTYPE_DOWN = 1, + MLX4_PORT_CHANGE_SUBTYPE_ACTIVE = 4 +}; + +enum { + MLX4_PERM_LOCAL_READ = 1 << 10, + MLX4_PERM_LOCAL_WRITE = 1 << 11, + MLX4_PERM_REMOTE_READ = 1 << 12, + MLX4_PERM_REMOTE_WRITE = 1 << 13, + MLX4_PERM_ATOMIC = 1 << 14 +}; + +enum { + MLX4_OPCODE_NOP = 0x00, + MLX4_OPCODE_SEND_INVAL = 0x01, + MLX4_OPCODE_RDMA_WRITE = 0x08, + MLX4_OPCODE_RDMA_WRITE_IMM = 0x09, + MLX4_OPCODE_SEND = 0x0a, + MLX4_OPCODE_SEND_IMM = 0x0b, + MLX4_OPCODE_LSO = 0x0e, + MLX4_OPCODE_RDMA_READ = 0x10, + MLX4_OPCODE_ATOMIC_CS = 0x11, + MLX4_OPCODE_ATOMIC_FA = 0x12, + MLX4_OPCODE_ATOMIC_MASK_CS = 0x14, + MLX4_OPCODE_ATOMIC_MASK_FA = 0x15, + MLX4_OPCODE_BIND_MW = 0x18, + MLX4_OPCODE_FMR = 0x19, + MLX4_OPCODE_LOCAL_INVAL = 0x1b, + MLX4_OPCODE_CONFIG_CMD = 0x1f, + + MLX4_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, + MLX4_RECV_OPCODE_SEND = 0x01, + MLX4_RECV_OPCODE_SEND_IMM = 0x02, + MLX4_RECV_OPCODE_SEND_INVAL = 0x03, + + MLX4_CQE_OPCODE_ERROR = 0x1e, + MLX4_CQE_OPCODE_RESIZE = 0x16, +}; + +enum { + MLX4_STAT_RATE_OFFSET = 5 +}; + +struct mlx4_caps { + u64 fw_ver; + int num_ports; + int vl_cap; + int mtu_cap; + int gid_table_len; + int pkey_table_len; + int local_ca_ack_delay; + int num_uars; + int bf_reg_size; + int bf_regs_per_page; + int max_sq_sg; + int max_rq_sg; + int num_qps; + int max_wqes; + int max_sq_desc_sz; + int max_rq_desc_sz; + int max_qp_init_rdma; + int max_qp_dest_rdma; + int reserved_qps; + int sqp_start; + int num_srqs; + int max_srq_wqes; + int max_srq_sge; + int reserved_srqs; + int num_cqs; + int max_cqes; + int reserved_cqs; + int num_eqs; + int reserved_eqs; + int num_mpts; + int num_mtt_segs; + int fmr_reserved_mtts; + int reserved_mtts; + int reserved_mrws; + int reserved_uars; + int num_mgms; + int num_amgms; + int reserved_mcgs; + int num_qp_per_mgm; + int num_pds; + int reserved_pds; + int mtt_entry_sz; + 
u32 page_size_cap; + u32 flags; + u16 stat_rate_support; + u8 port_width_cap; +}; + +struct mlx4_buf_list { + void *buf; + dma_addr_t map; +}; + +struct mlx4_buf { + union { + struct mlx4_buf_list direct; + struct mlx4_buf_list *page_list; + } u; + int nbufs; + int npages; + int page_shift; +}; + +struct mlx4_mtt { + u32 first_seg; + int order; + int page_shift; +}; + +struct mlx4_mr { + struct mlx4_mtt mtt; + u64 iova; + u64 size; + u32 key; + u32 pd; + u32 access; + int enabled; +}; + +struct mlx4_uar { + unsigned long pfn; + int index; +}; + +struct mlx4_cq { + void (*comp) (struct mlx4_cq *); + void (*event) (struct mlx4_cq *, enum mlx4_event); + + struct mlx4_uar *uar; + + u32 cons_index; + + __be32 *set_ci_db; + __be32 *arm_db; + int arm_sn; + + int cqn; + + atomic_t refcount; + struct completion free; +}; + +struct mlx4_qp { + void (*event) (struct mlx4_qp *, enum mlx4_event); + + int qpn; + + atomic_t refcount; + struct completion free; +}; + +struct mlx4_srq { + void (*event) (struct mlx4_srq *, enum mlx4_event); + + int srqn; + int max; + int max_gs; + int wqe_shift; + + atomic_t refcount; + struct completion free; +}; + +struct mlx4_av { + __be32 port_pd; + u8 reserved1; + u8 g_slid; + __be16 dlid; + u8 reserved2; + u8 gid_index; + u8 stat_rate; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + u8 dgid[16]; +}; + +struct mlx4_dev { + struct pci_dev *pdev; + unsigned long flags; + struct mlx4_caps caps; + struct radix_tree_root qp_table_tree; +}; + +struct mlx4_init_port_param { + int set_guid0; + int set_node_guid; + int set_si_guid; + u16 mtu; + int port_width_cap; + u16 vl_cap; + u16 max_gid; + u16 max_pkey; + u64 guid0; + u64 node_guid; + u64 si_guid; +}; + +int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, + struct mlx4_buf *buf); +void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); + +int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn); +void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); + +int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); +void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); + +int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, + struct mlx4_mtt *mtt); +void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt); +u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt); + +int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, + int npages, int page_shift, struct mlx4_mr *mr); +void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr); +int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr); +int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list); +int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_buf *buf); + +int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, + struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq); +void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); + +int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp); +void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); + +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, + u64 db_rec, struct mlx4_srq *srq); +void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq); +int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark); + +int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int port); +int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); + +int 
mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); +int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); + +#endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx4/doorbell.h b/include/linux/mlx4/doorbell.h new file mode 100644 index 000000000000..3f2da442d7cb --- /dev/null +++ b/include/linux/mlx4/doorbell.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_DOORBELL_H +#define MLX4_DOORBELL_H + +#include +#include + +#define MLX4_SEND_DOORBELL 0x14 +#define MLX4_CQ_DOORBELL 0x20 + +#if BITS_PER_LONG == 64 +/* + * Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. + */ + +#define MLX4_DECLARE_DOORBELL_LOCK(name) +#define MLX4_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define MLX4_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void mlx4_write64_raw(__be64 val, void __iomem *dest) +{ + __raw_writeq((__force u64) val, dest); +} + +static inline void mlx4_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + __raw_writeq(*(u64 *) val, dest); +} + +#else + +/* + * Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. 
+ */ + +#define MLX4_DECLARE_DOORBELL_LOCK(name) spinlock_t name; +#define MLX4_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define MLX4_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void mlx4_write64_raw(__be64 val, void __iomem *dest) +{ + __raw_writel(((__force u32 *) &val)[0], dest); + __raw_writel(((__force u32 *) &val)[1], dest + 4); +} + +static inline void mlx4_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + unsigned long flags; + + spin_lock_irqsave(doorbell_lock, flags); + __raw_writel((__force u32) val[0], dest); + __raw_writel((__force u32) val[1], dest + 4); + spin_unlock_irqrestore(doorbell_lock, flags); +} + +#endif + +#endif /* MLX4_DOORBELL_H */ diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h new file mode 100644 index 000000000000..1b835ca49df1 --- /dev/null +++ b/include/linux/mlx4/driver.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2006 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_DRIVER_H +#define MLX4_DRIVER_H + +#include + +struct mlx4_dev; + +enum mlx4_dev_event { + MLX4_DEV_EVENT_CATASTROPHIC_ERROR, + MLX4_DEV_EVENT_PORT_UP, + MLX4_DEV_EVENT_PORT_DOWN, + MLX4_DEV_EVENT_PORT_REINIT, +}; + +struct mlx4_interface { + void * (*add) (struct mlx4_dev *dev); + void (*remove)(struct mlx4_dev *dev, void *context); + void (*event) (struct mlx4_dev *dev, void *context, + enum mlx4_dev_event event, int subtype, + int port); + struct list_head list; +}; + +int mlx4_register_interface(struct mlx4_interface *intf); +void mlx4_unregister_interface(struct mlx4_interface *intf); + +#endif /* MLX4_DRIVER_H */ diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h new file mode 100644 index 000000000000..9eeb61adf6a3 --- /dev/null +++ b/include/linux/mlx4/qp.h @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_QP_H +#define MLX4_QP_H + +#include + +#include + +#define MLX4_INVALID_LKEY 0x100 + +enum mlx4_qp_optpar { + MLX4_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, + MLX4_QP_OPTPAR_RRE = 1 << 1, + MLX4_QP_OPTPAR_RAE = 1 << 2, + MLX4_QP_OPTPAR_RWE = 1 << 3, + MLX4_QP_OPTPAR_PKEY_INDEX = 1 << 4, + MLX4_QP_OPTPAR_Q_KEY = 1 << 5, + MLX4_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, + MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, + MLX4_QP_OPTPAR_SRA_MAX = 1 << 8, + MLX4_QP_OPTPAR_RRA_MAX = 1 << 9, + MLX4_QP_OPTPAR_PM_STATE = 1 << 10, + MLX4_QP_OPTPAR_RETRY_COUNT = 1 << 12, + MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13, + MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, + MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16 +}; + +enum mlx4_qp_state { + MLX4_QP_STATE_RST = 0, + MLX4_QP_STATE_INIT = 1, + MLX4_QP_STATE_RTR = 2, + MLX4_QP_STATE_RTS = 3, + MLX4_QP_STATE_SQER = 4, + MLX4_QP_STATE_SQD = 5, + MLX4_QP_STATE_ERR = 6, + MLX4_QP_STATE_SQ_DRAINING = 7, + MLX4_QP_NUM_STATE +}; + +enum { + MLX4_QP_ST_RC = 0x0, + MLX4_QP_ST_UC = 0x1, + MLX4_QP_ST_RD = 0x2, + MLX4_QP_ST_UD = 0x3, + MLX4_QP_ST_MLX = 0x7 +}; + +enum { + MLX4_QP_PM_MIGRATED = 0x3, + MLX4_QP_PM_ARMED = 0x0, + MLX4_QP_PM_REARM = 0x1 +}; + +enum { + /* params1 */ + MLX4_QP_BIT_SRE = 1 << 15, + MLX4_QP_BIT_SWE = 1 << 14, + MLX4_QP_BIT_SAE = 1 << 13, + /* params2 */ + MLX4_QP_BIT_RRE = 1 << 15, + MLX4_QP_BIT_RWE = 1 << 14, + MLX4_QP_BIT_RAE = 1 << 13, + MLX4_QP_BIT_RIC = 1 << 4, +}; + +struct mlx4_qp_path { + u8 fl; + u8 reserved1[2]; + u8 pkey_index; + u8 reserved2; + u8 grh_mylmc; + __be16 rlid; + u8 ackto; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + u8 rgid[16]; + u8 sched_queue; + u8 snooper_flags; + u8 reserved3[2]; + u8 counter_index; + u8 reserved4[7]; +}; + +struct mlx4_qp_context { + __be32 flags; + __be32 pd; + u8 mtu_msgmax; + u8 rq_size_stride; + u8 sq_size_stride; + u8 rlkey; + __be32 usr_page; + __be32 local_qpn; + __be32 remote_qpn; + struct mlx4_qp_path pri_path; + struct mlx4_qp_path alt_path; + __be32 params1; + u32 reserved1; + __be32 next_send_psn; + __be32 cqn_send; + u32 reserved2[2]; + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 srcd; + __be32 cqn_recv; + __be64 db_rec_addr; + __be32 qkey; + __be32 srqn; + __be32 msn; + __be16 rq_wqe_counter; + __be16 
sq_wqe_counter; + u32 reserved3[2]; + __be32 param3; + __be32 nummmcpeers_basemkey; + u8 log_page_size; + u8 reserved4[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + u32 reserved5[10]; +}; + +enum { + MLX4_WQE_CTRL_FENCE = 1 << 6, + MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2, + MLX4_WQE_CTRL_SOLICITED = 1 << 1, +}; + +struct mlx4_wqe_ctrl_seg { + __be32 owner_opcode; + u8 reserved2[3]; + u8 fence_size; + /* + * High 24 bits are SRC remote buffer; low 8 bits are flags: + * [7] SO (strong ordering) + * [5] TCP/UDP checksum + * [4] IP checksum + * [3:2] C (generate completion queue entry) + * [1] SE (solicited event) + */ + __be32 srcrb_flags; + /* + * imm is immediate data for send/RDMA write w/ immediate; + * also invalidation key for send with invalidate; input + * modifier for WQEs on CCQs. + */ + __be32 imm; +}; + +enum { + MLX4_WQE_MLX_VL15 = 1 << 17, + MLX4_WQE_MLX_SLR = 1 << 16 +}; + +struct mlx4_wqe_mlx_seg { + u8 owner; + u8 reserved1[2]; + u8 opcode; + u8 reserved2[3]; + u8 size; + /* + * [17] VL15 + * [16] SLR + * [15:12] static rate + * [11:8] SL + * [4] ICRC + * [3:2] C + * [0] FL (force loopback) + */ + __be32 flags; + __be16 rlid; + u16 reserved3; +}; + +struct mlx4_wqe_datagram_seg { + __be32 av[8]; + __be32 dqpn; + __be32 qkey; + __be32 reservd[2]; +}; + +struct mlx4_wqe_bind_seg { + __be32 flags1; + __be32 flags2; + __be32 new_rkey; + __be32 lkey; + __be64 addr; + __be64 length; +}; + +struct mlx4_wqe_fmr_seg { + __be32 flags; + __be32 mem_key; + __be64 buf_list; + __be64 start_addr; + __be64 reg_len; + __be32 offset; + __be32 page_size; + u32 reserved[2]; +}; + +struct mlx4_wqe_fmr_ext_seg { + u8 flags; + u8 reserved; + __be16 app_mask; + __be16 wire_app_tag; + __be16 mem_app_tag; + __be32 wire_ref_tag_base; + __be32 mem_ref_tag_base; +}; + +struct mlx4_wqe_local_inval_seg { + u8 flags; + u8 reserved1[3]; + __be32 mem_key; + u8 reserved2[3]; + u8 guest_id; + __be64 pa; +}; + +struct mlx4_wqe_raddr_seg { + __be64 raddr; + __be32 rkey; + u32 reserved; +}; + +struct mlx4_wqe_atomic_seg { + __be64 swap_add; + __be64 compare; +}; + +struct mlx4_wqe_data_seg { + __be32 byte_count; + __be32 lkey; + __be64 addr; +}; + +struct mlx4_wqe_inline_seg { + __be32 byte_count; +}; + +int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp); + +static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) +{ + return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); +} + +void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp); + +#endif /* MLX4_QP_H */ diff --git a/include/linux/mlx4/srq.h b/include/linux/mlx4/srq.h new file mode 100644 index 000000000000..799a0697a383 --- /dev/null +++ b/include/linux/mlx4/srq.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_SRQ_H +#define MLX4_SRQ_H + +struct mlx4_wqe_srq_next_seg { + u16 reserved1; + __be16 next_wqe_index; + u32 reserved2[3]; +}; + +#endif /* MLX4_SRQ_H */ -- cgit v1.2.3 From beb7dd86a101263bf63a78c7c6d4da3849b35bd6 Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Wed, 9 May 2007 07:14:03 +0200 Subject: Fix misspellings collected by members of KJ list. Fix the misspellings of "propogate", "writting" and (oh, the shame :-) "kenrel" in the source tree. Signed-off-by: Robert P. J. Day Signed-off-by: Adrian Bunk --- Documentation/sysctl/kernel.txt | 2 +- arch/powerpc/oprofile/op_model_cell.c | 2 +- arch/x86_64/kernel/io_apic.c | 2 +- drivers/char/tty_io.c | 8 ++++---- drivers/media/dvb/frontends/dib7000m.c | 2 +- drivers/media/dvb/frontends/dib7000p.c | 2 +- drivers/media/video/em28xx/em28xx-i2c.c | 2 +- drivers/net/irda/donauboe.h | 2 +- drivers/net/wireless/prism54/islpci_dev.c | 2 +- drivers/net/wireless/wavelan_cs.c | 4 ++-- drivers/net/wireless/wavelan_cs.p.h | 2 +- drivers/sbus/char/bpp.c | 2 +- drivers/usb/serial/aircable.c | 4 ++-- drivers/usb/serial/io_edgeport.c | 4 ++-- drivers/video/matrox/matroxfb_Ti3026.c | 2 +- drivers/video/matrox/matroxfb_accel.c | 2 +- drivers/video/matrox/matroxfb_base.c | 2 +- drivers/video/matrox/matroxfb_misc.c | 2 +- fs/direct-io.c | 2 +- fs/reiserfs/journal.c | 4 ++-- include/asm-generic/bitops/atomic.h | 2 +- include/asm-i386/bitops.h | 2 +- include/asm-i386/boot.h | 2 +- include/asm-i386/sync_bitops.h | 2 +- include/asm-m32r/system.h | 2 +- include/asm-mips/bootinfo.h | 2 +- include/linux/mount.h | 2 +- sound/pci/ice1712/delta.h | 4 ++-- sound/sparc/dbri.c | 2 +- 29 files changed, 37 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 5922e84d9133..d3d3c255ddb7 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -228,7 +228,7 @@ Controls the kernel's behaviour when an oops or BUG is encountered. pid_max: -PID allocation wrap value. When the kenrel's next PID value +PID allocation wrap value. When the kernel's next PID value reaches this value, it wraps back to a minimum PID value. PIDs of value pid_max or larger are not allocated. 
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index 626b29f38304..c29293befba9 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c @@ -747,7 +747,7 @@ cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) * counter value etc.) are not copied to the actual registers * until the performance monitor is enabled. In order to get * this to work as desired, the permormance monitor needs to - * be disabled while writting to the latches. This is a + * be disabled while writing to the latches. This is a * HW design issue. */ cbe_enable_pm(cpu); diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 4d582589fa89..d8bfe315471c 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c @@ -1413,7 +1413,7 @@ static void ack_apic_level(unsigned int irq) /* * We must acknowledge the irq before we move it or the acknowledge will - * not propogate properly. + * not propagate properly. */ ack_APIC_irq(); diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 7710a6a77d97..f6ac1d316ea4 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -1573,11 +1573,11 @@ void no_tty(void) /** - * stop_tty - propogate flow control + * stop_tty - propagate flow control * @tty: tty to stop * * Perform flow control to the driver. For PTY/TTY pairs we - * must also propogate the TIOCKPKT status. May be called + * must also propagate the TIOCKPKT status. May be called * on an already stopped device and will not re-call the driver * method. * @@ -1607,11 +1607,11 @@ void stop_tty(struct tty_struct *tty) EXPORT_SYMBOL(stop_tty); /** - * start_tty - propogate flow control + * start_tty - propagate flow control * @tty: tty to start * * Start a tty that has been stopped if at all possible. Perform - * any neccessary wakeups and propogate the TIOCPKT status. If this + * any neccessary wakeups and propagate the TIOCPKT status. If this * is the tty was previous stopped and is being started then the * driver start method is invoked and the line discipline woken. 
* diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c index f5d40aa3d27f..f64546c6aeb5 100644 --- a/drivers/media/dvb/frontends/dib7000m.c +++ b/drivers/media/dvb/frontends/dib7000m.c @@ -266,7 +266,7 @@ static int dib7000m_sad_calib(struct dib7000m_state *state) { /* internal */ -// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth +// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth dib7000m_write_word(state, 929, (0 << 1) | (0 << 0)); dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096 diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c index 0349a4b5da3f..aece458cfe12 100644 --- a/drivers/media/dvb/frontends/dib7000p.c +++ b/drivers/media/dvb/frontends/dib7000p.c @@ -223,7 +223,7 @@ static int dib7000p_set_bandwidth(struct dvb_frontend *demod, u8 BW_Idx) static int dib7000p_sad_calib(struct dib7000p_state *state) { /* internal */ -// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth +// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth dib7000p_write_word(state, 73, (0 << 1) | (0 << 0)); dib7000p_write_word(state, 74, 776); // 0.625*3.3 / 4096 diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c index 563a8319e608..54ccc6e1f92e 100644 --- a/drivers/media/video/em28xx/em28xx-i2c.c +++ b/drivers/media/video/em28xx/em28xx-i2c.c @@ -70,7 +70,7 @@ static int em2800_i2c_send_max4(struct em28xx *dev, unsigned char addr, ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len); if (ret != 2 + len) { - em28xx_warn("writting to i2c device failed (error=%i)\n", ret); + em28xx_warn("writing to i2c device failed (error=%i)\n", ret); return -EIO; } for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0; diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h index 2ab173d9a0e4..1e67720f1066 100644 --- a/drivers/net/irda/donauboe.h +++ b/drivers/net/irda/donauboe.h @@ -113,7 +113,7 @@ /* RxOver overflow in Recv FIFO */ /* SipRcv received serial gap (or other condition you set) */ /* Interrupts are enabled by writing a one to the IER register */ -/* Interrupts are cleared by writting a one to the ISR register */ +/* Interrupts are cleared by writing a one to the ISR register */ /* */ /* 6. 
The remaining registers: 0x6 and 0x3 appear to be */ /* reserved parts of 16 or 32 bit registersthe remainder */ diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index a037b11dac9d..084795355b74 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -115,7 +115,7 @@ isl_upload_firmware(islpci_private *priv) ISL38XX_MEMORY_WINDOW_SIZE : fw_len; u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN; - /* set the cards base address for writting the data */ + /* set the card's base address for writing the data */ isl38xx_w32_flush(device_base, reg, ISL38XX_DIR_MEM_BASE_REG); wmb(); /* be paranoid */ diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index 67b867f837ca..5740d4d4267c 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -176,7 +176,7 @@ psa_write(struct net_device * dev, volatile u_char __iomem *verify = lp->mem + PSA_ADDR + (psaoff(0, psa_comp_number) << 1); - /* Authorize writting to PSA */ + /* Authorize writing to PSA */ hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN); while(n-- > 0) @@ -1676,7 +1676,7 @@ wv_set_frequency(u_long base, /* i/o port of the card */ fee_write(base, 0x60, dac, 2); - /* We now should verify here that the EEprom writting was ok */ + /* We now should verify here that the EEprom writing was ok */ /* ReRead the first area */ fee_read(base, 0x00, diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h index 4d1c4905c749..4b9de0093a7b 100644 --- a/drivers/net/wireless/wavelan_cs.p.h +++ b/drivers/net/wireless/wavelan_cs.p.h @@ -120,7 +120,7 @@ * the Wavelan itself (NCR -> AT&T -> Lucent). * * All started with Anders Klemets , - * writting a Wavelan ISA driver for the MACH microkernel. Girish + * writing a Wavelan ISA driver for the MACH microkernel. Girish * Welling had also worked on it. * Keith Moore modify this for the Pcmcia hardware. * diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c index 74b999d77bbf..4fab0c23814c 100644 --- a/drivers/sbus/char/bpp.c +++ b/drivers/sbus/char/bpp.c @@ -156,7 +156,7 @@ static unsigned short get_pins(unsigned minor) #define BPP_ICR 0x18 #define BPP_SIZE 0x1A -/* BPP_CSR. Bits of type RW1 are cleared with writting '1'. */ +/* BPP_CSR. Bits of type RW1 are cleared with writing '1'. */ #define P_DEV_ID_MASK 0xf0000000 /* R */ #define P_DEV_ID_ZEBRA 0x40000000 #define P_DEV_ID_L64854 0xa0000000 /* == NCR 89C100+89C105. Pity. */ diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c index b675735bfbee..fbc8c27d5d99 100644 --- a/drivers/usb/serial/aircable.c +++ b/drivers/usb/serial/aircable.c @@ -9,7 +9,7 @@ * The device works as an standard CDC device, it has 2 interfaces, the first * one is for firmware access and the second is the serial one. * The protocol is very simply, there are two posibilities reading or writing. - * When writting the first urb must have a Header that starts with 0x20 0x29 the + * When writing the first urb must have a Header that starts with 0x20 0x29 the * next two bytes must say how much data will be sended. * When reading the process is almost equal except that the header starts with * 0x00 0x20. @@ -18,7 +18,7 @@ * buffer: The First and Second byte is used for a Header, the Third and Fourth * tells the device the amount of information the package holds. * Packages are 60 bytes long Header Stuff. 
- * When writting to the device the first two bytes of the header are 0x20 0x29 + * When writing to the device the first two bytes of the header are 0x20 0x29 * When reading the bytes are 0x00 0x20, or 0x00 0x10, there is an strange * situation, when too much data arrives to the device because it sends the data * but with out the header. I will use a simply hack to override this situation, diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 18f74ac76565..4807f960150b 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2465,7 +2465,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r ((edge_serial->is_epic) && (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) && (regNum == MCR))) { - dbg("SendCmdWriteUartReg - Not writting to MCR Register"); + dbg("SendCmdWriteUartReg - Not writing to MCR Register"); return 0; } @@ -2473,7 +2473,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r ((edge_serial->is_epic) && (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) && (regNum == LCR))) { - dbg ("SendCmdWriteUartReg - Not writting to LCR Register"); + dbg ("SendCmdWriteUartReg - Not writing to LCR Register"); return 0; } diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c index a5690a5f29d5..9445cdb759b1 100644 --- a/drivers/video/matrox/matroxfb_Ti3026.c +++ b/drivers/video/matrox/matroxfb_Ti3026.c @@ -72,7 +72,7 @@ * (c) 1998 Gerd Knorr * * (following author is not in any relation with this code, but his ideas - * were used when writting this driver) + * were used when writing this driver) * * FreeVBE/AF (Matrox), "Shawn Hargreaves" * diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c index a5c825d99466..c57aaadf410c 100644 --- a/drivers/video/matrox/matroxfb_accel.c +++ b/drivers/video/matrox/matroxfb_accel.c @@ -70,7 +70,7 @@ * (c) 1998 Gerd Knorr * * (following author is not in any relation with this code, but his ideas - * were used when writting this driver) + * were used when writing this driver) * * FreeVBE/AF (Matrox), "Shawn Hargreaves" * diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c index cb2aa402ddfd..c8559a756b75 100644 --- a/drivers/video/matrox/matroxfb_base.c +++ b/drivers/video/matrox/matroxfb_base.c @@ -93,7 +93,7 @@ * (c) 1998 Gerd Knorr * * (following author is not in any relation with this code, but his ideas - * were used when writting this driver) + * were used when writing this driver) * * FreeVBE/AF (Matrox), "Shawn Hargreaves" * diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c index 18886b629cb1..5948e54b9ef9 100644 --- a/drivers/video/matrox/matroxfb_misc.c +++ b/drivers/video/matrox/matroxfb_misc.c @@ -78,7 +78,7 @@ * (c) 1998 Gerd Knorr * * (following author is not in any relation with this code, but his ideas - * were used when writting this driver) + * were used when writing this driver) * * FreeVBE/AF (Matrox), "Shawn Hargreaves" * diff --git a/fs/direct-io.c b/fs/direct-io.c index d9d0833444f5..1e88d8d1d2a9 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -439,7 +439,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio) * Wait on and process all in-flight BIOs. This must only be called once * all bios have been issued so that the refcount can only decrease. * This just waits for all bios to make it through dio_bio_complete. 
IO - * errors are propogated through dio->io_error and should be propogated via + * errors are propagated through dio->io_error and should be propagated via * dio_complete(). */ static void dio_await_completion(struct dio *dio) diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index e073fd86cf60..f25086aeef5f 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -1110,7 +1110,7 @@ static int flush_commit_list(struct super_block *s, if (!barrier) { /* If there was a write error in the journal - we can't commit * this transaction - it will be invalid and, if successful, - * will just end up propogating the write error out to + * will just end up propagating the write error out to * the file system. */ if (likely(!retval && !reiserfs_is_journal_aborted (journal))) { if (buffer_dirty(jl->j_commit_bh)) @@ -1125,7 +1125,7 @@ static int flush_commit_list(struct super_block *s, /* If there was a write error in the journal - we can't commit this * transaction - it will be invalid and, if successful, will just end - * up propogating the write error out to the filesystem. */ + * up propagating the write error out to the filesystem. */ if (unlikely(!buffer_uptodate(jl->j_commit_bh))) { #ifdef CONFIG_REISERFS_CHECK reiserfs_warning(s, "journal-615: buffer write failed"); diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 78339319ba02..cd8a9641bd66 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -58,7 +58,7 @@ extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; * if you do not require the atomic guarantees. * * Note: there are no guarantees that this function will not be reordered - * on non x86 architectures, so if you are writting portable code, + * on non x86 architectures, so if you are writing portable code, * make sure not to rely on its reordering guarantees. * * Note that @nr may be almost arbitrarily large; this function is not diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h index 273b50629357..a20fe9822f60 100644 --- a/include/asm-i386/bitops.h +++ b/include/asm-i386/bitops.h @@ -27,7 +27,7 @@ * if you do not require the atomic guarantees. * * Note: there are no guarantees that this function will not be reordered - * on non x86 architectures, so if you are writting portable code, + * on non x86 architectures, so if you are writing portable code, * make sure not to rely on its reordering guarantees. * * Note that @nr may be almost arbitrarily large; this function is not diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h index e7686d0a8413..bd024ab4fe53 100644 --- a/include/asm-i386/boot.h +++ b/include/asm-i386/boot.h @@ -12,7 +12,7 @@ #define EXTENDED_VGA 0xfffe /* 80x50 mode */ #define ASK_VGA 0xfffd /* ask for it at bootup */ -/* Physical address where kenrel should be loaded. */ +/* Physical address where kernel should be loaded. */ #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + (CONFIG_PHYSICAL_ALIGN - 1)) \ & ~(CONFIG_PHYSICAL_ALIGN - 1)) diff --git a/include/asm-i386/sync_bitops.h b/include/asm-i386/sync_bitops.h index 7d72351bea75..cbce08a2d135 100644 --- a/include/asm-i386/sync_bitops.h +++ b/include/asm-i386/sync_bitops.h @@ -24,7 +24,7 @@ * if you do not require the atomic guarantees. 
* * Note: there are no guarantees that this function will not be reordered - * on non x86 architectures, so if you are writting portable code, + * on non-x86 architectures, so if you are writing portable code, * make sure not to rely on its reordering guarantees. * * Note that @nr may be almost arbitrarily large; this function is not diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 06cdece35865..f62f5c9abba6 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -136,7 +136,7 @@ extern void __xchg_called_with_bad_pointer(void); "add3 "reg0", "addr", #0x2000; \n\t" \ "ld "reg0", @"reg0"; \n\t" \ "unlock "reg0", @"reg1"; \n\t" - /* FIXME: This workaround code cannot handle kenrel modules + /* FIXME: This workaround code cannot handle kernel modules * correctly under SMP environment. */ #else /* CONFIG_CHIP_M32700_TS1 */ diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h index c7c945baf1ee..dbf834f4dac4 100644 --- a/include/asm-mips/bootinfo.h +++ b/include/asm-mips/bootinfo.h @@ -254,7 +254,7 @@ extern void free_init_pages(const char *what, extern char arcs_cmdline[CL_SIZE]; /* - * Registers a0, a1, a3 and a4 as passed to the kenrel entry by firmware + * Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware */ extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; diff --git a/include/linux/mount.h b/include/linux/mount.h index dab69afee2fa..6d3047d8c91c 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -33,7 +33,7 @@ struct mnt_namespace; #define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ #define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */ -#define MNT_PNODE_MASK 0x3000 /* propogation flag mask */ +#define MNT_PNODE_MASK 0x3000 /* propagation flag mask */ struct vfsmount { struct list_head mnt_hash; diff --git a/sound/pci/ice1712/delta.h b/sound/pci/ice1712/delta.h index e65d669af639..e47861ccd6e7 100644 --- a/sound/pci/ice1712/delta.h +++ b/sound/pci/ice1712/delta.h @@ -63,7 +63,7 @@ extern const struct snd_ice1712_card_info snd_ice1712_delta_cards[]; /* look to CS8414 datasheet */ #define ICE1712_DELTA_SPDIF_OUT_STAT_CLOCK 0x04 /* S/PDIF output status clock */ - /* (writting on rising edge - 0->1) */ + /* (writing on rising edge - 0->1) */ /* all except Delta44 */ /* look to CS8404A datasheet */ #define ICE1712_DELTA_SPDIF_OUT_STAT_DATA 0x08 @@ -100,7 +100,7 @@ extern const struct snd_ice1712_card_info snd_ice1712_delta_cards[]; /* AKM4524 serial data */ #define ICE1712_DELTA_CODEC_SERIAL_CLOCK 0x20 /* AKM4524 serial clock */ - /* (writting on rising edge - 0->1 */ + /* (writing on rising edge - 0->1 */ #define ICE1712_DELTA_CODEC_CHIP_A 0x40 #define ICE1712_DELTA_CODEC_CHIP_B 0x80 /* 1 - select chip A or B */ diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c index 25a2a7333006..e07085a7cfc3 100644 --- a/sound/sparc/dbri.c +++ b/sound/sparc/dbri.c @@ -673,7 +673,7 @@ static s32 *dbri_cmdlock(struct snd_dbri * dbri, int len) } /* - * Send prepared cmd string. It works by writting a JUMP cmd into + * Send prepared cmd string. It works by writing a JUMP cmd into * the last WAIT cmd and force DBRI to reread the cmd. * The JUMP cmd points to the new cmd string. * It also releases the cmdlock spinlock. 
-- cgit v1.2.3 From 940d67f6b95166475ff6e600ef7658e1cd441278 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 8 May 2007 19:23:49 +1000 Subject: [POWERPC] swsusp: Introduce register_nosave_region_late This patch introduces a new register_nosave_region_late function that can be called from initcalls when register_nosave_region can no longer be used because it uses bootmem. Signed-off-by: Johannes Berg Acked-by: Rafael J. Wysocki Signed-off-by: Paul Mackerras --- include/linux/suspend.h | 11 ++++++++++- kernel/power/snapshot.c | 12 +++++++++--- 2 files changed, 19 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 9d2aa1a12aa0..1f2f7ba9e709 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -34,13 +34,22 @@ static inline void pm_restore_console(void) {} #if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) /* kernel/power/snapshot.c */ -extern void __init register_nosave_region(unsigned long, unsigned long); +extern void __register_nosave_region(unsigned long b, unsigned long e, int km); +static inline void register_nosave_region(unsigned long b, unsigned long e) +{ + __register_nosave_region(b, e, 0); +} +static inline void register_nosave_region_late(unsigned long b, unsigned long e) +{ + __register_nosave_region(b, e, 1); +} extern int swsusp_page_is_forbidden(struct page *); extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); extern unsigned long get_safe_page(gfp_t gfp_mask); #else static inline void register_nosave_region(unsigned long b, unsigned long e) {} +static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} static inline void swsusp_unset_page_free(struct page *p) {} diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index b7039772b05c..59fb89ba9a4d 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -607,7 +607,8 @@ static LIST_HEAD(nosave_regions); */ void __init -register_nosave_region(unsigned long start_pfn, unsigned long end_pfn) +__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn, + int use_kmalloc) { struct nosave_region *region; @@ -623,8 +624,13 @@ register_nosave_region(unsigned long start_pfn, unsigned long end_pfn) goto Report; } } - /* This allocation cannot fail */ - region = alloc_bootmem_low(sizeof(struct nosave_region)); + if (use_kmalloc) { + /* during init, this shouldn't fail */ + region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL); + BUG_ON(!region); + } else + /* This allocation cannot fail */ + region = alloc_bootmem_low(sizeof(struct nosave_region)); region->start_pfn = start_pfn; region->end_pfn = end_pfn; list_add_tail(®ion->list, &nosave_regions); -- cgit v1.2.3 From 59c51591a0ac7568824f541f57de967e88adaa07 Mon Sep 17 00:00:00 2001 From: Michael Opdenacker Date: Wed, 9 May 2007 08:57:56 +0200 Subject: Fix occurrences of "the the " Signed-off-by: Michael Opdenacker Signed-off-by: Adrian Bunk --- Documentation/ABI/removed/devfs | 2 +- Documentation/driver-model/platform.txt | 2 +- Documentation/netlabel/introduction.txt | 2 +- Documentation/pci.txt | 2 +- Documentation/powerpc/booting-without-of.txt | 2 +- arch/mips/Makefile | 2 +- arch/mips/pci/fixup-sb1250.c | 4 ++-- arch/powerpc/platforms/cell/io-workarounds.c | 2 +- arch/v850/kernel/entry.S | 2 +- block/ll_rw_blk.c | 2 +- 
drivers/block/rd.c | 2 +- drivers/char/drm/drm_dma.c | 2 +- drivers/char/drm/drm_vm.c | 2 +- drivers/char/drm/r300_reg.h | 2 +- drivers/char/pcmcia/cm4000_cs.c | 2 +- drivers/ide/pci/siimage.c | 2 +- drivers/ieee1394/nodemgr.c | 2 +- drivers/isdn/hardware/eicon/divasync.h | 2 +- drivers/isdn/hisax/hfc_usb.c | 4 ++-- drivers/media/dvb/dvb-usb/dvb-usb-remote.c | 2 +- drivers/media/dvb/frontends/tda10021.c | 2 +- drivers/media/dvb/frontends/ves1x93.c | 2 +- drivers/media/video/em28xx/em28xx-video.c | 2 +- drivers/media/video/usbvideo/vicam.c | 2 +- drivers/message/fusion/mptbase.c | 2 +- drivers/mtd/maps/nettel.c | 2 +- drivers/mtd/onenand/onenand_base.c | 2 +- drivers/net/bonding/bond_main.c | 2 +- drivers/net/eepro.c | 2 +- drivers/net/ixgb/ixgb_ee.c | 2 +- drivers/net/meth.h | 2 +- drivers/net/tulip/interrupt.c | 2 +- drivers/net/tulip/winbond-840.c | 2 +- drivers/net/tulip/xircom_cb.c | 2 +- drivers/net/typhoon.c | 2 +- drivers/net/wireless/airport.c | 2 +- drivers/s390/char/sclp_rw.c | 2 +- drivers/s390/net/qeth_main.c | 2 +- drivers/s390/scsi/zfcp_qdio.c | 2 +- drivers/scsi/aic7xxx/aic79xx_pci.c | 2 +- drivers/scsi/aic94xx/Makefile | 2 +- drivers/scsi/dc395x.c | 6 +++--- drivers/scsi/scsi_lib.c | 2 +- drivers/usb/misc/auerswald.c | 2 +- drivers/usb/net/usbnet.h | 2 +- drivers/video/i810/i810_main.c | 2 +- drivers/video/skeletonfb.c | 2 +- fs/jfs/jfs_dmap.c | 2 +- fs/jfs/jfs_imap.c | 4 ++-- fs/jfs/jfs_logmgr.c | 2 +- fs/xfs/xfs_itable.c | 2 +- include/asm-arm/dma-mapping.h | 2 +- include/asm-powerpc/ppc-pci.h | 2 +- include/linux/ext3_fs_i.h | 2 +- include/linux/ext4_fs_i.h | 2 +- include/linux/radix-tree.h | 4 ++-- include/linux/security.h | 2 +- include/linux/usb.h | 2 +- kernel/relay.c | 2 +- kernel/wait.c | 2 +- mm/mmap.c | 2 +- net/decnet/af_decnet.c | 2 +- net/ipv4/cipso_ipv4.c | 2 +- net/ipv4/ipvs/ip_vs_sed.c | 2 +- net/ipv4/udp.c | 2 +- net/llc/af_llc.c | 2 +- net/netfilter/nf_conntrack_expect.c | 2 +- net/sctp/chunk.c | 2 +- net/sctp/socket.c | 4 ++-- net/sunrpc/svcauth.c | 2 +- scripts/basic/docproc.c | 2 +- sound/pci/ac97/ac97_codec.c | 2 +- 72 files changed, 79 insertions(+), 79 deletions(-) (limited to 'include/linux') diff --git a/Documentation/ABI/removed/devfs b/Documentation/ABI/removed/devfs index 8195c4e0d0a1..8ffd28bf6598 100644 --- a/Documentation/ABI/removed/devfs +++ b/Documentation/ABI/removed/devfs @@ -6,7 +6,7 @@ Description: races, contains a naming policy within the kernel that is against the LSB, and can be replaced by using udev. The files fs/devfs/*, include/linux/devfs_fs*.h were removed, - along with the the assorted devfs function calls throughout the + along with the assorted devfs function calls throughout the kernel tree. Users: diff --git a/Documentation/driver-model/platform.txt b/Documentation/driver-model/platform.txt index f7c9262b2dc8..c5491c22086c 100644 --- a/Documentation/driver-model/platform.txt +++ b/Documentation/driver-model/platform.txt @@ -125,7 +125,7 @@ three different ways to find such a match: usually register later during booting, or by module loading. - Registering a driver using platform_driver_probe() works just like - using platform_driver_register(), except that the the driver won't + using platform_driver_register(), except that the driver won't be probed later if another device registers. (Which is OK, since this interface is only for use with non-hotpluggable devices.) 
diff --git a/Documentation/netlabel/introduction.txt b/Documentation/netlabel/introduction.txt index a4ffba1694c8..5ecd8d1dcf4e 100644 --- a/Documentation/netlabel/introduction.txt +++ b/Documentation/netlabel/introduction.txt @@ -30,7 +30,7 @@ The communication layer exists to allow NetLabel configuration and monitoring from user space. The NetLabel communication layer uses a message based protocol built on top of the Generic NETLINK transport mechanism. The exact formatting of these NetLabel messages as well as the Generic NETLINK family -names can be found in the the 'net/netlabel/' directory as comments in the +names can be found in the 'net/netlabel/' directory as comments in the header files as well as in 'include/net/netlabel.h'. * Security Module API diff --git a/Documentation/pci.txt b/Documentation/pci.txt index e2c9d0a0c43d..d38261b67905 100644 --- a/Documentation/pci.txt +++ b/Documentation/pci.txt @@ -373,7 +373,7 @@ E.g. clearing pending interrupts. 3.6 Register IRQ handler ~~~~~~~~~~~~~~~~~~~~~~~~ -While calling request_irq() is the the last step described here, +While calling request_irq() is the last step described here, this is often just another intermediate step to initialize a device. This step can often be deferred until the device is opened for use. diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt index d4bfae75c946..c141f2388474 100644 --- a/Documentation/powerpc/booting-without-of.txt +++ b/Documentation/powerpc/booting-without-of.txt @@ -1444,7 +1444,7 @@ platforms are moved over to use the flattened-device-tree model. Basically, it is a bus of devices, that could act more or less as a complete entity (UCC, USB etc ). All of them should be siblings on the "root" qe node, using the common properties from there. - The description below applies to the the qe of MPC8360 and + The description below applies to the qe of MPC8360 and more nodes and properties would be extended in the future. i) Root QE device diff --git a/arch/mips/Makefile b/arch/mips/Makefile index f2f742df32c7..4892db88a86a 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -92,7 +92,7 @@ cflags-y += -ffreestanding # when fed the toolchain default! # # Certain gcc versions upto gcc 4.1.1 (probably 4.2-subversion as of -# 2006-10-10 don't properly change the the predefined symbols if -EB / -EL +# 2006-10-10 don't properly change the predefined symbols if -EB / -EL # are used, so we kludge that here. A bug has been filed at # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29413. # diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c index 7a7444874e80..0ad39e53f7b1 100644 --- a/arch/mips/pci/fixup-sb1250.c +++ b/arch/mips/pci/fixup-sb1250.c @@ -14,7 +14,7 @@ #include /* - * Set the the BCM1250, etc. PCI host bridge's TRDY timeout + * Set the BCM1250, etc. PCI host bridge's TRDY timeout * to the finite max. */ static void __init quirk_sb1250_pci(struct pci_dev *dev) @@ -35,7 +35,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT, quirk_sb1250_ht); /* - * Set the the SP1011 HT/PCI bridge's TRDY timeout to the finite max. + * Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max. 
*/ static void __init quirk_sp1011(struct pci_dev *dev) { diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c index d68d920eb2c4..7fb92f23f380 100644 --- a/arch/powerpc/platforms/cell/io-workarounds.c +++ b/arch/powerpc/platforms/cell/io-workarounds.c @@ -74,7 +74,7 @@ static void spider_io_flush(const volatile void __iomem *addr) /* Fast path if we have a non-0 token, it indicates which bus we * are on. * - * If the token is 0, that means either the the ioremap was done + * If the token is 0, that means either that the ioremap was done * before we initialized this layer, or it's a PIO operation. We * fallback to a low path in this case. Hopefully, internal devices * which are ioremap'ed early should use in_XX/out_XX functions diff --git a/arch/v850/kernel/entry.S b/arch/v850/kernel/entry.S index 8bc521ca081f..e4327a8d6bcd 100644 --- a/arch/v850/kernel/entry.S +++ b/arch/v850/kernel/entry.S @@ -523,7 +523,7 @@ END(ret_from_trap) /* This the initial entry point for a new child thread, with an appropriate - stack in place that makes it look the the child is in the middle of an + stack in place that makes it look that the child is in the middle of an syscall. This function is actually `returned to' from switch_thread (copy_thread makes ret_from_fork the return address in each new thread's saved context). */ diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index d99d402953a3..f294f1538f1e 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -1704,7 +1704,7 @@ EXPORT_SYMBOL(blk_stop_queue); * on a queue, such as calling the unplug function after a timeout. * A block device may call blk_sync_queue to ensure that any * such activity is cancelled, thus allowing it to release resources - * the the callbacks might use. The caller must already have made sure + * that the callbacks might use. The caller must already have made sure * that its ->make_request_fn will not re-add plugging prior to calling * this function. * diff --git a/drivers/block/rd.c b/drivers/block/rd.c index 43d4ebcb3b44..a1512da32410 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c @@ -151,7 +151,7 @@ static int ramdisk_commit_write(struct file *file, struct page *page, } /* - * ->writepage to the the blockdev's mapping has to redirty the page so that the + * ->writepage to the blockdev's mapping has to redirty the page so that the * VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM * won't try to (pointlessly) write the page again for a while. * diff --git a/drivers/char/drm/drm_dma.c b/drivers/char/drm/drm_dma.c index 892db7096986..32ed19c9ec1c 100644 --- a/drivers/char/drm/drm_dma.c +++ b/drivers/char/drm/drm_dma.c @@ -65,7 +65,7 @@ int drm_dma_setup(drm_device_t * dev) * \param dev DRM device. * * Free all pages associated with DMA buffers, the buffers and pages lists, and - * finally the the drm_device::dma structure itself. + * finally the drm_device::dma structure itself. */ void drm_dma_takedown(drm_device_t * dev) { diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c index 35540cfb43dd..b5c5b9fa84c3 100644 --- a/drivers/char/drm/drm_vm.c +++ b/drivers/char/drm/drm_vm.c @@ -157,7 +157,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, * \param address access address. * \return pointer to the page structure. * - * Get the the mapping, find the real physical page to map, get the page, and + * Get the mapping, find the real physical page to map, get the page, and * return it. 
*/ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h index a881f96c983e..ecda760ae8c0 100644 --- a/drivers/char/drm/r300_reg.h +++ b/drivers/char/drm/r300_reg.h @@ -293,7 +293,7 @@ I am fairly certain that they are correct unless stated otherwise in comments. # define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0 # define R300_PVS_CNTL_1_POS_END_SHIFT 10 # define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20 -/* Addresses are relative the the vertex program parameters area. */ +/* Addresses are relative to the vertex program parameters area. */ #define R300_VAP_PVS_CNTL_2 0x22D4 # define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0 # define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16 diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index e91b43a014b0..4ea587983aef 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -1114,7 +1114,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf, /* * wait for atr to become valid. * note: it is important to lock this code. if we dont, the monitor - * could be run between test_bit and the the call the sleep on the + * could be run between test_bit and the call to sleep on the * atr-queue. if *then* the monitor detects atr valid, it will wake up * any process on the atr-queue, *but* since we have been interrupted, * we do not yet sleep on this queue. this would result in a missed diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c index c0188de3cc66..79cec50a242f 100644 --- a/drivers/ide/pci/siimage.c +++ b/drivers/ide/pci/siimage.c @@ -831,7 +831,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif) /* * Now set up the hw. We have to do this ourselves as - * the MMIO layout isnt the same as the the standard port + * the MMIO layout isnt the same as the standard port * based I/O */ diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index 6a1a0572275e..835937e38529 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -1702,7 +1702,7 @@ static int nodemgr_host_thread(void *__hi) generation = get_hpsb_generation(host); /* If we get a reset before we are done waiting, then - * start the the waiting over again */ + * start the waiting over again */ if (generation != g) g = generation, i = 0; } diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h index af3eb9e795b5..85784a7ffb25 100644 --- a/drivers/isdn/hardware/eicon/divasync.h +++ b/drivers/isdn/hardware/eicon/divasync.h @@ -216,7 +216,7 @@ typedef struct #define SERIAL_HOOK_RING 0x85 #define SERIAL_HOOK_DETACH 0x8f unsigned char Flags; /* function refinements */ - /* parameters passed by the the ATTACH request */ + /* parameters passed by the ATTACH request */ SERIAL_INT_CB InterruptHandler; /* called on each interrupt */ SERIAL_DPC_CB DeferredHandler; /* called on hook state changes */ void *HandlerContext; /* context for both handlers */ diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index 99e70d4103b6..1f18f1993387 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -1217,11 +1217,11 @@ usb_init(hfcusb_data * hfc) /* aux = output, reset off */ write_usb(hfc, HFCUSB_CIRM, 0x10); - /* set USB_SIZE to match the the wMaxPacketSize for INT or BULK transfers */ + /* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */ write_usb(hfc, HFCUSB_USB_SIZE, (hfc->packet_size / 8) | 
((hfc->packet_size / 8) << 4)); - /* set USB_SIZE_I to match the the wMaxPacketSize for ISO transfers */ + /* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */ write_usb(hfc, HFCUSB_USB_SIZE_I, hfc->iso_packet_size); /* enable PCM/GCI master mode */ diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c index 68ed3a788083..9200a30dd1b9 100644 --- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c +++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c @@ -3,7 +3,7 @@ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) * see dvb-usb-init.c for copyright information. * - * This file contains functions for initializing the the input-device and for handling remote-control-queries. + * This file contains functions for initializing the input-device and for handling remote-control-queries. */ #include "dvb-usb-common.h" #include diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c index 110536843e8e..e725f612a6b7 100644 --- a/drivers/media/dvb/frontends/tda10021.c +++ b/drivers/media/dvb/frontends/tda10021.c @@ -1,6 +1,6 @@ /* TDA10021 - Single Chip Cable Channel Receiver driver module - used on the the Siemens DVB-C cards + used on the Siemens DVB-C cards Copyright (C) 1999 Convergence Integrated Media GmbH Copyright (C) 2004 Markus Schulz diff --git a/drivers/media/dvb/frontends/ves1x93.c b/drivers/media/dvb/frontends/ves1x93.c index 54d7b07571b8..23fd0303c91b 100644 --- a/drivers/media/dvb/frontends/ves1x93.c +++ b/drivers/media/dvb/frontends/ves1x93.c @@ -306,7 +306,7 @@ static int ves1x93_read_status(struct dvb_frontend* fe, fe_status_t* status) * The ves1893 sometimes returns sync values that make no sense, * because, e.g., the SIGNAL bit is 0, while some of the higher * bits are 1 (and how can there be a CARRIER w/o a SIGNAL?). - * Tests showed that the the VITERBI and SYNC bits are returned + * Tests showed that the VITERBI and SYNC bits are returned * reliably, while the SIGNAL and CARRIER bits ar sometimes wrong. * If such a case occurs, we read the value again, until we get a * valid value. diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c index bec67609500f..2c7b158ce7e1 100644 --- a/drivers/media/video/em28xx/em28xx-video.c +++ b/drivers/media/video/em28xx/em28xx-video.c @@ -1729,7 +1729,7 @@ static int em28xx_usb_probe(struct usb_interface *interface, endpoint = &interface->cur_altsetting->endpoint[1].desc; - /* check if the the device has the iso in endpoint at the correct place */ + /* check if the device has the iso in endpoint at the correct place */ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC) { em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n"); diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c index 876fd2768242..982b115193f8 100644 --- a/drivers/media/video/usbvideo/vicam.c +++ b/drivers/media/video/usbvideo/vicam.c @@ -28,7 +28,7 @@ * * Portions of this code were also copied from usbvideo.c * - * Special thanks to the the whole team at Sourceforge for help making + * Special thanks to the whole team at Sourceforge for help making * this driver become a reality. 
Notably: * Andy Armstrong who reverse engineered the color encoding and * Pavel Machek and Chris Cheney who worked on reverse engineering the diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 97471af4309c..5021d1a2a1d4 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -3585,7 +3585,7 @@ initChainBuffers(MPT_ADAPTER *ioc) * index = chain_idx * * Calculate the number of chain buffers needed(plus 1) per I/O - * then multiply the the maximum number of simultaneous cmds + * then multiply the maximum number of simultaneous cmds * * num_sge = num sge in request frame + last chain buffer * scale = num sge per chain buffer if no chain element diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c index 9f53c655af3a..7b96cd02f82b 100644 --- a/drivers/mtd/maps/nettel.c +++ b/drivers/mtd/maps/nettel.c @@ -358,7 +358,7 @@ int __init nettel_init(void) /* Turn other PAR off so the first probe doesn't find it */ *intel1par = 0; - /* Probe for the the size of the first Intel flash */ + /* Probe for the size of the first Intel flash */ nettel_intel_map.size = maxsize; nettel_intel_map.phys = intel0addr; nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 000794c6caf5..0537fac8de74 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -2192,7 +2192,7 @@ static int onenand_check_maf(int manuf) * @param mtd MTD device structure * * OneNAND detection method: - * Compare the the values from command with ones from register + * Compare the values from command with ones from register */ static int onenand_probe(struct mtd_info *mtd) { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 724bce51f936..223517dcbcfd 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3461,7 +3461,7 @@ void bond_unregister_arp(struct bonding *bond) /*---------------------------- Hashing Policies -----------------------------*/ /* - * Hash for the the output device based upon layer 3 and layer 4 data. If + * Hash for the output device based upon layer 3 and layer 4 data. If * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is * altogether not IP, mimic bond_xmit_hash_policy_l2() */ diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 39654e1e2bed..47680237f783 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -1126,7 +1126,7 @@ static void eepro_tx_timeout (struct net_device *dev) printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name, "network cable problem"); /* This is not a duplicate. One message for the console, - one for the the log file */ + one for the log file */ printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name, "network cable problem"); eepro_complete_selreset(ioaddr); diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index f15aebde7b90..52c99d01d568 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c @@ -315,7 +315,7 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw) * hw - Struct containing variables accessed by shared code * * Reads the first 64 16 bit words of the EEPROM and sums the values read. - * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is * valid. 
* * Returns: diff --git a/drivers/net/meth.h b/drivers/net/meth.h index 84960dae2a22..ea3b8fc86d1e 100644 --- a/drivers/net/meth.h +++ b/drivers/net/meth.h @@ -126,7 +126,7 @@ typedef struct rx_packet { /* Note: when loopback is set this bit becomes collision control. Setting this bit will */ /* cause a collision to be reported. */ - /* Bits 5 and 6 are used to determine the the Destination address filter mode */ + /* Bits 5 and 6 are used to determine the Destination address filter mode */ #define METH_ACCEPT_MY 0 /* 00: Accept PHY address only */ #define METH_ACCEPT_MCAST 0x20 /* 01: Accept physical, broadcast, and multicast filter matches only */ #define METH_ACCEPT_AMCAST 0x40 /* 10: Accept physical, broadcast, and all multicast packets */ diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index 9b08afbd1f65..ea896777bcaf 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c @@ -269,7 +269,7 @@ done: This would turn on IM for devices that is not contributing to backlog congestion with unnecessary latency. - We monitor the the device RX-ring and have: + We monitor the device RX-ring and have: HW Interrupt Mitigation either ON or OFF. diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index fa440706fb4a..38f3b99716b8 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -1021,7 +1021,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) np->tx_ring[entry].length |= DescEndRing; /* Now acquire the irq spinlock. - * The difficult race is the the ordering between + * The difficult race is the ordering between * increasing np->cur_tx and setting DescOwned: * - if np->cur_tx is increased first the interrupt * handler could consider the packet as transmitted diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index 985a1810ca59..2470b1ee33c0 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c @@ -1043,7 +1043,7 @@ static int enable_promisc(struct xircom_private *card) /* -link_status() checks the the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what. +link_status() checks the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what. Must be called in locked state with interrupts disabled */ diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index f2dd7763cd0b..f72573594121 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -639,7 +639,7 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd, typhoon_inc_cmd_index(&ring->lastWrite, num_cmd); - /* "I feel a presence... another warrior is on the the mesa." + /* "I feel a presence... another warrior is on the mesa." */ wmb(); iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c index 38fac3bbcd82..7d5b8c2cc614 100644 --- a/drivers/net/wireless/airport.c +++ b/drivers/net/wireless/airport.c @@ -149,7 +149,7 @@ static int airport_hard_reset(struct orinoco_private *priv) /* Vitally important. If we don't do this it seems we get an * interrupt somewhere during the power cycle, since * hw_unavailable is already set it doesn't get ACKed, we get - * into an interrupt loop and the the PMU decides to turn us + * into an interrupt loop and the PMU decides to turn us * off. 
*/ disable_irq(dev->irq); diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index bbd5b8b66f42..d6b06ab81188 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c @@ -23,7 +23,7 @@ /* * The room for the SCCB (only for writing) is not equal to a pages size - * (as it is specified as the maximum size in the the SCLP documentation) + * (as it is specified as the maximum size in the SCLP documentation) * because of the additional data structure described above. */ #define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 29d176036e5c..0b96d49dd636 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -2860,7 +2860,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, if (!atomic_read(&queue->set_pci_flags_count)){ /* * there's no outstanding PCI any more, so we - * have to request a PCI to be sure the the PCI + * have to request a PCI to be sure that the PCI * will wake at some time in the future then we * can flush packed buffers that might still be * hanging around, which can happen if no diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index cb08ca3cc0f9..bdf5782b8a7a 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -222,7 +222,7 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, * Since we have been using this adapter, it is save to assume * that it is not failed but recoverable. The card seems to * report link-up events by self-initiated queue shutdown. - * That is why we need to clear the the link-down flag + * That is why we need to clear the link-down flag * which is set again in case we have missed by a mile. */ zfcp_erp_adapter_reopen( diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c index 8d72bbae96ad..0bada0028aa0 100644 --- a/drivers/scsi/aic7xxx/aic79xx_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_pci.c @@ -966,7 +966,7 @@ ahd_aic790X_setup(struct ahd_softc *ahd) | AHD_BUSFREEREV_BUG; ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG; - /* If the user requested the the SLOWCRC bit to be set. */ + /* If the user requested that the SLOWCRC bit to be set. */ if (aic79xx_slowcrc) ahd->features |= AHD_AIC79XXB_SLOWCRC; diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile index e6b70123940c..e78ce0fa44d2 100644 --- a/drivers/scsi/aic94xx/Makefile +++ b/drivers/scsi/aic94xx/Makefile @@ -6,7 +6,7 @@ # # This file is licensed under GPLv2. # -# This file is part of the the aic94xx driver. +# This file is part of the aic94xx driver. # # The aic94xx driver is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index a965ed3548d5..564ea90ed3a0 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -541,7 +541,7 @@ static struct ParameterData __devinitdata cfg_data[] = { /* - * Safe settings. If set to zero the the BIOS/default values with + * Safe settings. If set to zero the BIOS/default values with * command line overrides will be used. If set to 1 then safe and * slow settings will be used. */ @@ -617,7 +617,7 @@ static void __devinit fix_settings(void) /* * Mapping from the eeprom delay index value (index into this array) - * to the the number of actual seconds that the delay should be for. 
+ * to the number of actual seconds that the delay should be for. */ static char __devinitdata eeprom_index_to_delay_map[] = { 1, 3, 5, 10, 16, 30, 60, 120 }; @@ -4136,7 +4136,7 @@ static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long * @io_port: base I/O address * @addr: offset into SEEPROM * - * Returns the the byte read. + * Returns the byte read. **/ static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr) { diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 61fbcdcbb009..1f5a07bf2a75 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -173,7 +173,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) * @retries: number of times to retry request * @flags: or into request flags; * - * returns the req->errors value which is the the scsi_cmnd result + * returns the req->errors value which is the scsi_cmnd result * field. **/ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c index b5332e679c46..88fb56d5db8f 100644 --- a/drivers/usb/misc/auerswald.c +++ b/drivers/usb/misc/auerswald.c @@ -1307,7 +1307,7 @@ static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp) } -/* remove a service from the the device +/* remove a service from the device scp->id must be set! */ static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp) { diff --git a/drivers/usb/net/usbnet.h b/drivers/usb/net/usbnet.h index cbb53e065d6c..82db5a8e528e 100644 --- a/drivers/usb/net/usbnet.h +++ b/drivers/usb/net/usbnet.h @@ -129,7 +129,7 @@ extern void usbnet_disconnect(struct usb_interface *); /* Drivers that reuse some of the standard USB CDC infrastructure - * (notably, using multiple interfaces according to the the CDC + * (notably, using multiple interfaces according to the CDC * union descriptor) get some helper code. */ struct cdc_state { diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c index 7e760197cf29..1a7d7789d877 100644 --- a/drivers/video/i810/i810_main.c +++ b/drivers/video/i810/i810_main.c @@ -1717,7 +1717,7 @@ static int __devinit i810_alloc_agp_mem(struct fb_info *info) * @info: pointer to device specific info structure * * DESCRIPTION: - * Sets the the user monitor's horizontal and vertical + * Sets the user monitor's horizontal and vertical * frequency limits */ static void __devinit i810_init_monspecs(struct fb_info *info) diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c index 842b5cd054c6..836a612af977 100644 --- a/drivers/video/skeletonfb.c +++ b/drivers/video/skeletonfb.c @@ -14,7 +14,7 @@ * of it. * * First the roles of struct fb_info and struct display have changed. Struct - * display will go away. The way the the new framebuffer console code will + * display will go away. The way the new framebuffer console code will * work is that it will act to translate data about the tty/console in * struct vc_data to data in a device independent way in struct fb_info. 
Then * various functions in struct fb_ops will be called to store the device diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 82b0544bd76d..f3b1ebb22280 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -1507,7 +1507,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results) if (l2nb < budmin) { /* search the lower level dmap control pages to get - * the starting block number of the the dmap that + * the starting block number of the dmap that * contains or starts off the free space. */ if ((rc = diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index c465607be991..c6530227cda6 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -386,7 +386,7 @@ int diRead(struct inode *ip) return -EIO; } - /* locate the the disk inode requested */ + /* locate the disk inode requested */ dp = (struct dinode *) mp->data; dp += rel_inode; @@ -1407,7 +1407,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip) inum = pip->i_ino + 1; ino = inum & (INOSPERIAG - 1); - /* back off the the hint if it is outside of the iag */ + /* back off the hint if it is outside of the iag */ if (ino == 0) inum = pip->i_ino; diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 6a3f00dc8c83..44a2f33cb98d 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1960,7 +1960,7 @@ static void lbmfree(struct lbuf * bp) /* * NAME: lbmRedrive * - * FUNCTION: add a log buffer to the the log redrive list + * FUNCTION: add a log buffer to the log redrive list * * PARAMETER: * bp - log buffer diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 7775ddc0b3c6..e725ddd3de5f 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -809,7 +809,7 @@ xfs_inumbers( xfs_buf_relse(agbp); agbp = NULL; /* - * Move up the the last inode in the current + * Move up the last inode in the current * chunk. The lookup_ge will always get * us the first inode in the next chunk. */ diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h index abfb75b654c7..c8b5d0db0cf0 100644 --- a/include/asm-arm/dma-mapping.h +++ b/include/asm-arm/dma-mapping.h @@ -445,7 +445,7 @@ extern void dmabounce_unregister_dev(struct device *); * * The dmabounce routines call this function whenever a dma-mapping * is requested to determine whether a given buffer needs to be bounced - * or not. The function must return 0 if the the buffer is OK for + * or not. The function must return 0 if the buffer is OK for * DMA access and 1 if the buffer needs to be bounced. * */ diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h index d74b2965bb82..6dcd7a811fe1 100644 --- a/include/asm-powerpc/ppc-pci.h +++ b/include/asm-powerpc/ppc-pci.h @@ -64,7 +64,7 @@ struct pci_dev *pci_get_device_by_addr(unsigned long addr); * eeh_slot_error_detail -- record and EEH error condition to the log * @severity: 1 if temporary, 2 if permanent failure. * - * Obtains the the EEH error details from the RTAS subsystem, + * Obtains the EEH error details from the RTAS subsystem, * and then logs these details with the RTAS error log system. */ void eeh_slot_error_detail (struct pci_dn *pdn, int severity); diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h index 4395e5206746..7894dd0f3b77 100644 --- a/include/linux/ext3_fs_i.h +++ b/include/linux/ext3_fs_i.h @@ -54,7 +54,7 @@ struct ext3_block_alloc_info { /* * Was i_next_alloc_goal in ext3_inode_info * is the *physical* companion to i_next_alloc_block. 
- * it the the physical block number of the block which was most-recentl + * it the physical block number of the block which was most-recentl * allocated to this file. This give us the goal (target) for the next * allocation when we detect linearly ascending requests. */ diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h index bb42379cb7fd..d5b177e5b395 100644 --- a/include/linux/ext4_fs_i.h +++ b/include/linux/ext4_fs_i.h @@ -52,7 +52,7 @@ struct ext4_block_alloc_info { /* * Was i_next_alloc_goal in ext4_inode_info * is the *physical* companion to i_next_alloc_block. - * it the the physical block number of the block which was most-recentl + * it the physical block number of the block which was most-recentl * allocated to this file. This give us the goal (target) for the next * allocation when we detect linearly ascending requests. */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 0deb842541ac..f9e77d2ee320 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -87,10 +87,10 @@ do { \ * management of their lifetimes must be completely managed by API users. * * For API usage, in general, - * - any function _modifying_ the the tree or tags (inserting or deleting + * - any function _modifying_ the tree or tags (inserting or deleting * items, setting or clearing tags must exclude other modifications, and * exclude any functions reading the tree. - * - any function _reading_ the the tree or tags (looking up items or tags, + * - any function _reading_ the tree or tags (looking up items or tags, * gang lookups) must exclude modifications to the tree, but may occur * concurrently with other readers. * diff --git a/include/linux/security.h b/include/linux/security.h index 47e82c120f9a..9eb9e0fe0331 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -322,7 +322,7 @@ struct request_sock; * @dir contains the inode structure of parent of the new file. * @dentry contains the dentry structure of the new file. * @mode contains the mode of the new file. - * @dev contains the the device number. + * @dev contains the device number. * Return 0 if permission is granted. * @inode_rename: * Check for permission to rename a file or directory. diff --git a/include/linux/usb.h b/include/linux/usb.h index cfbd2bb8fa2c..94bd38a6d947 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -126,7 +126,7 @@ enum usb_interface_condition { * Each interface may have alternate settings. The initial configuration * of a device sets altsetting 0, but the device driver can change * that setting using usb_set_interface(). Alternate settings are often - * used to control the the use of periodic endpoints, such as by having + * used to control the use of periodic endpoints, such as by having * different endpoints use different amounts of reserved USB bandwidth. * All standards-conformant USB devices that use isochronous endpoints * will use them in non-default settings. diff --git a/kernel/relay.c b/kernel/relay.c index 577f251c7e28..d24395e8b6e5 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -310,7 +310,7 @@ static struct rchan_callbacks default_channel_callbacks = { /** * wakeup_readers - wake up readers waiting on a channel - * @work: work struct that contains the the channel buffer + * @work: work struct that contains the channel buffer * * This is the work function used to defer reader waking. 
The * reason waking is deferred is that calling directly from write diff --git a/kernel/wait.c b/kernel/wait.c index 59a82f63275d..444ddbfaefc4 100644 --- a/kernel/wait.c +++ b/kernel/wait.c @@ -61,7 +61,7 @@ EXPORT_SYMBOL(remove_wait_queue); * The spin_unlock() itself is semi-permeable and only protects * one way (it only protects stuff inside the critical region and * stops them from bleeding out - it would still allow subsequent - * loads to move into the the critical region). + * loads to move into the critical region). */ void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) diff --git a/mm/mmap.c b/mm/mmap.c index cc1f543eb1b8..68b9ad2ef1d6 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1720,7 +1720,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, /* * Split a vma into two pieces at address 'addr', a new vma is allocated - * either for the first part or the the tail. + * either for the first part or the tail. */ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long addr, int new_below) diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 9fbe87c93802..bfa910b6ad25 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1839,7 +1839,7 @@ static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *que } /* - * The DECnet spec requires the the "routing layer" accepts packets which + * The DECnet spec requires that the "routing layer" accepts packets which * are at least 230 bytes in size. This excludes any headers which the NSP * layer might add, so we always assume that we'll be using the maximal * length header on data packets. The variation in length is due to the diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index e1f18489db1d..86a2b52aad38 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -629,7 +629,7 @@ doi_walk_return: * @domain: the domain to add * * Description: - * Adds the @domain to the the DOI specified by @doi_def, this function + * Adds the @domain to the DOI specified by @doi_def, this function * should only be called by external functions (i.e. NetLabel). This function * does allocate memory. Returns zero on success, negative values on failure. * diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c index ff366f7390d9..dd7c128f9db3 100644 --- a/net/ipv4/ipvs/ip_vs_sed.c +++ b/net/ipv4/ipvs/ip_vs_sed.c @@ -18,7 +18,7 @@ * The SED algorithm attempts to minimize each job's expected delay until * completion. The expected delay that the job will experience is * (Ci + 1) / Ui if sent to the ith server, in which Ci is the number of - * jobs on the the ith server and Ui is the fixed service rate (weight) of + * jobs on the ith server and Ui is the fixed service rate (weight) of * the ith server. The SED algorithm adopts a greedy policy that each does * what is in its own best interest, i.e. to join the queue which would * minimize its expected delay of completion. 
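The selection rule described in the ip_vs_sed.c comment above can be written out as a short sketch. It is illustrative only: sed_pick_server() and its conns[]/weight[] parameters are hypothetical names, not taken from the IPVS code.

/*
 * Illustrative sketch of the SED rule quoted above: choose the server i
 * that minimizes (Ci + 1) / Ui, where conns[i] is the number of active
 * jobs and weight[i] the fixed service rate (weight) of server i.
 * The comparison uses cross-multiplication to avoid division.
 */
static int sed_pick_server(const int *conns, const int *weight, int n)
{
	int best = -1, i;

	for (i = 0; i < n; i++) {
		if (weight[i] <= 0)
			continue;	/* server unusable */
		if (best < 0 ||
		    (conns[i] + 1) * weight[best] <
		    (conns[best] + 1) * weight[i])
			best = i;
	}
	return best;
}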
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 113e0c4c8a92..66026df1cc76 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -983,7 +983,7 @@ int udp_disconnect(struct sock *sk, int flags) } /* return: - * 1 if the the UDP system should process it + * 1 if the UDP system should process it * 0 if we should drop this packet * -1 if it should get processed by xfrm4_rcv_encap */ diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 7d9fa38b6a7d..6b8a103cf9e6 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -324,7 +324,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) memset(&laddr, 0, sizeof(laddr)); memset(&daddr, 0, sizeof(daddr)); /* - * FIXME: check if the the address is multicast, + * FIXME: check if the address is multicast, * only SOCK_DGRAM can do this. */ memcpy(laddr.mac, addr->sllc_mac, IFHWADDRLEN); diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index c31af29a4439..117cbfdb910c 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -177,7 +177,7 @@ void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp) struct nf_conntrack_expect *i; write_lock_bh(&nf_conntrack_lock); - /* choose the the oldest expectation to evict */ + /* choose the oldest expectation to evict */ list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { if (expect_matches(i, exp) && del_timer(&i->timeout)) { nf_ct_unlink_expect(i); diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 83ef411772f4..77fb7b06a9c4 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -3,7 +3,7 @@ * * This file is part of the SCTP kernel reference Implementation * - * This file contains the code relating the the chunk abstraction. + * This file contains the code relating the chunk abstraction. * * The SCTP reference implementation is free software; * you can redistribute it and/or modify it under the terms of diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9f1a908776de..83a76ba9d7b3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2586,7 +2586,7 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int opt * * 7.1.2 SCTP_ASSOCINFO * - * This option is used to tune the the maximum retransmission attempts + * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. @@ -4547,7 +4547,7 @@ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, * * 7.1.2 SCTP_ASSOCINFO * - * This option is used to tune the the maximum retransmission attempts + * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index f5c3808bf85a..af7c5f05c6e1 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c @@ -64,7 +64,7 @@ int svc_set_client(struct svc_rqst *rqstp) } /* A request, which was authenticated, has now executed. 
- * Time to finalise the the credentials and verifier + * Time to finalise the credentials and verifier * and release and resources */ int svc_authorise(struct svc_rqst *rqstp) diff --git a/scripts/basic/docproc.c b/scripts/basic/docproc.c index d6071cbf13d7..f4d2f68452ba 100644 --- a/scripts/basic/docproc.c +++ b/scripts/basic/docproc.c @@ -211,7 +211,7 @@ void find_export_symbols(char * filename) * Document all external or internal functions in a file. * Call kernel-doc with following parameters: * kernel-doc -docbook -nofunction function_name1 filename - * function names are obtained from all the the src files + * function names are obtained from all the src files * by find_export_symbols. * intfunc uses -nofunction * extfunc uses -function diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index a9eec2a2357d..3bfb2102fc5d 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c @@ -1083,7 +1083,7 @@ static void check_volume_resolution(struct snd_ac97 *ac97, int reg, unsigned cha unsigned short val; snd_ac97_write(ac97, reg, 0x8080 | cbit[i] | (cbit[i] << 8)); /* Do the read twice due to buffers on some ac97 codecs. - * e.g. The STAC9704 returns exactly what you wrote the the register + * e.g. The STAC9704 returns exactly what you wrote to the register * if you read it immediately. This causes the detect routine to fail. */ val = snd_ac97_read(ac97, reg); -- cgit v1.2.3 From 5886269962f94fa9185c32db3ec936c612503235 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Wed, 9 May 2007 07:51:49 +0200 Subject: fix file specification in comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Many files include the filename at the beginning, serveral used a wrong one. Signed-off-by: Uwe Kleine-König Signed-off-by: Adrian Bunk --- arch/arm/mach-s3c2410/sleep.S | 2 +- arch/arm/plat-s3c24xx/sleep.S | 2 +- arch/i386/kernel/cpu/mcheck/therm_throt.c | 2 +- arch/powerpc/platforms/8xx/mpc86xads_setup.c | 2 +- arch/powerpc/platforms/8xx/mpc885ads_setup.c | 2 +- arch/ppc/platforms/mpc866ads_setup.c | 2 +- arch/ppc/syslib/ipic.c | 2 +- arch/sh/boards/se/7751/setup.c | 2 +- arch/sh/kernel/cpu/sh3/entry.S | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh73180.c | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7343.c | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7770.c | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7780.c | 2 +- arch/sh/kernel/vsyscall/vsyscall.c | 2 +- arch/um/include/sysdep-i386/archsetjmp.h | 2 +- arch/um/include/sysdep-x86_64/archsetjmp.h | 2 +- arch/xtensa/kernel/pci-dma.c | 2 +- drivers/leds/leds-h1940.c | 2 +- drivers/video/console/softcursor.c | 2 +- fs/jbd/checkpoint.c | 2 +- fs/jbd/recovery.c | 2 +- fs/jbd/revoke.c | 2 +- fs/jbd/transaction.c | 2 +- fs/jbd2/checkpoint.c | 2 +- fs/jbd2/recovery.c | 2 +- fs/jbd2/revoke.c | 2 +- fs/jbd2/transaction.c | 2 +- include/asm-arm/arch-iop32x/glantank.h | 2 +- include/asm-arm/arch-iop32x/n2100.h | 2 +- include/asm-arm/arch-s3c2410/regs-power.h | 2 +- include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h | 2 +- include/asm-arm/arch-s3c2410/regs-watchdog.h | 2 +- include/asm-arm/arch-s3c2410/udc.h | 2 +- include/asm-sh/edosk7705.h | 2 +- include/asm-sh/snapgear.h | 2 +- include/asm-xtensa/platform-iss/simcall.h | 2 +- include/linux/generic_acl.h | 2 +- sound/soc/pxa/pxa2xx-ac97.h | 2 +- sound/soc/pxa/pxa2xx-i2s.h | 2 +- 39 files changed, 39 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S index 
637aaba65390..d1eeed2ad47c 100644 --- a/arch/arm/mach-s3c2410/sleep.S +++ b/arch/arm/mach-s3c2410/sleep.S @@ -1,4 +1,4 @@ -/* linux/arch/arm/mach-s3c2410/s3c2410-sleep.S +/* linux/arch/arm/mach-s3c2410/sleep.S * * Copyright (c) 2004 Simtec Electronics * Ben Dooks diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S index 435349dc3243..7b7ae790b00d 100644 --- a/arch/arm/plat-s3c24xx/sleep.S +++ b/arch/arm/plat-s3c24xx/sleep.S @@ -1,4 +1,4 @@ -/* linux/arch/arm/mach-s3c2410/sleep.S +/* linux/arch/arm/plat-s3c24xx/sleep.S * * Copyright (c) 2004 Simtec Electronics * Ben Dooks diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c index 065005c3f168..2f28540caae2 100644 --- a/arch/i386/kernel/cpu/mcheck/therm_throt.c +++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c @@ -1,5 +1,5 @@ /* - * linux/arch/i386/kerne/cpu/mcheck/therm_throt.c + * linux/arch/i386/kernel/cpu/mcheck/therm_throt.c * * Thermal throttle event support code (such as syslog messaging and rate * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c). diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c index a35315af5c53..cf0e7bc8c2e7 100644 --- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c @@ -1,4 +1,4 @@ -/*arch/ppc/platforms/mpc86xads-setup.c +/*arch/powerpc/platforms/8xx/mpc86xads_setup.c * * Platform setup for the Freescale mpc86xads board * diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c index a57b57785acd..c36e475d93dc 100644 --- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c @@ -1,4 +1,4 @@ -/*arch/ppc/platforms/mpc885ads-setup.c +/*arch/powerpc/platforms/8xx/mpc885ads_setup.c * * Platform setup for the Freescale mpc885ads board * diff --git a/arch/ppc/platforms/mpc866ads_setup.c b/arch/ppc/platforms/mpc866ads_setup.c index 7ce5364fdb3b..bf72204125c5 100644 --- a/arch/ppc/platforms/mpc866ads_setup.c +++ b/arch/ppc/platforms/mpc866ads_setup.c @@ -1,4 +1,4 @@ -/*arch/ppc/platforms/mpc866ads-setup.c +/*arch/ppc/platforms/mpc866ads_setup.c * * Platform setup for the Freescale mpc866ads board * diff --git a/arch/ppc/syslib/ipic.c b/arch/ppc/syslib/ipic.c index 10659c24b1be..9192777d0f78 100644 --- a/arch/ppc/syslib/ipic.c +++ b/arch/ppc/syslib/ipic.c @@ -1,5 +1,5 @@ /* - * include/asm-ppc/ipic.c + * arch/ppc/syslib/ipic.c * * IPIC routines implementations. 
* diff --git a/arch/sh/boards/se/7751/setup.c b/arch/sh/boards/se/7751/setup.c index 770defed9c4a..52c7bfa57c2c 100644 --- a/arch/sh/boards/se/7751/setup.c +++ b/arch/sh/boards/se/7751/setup.c @@ -1,5 +1,5 @@ /* - * linux/arch/sh/kernel/setup_7751se.c + * linux/arch/sh/boards/se/7751/setup.c * * Copyright (C) 2000 Kazumoto Kojima * diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index f3e827f29a46..832c0b4a1e6c 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -1,5 +1,5 @@ /* - * arch/sh/kernel/entry.S + * arch/sh/kernel/cpu/sh3/entry.S * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka * Copyright (C) 2003 - 2006 Paul Mundt diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh73180.c b/arch/sh/kernel/cpu/sh4a/clock-sh73180.c index 2fa5cb2ae68d..6d5ba373a75e 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh73180.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh73180.c @@ -1,5 +1,5 @@ /* - * arch/sh/kernel/cpu/sh4/clock-sh73180.c + * arch/sh/kernel/cpu/sh4a/clock-sh73180.c * * SH73180 support for the clock framework * diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c index 1707a213f0cf..7adc4f16e95a 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c @@ -1,5 +1,5 @@ /* - * arch/sh/kernel/cpu/sh4/clock-sh7343.c + * arch/sh/kernel/cpu/sh4a/clock-sh7343.c * * SH7343/SH7722 support for the clock framework * diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c index c8694bac6477..8e236062c721 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c @@ -1,5 +1,5 @@ /* - * arch/sh/kernel/cpu/sh4/clock-sh7770.c + * arch/sh/kernel/cpu/sh4a/clock-sh7770.c * * SH7770 support for the clock framework * diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c index 9e6a216750c8..01f3da619d3d 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c @@ -1,5 +1,5 @@ /* - * arch/sh/kernel/cpu/sh4/clock-sh7780.c + * arch/sh/kernel/cpu/sh4a/clock-sh7780.c * * SH7780 support for the clock framework * diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c index 7b0f66f03319..e146bafcd14f 100644 --- a/arch/sh/kernel/vsyscall/vsyscall.c +++ b/arch/sh/kernel/vsyscall/vsyscall.c @@ -1,5 +1,5 @@ /* - * arch/sh/kernel/vsyscall.c + * arch/sh/kernel/vsyscall/vsyscall.c * * Copyright (C) 2006 Paul Mundt * diff --git a/arch/um/include/sysdep-i386/archsetjmp.h b/arch/um/include/sysdep-i386/archsetjmp.h index 11bafab669e9..0f312085ce1d 100644 --- a/arch/um/include/sysdep-i386/archsetjmp.h +++ b/arch/um/include/sysdep-i386/archsetjmp.h @@ -1,5 +1,5 @@ /* - * arch/i386/include/klibc/archsetjmp.h + * arch/um/include/sysdep-i386/archsetjmp.h */ #ifndef _KLIBC_ARCHSETJMP_H diff --git a/arch/um/include/sysdep-x86_64/archsetjmp.h b/arch/um/include/sysdep-x86_64/archsetjmp.h index 9a5e1a6ec800..2af8f12ca161 100644 --- a/arch/um/include/sysdep-x86_64/archsetjmp.h +++ b/arch/um/include/sysdep-x86_64/archsetjmp.h @@ -1,5 +1,5 @@ /* - * arch/x86_64/include/klibc/archsetjmp.h + * arch/um/include/sysdep-x86_64/archsetjmp.h */ #ifndef _KLIBC_ARCHSETJMP_H diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index ca76f071666e..f5319d78c876 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -1,5 +1,5 @@ /* - * arch/xtensa/pci-dma.c + * arch/xtensa/kernel/pci-dma.c * * DMA coherent 
memory allocation. * diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c index 1d49d2ade557..677c99325be5 100644 --- a/drivers/leds/leds-h1940.c +++ b/drivers/leds/leds-h1940.c @@ -1,5 +1,5 @@ /* - * drivers/leds/h1940-leds.c + * drivers/leds/leds-h1940.c * Copyright (c) Arnaud Patard * * This file is subject to the terms and conditions of the GNU General Public diff --git a/drivers/video/console/softcursor.c b/drivers/video/console/softcursor.c index f577bd80e020..03cfb7ac5733 100644 --- a/drivers/video/console/softcursor.c +++ b/drivers/video/console/softcursor.c @@ -1,5 +1,5 @@ /* - * linux/drivers/video/softcursor.c + * linux/drivers/video/console/softcursor.c * * Generic software cursor for frame buffer devices * diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 0208cc7ac5d0..47552d4a6324 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -1,5 +1,5 @@ /* - * linux/fs/checkpoint.c + * linux/fs/jbd/checkpoint.c * * Written by Stephen C. Tweedie , 1999 * diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c index 11563fe2a52b..2a5f4b833e35 100644 --- a/fs/jbd/recovery.c +++ b/fs/jbd/recovery.c @@ -1,5 +1,5 @@ /* - * linux/fs/recovery.c + * linux/fs/jbd/recovery.c * * Written by Stephen C. Tweedie , 1999 * diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c index a68cbb605022..824e3b7d4ec1 100644 --- a/fs/jbd/revoke.c +++ b/fs/jbd/revoke.c @@ -1,5 +1,5 @@ /* - * linux/fs/revoke.c + * linux/fs/jbd/revoke.c * * Written by Stephen C. Tweedie , 2000 * diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index f9822fc07851..772b6531a2a2 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -1,5 +1,5 @@ /* - * linux/fs/transaction.c + * linux/fs/jbd/transaction.c * * Written by Stephen C. Tweedie , 1998 * diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 68039fa9a566..3fccde7ba008 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -1,5 +1,5 @@ /* - * linux/fs/checkpoint.c + * linux/fs/jbd2/checkpoint.c * * Written by Stephen C. Tweedie , 1999 * diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 9f10acafaf70..395c92a04ac9 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -1,5 +1,5 @@ /* - * linux/fs/recovery.c + * linux/fs/jbd2/recovery.c * * Written by Stephen C. Tweedie , 1999 * diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index 1e864dcc49ea..9246e763da78 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c @@ -1,5 +1,5 @@ /* - * linux/fs/revoke.c + * linux/fs/jbd2/revoke.c * * Written by Stephen C. Tweedie , 2000 * diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index e347d8c078bc..7946ff43fc40 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1,5 +1,5 @@ /* - * linux/fs/transaction.c + * linux/fs/jbd2/transaction.c * * Written by Stephen C. 
Tweedie , 1998 * diff --git a/include/asm-arm/arch-iop32x/glantank.h b/include/asm-arm/arch-iop32x/glantank.h index 3b065618dd00..bf0665acc1c1 100644 --- a/include/asm-arm/arch-iop32x/glantank.h +++ b/include/asm-arm/arch-iop32x/glantank.h @@ -1,5 +1,5 @@ /* - * include/asm/arch-iop32x/glantank.h + * include/asm-arm/arch-iop32x/glantank.h * * IO-Data GLAN Tank board registers */ diff --git a/include/asm-arm/arch-iop32x/n2100.h b/include/asm-arm/arch-iop32x/n2100.h index fed31a648425..77a8af476629 100644 --- a/include/asm-arm/arch-iop32x/n2100.h +++ b/include/asm-arm/arch-iop32x/n2100.h @@ -1,5 +1,5 @@ /* - * include/asm/arch-iop32x/n2100.h + * include/asm-arm/arch-iop32x/n2100.h * * Thecus N2100 board registers */ diff --git a/include/asm-arm/arch-s3c2410/regs-power.h b/include/asm-arm/arch-s3c2410/regs-power.h index 6c319ea2afac..94ff96505b6a 100644 --- a/include/asm-arm/arch-s3c2410/regs-power.h +++ b/include/asm-arm/arch-s3c2410/regs-power.h @@ -1,4 +1,4 @@ -/* linux/include/asm/arch-s3c2410/regs-power.h +/* linux/include/asm-arm/arch-s3c2410/regs-power.h * * Copyright (c) 2003,2004,2005,2006 Simtec Electronics * http://armlinux.simtec.co.uk/ diff --git a/include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h b/include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h index ff0536d2de42..cd9e26568f85 100644 --- a/include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h +++ b/include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h @@ -1,4 +1,4 @@ -/* linux/include/asm-arm/arch-s3c2410/regs-clock.h +/* linux/include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h * * Copyright (c) 2007 Simtec Electronics * Ben Dooks diff --git a/include/asm-arm/arch-s3c2410/regs-watchdog.h b/include/asm-arm/arch-s3c2410/regs-watchdog.h index f4fff448c7bd..a9c5d491bdb6 100644 --- a/include/asm-arm/arch-s3c2410/regs-watchdog.h +++ b/include/asm-arm/arch-s3c2410/regs-watchdog.h @@ -1,4 +1,4 @@ -/* linux/include/asm/arch-s3c2410/regs-watchdog.h +/* linux/include/asm-arm/arch-s3c2410/regs-watchdog.h * * Copyright (c) 2003 Simtec Electronics * http://www.simtec.co.uk/products/SWLINUX/ diff --git a/include/asm-arm/arch-s3c2410/udc.h b/include/asm-arm/arch-s3c2410/udc.h index e59ec339d614..b8aa6cb69b58 100644 --- a/include/asm-arm/arch-s3c2410/udc.h +++ b/include/asm-arm/arch-s3c2410/udc.h @@ -1,4 +1,4 @@ -/* linux/include/asm/arch-s3c2410/udc.h +/* linux/include/asm-arm/arch-s3c2410/udc.h * * Copyright (c) 2005 Arnaud Patard * diff --git a/include/asm-sh/edosk7705.h b/include/asm-sh/edosk7705.h index a1089a65bc36..5bdc9d9be3de 100644 --- a/include/asm-sh/edosk7705.h +++ b/include/asm-sh/edosk7705.h @@ -1,5 +1,5 @@ /* - * include/asm-sh/edosk7705/io.h + * include/asm-sh/edosk7705.h * * Modified version of io_se.h for the EDOSK7705 specific functions. * diff --git a/include/asm-sh/snapgear.h b/include/asm-sh/snapgear.h index 6b5e4ddc073a..2d712e72c9e5 100644 --- a/include/asm-sh/snapgear.h +++ b/include/asm-sh/snapgear.h @@ -1,5 +1,5 @@ /* - * include/asm-sh/snapgear/io.h + * include/asm-sh/snapgear.h * * Modified version of io_se.h for the snapgear-specific functions. * diff --git a/include/asm-xtensa/platform-iss/simcall.h b/include/asm-xtensa/platform-iss/simcall.h index 6acb572759a6..b7952c06a2b7 100644 --- a/include/asm-xtensa/platform-iss/simcall.h +++ b/include/asm-xtensa/platform-iss/simcall.h @@ -1,5 +1,5 @@ /* - * include/asm-xtensa/platform-iss/hardware.h + * include/asm-xtensa/platform-iss/simcall.h * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h index 80764f40be75..886f5faa08cb 100644 --- a/include/linux/generic_acl.h +++ b/include/linux/generic_acl.h @@ -1,5 +1,5 @@ /* - * fs/generic_acl.c + * include/linux/generic_acl.h * * (C) 2005 Andreas Gruenbacher * diff --git a/sound/soc/pxa/pxa2xx-ac97.h b/sound/soc/pxa/pxa2xx-ac97.h index 4c4b882316ac..b8ccfee095c4 100644 --- a/sound/soc/pxa/pxa2xx-ac97.h +++ b/sound/soc/pxa/pxa2xx-ac97.h @@ -1,5 +1,5 @@ /* - * linux/sound/arm/pxa2xx-ac97.h + * linux/sound/soc/pxa/pxa2xx-ac97.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/sound/soc/pxa/pxa2xx-i2s.h b/sound/soc/pxa/pxa2xx-i2s.h index a2484f0881f1..4435bd9f884f 100644 --- a/sound/soc/pxa/pxa2xx-i2s.h +++ b/sound/soc/pxa/pxa2xx-i2s.h @@ -1,5 +1,5 @@ /* - * linux/sound/arm/pxa2xx-i2s.h + * linux/sound/soc/pxa/pxa2xx-i2s.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as -- cgit v1.2.3 From 121e70b69aef898a3c02fa90d0a2108381bcf975 Mon Sep 17 00:00:00 2001 From: John Anthony Kazos Jr Date: Wed, 9 May 2007 08:30:57 +0200 Subject: include files: convert "include" subdirectory to UTF-8 Convert the "include" subdirectory to UTF-8. Signed-off-by: John Anthony Kazos Jr. Signed-off-by: Adrian Bunk --- include/asm-arm/arch-imx/imx-regs.h | 2 +- include/asm-arm/arch-integrator/platform.h | 2 +- include/asm-arm/arch-omap/board.h | 2 +- include/asm-arm/arch-omap/dma.h | 2 +- include/asm-arm/arch-omap/gpio.h | 2 +- include/asm-m68k/atarihw.h | 2 +- include/asm-m68k/atariints.h | 2 +- include/asm-ppc/hydra.h | 2 +- include/linux/i2c-algo-bit.h | 2 +- include/linux/i2c-algo-pcf.h | 2 +- include/linux/irda.h | 2 +- include/linux/meye.h | 2 +- include/linux/sonypi.h | 2 +- include/net/irda/af_irda.h | 2 +- include/net/irda/irda.h | 2 +- include/net/irda/iriap.h | 2 +- include/net/irda/iriap_event.h | 2 +- include/net/irda/irias_object.h | 2 +- include/net/irda/irlan_client.h | 2 +- include/net/irda/irlan_common.h | 2 +- include/net/irda/irlan_eth.h | 2 +- include/net/irda/irlan_event.h | 2 +- include/net/irda/irlan_filter.h | 2 +- include/net/irda/irlan_provider.h | 2 +- include/net/irda/irlap.h | 2 +- include/net/irda/irlmp.h | 2 +- include/net/irda/irlmp_event.h | 2 +- include/net/irda/irlmp_frame.h | 2 +- include/net/irda/irmod.h | 2 +- include/net/irda/irqueue.h | 2 +- include/net/irda/irttp.h | 2 +- include/net/irda/parameters.h | 2 +- include/net/irda/timer.h | 2 +- include/net/irda/wrapper.h | 2 +- 34 files changed, 34 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/include/asm-arm/arch-imx/imx-regs.h b/include/asm-arm/arch-imx/imx-regs.h index de6494a4dc6b..30de404c61f5 100644 --- a/include/asm-arm/arch-imx/imx-regs.h +++ b/include/asm-arm/arch-imx/imx-regs.h @@ -297,7 +297,7 @@ #define SAR(x) __REG2( IMX_DMAC_BASE + 0x80, (x) << 6) /* Source Address Registers */ #define DAR(x) __REG2( IMX_DMAC_BASE + 0x84, (x) << 6) /* Destination Address Registers */ #define CNTR(x) __REG2( IMX_DMAC_BASE + 0x88, (x) << 6) /* Count Registers */ -#define CCR(x) __REG2( IMX_DMAC_BASE + 0x8c, (x) << 6) /* Control Registers */ +#define CCR(x) __REG2( IMX_DMAC_BASE + 0x8c, (x) << 6) /* Control Registers */ #define RSSR(x) __REG2( IMX_DMAC_BASE + 0x90, (x) << 6) /* Request source select Registers */ #define BLR(x) __REG2( 
IMX_DMAC_BASE + 0x94, (x) << 6) /* Burst length Registers */ #define RTOR(x) __REG2( IMX_DMAC_BASE + 0x98, (x) << 6) /* Request timeout Registers */ diff --git a/include/asm-arm/arch-integrator/platform.h b/include/asm-arm/arch-integrator/platform.h index 96ad3d2a66d1..83c4c1ceb411 100644 --- a/include/asm-arm/arch-integrator/platform.h +++ b/include/asm-arm/arch-integrator/platform.h @@ -17,7 +17,7 @@ * from .s file by awk -f s2h.awk */ /************************************************************************** - * * Copyright © ARM Limited 1998. All rights reserved. + * * Copyright © ARM Limited 1998. All rights reserved. * ***********************************************************************/ /* ************************************************************************ * diff --git a/include/asm-arm/arch-omap/board.h b/include/asm-arm/arch-omap/board.h index edf1dc6ad919..4fd717e626a2 100644 --- a/include/asm-arm/arch-omap/board.h +++ b/include/asm-arm/arch-omap/board.h @@ -4,7 +4,7 @@ * Information structures for board-specific data * * Copyright (C) 2004 Nokia Corporation - * Written by Juha Yrjölä + * Written by Juha Yrjölä */ #ifndef _OMAP_BOARD_H diff --git a/include/asm-arm/arch-omap/dma.h b/include/asm-arm/arch-omap/dma.h index d591d0585bba..f7774192a41e 100644 --- a/include/asm-arm/arch-omap/dma.h +++ b/include/asm-arm/arch-omap/dma.h @@ -2,7 +2,7 @@ * linux/include/asm-arm/arch-omap/dma.h * * Copyright (C) 2003 Nokia Corporation - * Author: Juha Yrjölä + * Author: Juha Yrjölä * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/include/asm-arm/arch-omap/gpio.h b/include/asm-arm/arch-omap/gpio.h index 590917efc94a..97b397dd7e87 100644 --- a/include/asm-arm/arch-omap/gpio.h +++ b/include/asm-arm/arch-omap/gpio.h @@ -5,7 +5,7 @@ * * Copyright (C) 2003-2005 Nokia Corporation * - * Written by Juha Yrjölä + * Written by Juha Yrjölä * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/include/asm-m68k/atarihw.h b/include/asm-m68k/atarihw.h index f28acd0fd689..6211363a345f 100644 --- a/include/asm-m68k/atarihw.h +++ b/include/asm-m68k/atarihw.h @@ -2,7 +2,7 @@ ** linux/atarihw.h -- This header defines some macros and pointers for ** the various Atari custom hardware registers. ** -** Copyright 1994 by Bj”rn Brauel +** Copyright 1994 by Bj”rn Brauel ** ** 5/1/94 Roman Hodek: ** Added definitions for TT specific chips. diff --git a/include/asm-m68k/atariints.h b/include/asm-m68k/atariints.h index 0ed454fc24bb..ce6c445805bd 100644 --- a/include/asm-m68k/atariints.h +++ b/include/asm-m68k/atariints.h @@ -1,7 +1,7 @@ /* ** atariints.h -- Atari Linux interrupt handling structs and prototypes ** -** Copyright 1994 by Bj”rn Brauel +** Copyright 1994 by Bj”rn Brauel ** ** 5/2/94 Roman Hodek: ** TT interrupt definitions added. diff --git a/include/asm-ppc/hydra.h b/include/asm-ppc/hydra.h index 833a8aff2a80..1ad4eed07fbe 100644 --- a/include/asm-ppc/hydra.h +++ b/include/asm-ppc/hydra.h @@ -8,7 +8,7 @@ * Macintosh Technology in the Common Hardware Reference Platform * Apple Computer, Inc. * - * © Copyright 1995 Apple Computer, Inc. All rights reserved. + * © Copyright 1995 Apple Computer, Inc. All rights reserved. * * It's available online from http://chrp.apple.com/MacTech.pdf. 
* You can obtain paper copies of this book from computer bookstores or by diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h index 9ee0f800592f..111334f5b922 100644 --- a/include/linux/i2c-algo-bit.h +++ b/include/linux/i2c-algo-bit.h @@ -18,7 +18,7 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* ------------------------------------------------------------------------- */ -/* With some changes from Kyösti Mälkki and even +/* With some changes from Kyösti Mälkki and even Frodo Looijaard */ #ifndef _LINUX_I2C_ALGO_BIT_H diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h index 994eb86f882c..77afbb60fd11 100644 --- a/include/linux/i2c-algo-pcf.h +++ b/include/linux/i2c-algo-pcf.h @@ -19,7 +19,7 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* ------------------------------------------------------------------------- */ -/* With some changes from Kyösti Mälkki and even +/* With some changes from Kyösti Mälkki and even Frodo Looijaard */ #ifndef _LINUX_I2C_ALGO_PCF_H diff --git a/include/linux/irda.h b/include/linux/irda.h index 09d8f105a5a8..945ba3110874 100644 --- a/include/linux/irda.h +++ b/include/linux/irda.h @@ -16,7 +16,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/linux/meye.h b/include/linux/meye.h index 11ec45e9a132..39fd9c8ddd4b 100644 --- a/include/linux/meye.h +++ b/include/linux/meye.h @@ -3,7 +3,7 @@ * * Copyright (C) 2001-2003 Stelian Pop * - * Copyright (C) 2001-2002 Alcôve + * Copyright (C) 2001-2002 Alcôve * * Copyright (C) 2000 Andrew Tridgell * diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h index f56d24734950..34d4b075f7b8 100644 --- a/include/linux/sonypi.h +++ b/include/linux/sonypi.h @@ -5,7 +5,7 @@ * * Copyright (C) 2005 Narayanan R S - * Copyright (C) 2001-2002 Alcôve + * Copyright (C) 2001-2002 Alcôve * * Copyright (C) 2001 Michael Ashley * diff --git a/include/net/irda/af_irda.h b/include/net/irda/af_irda.h index 7a209f61c482..0df574931522 100644 --- a/include/net/irda/af_irda.h +++ b/include/net/irda/af_irda.h @@ -17,7 +17,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h index 89fe534045f1..36bee441aa56 100644 --- a/include/net/irda/irda.h +++ b/include/net/irda/irda.h @@ -17,7 +17,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. 
* diff --git a/include/net/irda/iriap.h b/include/net/irda/iriap.h index 2007c5a0a43f..fcc896491a95 100644 --- a/include/net/irda/iriap.h +++ b/include/net/irda/iriap.h @@ -17,7 +17,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/iriap_event.h b/include/net/irda/iriap_event.h index 4ca3d2071b03..89747f06d9eb 100644 --- a/include/net/irda/iriap_event.h +++ b/include/net/irda/iriap_event.h @@ -16,7 +16,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irias_object.h b/include/net/irda/irias_object.h index c41196b87955..83f78081799c 100644 --- a/include/net/irda/irias_object.h +++ b/include/net/irda/irias_object.h @@ -16,7 +16,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irlan_client.h b/include/net/irda/irlan_client.h index 736dabe211e3..fa8455eda280 100644 --- a/include/net/irda/irlan_client.h +++ b/include/net/irda/irlan_client.h @@ -16,7 +16,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h index 9592c374b41d..73cacb3ac16c 100644 --- a/include/net/irda/irlan_common.h +++ b/include/net/irda/irlan_common.h @@ -17,7 +17,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irlan_eth.h b/include/net/irda/irlan_eth.h index 9a9b3619d305..0062347600b9 100644 --- a/include/net/irda/irlan_eth.h +++ b/include/net/irda/irlan_eth.h @@ -16,7 +16,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. 
* diff --git a/include/net/irda/irlan_event.h b/include/net/irda/irlan_event.h index b9baac9eb8b6..6d9539f05806 100644 --- a/include/net/irda/irlan_event.h +++ b/include/net/irda/irlan_event.h @@ -16,7 +16,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irlan_filter.h b/include/net/irda/irlan_filter.h index 1720539ac2c1..a5a2539485bd 100644 --- a/include/net/irda/irlan_filter.h +++ b/include/net/irda/irlan_filter.h @@ -16,7 +16,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irlan_provider.h b/include/net/irda/irlan_provider.h index ca51d5b7c999..92f3b0e1029b 100644 --- a/include/net/irda/irlan_provider.h +++ b/include/net/irda/irlan_provider.h @@ -16,7 +16,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h index e77eb88d9226..f0248fb8e196 100644 --- a/include/net/irda/irlap.h +++ b/include/net/irda/irlap.h @@ -18,7 +18,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h index e212b9bc2503..3ffc1d0f93d6 100644 --- a/include/net/irda/irlmp.h +++ b/include/net/irda/irlmp.h @@ -18,7 +18,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irlmp_event.h b/include/net/irda/irlmp_event.h index 03c6f81a502a..e03ae4ae3963 100644 --- a/include/net/irda/irlmp_event.h +++ b/include/net/irda/irlmp_event.h @@ -18,7 +18,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. 
* diff --git a/include/net/irda/irlmp_frame.h b/include/net/irda/irlmp_frame.h index c463f8bca856..1906eb71422e 100644 --- a/include/net/irda/irlmp_frame.h +++ b/include/net/irda/irlmp_frame.h @@ -17,7 +17,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irmod.h b/include/net/irda/irmod.h index 72b446c1e22c..86f0dbb8ee5d 100644 --- a/include/net/irda/irmod.h +++ b/include/net/irda/irmod.h @@ -17,7 +17,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charg. * diff --git a/include/net/irda/irqueue.h b/include/net/irda/irqueue.h index 335b0ace9665..37f512bd6733 100644 --- a/include/net/irda/irqueue.h +++ b/include/net/irda/irqueue.h @@ -21,7 +21,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/irttp.h b/include/net/irda/irttp.h index a899e5837be8..cf80c1af5854 100644 --- a/include/net/irda/irttp.h +++ b/include/net/irda/irttp.h @@ -18,7 +18,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h index 3a605d37ddbf..c0d938847bd3 100644 --- a/include/net/irda/parameters.h +++ b/include/net/irda/parameters.h @@ -26,7 +26,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * - * Michel Dänzer , 10/2001 + * Michel Dänzer , 10/2001 * - simplify irda_pv_t to avoid endianness issues * ********************************************************************/ diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h index cb61568547d1..cb2615ccf761 100644 --- a/include/net/irda/timer.h +++ b/include/net/irda/timer.h @@ -18,7 +18,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * diff --git a/include/net/irda/wrapper.h b/include/net/irda/wrapper.h index 98768b3f9e31..2942ad6ab932 100644 --- a/include/net/irda/wrapper.h +++ b/include/net/irda/wrapper.h @@ -17,7 +17,7 @@ * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. 
* - * Neither Dag Brattli nor University of Tromsø admit liability nor + * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * -- cgit v1.2.3 From 36200b76008d52d16b170d4f7dae9cfe00f5eb2b Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Thu, 3 May 2007 15:58:49 -0400 Subject: [MTD] Remove unnecessary user space check from mtd.h. Since the header file include/linux/mtd/mtd.h is not exported to user space, remove the user space check and error. Signed-off-by: Robert P. J. Day Signed-off-by: David Woodhouse --- include/linux/mtd/mtd.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 45d482ce8397..12a9a18f6e16 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -9,10 +9,6 @@ #ifndef __MTD_MTD_H__ #define __MTD_MTD_H__ -#ifndef __KERNEL__ -#error This is a kernel header. Perhaps include mtd-user.h instead? -#endif - #include #include #include -- cgit v1.2.3 From 42f209d3c94516affeb5e578fae62925f531a2d9 Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Fri, 4 May 2007 15:49:38 -0400 Subject: [MTD] Delete allegedly obsolete "bank_size" field of mtd_info. Delete the allegedly obsolete "bank_size" member of struct mtd_info. Signed-off-by: Robert P. J. Day Signed-off-by: David Woodhouse --- drivers/mtd/mtdpart.c | 1 - include/linux/mtd/mtd.h | 3 --- 2 files changed, 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 1af989023c66..9c6236852942 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -347,7 +347,6 @@ int add_mtd_partitions(struct mtd_info *master, slave->mtd.subpage_sft = master->subpage_sft; slave->mtd.name = parts[i].name; - slave->mtd.bank_size = master->bank_size; slave->mtd.owner = master->owner; slave->mtd.read = part_read; diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 12a9a18f6e16..fd64ccfbce02 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -133,9 +133,6 @@ struct mtd_info { int numeraseregions; struct mtd_erase_region_info *eraseregions; - /* This really shouldn't be here. It can go away in 2.5 */ - u_int32_t bank_size; - int (*erase) (struct mtd_info *mtd, struct erase_info *instr); /* This stuff for eXecute-In-Place */ -- cgit v1.2.3 From 97416ce82e20a9511ec369822098a8d20998398a Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 9 May 2007 02:32:35 -0700 Subject: Declare {compat_}sys_utimensat This is needed before Powerpc can wire up the syscall. 
Signed-off-by: Stephen Rothwell Cc: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compat.h | 3 +++ include/linux/syscalls.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/compat.h b/include/linux/compat.h index ccd863dd77fa..70a157a130bb 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -253,5 +253,8 @@ asmlinkage long compat_sys_epoll_pwait(int epfd, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize); +asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename, + struct compat_timespec __user *t, int flags); + #endif /* CONFIG_COMPAT */ #endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 1912c6cbef55..3139f4412297 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -576,6 +576,8 @@ asmlinkage long sys_fstatat64(int dfd, char __user *filename, struct stat64 __user *statbuf, int flag); asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf, int bufsiz); +asmlinkage long sys_utimensat(int dfd, char __user *filename, + struct timespec __user *utimes, int flags); asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename, struct compat_timeval __user *t); asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename, -- cgit v1.2.3 From a3d25c275d383975504dc53c25b691df59bd3c48 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 9 May 2007 02:33:18 -0700 Subject: PM: Separate hibernation code from suspend code [ With Johannes Berg ] Separate the hibernation (aka suspend to disk code) from the other suspend code. In particular: * Remove the definitions related to hibernation from include/linux/pm.h * Introduce struct hibernation_ops and a new hibernate() function to hibernate the system, defined in include/linux/suspend.h * Separate suspend code in kernel/power/main.c from hibernation-related code in kernel/power/disk.c and kernel/power/user.c (with the help of hibernation_ops) * Switch ACPI (the only user of pm_ops.pm_disk_mode) to hibernation_ops Signed-off-by: Rafael J. 
Wysocki Cc: Greg KH Cc: Pavel Machek Cc: Nigel Cunningham Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/power/userland-swsusp.txt | 26 +++-- drivers/acpi/sleep/main.c | 67 ++++++++--- drivers/acpi/sleep/proc.c | 2 +- drivers/i2c/chips/tps65010.c | 2 +- include/linux/pm.h | 31 +---- include/linux/suspend.h | 24 ++++ kernel/power/disk.c | 195 ++++++++++++++++++-------------- kernel/power/main.c | 42 +++---- kernel/power/power.h | 7 +- kernel/power/user.c | 13 +-- kernel/sys.c | 2 +- 11 files changed, 226 insertions(+), 185 deletions(-) (limited to 'include/linux') diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt index 000556c932e9..e00c6cf09e85 100644 --- a/Documentation/power/userland-swsusp.txt +++ b/Documentation/power/userland-swsusp.txt @@ -93,21 +93,23 @@ SNAPSHOT_S2RAM - suspend to RAM; using this call causes the kernel to to resume the system from RAM if there's enough battery power or restore its state on the basis of the saved suspend image otherwise) -SNAPSHOT_PMOPS - enable the usage of the pmops->prepare, pmops->enter and - pmops->finish methods (the in-kernel swsusp knows these as the "platform - method") which are needed on many machines to (among others) speed up - the resume by letting the BIOS skip some steps or to let the system - recognise the correct state of the hardware after the resume (in - particular on many machines this ensures that unplugged AC - adapters get correctly detected and that kacpid does not run wild after - the resume). The last ioctl() argument can take one of the three - values, defined in kernel/power/power.h: +SNAPSHOT_PMOPS - enable the usage of the hibernation_ops->prepare, + hibernate_ops->enter and hibernation_ops->finish methods (the in-kernel + swsusp knows these as the "platform method") which are needed on many + machines to (among others) speed up the resume by letting the BIOS skip + some steps or to let the system recognise the correct state of the + hardware after the resume (in particular on many machines this ensures + that unplugged AC adapters get correctly detected and that kacpid does + not run wild after the resume). The last ioctl() argument can take one + of the three values, defined in kernel/power/power.h: PMOPS_PREPARE - make the kernel carry out the - pm_ops->prepare(PM_SUSPEND_DISK) operation + hibernation_ops->prepare() operation PMOPS_ENTER - make the kernel power off the system by calling - pm_ops->enter(PM_SUSPEND_DISK) + hibernation_ops->enter() PMOPS_FINISH - make the kernel carry out the - pm_ops->finish(PM_SUSPEND_DISK) operation + hibernation_ops->finish() operation + Note that the actual constants are misnamed because they surface + internal kernel implementation details that have changed. The device's read() operation can be used to transfer the snapshot image from the kernel. 
It has the following limitations: diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c index f8c63410bcbf..52b23471dd69 100644 --- a/drivers/acpi/sleep/main.c +++ b/drivers/acpi/sleep/main.c @@ -29,7 +29,6 @@ static u32 acpi_suspend_states[] = { [PM_SUSPEND_ON] = ACPI_STATE_S0, [PM_SUSPEND_STANDBY] = ACPI_STATE_S1, [PM_SUSPEND_MEM] = ACPI_STATE_S3, - [PM_SUSPEND_DISK] = ACPI_STATE_S4, [PM_SUSPEND_MAX] = ACPI_STATE_S5 }; @@ -94,14 +93,6 @@ static int acpi_pm_enter(suspend_state_t pm_state) do_suspend_lowlevel(); break; - case PM_SUSPEND_DISK: - if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM) - status = acpi_enter_sleep_state(acpi_state); - break; - case PM_SUSPEND_MAX: - acpi_power_off(); - break; - default: return -EINVAL; } @@ -157,12 +148,13 @@ int acpi_suspend(u32 acpi_state) suspend_state_t states[] = { [1] = PM_SUSPEND_STANDBY, [3] = PM_SUSPEND_MEM, - [4] = PM_SUSPEND_DISK, [5] = PM_SUSPEND_MAX }; if (acpi_state < 6 && states[acpi_state]) return pm_suspend(states[acpi_state]); + if (acpi_state == 4) + return hibernate(); return -EINVAL; } @@ -189,6 +181,49 @@ static struct pm_ops acpi_pm_ops = { .finish = acpi_pm_finish, }; +#ifdef CONFIG_SOFTWARE_SUSPEND +static int acpi_hibernation_prepare(void) +{ + return acpi_sleep_prepare(ACPI_STATE_S4); +} + +static int acpi_hibernation_enter(void) +{ + acpi_status status = AE_OK; + unsigned long flags = 0; + + ACPI_FLUSH_CPU_CACHE(); + + local_irq_save(flags); + acpi_enable_wakeup_device(ACPI_STATE_S4); + /* This shouldn't return. If it returns, we have a problem */ + status = acpi_enter_sleep_state(ACPI_STATE_S4); + local_irq_restore(flags); + + return ACPI_SUCCESS(status) ? 0 : -EFAULT; +} + +static void acpi_hibernation_finish(void) +{ + acpi_leave_sleep_state(ACPI_STATE_S4); + acpi_disable_wakeup_device(ACPI_STATE_S4); + + /* reset firmware waking vector */ + acpi_set_firmware_waking_vector((acpi_physical_address) 0); + + if (init_8259A_after_S1) { + printk("Broken toshiba laptop -> kicking interrupts\n"); + init_8259A(0); + } +} + +static struct hibernation_ops acpi_hibernation_ops = { + .prepare = acpi_hibernation_prepare, + .enter = acpi_hibernation_enter, + .finish = acpi_hibernation_finish, +}; +#endif /* CONFIG_SOFTWARE_SUSPEND */ + /* * Toshiba fails to preserve interrupts over S1, reinitialization * of 8259 is needed after S1 resume. 
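The acpi_hibernation_ops block above shows the registration pattern for the new interface; a minimal, hypothetical platform driver would use it roughly as sketched below. Only struct hibernation_ops and hibernation_set_ops() come from this patch; the my_* names are placeholders.

/*
 * Sketch of a platform registering hibernation callbacks through the
 * interface introduced by this patch.  The my_* functions are
 * placeholders; all three methods must be assigned before registering.
 */
static int my_hibernation_prepare(void)
{
	return 0;			/* ready the firmware for the transition */
}

static int my_hibernation_enter(void)
{
	return 0;			/* power the machine off here */
}

static void my_hibernation_finish(void)
{
	/* undo whatever prepare() set up */
}

static struct hibernation_ops my_hibernation_ops = {
	.prepare = my_hibernation_prepare,
	.enter   = my_hibernation_enter,
	.finish  = my_hibernation_finish,
};

static int __init my_platform_pm_init(void)
{
	hibernation_set_ops(&my_hibernation_ops);
	return 0;
}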
@@ -227,14 +262,18 @@ int __init acpi_sleep_init(void) sleep_states[i] = 1; printk(" S%d", i); } - if (i == ACPI_STATE_S4) { - if (sleep_states[i]) - acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM; - } } printk(")\n"); pm_set_ops(&acpi_pm_ops); + +#ifdef CONFIG_SOFTWARE_SUSPEND + if (sleep_states[ACPI_STATE_S4]) + hibernation_set_ops(&acpi_hibernation_ops); +#else + sleep_states[ACPI_STATE_S4] = 0; +#endif + return 0; } diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index 5a76e5be61d5..76b45f0b8341 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c @@ -60,7 +60,7 @@ acpi_system_write_sleep(struct file *file, state = simple_strtoul(str, NULL, 0); #ifdef CONFIG_SOFTWARE_SUSPEND if (state == 4) { - error = pm_suspend(PM_SUSPEND_DISK); + error = hibernate(); goto Done; } #endif diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c index 7ed92dc3d833..3c3f2ebf3fc9 100644 --- a/drivers/i2c/chips/tps65010.c +++ b/drivers/i2c/chips/tps65010.c @@ -354,7 +354,7 @@ static void tps65010_interrupt(struct tps65010 *tps) * also needs to get error handling and probably * an #ifdef CONFIG_SOFTWARE_SUSPEND */ - pm_suspend(PM_SUSPEND_DISK); + hibernate(); #endif poll = 1; } diff --git a/include/linux/pm.h b/include/linux/pm.h index 6e8fa3049e5d..87545e0f0b58 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -107,26 +107,11 @@ typedef int __bitwise suspend_state_t; #define PM_SUSPEND_ON ((__force suspend_state_t) 0) #define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1) #define PM_SUSPEND_MEM ((__force suspend_state_t) 3) -#define PM_SUSPEND_DISK ((__force suspend_state_t) 4) -#define PM_SUSPEND_MAX ((__force suspend_state_t) 5) - -typedef int __bitwise suspend_disk_method_t; - -/* invalid must be 0 so struct pm_ops initialisers can leave it out */ -#define PM_DISK_INVALID ((__force suspend_disk_method_t) 0) -#define PM_DISK_PLATFORM ((__force suspend_disk_method_t) 1) -#define PM_DISK_SHUTDOWN ((__force suspend_disk_method_t) 2) -#define PM_DISK_REBOOT ((__force suspend_disk_method_t) 3) -#define PM_DISK_TEST ((__force suspend_disk_method_t) 4) -#define PM_DISK_TESTPROC ((__force suspend_disk_method_t) 5) -#define PM_DISK_MAX ((__force suspend_disk_method_t) 6) +#define PM_SUSPEND_MAX ((__force suspend_state_t) 4) /** * struct pm_ops - Callbacks for managing platform dependent suspend states. * @valid: Callback to determine whether the given state can be entered. - * If %CONFIG_SOFTWARE_SUSPEND is set then %PM_SUSPEND_DISK is - * always valid and never passed to this call. If not assigned, - * no suspend states are valid. * Valid states are advertised in /sys/power/state but can still * be rejected by prepare or enter if the conditions aren't right. * There is a %pm_valid_only_mem function available that can be assigned @@ -140,24 +125,12 @@ typedef int __bitwise suspend_disk_method_t; * * @finish: Called when the system has left the given state and all devices * are resumed. The return value is ignored. - * - * @pm_disk_mode: The generic code always allows one of the shutdown methods - * %PM_DISK_SHUTDOWN, %PM_DISK_REBOOT, %PM_DISK_TEST and - * %PM_DISK_TESTPROC. If this variable is set, the mode it is set - * to is allowed in addition to those modes and is also made default. 
- * When this mode is sent selected, the @prepare call will be called - * before suspending to disk (if present), the @enter call should be - * present and will be called after all state has been saved and the - * machine is ready to be powered off; the @finish callback is called - * after state has been restored. All these calls are called with - * %PM_SUSPEND_DISK as the state. */ struct pm_ops { int (*valid)(suspend_state_t state); int (*prepare)(suspend_state_t state); int (*enter)(suspend_state_t state); int (*finish)(suspend_state_t state); - suspend_disk_method_t pm_disk_mode; }; /** @@ -276,8 +249,6 @@ extern void device_power_up(void); extern void device_resume(void); #ifdef CONFIG_PM -extern suspend_disk_method_t pm_disk_mode; - extern int device_suspend(pm_message_t state); extern int device_prepare_suspend(pm_message_t state); diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 9d2aa1a12aa0..d74da9122b60 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -32,6 +32,24 @@ static inline int pm_prepare_console(void) { return 0; } static inline void pm_restore_console(void) {} #endif +/** + * struct hibernation_ops - hibernation platform support + * + * The methods in this structure allow a platform to override the default + * mechanism of shutting down the machine during a hibernation transition. + * + * All three methods must be assigned. + * + * @prepare: prepare system for hibernation + * @enter: shut down system after state has been saved to disk + * @finish: finish/clean up after state has been reloaded + */ +struct hibernation_ops { + int (*prepare)(void); + int (*enter)(void); + void (*finish)(void); +}; + #if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) /* kernel/power/snapshot.c */ extern void __init register_nosave_region(unsigned long, unsigned long); @@ -39,11 +57,17 @@ extern int swsusp_page_is_forbidden(struct page *); extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); extern unsigned long get_safe_page(gfp_t gfp_mask); + +extern void hibernation_set_ops(struct hibernation_ops *ops); +extern int hibernate(void); #else static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} static inline void swsusp_unset_page_free(struct page *p) {} + +static inline void hibernation_set_ops(struct hibernation_ops *ops) {} +static inline int hibernate(void) { return -ENOSYS; } #endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */ void save_processor_state(void); diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 06331374d862..b5f0543ed84d 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -30,30 +30,69 @@ char resume_file[256] = CONFIG_PM_STD_PARTITION; dev_t swsusp_resume_device; sector_t swsusp_resume_block; +enum { + HIBERNATION_INVALID, + HIBERNATION_PLATFORM, + HIBERNATION_TEST, + HIBERNATION_TESTPROC, + HIBERNATION_SHUTDOWN, + HIBERNATION_REBOOT, + /* keep last */ + __HIBERNATION_AFTER_LAST +}; +#define HIBERNATION_MAX (__HIBERNATION_AFTER_LAST-1) +#define HIBERNATION_FIRST (HIBERNATION_INVALID + 1) + +static int hibernation_mode = HIBERNATION_SHUTDOWN; + +struct hibernation_ops *hibernation_ops; + +/** + * hibernation_set_ops - set the global hibernate operations + * @ops: the hibernation operations to use in subsequent hibernation transitions + */ + +void hibernation_set_ops(struct hibernation_ops 
*ops) +{ + if (ops && !(ops->prepare && ops->enter && ops->finish)) { + WARN_ON(1); + return; + } + mutex_lock(&pm_mutex); + hibernation_ops = ops; + if (ops) + hibernation_mode = HIBERNATION_PLATFORM; + else if (hibernation_mode == HIBERNATION_PLATFORM) + hibernation_mode = HIBERNATION_SHUTDOWN; + + mutex_unlock(&pm_mutex); +} + + /** * platform_prepare - prepare the machine for hibernation using the * platform driver if so configured and return an error code if it fails */ -static inline int platform_prepare(void) +static int platform_prepare(void) { - int error = 0; + return (hibernation_mode == HIBERNATION_PLATFORM && hibernation_ops) ? + hibernation_ops->prepare() : 0; +} - switch (pm_disk_mode) { - case PM_DISK_TEST: - case PM_DISK_TESTPROC: - case PM_DISK_SHUTDOWN: - case PM_DISK_REBOOT: - break; - default: - if (pm_ops && pm_ops->prepare) - error = pm_ops->prepare(PM_SUSPEND_DISK); - } - return error; +/** + * platform_finish - switch the machine to the normal mode of operation + * using the platform driver (must be called after platform_prepare()) + */ + +static void platform_finish(void) +{ + if (hibernation_mode == HIBERNATION_PLATFORM && hibernation_ops) + hibernation_ops->finish(); } /** - * power_down - Shut machine down for hibernate. + * power_down - Shut the machine down for hibernation. * * Use the platform driver, if configured so; otherwise try * to power off or reboot. @@ -61,20 +100,20 @@ static inline int platform_prepare(void) static void power_down(void) { - switch (pm_disk_mode) { - case PM_DISK_TEST: - case PM_DISK_TESTPROC: + switch (hibernation_mode) { + case HIBERNATION_TEST: + case HIBERNATION_TESTPROC: break; - case PM_DISK_SHUTDOWN: + case HIBERNATION_SHUTDOWN: kernel_power_off(); break; - case PM_DISK_REBOOT: + case HIBERNATION_REBOOT: kernel_restart(NULL); break; - default: - if (pm_ops && pm_ops->enter) { + case HIBERNATION_PLATFORM: + if (hibernation_ops) { kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK); - pm_ops->enter(PM_SUSPEND_DISK); + hibernation_ops->enter(); break; } } @@ -87,20 +126,6 @@ static void power_down(void) while(1); } -static inline void platform_finish(void) -{ - switch (pm_disk_mode) { - case PM_DISK_TEST: - case PM_DISK_TESTPROC: - case PM_DISK_SHUTDOWN: - case PM_DISK_REBOOT: - break; - default: - if (pm_ops && pm_ops->finish) - pm_ops->finish(PM_SUSPEND_DISK); - } -} - static void unprepare_processes(void) { thaw_processes(); @@ -120,13 +145,10 @@ static int prepare_processes(void) } /** - * pm_suspend_disk - The granpappy of hibernation power management. - * - * If not, then call swsusp to do its thing, then figure out how - * to power down the system. 
+ * hibernate - The granpappy of the built-in hibernation management */ -int pm_suspend_disk(void) +int hibernate(void) { int error; @@ -143,7 +165,8 @@ int pm_suspend_disk(void) if (error) goto Finish; - if (pm_disk_mode == PM_DISK_TESTPROC) { + mutex_lock(&pm_mutex); + if (hibernation_mode == HIBERNATION_TESTPROC) { printk("swsusp debug: Waiting for 5 seconds.\n"); mdelay(5000); goto Thaw; @@ -168,7 +191,7 @@ int pm_suspend_disk(void) if (error) goto Enable_cpus; - if (pm_disk_mode == PM_DISK_TEST) { + if (hibernation_mode == HIBERNATION_TEST) { printk("swsusp debug: Waiting for 5 seconds.\n"); mdelay(5000); goto Enable_cpus; @@ -205,6 +228,7 @@ int pm_suspend_disk(void) device_resume(); resume_console(); Thaw: + mutex_unlock(&pm_mutex); unprepare_processes(); Finish: free_basic_memory_bitmaps(); @@ -220,7 +244,7 @@ int pm_suspend_disk(void) * Called as a late_initcall (so all devices are discovered and * initialized), we call swsusp to see if we have a saved image or not. * If so, we quiesce devices, the restore the saved image. We will - * return above (in pm_suspend_disk() ) if everything goes well. + * return above (in hibernate() ) if everything goes well. * Otherwise, we fail gracefully and return to the normally * scheduled program. * @@ -315,25 +339,26 @@ static int software_resume(void) late_initcall(software_resume); -static const char * const pm_disk_modes[] = { - [PM_DISK_PLATFORM] = "platform", - [PM_DISK_SHUTDOWN] = "shutdown", - [PM_DISK_REBOOT] = "reboot", - [PM_DISK_TEST] = "test", - [PM_DISK_TESTPROC] = "testproc", +static const char * const hibernation_modes[] = { + [HIBERNATION_PLATFORM] = "platform", + [HIBERNATION_SHUTDOWN] = "shutdown", + [HIBERNATION_REBOOT] = "reboot", + [HIBERNATION_TEST] = "test", + [HIBERNATION_TESTPROC] = "testproc", }; /** - * disk - Control suspend-to-disk mode + * disk - Control hibernation mode * * Suspend-to-disk can be handled in several ways. We have a few options * for putting the system to sleep - using the platform driver (e.g. ACPI - * or other pm_ops), powering off the system or rebooting the system - * (for testing) as well as the two test modes. + * or other hibernation_ops), powering off the system or rebooting the + * system (for testing) as well as the two test modes. * * The system can support 'platform', and that is known a priori (and - * encoded in pm_ops). However, the user may choose 'shutdown' or 'reboot' - * as alternatives, as well as the test modes 'test' and 'testproc'. + * encoded by the presence of hibernation_ops). However, the user may + * choose 'shutdown' or 'reboot' as alternatives, as well as one fo the + * test modes, 'test' or 'testproc'. * * show() will display what the mode is currently set to. * store() will accept one of @@ -345,7 +370,7 @@ static const char * const pm_disk_modes[] = { * 'testproc' * * It will only change to 'platform' if the system - * supports it (as determined from pm_ops->pm_disk_mode). + * supports it (as determined by having hibernation_ops). 
*/ static ssize_t disk_show(struct kset *kset, char *buf) @@ -353,28 +378,25 @@ static ssize_t disk_show(struct kset *kset, char *buf) int i; char *start = buf; - for (i = PM_DISK_PLATFORM; i < PM_DISK_MAX; i++) { - if (!pm_disk_modes[i]) + for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { + if (!hibernation_modes[i]) continue; switch (i) { - case PM_DISK_SHUTDOWN: - case PM_DISK_REBOOT: - case PM_DISK_TEST: - case PM_DISK_TESTPROC: + case HIBERNATION_SHUTDOWN: + case HIBERNATION_REBOOT: + case HIBERNATION_TEST: + case HIBERNATION_TESTPROC: break; - default: - if (pm_ops && pm_ops->enter && - (i == pm_ops->pm_disk_mode)) + case HIBERNATION_PLATFORM: + if (hibernation_ops) break; /* not a valid mode, continue with loop */ continue; } - if (i == pm_disk_mode) - buf += sprintf(buf, "[%s]", pm_disk_modes[i]); + if (i == hibernation_mode) + buf += sprintf(buf, "[%s] ", hibernation_modes[i]); else - buf += sprintf(buf, "%s", pm_disk_modes[i]); - if (i+1 != PM_DISK_MAX) - buf += sprintf(buf, " "); + buf += sprintf(buf, "%s ", hibernation_modes[i]); } buf += sprintf(buf, "\n"); return buf-start; @@ -387,39 +409,38 @@ static ssize_t disk_store(struct kset *kset, const char *buf, size_t n) int i; int len; char *p; - suspend_disk_method_t mode = 0; + int mode = HIBERNATION_INVALID; p = memchr(buf, '\n', n); len = p ? p - buf : n; mutex_lock(&pm_mutex); - for (i = PM_DISK_PLATFORM; i < PM_DISK_MAX; i++) { - if (!strncmp(buf, pm_disk_modes[i], len)) { + for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { + if (!strncmp(buf, hibernation_modes[i], len)) { mode = i; break; } } - if (mode) { + if (mode != HIBERNATION_INVALID) { switch (mode) { - case PM_DISK_SHUTDOWN: - case PM_DISK_REBOOT: - case PM_DISK_TEST: - case PM_DISK_TESTPROC: - pm_disk_mode = mode; + case HIBERNATION_SHUTDOWN: + case HIBERNATION_REBOOT: + case HIBERNATION_TEST: + case HIBERNATION_TESTPROC: + hibernation_mode = mode; break; - default: - if (pm_ops && pm_ops->enter && - (mode == pm_ops->pm_disk_mode)) - pm_disk_mode = mode; + case HIBERNATION_PLATFORM: + if (hibernation_ops) + hibernation_mode = mode; else error = -EINVAL; } - } else { + } else error = -EINVAL; - } - pr_debug("PM: suspend-to-disk mode set to '%s'\n", - pm_disk_modes[mode]); + if (!error) + pr_debug("PM: suspend-to-disk mode set to '%s'\n", + hibernation_modes[mode]); mutex_unlock(&pm_mutex); return error ? error : n; } diff --git a/kernel/power/main.c b/kernel/power/main.c index f6dda685e7e2..40d56a31245e 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -30,7 +30,6 @@ DEFINE_MUTEX(pm_mutex); struct pm_ops *pm_ops; -suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN; /** * pm_set_ops - Set the global power method table. @@ -41,10 +40,6 @@ void pm_set_ops(struct pm_ops * ops) { mutex_lock(&pm_mutex); pm_ops = ops; - if (ops && ops->pm_disk_mode != PM_DISK_INVALID) { - pm_disk_mode = ops->pm_disk_mode; - } else - pm_disk_mode = PM_DISK_SHUTDOWN; mutex_unlock(&pm_mutex); } @@ -184,24 +179,12 @@ static void suspend_finish(suspend_state_t state) static const char * const pm_states[PM_SUSPEND_MAX] = { [PM_SUSPEND_STANDBY] = "standby", [PM_SUSPEND_MEM] = "mem", - [PM_SUSPEND_DISK] = "disk", }; static inline int valid_state(suspend_state_t state) { - /* Suspend-to-disk does not really need low-level support. - * It can work with shutdown/reboot if needed. If it isn't - * configured, then it cannot be supported. 
- */ - if (state == PM_SUSPEND_DISK) -#ifdef CONFIG_SOFTWARE_SUSPEND - return 1; -#else - return 0; -#endif - - /* all other states need lowlevel support and need to be - * valid to the lowlevel implementation, no valid callback + /* All states need lowlevel support and need to be valid + * to the lowlevel implementation, no valid callback * implies that none are valid. */ if (!pm_ops || !pm_ops->valid || !pm_ops->valid(state)) return 0; @@ -229,11 +212,6 @@ static int enter_state(suspend_state_t state) if (!mutex_trylock(&pm_mutex)) return -EBUSY; - if (state == PM_SUSPEND_DISK) { - error = pm_suspend_disk(); - goto Unlock; - } - pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); if ((error = suspend_prepare(state))) goto Unlock; @@ -251,7 +229,7 @@ static int enter_state(suspend_state_t state) /** * pm_suspend - Externally visible function for suspending system. - * @state: Enumarted value of state to enter. + * @state: Enumerated value of state to enter. * * Determine whether or not value is within range, get state * structure, and enter (above). @@ -289,7 +267,13 @@ static ssize_t state_show(struct kset *kset, char *buf) if (pm_states[i] && valid_state(i)) s += sprintf(s,"%s ", pm_states[i]); } - s += sprintf(s,"\n"); +#ifdef CONFIG_SOFTWARE_SUSPEND + s += sprintf(s, "%s\n", "disk"); +#else + if (s != buf) + /* convert the last space to a newline */ + *(s-1) = '\n'; +#endif return (s - buf); } @@ -304,6 +288,12 @@ static ssize_t state_store(struct kset *kset, const char *buf, size_t n) p = memchr(buf, '\n', n); len = p ? p - buf : n; + /* First, check if we are requested to hibernate */ + if (!strncmp(buf, "disk", len)) { + error = hibernate(); + return error ? error : n; + } + for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { if (*s && !strncmp(buf, *s, len)) break; diff --git a/kernel/power/power.h b/kernel/power/power.h index 34b43542785a..51381487103f 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -25,12 +25,7 @@ struct swsusp_info { */ #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) -extern int pm_suspend_disk(void); -#else -static inline int pm_suspend_disk(void) -{ - return -EPERM; -} +extern struct hibernation_ops *hibernation_ops; #endif extern int pfn_is_nosave(unsigned long); diff --git a/kernel/power/user.c b/kernel/power/user.c index 040560d9c312..24d7d78e6f42 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -130,16 +130,16 @@ static inline int platform_prepare(void) { int error = 0; - if (pm_ops && pm_ops->prepare) - error = pm_ops->prepare(PM_SUSPEND_DISK); + if (hibernation_ops) + error = hibernation_ops->prepare(); return error; } static inline void platform_finish(void) { - if (pm_ops && pm_ops->finish) - pm_ops->finish(PM_SUSPEND_DISK); + if (hibernation_ops) + hibernation_ops->finish(); } static inline int snapshot_suspend(int platform_suspend) @@ -384,7 +384,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, switch (arg) { case PMOPS_PREPARE: - if (pm_ops && pm_ops->enter) { + if (hibernation_ops) { data->platform_suspend = 1; error = 0; } else { @@ -395,8 +395,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, case PMOPS_ENTER: if (data->platform_suspend) { kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK); - error = pm_ops->enter(PM_SUSPEND_DISK); - error = 0; + error = hibernation_ops->enter(); } break; diff --git a/kernel/sys.c b/kernel/sys.c index 926bf9d7ac45..8ecfe3473779 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -881,7 +881,7 @@ asmlinkage long 
sys_reboot(int magic1, int magic2, unsigned int cmd, void __user #ifdef CONFIG_SOFTWARE_SUSPEND case LINUX_REBOOT_CMD_SW_SUSPEND: { - int ret = pm_suspend(PM_SUSPEND_DISK); + int ret = hibernate(); unlock_kernel(); return ret; } -- cgit v1.2.3 From dd2a345f8f002845636dbf5d2d768bb5cd8a5f59 Mon Sep 17 00:00:00 2001 From: Dave Gilbert Date: Wed, 9 May 2007 02:33:24 -0700 Subject: Display all possible partitions when the root filesystem failed to mount Display all possible partitions when the root filesystem is not mounted. This helps to track spell'o's and missing drivers. Updated to work with newer kernels. Example output: VFS: Cannot open root device "foobar" or unknown-block(0,0) Please append a correct "root=" boot option; here are the available partitions: 0800 8388608 sda driver: sd 0801 192748 sda1 0802 8193150 sda2 0810 4194304 sdb driver: sd Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(0,0) [akpm@linux-foundation.org: cleanups, fix printk warnings] Signed-off-by: Jan Engelhardt Cc: Dave Gilbert Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- block/genhd.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/genhd.h | 1 + init/do_mounts.c | 7 ++++++- 3 files changed, 60 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/block/genhd.c b/block/genhd.c index b5664440896c..93a2cf654597 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -213,6 +213,59 @@ struct gendisk *get_gendisk(dev_t dev, int *part) return kobj ? to_disk(kobj) : NULL; } +/* + * print a full list of all partitions - intended for places where the root + * filesystem can't be mounted and thus to give the victim some idea of what + * went wrong + */ +void __init printk_all_partitions(void) +{ + int n; + struct gendisk *sgp; + + mutex_lock(&block_subsys_lock); + /* For each block device... */ + list_for_each_entry(sgp, &block_subsys.list, kobj.entry) { + char buf[BDEVNAME_SIZE]; + /* + * Don't show empty devices or things that have been surpressed + */ + if (get_capacity(sgp) == 0 || + (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)) + continue; + + /* + * Note, unlike /proc/partitions, I am showing the numbers in + * hex - the same format as the root= option takes. 
+ */ + printk("%02x%02x %10llu %s", + sgp->major, sgp->first_minor, + (unsigned long long)get_capacity(sgp) >> 1, + disk_name(sgp, 0, buf)); + if (sgp->driverfs_dev != NULL && + sgp->driverfs_dev->driver != NULL) + printk(" driver: %s\n", + sgp->driverfs_dev->driver->name); + else + printk(" (driver?)\n"); + + /* now show the partitions */ + for (n = 0; n < sgp->minors - 1; ++n) { + if (sgp->part[n] == NULL) + continue; + if (sgp->part[n]->nr_sects == 0) + continue; + printk(" %02x%02x %10llu %s\n", + sgp->major, n + 1 + sgp->first_minor, + (unsigned long long)sgp->part[n]->nr_sects >> 1, + disk_name(sgp, n + 1, buf)); + } /* partition subloop */ + } /* Block device loop */ + + mutex_unlock(&block_subsys_lock); + return; +} + #ifdef CONFIG_PROC_FS /* iterator */ static void *part_start(struct seq_file *part, loff_t *pos) diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 2c65da7cabb2..f589559cf070 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -413,6 +413,7 @@ char *disk_name (struct gendisk *hd, int part, char *buf); extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); extern void add_partition(struct gendisk *, int, sector_t, sector_t, int); extern void delete_partition(struct gendisk *, int); +extern void printk_all_partitions(void); extern struct gendisk *alloc_disk_node(int minors, int node_id); extern struct gendisk *alloc_disk(int minors); diff --git a/init/do_mounts.c b/init/do_mounts.c index 3f57ed4599d6..46fe407fb03e 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -308,17 +309,21 @@ retry: /* * Allow the user to distinguish between failed sys_open * and bad superblock on root device. + * and give them a list of the available devices */ #ifdef CONFIG_BLOCK __bdevname(ROOT_DEV, b); #endif printk("VFS: Cannot open root device \"%s\" or %s\n", root_device_name, b); - printk("Please append a correct \"root=\" boot option\n"); + printk("Please append a correct \"root=\" boot option; here are the available partitions:\n"); + printk_all_partitions(); panic("VFS: Unable to mount root fs on %s", b); } + printk("List of all partitions:\n"); + printk_all_partitions(); printk("No filesystem could mount root, tried: "); for (p = fs_names; *p; p += strlen(p)+1) printk(" %s", p); -- cgit v1.2.3 From 2f4dfe206a2fc07099dfad77a8ea2f4b4ae2140f Mon Sep 17 00:00:00 2001 From: Fernando Luis Vazquez Cao Date: Wed, 9 May 2007 02:33:25 -0700 Subject: Remove hardcoding of hard_smp_processor_id on UP systems With the advent of kdump, the assumption that the boot CPU when booting an UP kernel is always the CPU with a particular hardware ID (often 0) (usually referred to as BSP on some architectures) is not valid anymore. The reason being that the dump capture kernel boots on the crashed CPU (the CPU that invoked crash_kexec), which may be or may not be that particular CPU. Move definition of hard_smp_processor_id for the UP case to architecture-specific code ("asm/smp.h") where it belongs, so that each architecture can provide its own implementation. Signed-off-by: Fernando Luis Vazquez Cao Cc: "Luck, Tony" Acked-by: Andi Kleen Cc: "Eric W. 
Biederman" Cc: Vivek Goyal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc64/kernel/traps.c | 1 + include/asm-alpha/smp.h | 1 + include/asm-i386/smp.h | 3 ++- include/asm-ia64/smp.h | 3 ++- include/asm-m32r/smp.h | 6 +++++- include/asm-powerpc/smp.h | 1 + include/asm-s390/smp.h | 1 + include/asm-sparc/smp.h | 1 + include/asm-sparc64/smp.h | 1 + include/asm-um/smp.h | 4 ++++ include/asm-x86_64/smp.h | 4 +++- include/linux/smp.h | 1 - 12 files changed, 22 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index dc652f210290..d0fde36395b4 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h index a1a1eca6be45..286e1d844f63 100644 --- a/include/asm-alpha/smp.h +++ b/include/asm-alpha/smp.h @@ -51,6 +51,7 @@ int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, in #else /* CONFIG_SMP */ +#define hard_smp_processor_id() 0 #define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; }) #endif /* CONFIG_SMP */ diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h index 090abc1da32a..3243fa6f455f 100644 --- a/include/asm-i386/smp.h +++ b/include/asm-i386/smp.h @@ -147,12 +147,13 @@ extern unsigned int num_processors; #else /* CONFIG_SMP */ +#define hard_smp_processor_id() 0 #define safe_smp_processor_id() 0 #define cpu_physical_id(cpu) boot_cpu_physical_apicid #define NO_PROC_ID 0xFF /* No processor magic marker */ -#endif +#endif /* CONFIG_SMP */ #ifndef __ASSEMBLY__ diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index 60fd4ae014f6..62014b643ecd 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h @@ -128,8 +128,9 @@ extern void unlock_ipi_calllock(void); extern void identify_siblings (struct cpuinfo_ia64 *); extern int is_multithreading_enabled(void); -#else +#else /* CONFIG_SMP */ +#define hard_smp_processor_id() 0 #define cpu_logical_id(i) 0 #define cpu_physical_id(i) ia64_get_lid() diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h index abd937ac5239..078e1a51a042 100644 --- a/include/asm-m32r/smp.h +++ b/include/asm-m32r/smp.h @@ -108,6 +108,10 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); #define IPI_SHIFT (0) #define NR_IPIS (8) -#endif /* CONFIG_SMP */ +#else /* CONFIG_SMP */ + +#define hard_smp_processor_id() 0 + +#endif /* CONFIG_SMP */ #endif /* _ASM_M32R_SMP_H */ diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h index 01717f266dc9..d037f50580e2 100644 --- a/include/asm-powerpc/smp.h +++ b/include/asm-powerpc/smp.h @@ -83,6 +83,7 @@ extern void __cpu_die(unsigned int cpu); #else /* for UP */ +#define hard_smp_processor_id() 0 #define smp_setup_cpu_maps() #endif /* CONFIG_SMP */ diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index 0a28e6d6ef40..76e424f718c6 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h @@ -110,6 +110,7 @@ static inline void smp_send_stop(void) __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); } +#define hard_smp_processor_id() 0 #define smp_cpu_not_running(cpu) 1 #define smp_setup_cpu_possible_map() do { } while (0) #endif diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h index b9da9a600e35..b3f492208fd2 100644 --- a/include/asm-sparc/smp.h +++ b/include/asm-sparc/smp.h @@ -165,6 +165,7 @@ void smp_setup_cpu_possible_map(void); #else /* SMP */ +#define 
hard_smp_processor_id() 0 #define smp_setup_cpu_possible_map() do { } while (0) #endif /* !(SMP) */ diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index cca54804b722..869d16fb907b 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h @@ -48,6 +48,7 @@ extern unsigned char boot_cpu_id; #else +#define hard_smp_processor_id() 0 #define smp_setup_cpu_possible_map() do { } while (0) #define boot_cpu_id (0) diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h index ca552261ed1f..84f8cf29324e 100644 --- a/include/asm-um/smp.h +++ b/include/asm-um/smp.h @@ -24,6 +24,10 @@ extern inline void smp_cpus_done(unsigned int maxcpus) extern struct task_struct *idle_threads[NR_CPUS]; +#else + +#define hard_smp_processor_id() 0 + #endif #endif diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index d5704421456b..f62fda527439 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -71,7 +71,9 @@ extern unsigned __cpuinitdata disabled_cpus; #define NO_PROC_ID 0xFF /* No processor magic marker */ -#endif +#else /* CONFIG_SMP */ +#define hard_smp_processor_id() 0 +#endif /* CONFIG_SMP */ /* * Some lowlevel functions might want to know about diff --git a/include/linux/smp.h b/include/linux/smp.h index 7ba23ec8211b..3f70149eabbb 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -83,7 +83,6 @@ void smp_prepare_boot_cpu(void); * These macros fold the SMP functionality into a single CPU system */ #define raw_smp_processor_id() 0 -#define hard_smp_processor_id() 0 static inline int up_smp_call_function(void) { return 0; -- cgit v1.2.3 From 8813d1c00ca923c1683da625ff85959be1db9a49 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Wed, 9 May 2007 02:33:30 -0700 Subject: mca: add integrated device bus matching The MCA bus has a few "integrated" functions, which are effectively virtual slots on the bus. The problem is that these special functions don't have dedicated pos IDs, so we have to manufacture ids for them outside the pos space ... and these ids can't be matched by the standard matching function, so add a special registration that requests a list of pos ids or a particular integrated function. 
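As a rough illustration of the new interface (not part of the patch itself), a driver for such an integrated function might register along these lines; the driver name, POS id and integrated id below are invented for the example:

#include <linux/module.h>
#include <linux/mca.h>

/* Ordinary adapters are still matched through the POS id table. */
static short example_pos_ids[] = { 0x6fb4, 0 };

/* Integrated function ids by definition do not fit in a short. */
#define EXAMPLE_INTEGRATED_ID	0x10000

static struct mca_driver example_mca_driver = {
	.id_table	= example_pos_ids,
	.driver		= {
		.name	= "example_mca",
	},
};

static int __init example_mca_init(void)
{
	/* Match either one of the POS ids or the integrated function. */
	return mca_register_driver_integrated(&example_mca_driver,
					      EXAMPLE_INTEGRATED_ID);
}

static void __exit example_mca_exit(void)
{
	mca_unregister_driver(&example_mca_driver);
}

module_init(example_mca_init);
module_exit(example_mca_exit);
MODULE_LICENSE("GPL");
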
Signed-off-by: James Bottomley Signed-off-by: Linus Torvalds --- drivers/mca/mca-bus.c | 26 ++++++++++++++++---------- drivers/mca/mca-driver.c | 13 +++++++++++++ include/linux/mca.h | 2 ++ 3 files changed, 31 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/mca/mca-bus.c b/drivers/mca/mca-bus.c index a70fe00aea16..67b8e9453b19 100644 --- a/drivers/mca/mca-bus.c +++ b/drivers/mca/mca-bus.c @@ -48,18 +48,24 @@ static int mca_bus_match (struct device *dev, struct device_driver *drv) struct mca_device *mca_dev = to_mca_device (dev); struct mca_driver *mca_drv = to_mca_driver (drv); const unsigned short *mca_ids = mca_drv->id_table; - int i; - - if (!mca_ids) - return 0; - - for(i = 0; mca_ids[i]; i++) { - if (mca_ids[i] == mca_dev->pos_id) { - mca_dev->index = i; - return 1; + int i = 0; + + if (mca_ids) { + for(i = 0; mca_ids[i]; i++) { + if (mca_ids[i] == mca_dev->pos_id) { + mca_dev->index = i; + return 1; + } } } - + /* If the integrated id is present, treat it as though it were an + * additional id in the id_table (it can't be because by definition, + * integrated id's overflow a short */ + if (mca_drv->integrated_id && mca_dev->pos_id == + mca_drv->integrated_id) { + mca_dev->index = i; + return 1; + } return 0; } diff --git a/drivers/mca/mca-driver.c b/drivers/mca/mca-driver.c index 2223466b3d8a..32cd39bcc715 100644 --- a/drivers/mca/mca-driver.c +++ b/drivers/mca/mca-driver.c @@ -36,12 +36,25 @@ int mca_register_driver(struct mca_driver *mca_drv) mca_drv->driver.bus = &mca_bus_type; if ((r = driver_register(&mca_drv->driver)) < 0) return r; + mca_drv->integrated_id = 0; } return 0; } EXPORT_SYMBOL(mca_register_driver); +int mca_register_driver_integrated(struct mca_driver *mca_driver, + int integrated_id) +{ + int r = mca_register_driver(mca_driver); + + if (!r) + mca_driver->integrated_id = integrated_id; + + return r; +} +EXPORT_SYMBOL(mca_register_driver_integrated); + void mca_unregister_driver(struct mca_driver *mca_drv) { if (MCA_bus) diff --git a/include/linux/mca.h b/include/linux/mca.h index 5cff2923092b..37972704617f 100644 --- a/include/linux/mca.h +++ b/include/linux/mca.h @@ -94,6 +94,7 @@ struct mca_bus { struct mca_driver { const short *id_table; void *driver_data; + int integrated_id; struct device_driver driver; }; #define to_mca_driver(mdriver) container_of(mdriver, struct mca_driver, driver) @@ -125,6 +126,7 @@ extern enum MCA_AdapterStatus mca_device_status(struct mca_device *mca_dev); extern struct bus_type mca_bus_type; extern int mca_register_driver(struct mca_driver *drv); +extern int mca_register_driver_integrated(struct mca_driver *, int); extern void mca_unregister_driver(struct mca_driver *drv); /* WARNING: only called by the boot time device setup */ -- cgit v1.2.3 From 55c0d1f83e481dd6c77f52f7dcfeb043b8b740fa Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Wed, 9 May 2007 02:33:37 -0700 Subject: Move sig_kernel_* et al macros to linux/signal.h This patch moves the sig_kernel_* and related macros from kernel/signal.c to linux/signal.h, and cleans them up slightly. I need the sig_kernel_* macros for default signal behavior in the utrace code, and want to avoid duplication or overhead to share the knowledge. 
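For illustration only (not part of the patch), code that needs the default-disposition information, such as the utrace work mentioned above, could classify a signal with the relocated macros roughly like this; the helper and enum names are invented:

#include <linux/signal.h>

enum default_action { ACT_IGNORE, ACT_STOP, ACT_COREDUMP, ACT_TERMINATE };

/* Classify what an unblocked, SIG_DFL signal would do by default. */
static enum default_action default_action_of(int sig)
{
	if (sig_kernel_ignore(sig))	/* SIGCHLD, SIGCONT, SIGWINCH, SIGURG */
		return ACT_IGNORE;
	if (sig_kernel_stop(sig))	/* SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU */
		return ACT_STOP;
	if (sig_kernel_coredump(sig))	/* SIGQUIT, SIGSEGV, SIGFPE, ... */
		return ACT_COREDUMP;
	/* Everything else, including all realtime signals, terminates. */
	return ACT_TERMINATE;
}
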
Signed-off-by: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/signal.h | 125 +++++++++++++++++++++++++++++++++++++++++++++++++ kernel/signal.c | 119 ---------------------------------------------- 2 files changed, 125 insertions(+), 119 deletions(-) (limited to 'include/linux') diff --git a/include/linux/signal.h b/include/linux/signal.h index 14749056dd63..3fa0fab4a04b 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -243,6 +243,131 @@ extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, extern struct kmem_cache *sighand_cachep; +/* + * In POSIX a signal is sent either to a specific thread (Linux task) + * or to the process as a whole (Linux thread group). How the signal + * is sent determines whether it's to one thread or the whole group, + * which determines which signal mask(s) are involved in blocking it + * from being delivered until later. When the signal is delivered, + * either it's caught or ignored by a user handler or it has a default + * effect that applies to the whole thread group (POSIX process). + * + * The possible effects an unblocked signal set to SIG_DFL can have are: + * ignore - Nothing Happens + * terminate - kill the process, i.e. all threads in the group, + * similar to exit_group. The group leader (only) reports + * WIFSIGNALED status to its parent. + * coredump - write a core dump file describing all threads using + * the same mm and then kill all those threads + * stop - stop all the threads in the group, i.e. TASK_STOPPED state + * + * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. + * Other signals when not blocked and set to SIG_DFL behaves as follows. + * The job control signals also have other special effects. + * + * +--------------------+------------------+ + * | POSIX signal | default action | + * +--------------------+------------------+ + * | SIGHUP | terminate | + * | SIGINT | terminate | + * | SIGQUIT | coredump | + * | SIGILL | coredump | + * | SIGTRAP | coredump | + * | SIGABRT/SIGIOT | coredump | + * | SIGBUS | coredump | + * | SIGFPE | coredump | + * | SIGKILL | terminate(+) | + * | SIGUSR1 | terminate | + * | SIGSEGV | coredump | + * | SIGUSR2 | terminate | + * | SIGPIPE | terminate | + * | SIGALRM | terminate | + * | SIGTERM | terminate | + * | SIGCHLD | ignore | + * | SIGCONT | ignore(*) | + * | SIGSTOP | stop(*)(+) | + * | SIGTSTP | stop(*) | + * | SIGTTIN | stop(*) | + * | SIGTTOU | stop(*) | + * | SIGURG | ignore | + * | SIGXCPU | coredump | + * | SIGXFSZ | coredump | + * | SIGVTALRM | terminate | + * | SIGPROF | terminate | + * | SIGPOLL/SIGIO | terminate | + * | SIGSYS/SIGUNUSED | coredump | + * | SIGSTKFLT | terminate | + * | SIGWINCH | ignore | + * | SIGPWR | terminate | + * | SIGRTMIN-SIGRTMAX | terminate | + * +--------------------+------------------+ + * | non-POSIX signal | default action | + * +--------------------+------------------+ + * | SIGEMT | coredump | + * +--------------------+------------------+ + * + * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". + * (*) Special job control effects: + * When SIGCONT is sent, it resumes the process (all threads in the group) + * from TASK_STOPPED state and also clears any pending/queued stop signals + * (any of those marked with "stop(*)"). This happens regardless of blocking, + * catching, or ignoring SIGCONT. 
When any stop signal is sent, it clears + * any pending/queued SIGCONT signals; this happens regardless of blocking, + * catching, or ignored the stop signal, though (except for SIGSTOP) the + * default action of stopping the process may happen later or never. + */ + +#ifdef SIGEMT +#define SIGEMT_MASK rt_sigmask(SIGEMT) +#else +#define SIGEMT_MASK 0 +#endif + +#if SIGRTMIN > BITS_PER_LONG +#define rt_sigmask(sig) (1ULL << ((sig)-1)) +#else +#define rt_sigmask(sig) sigmask(sig) +#endif +#define siginmask(sig, mask) (rt_sigmask(sig) & (mask)) + +#define SIG_KERNEL_ONLY_MASK (\ + rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) + +#define SIG_KERNEL_STOP_MASK (\ + rt_sigmask(SIGSTOP) | rt_sigmask(SIGTSTP) | \ + rt_sigmask(SIGTTIN) | rt_sigmask(SIGTTOU) ) + +#define SIG_KERNEL_COREDUMP_MASK (\ + rt_sigmask(SIGQUIT) | rt_sigmask(SIGILL) | \ + rt_sigmask(SIGTRAP) | rt_sigmask(SIGABRT) | \ + rt_sigmask(SIGFPE) | rt_sigmask(SIGSEGV) | \ + rt_sigmask(SIGBUS) | rt_sigmask(SIGSYS) | \ + rt_sigmask(SIGXCPU) | rt_sigmask(SIGXFSZ) | \ + SIGEMT_MASK ) + +#define SIG_KERNEL_IGNORE_MASK (\ + rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \ + rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) ) + +#define sig_kernel_only(sig) \ + (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK)) +#define sig_kernel_coredump(sig) \ + (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK)) +#define sig_kernel_ignore(sig) \ + (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK)) +#define sig_kernel_stop(sig) \ + (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK)) + +#define sig_needs_tasklist(sig) ((sig) == SIGCONT) + +#define sig_user_defined(t, signr) \ + (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ + ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) + +#define sig_fatal(t, signr) \ + (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ + (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) + #endif /* __KERNEL__ */ #endif /* _LINUX_SIGNAL_H */ diff --git a/kernel/signal.c b/kernel/signal.c index 1368e67c8482..4c8f49eadf7d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -38,125 +38,6 @@ static struct kmem_cache *sigqueue_cachep; -/* - * In POSIX a signal is sent either to a specific thread (Linux task) - * or to the process as a whole (Linux thread group). How the signal - * is sent determines whether it's to one thread or the whole group, - * which determines which signal mask(s) are involved in blocking it - * from being delivered until later. When the signal is delivered, - * either it's caught or ignored by a user handler or it has a default - * effect that applies to the whole thread group (POSIX process). - * - * The possible effects an unblocked signal set to SIG_DFL can have are: - * ignore - Nothing Happens - * terminate - kill the process, i.e. all threads in the group, - * similar to exit_group. The group leader (only) reports - * WIFSIGNALED status to its parent. - * coredump - write a core dump file describing all threads using - * the same mm and then kill all those threads - * stop - stop all the threads in the group, i.e. TASK_STOPPED state - * - * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. - * Other signals when not blocked and set to SIG_DFL behaves as follows. - * The job control signals also have other special effects. 
- * - * +--------------------+------------------+ - * | POSIX signal | default action | - * +--------------------+------------------+ - * | SIGHUP | terminate | - * | SIGINT | terminate | - * | SIGQUIT | coredump | - * | SIGILL | coredump | - * | SIGTRAP | coredump | - * | SIGABRT/SIGIOT | coredump | - * | SIGBUS | coredump | - * | SIGFPE | coredump | - * | SIGKILL | terminate(+) | - * | SIGUSR1 | terminate | - * | SIGSEGV | coredump | - * | SIGUSR2 | terminate | - * | SIGPIPE | terminate | - * | SIGALRM | terminate | - * | SIGTERM | terminate | - * | SIGCHLD | ignore | - * | SIGCONT | ignore(*) | - * | SIGSTOP | stop(*)(+) | - * | SIGTSTP | stop(*) | - * | SIGTTIN | stop(*) | - * | SIGTTOU | stop(*) | - * | SIGURG | ignore | - * | SIGXCPU | coredump | - * | SIGXFSZ | coredump | - * | SIGVTALRM | terminate | - * | SIGPROF | terminate | - * | SIGPOLL/SIGIO | terminate | - * | SIGSYS/SIGUNUSED | coredump | - * | SIGSTKFLT | terminate | - * | SIGWINCH | ignore | - * | SIGPWR | terminate | - * | SIGRTMIN-SIGRTMAX | terminate | - * +--------------------+------------------+ - * | non-POSIX signal | default action | - * +--------------------+------------------+ - * | SIGEMT | coredump | - * +--------------------+------------------+ - * - * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". - * (*) Special job control effects: - * When SIGCONT is sent, it resumes the process (all threads in the group) - * from TASK_STOPPED state and also clears any pending/queued stop signals - * (any of those marked with "stop(*)"). This happens regardless of blocking, - * catching, or ignoring SIGCONT. When any stop signal is sent, it clears - * any pending/queued SIGCONT signals; this happens regardless of blocking, - * catching, or ignored the stop signal, though (except for SIGSTOP) the - * default action of stopping the process may happen later or never. 
- */ - -#ifdef SIGEMT -#define M_SIGEMT M(SIGEMT) -#else -#define M_SIGEMT 0 -#endif - -#if SIGRTMIN > BITS_PER_LONG -#define M(sig) (1ULL << ((sig)-1)) -#else -#define M(sig) (1UL << ((sig)-1)) -#endif -#define T(sig, mask) (M(sig) & (mask)) - -#define SIG_KERNEL_ONLY_MASK (\ - M(SIGKILL) | M(SIGSTOP) ) - -#define SIG_KERNEL_STOP_MASK (\ - M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) ) - -#define SIG_KERNEL_COREDUMP_MASK (\ - M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \ - M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \ - M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT ) - -#define SIG_KERNEL_IGNORE_MASK (\ - M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) ) - -#define sig_kernel_only(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK)) -#define sig_kernel_coredump(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK)) -#define sig_kernel_ignore(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK)) -#define sig_kernel_stop(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) - -#define sig_needs_tasklist(sig) ((sig) == SIGCONT) - -#define sig_user_defined(t, signr) \ - (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ - ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) - -#define sig_fatal(t, signr) \ - (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ - (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) static int sig_ignored(struct task_struct *t, int sig) { -- cgit v1.2.3 From 18d8362d517cb2bd97761294924fe6c2a6ee5e3c Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 9 May 2007 02:33:39 -0700 Subject: mutex_lock_interruptible(): add __must_check It's not sane to use mutex_lock_interruptible() and to then ignore the result. Ditto down_interruptible(), but I'm lazy. Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mutex.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mutex.h b/include/linux/mutex.h index b81bc2adaeff..0d50ea3df689 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -121,11 +121,12 @@ static inline int fastcall mutex_is_locked(struct mutex *lock) * Also see Documentation/mutex-design.txt. */ extern void fastcall mutex_lock(struct mutex *lock); -extern int fastcall mutex_lock_interruptible(struct mutex *lock); +extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); -extern int mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass); +extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, + unsigned int subclass); #else # define mutex_lock_nested(lock, subclass) mutex_lock(lock) # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) -- cgit v1.2.3 From b89deed32ccc96098bd6bc953c64bba6b847774f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 9 May 2007 02:33:52 -0700 Subject: implement flush_work() A basic problem with flush_scheduled_work() is that it blocks behind _all_ presently-queued works, rather than just the work whcih the caller wants to flush. If the caller holds some lock, and if one of the queued work happens to want that lock as well then accidental deadlocks can occur. One example of this is the phy layer: it wants to flush work while holding rtnl_lock(). 
But if a linkwatch event happens to be queued, the phy code will deadlock because the linkwatch callback function takes rtnl_lock. So we implement a new function which will flush a *single* work - just the one which the caller wants to free up. Thus we avoid the accidental deadlocks which can arise from unrelated subsystems' callbacks taking shared locks. flush_work() non-blockingly dequeues the work_struct which we want to kill, then it waits for its handler to complete on all CPUs. Add ->current_work to the "struct cpu_workqueue_struct", it points to currently running "struct work_struct". When flush_work(work) detects ->current_work == work, it inserts a barrier at the _head_ of ->worklist (and thus right _after_ that work) and waits for completition. This means that the next work fired on that CPU will be this barrier, or another barrier queued by concurrent flush_work(), so the caller of flush_work() will be woken before any "regular" work has a chance to run. When wait_on_work() unlocks workqueue_mutex (or whatever we choose to protect against CPU hotplug), CPU may go away. But in that case take_over_work() will move a barrier we queued to another CPU, it will be fired sometime, and wait_on_work() will be woken. Actually, we are doing cleanup_workqueue_thread()->kthread_stop() before take_over_work(), so cwq->thread should complete its ->worklist (and thus the barrier), because currently we don't check kthread_should_stop() in run_workqueue(). But even if we did, everything should be ok. [akpm@osdl.org: cleanup] [akpm@osdl.org: add flush_work_keventd() wrapper] Signed-off-by: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/workqueue.h | 4 +- kernel/workqueue.c | 95 +++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 95 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index f16ba1e0687d..26a70992dec8 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -178,6 +178,8 @@ extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delay extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay); extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); +extern void flush_work(struct workqueue_struct *wq, struct work_struct *work); +extern void flush_work_keventd(struct work_struct *work); extern int FASTCALL(schedule_work(struct work_struct *work)); extern int FASTCALL(run_scheduled_work(struct work_struct *work)); @@ -199,7 +201,7 @@ int execute_in_process_context(work_func_t fn, struct execute_work *); * Kill off a pending schedule_delayed_work(). Note that the work callback * function may still be running on return from cancel_delayed_work(), unless * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or - * cancel_work_sync() to wait on it. + * flush_work() or cancel_work_sync() to wait on it. 
*/ static inline int cancel_delayed_work(struct delayed_work *work) { diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b7bb37ab03bc..918d55267a12 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -46,6 +46,7 @@ struct cpu_workqueue_struct { struct workqueue_struct *wq; struct task_struct *thread; + struct work_struct *current_work; int run_depth; /* Detect run_workqueue() recursion depth */ @@ -120,6 +121,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work && work_pending(work) && !list_empty(&work->entry)) { work_func_t f = work->func; + cwq->current_work = work; list_del_init(&work->entry); spin_unlock_irqrestore(&cwq->lock, flags); @@ -128,6 +130,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work f(work); spin_lock_irqsave(&cwq->lock, flags); + cwq->current_work = NULL; ret = 1; } spin_unlock_irqrestore(&cwq->lock, flags); @@ -166,6 +169,17 @@ int fastcall run_scheduled_work(struct work_struct *work) } EXPORT_SYMBOL(run_scheduled_work); +static void insert_work(struct cpu_workqueue_struct *cwq, + struct work_struct *work, int tail) +{ + set_wq_data(work, cwq); + if (tail) + list_add_tail(&work->entry, &cwq->worklist); + else + list_add(&work->entry, &cwq->worklist); + wake_up(&cwq->more_work); +} + /* Preempt must be disabled. */ static void __queue_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) @@ -173,9 +187,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq, unsigned long flags; spin_lock_irqsave(&cwq->lock, flags); - set_wq_data(work, cwq); - list_add_tail(&work->entry, &cwq->worklist); - wake_up(&cwq->more_work); + insert_work(cwq, work, 1); spin_unlock_irqrestore(&cwq->lock, flags); } @@ -305,6 +317,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) struct work_struct, entry); work_func_t f = work->func; + cwq->current_work = work; list_del_init(cwq->worklist.next); spin_unlock_irqrestore(&cwq->lock, flags); @@ -325,6 +338,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) } spin_lock_irqsave(&cwq->lock, flags); + cwq->current_work = NULL; } cwq->run_depth--; spin_unlock_irqrestore(&cwq->lock, flags); @@ -449,6 +463,75 @@ void fastcall flush_workqueue(struct workqueue_struct *wq) } EXPORT_SYMBOL_GPL(flush_workqueue); +static void wait_on_work(struct cpu_workqueue_struct *cwq, + struct work_struct *work) +{ + struct wq_barrier barr; + int running = 0; + + spin_lock_irq(&cwq->lock); + if (unlikely(cwq->current_work == work)) { + init_wq_barrier(&barr); + insert_work(cwq, &barr.work, 0); + running = 1; + } + spin_unlock_irq(&cwq->lock); + + if (unlikely(running)) { + mutex_unlock(&workqueue_mutex); + wait_for_completion(&barr.done); + mutex_lock(&workqueue_mutex); + } +} + +/** + * flush_work - block until a work_struct's callback has terminated + * @wq: the workqueue on which the work is queued + * @work: the work which is to be flushed + * + * flush_work() will attempt to cancel the work if it is queued. If the work's + * callback appears to be running, flush_work() will block until it has + * completed. + * + * flush_work() is designed to be used when the caller is tearing down data + * structures which the callback function operates upon. It is expected that, + * prior to calling flush_work(), the caller has arranged for the work to not + * be requeued. 
+ */ +void flush_work(struct workqueue_struct *wq, struct work_struct *work) +{ + struct cpu_workqueue_struct *cwq; + + mutex_lock(&workqueue_mutex); + cwq = get_wq_data(work); + /* Was it ever queued ? */ + if (!cwq) + goto out; + + /* + * This work can't be re-queued, and the lock above protects us + * from take_over_work(), no need to re-check that get_wq_data() + * is still the same when we take cwq->lock. + */ + spin_lock_irq(&cwq->lock); + list_del_init(&work->entry); + work_release(work); + spin_unlock_irq(&cwq->lock); + + if (is_single_threaded(wq)) { + /* Always use first cpu's area. */ + wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work); + } else { + int cpu; + + for_each_online_cpu(cpu) + wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work); + } +out: + mutex_unlock(&workqueue_mutex); +} +EXPORT_SYMBOL_GPL(flush_work); + static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, int cpu, int freezeable) { @@ -650,6 +733,12 @@ void flush_scheduled_work(void) } EXPORT_SYMBOL(flush_scheduled_work); +void flush_work_keventd(struct work_struct *work) +{ + flush_work(keventd_wq, work); +} +EXPORT_SYMBOL(flush_work_keventd); + /** * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work. * @wq: the controlling workqueue structure -- cgit v1.2.3 From 19a75d83ffeab004cfcfac64024ad3997bac7220 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 9 May 2007 02:33:56 -0700 Subject: kblockd: use flush_work Switch the kblockd flushing from a global flush to a more specific flush_work(). (akpm: bypassed maintainers, sorry. There are other patches which depend on this) Cc: "Maciej W. Rozycki" Cc: David Howells Cc: Jens Axboe Cc: Nick Piggin Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- block/as-iosched.c | 2 +- block/ll_rw_blk.c | 7 +++---- include/linux/blkdev.h | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/block/as-iosched.c b/block/as-iosched.c index 640aa839d63f..109e91b91ffa 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -1306,7 +1306,7 @@ static void as_exit_queue(elevator_t *e) struct as_data *ad = e->elevator_data; del_timer_sync(&ad->antic_timer); - kblockd_flush(); + kblockd_flush_work(&ad->antic_work); BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index d99d402953a3..c059767c552c 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -1712,7 +1712,6 @@ EXPORT_SYMBOL(blk_stop_queue); void blk_sync_queue(struct request_queue *q) { del_timer_sync(&q->unplug_timer); - kblockd_flush(); } EXPORT_SYMBOL(blk_sync_queue); @@ -3632,11 +3631,11 @@ int kblockd_schedule_work(struct work_struct *work) EXPORT_SYMBOL(kblockd_schedule_work); -void kblockd_flush(void) +void kblockd_flush_work(struct work_struct *work) { - flush_workqueue(kblockd_workqueue); + flush_work(kblockd_workqueue, work); } -EXPORT_SYMBOL(kblockd_flush); +EXPORT_SYMBOL(kblockd_flush_work); int __init blk_dev_init(void) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index a686eabe22d6..db5b00a792f5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -854,7 +854,7 @@ static inline void put_dev_sector(Sector p) struct work_struct; int kblockd_schedule_work(struct work_struct *work); -void kblockd_flush(void); +void kblockd_flush_work(struct work_struct *work); #define MODULE_ALIAS_BLOCKDEV(major,minor) \ 
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) -- cgit v1.2.3 From 7c9cb38302e78d24e37f7d8a2ea7eed4ae5f2fa7 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 9 May 2007 02:34:01 -0700 Subject: relay: use plain timer instead of delayed work relay doesn't need to use schedule_delayed_work() for waking readers when a simple timer will do. Signed-off-by: Tom Zanussi Cc: Satyam Sharma Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/relay.h | 3 ++- kernel/relay.c | 35 ++++++++++++++++------------------- 2 files changed, 18 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/relay.h b/include/linux/relay.h index 759a0f97bec2..6cd8c4425fc7 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -38,7 +39,7 @@ struct rchan_buf size_t subbufs_consumed; /* count of sub-buffers consumed */ struct rchan *chan; /* associated channel */ wait_queue_head_t read_wait; /* reader wait queue */ - struct delayed_work wake_readers; /* reader wake-up work struct */ + struct timer_list timer; /* reader wake-up timer */ struct dentry *dentry; /* channel file dentry */ struct kref kref; /* channel buffer refcount */ struct page **page_array; /* array of current buffer pages */ diff --git a/kernel/relay.c b/kernel/relay.c index 577f251c7e28..e804589c863c 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -310,16 +310,13 @@ static struct rchan_callbacks default_channel_callbacks = { /** * wakeup_readers - wake up readers waiting on a channel - * @work: work struct that contains the the channel buffer + * @data: contains the the channel buffer * - * This is the work function used to defer reader waking. The - * reason waking is deferred is that calling directly from write - * causes problems if you're writing from say the scheduler. + * This is the timer function used to defer reader waking. */ -static void wakeup_readers(struct work_struct *work) +static void wakeup_readers(unsigned long data) { - struct rchan_buf *buf = - container_of(work, struct rchan_buf, wake_readers.work); + struct rchan_buf *buf = (struct rchan_buf *)data; wake_up_interruptible(&buf->read_wait); } @@ -337,11 +334,9 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init) if (init) { init_waitqueue_head(&buf->read_wait); kref_init(&buf->kref); - INIT_DELAYED_WORK(&buf->wake_readers, NULL); - } else { - cancel_delayed_work(&buf->wake_readers); - flush_scheduled_work(); - } + setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); + } else + del_timer_sync(&buf->timer); buf->subbufs_produced = 0; buf->subbufs_consumed = 0; @@ -447,8 +442,7 @@ end: static void relay_close_buf(struct rchan_buf *buf) { buf->finalized = 1; - cancel_delayed_work(&buf->wake_readers); - flush_scheduled_work(); + del_timer_sync(&buf->timer); kref_put(&buf->kref, relay_remove_buf); } @@ -608,11 +602,14 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) buf->dentry->d_inode->i_size += buf->chan->subbuf_size - buf->padding[old_subbuf]; smp_mb(); - if (waitqueue_active(&buf->read_wait)) { - PREPARE_DELAYED_WORK(&buf->wake_readers, - wakeup_readers); - schedule_delayed_work(&buf->wake_readers, 1); - } + if (waitqueue_active(&buf->read_wait)) + /* + * Calling wake_up_interruptible() from here + * will deadlock if we happen to be logging + * from the scheduler (trying to re-grab + * rq->lock), so defer it. 
+ */ + __mod_timer(&buf->timer, jiffies + 1); } old = buf->data; -- cgit v1.2.3 From 6f7cc11aa6c7d5002e16096c7590944daece70ed Mon Sep 17 00:00:00 2001 From: Gautham R Shenoy Date: Wed, 9 May 2007 02:34:02 -0700 Subject: Extend notifier_call_chain to count nr_calls made Since 2.6.18-something, the community has been bugged by the problem to provide a clean and a stable mechanism to postpone a cpu-hotplug event as lock_cpu_hotplug was badly broken. This is another proposal towards solving that problem. This one is along the lines of the solution provided in kernel/workqueue.c Instead of having a global mechanism like lock_cpu_hotplug, we allow the subsytems to define their own per-subsystem hot cpu mutexes. These would be taken(released) where ever we are currently calling lock_cpu_hotplug(unlock_cpu_hotplug). Also, in the per-subsystem hotcpu callback function,we take this mutex before we handle any pre-cpu-hotplug events and release it once we finish handling the post-cpu-hotplug events. A standard means for doing this has been provided in [PATCH 2/4] and demonstrated in [PATCH 3/4]. The ordering of these per-subsystem mutexes might still prove to be a problem, but hopefully lockdep should help us get out of that muddle. The patch set to be applied against linux-2.6.19-rc5 is as follows: [PATCH 1/4] : Extend notifier_call_chain with an option to specify the number of notifications to be sent and also count the number of notifications actually sent. [PATCH 2/4] : Define events CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE and send out notifications for these in _cpu_up and _cpu_down. This would help us standardise the acquire and release of the subsystem locks in the hotcpu callback functions of these subsystems. [PATCH 3/4] : Eliminate lock_cpu_hotplug from kernel/sched.c. [PATCH 4/4] : In workqueue_cpu_callback function, acquire(release) the workqueue_mutex while handling CPU_LOCK_ACQUIRE(CPU_LOCK_RELEASE). If the per-subsystem-locking approach survives the test of time, we can expect a slow phasing out of lock_cpu_hotplug, which has not yet been eliminated in these patches :) This patch: Provide notifier_call_chain with an option to call only a specified number of notifiers and also record the number of call to notifiers made. The need for this enhancement was identified in the post entitled "Slab - Eliminate lock_cpu_hotplug from slab" (http://lkml.org/lkml/2006/10/28/92) by Ravikiran G Thirumalai and Andrew Morton. This patch adds two additional parameters to notifier_call_chain API namely - int nr_to_calls : Number of notifier_functions to be called. The don't care value is -1. - unsigned int *nr_calls : Records the total number of notifier_funtions called by notifier_call_chain. The don't care value is NULL. 
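As a rough, hypothetical sketch of how a caller could use the two new parameters (the chain, event values and function below are invented and not part of this series), the recorded count makes it possible to roll back by notifying only the callbacks that actually saw the original event:

#include <linux/errno.h>
#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(example_chain);

#define EXAMPLE_PREPARE		0x0001
#define EXAMPLE_CANCELED	0x0002

static int example_do_transition(void *data)
{
	int nr_calls = 0;
	int ret;

	/* Call every notifier, counting how many were actually invoked. */
	ret = __blocking_notifier_call_chain(&example_chain, EXAMPLE_PREPARE,
					     data, -1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		/*
		 * The callback that vetoed the transition already knows;
		 * send the cancellation only to the ones called before it.
		 */
		nr_calls--;
		__blocking_notifier_call_chain(&example_chain,
					       EXAMPLE_CANCELED, data,
					       nr_calls, NULL);
		return -EINVAL;
	}
	return 0;
}
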
[michal.k.k.piotrowski@gmail.com: build fix] Credit: Andrew Morton Signed-off-by: Gautham R Shenoy Signed-off-by: Michal Piotrowski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/notifier.h | 52 +++++++++++++++------------ kernel/sys.c | 94 +++++++++++++++++++++++++++++++++++++++--------- 2 files changed, 107 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 10a43ed0527e..e34221bf8946 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -112,32 +112,40 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); #ifdef __KERNEL__ -extern int atomic_notifier_chain_register(struct atomic_notifier_head *, - struct notifier_block *); -extern int blocking_notifier_chain_register(struct blocking_notifier_head *, - struct notifier_block *); -extern int raw_notifier_chain_register(struct raw_notifier_head *, - struct notifier_block *); -extern int srcu_notifier_chain_register(struct srcu_notifier_head *, - struct notifier_block *); - -extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *, - struct notifier_block *); -extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *, - struct notifier_block *); -extern int raw_notifier_chain_unregister(struct raw_notifier_head *, - struct notifier_block *); -extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *, - struct notifier_block *); - -extern int atomic_notifier_call_chain(struct atomic_notifier_head *, +extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, + struct notifier_block *nb); +extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh, + struct notifier_block *nb); +extern int raw_notifier_chain_register(struct raw_notifier_head *nh, + struct notifier_block *nb); +extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, + struct notifier_block *nb); + +extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, + struct notifier_block *nb); +extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, + struct notifier_block *nb); +extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh, + struct notifier_block *nb); +extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, + struct notifier_block *nb); + +extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v); -extern int blocking_notifier_call_chain(struct blocking_notifier_head *, +extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); +extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, unsigned long val, void *v); -extern int raw_notifier_call_chain(struct raw_notifier_head *, +extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); +extern int raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v); -extern int srcu_notifier_call_chain(struct srcu_notifier_head *, +extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); +extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v); +extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); 
#define NOTIFY_DONE 0x0000 /* Don't care */ #define NOTIFY_OK 0x0001 /* Suits me */ diff --git a/kernel/sys.c b/kernel/sys.c index 8ecfe3473779..d4985df21b60 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -134,19 +134,39 @@ static int notifier_chain_unregister(struct notifier_block **nl, return -ENOENT; } +/** + * notifier_call_chain - Informs the registered notifiers about an event. + * @nl: Pointer to head of the blocking notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function + * @nr_to_call: Number of notifier functions to be called. Don't care + * value of this parameter is -1. + * @nr_calls: Records the number of notifications sent. Don't care + * value of this field is NULL. + * @returns: notifier_call_chain returns the value returned by the + * last notifier function called. + */ + static int __kprobes notifier_call_chain(struct notifier_block **nl, - unsigned long val, void *v) + unsigned long val, void *v, + int nr_to_call, int *nr_calls) { int ret = NOTIFY_DONE; struct notifier_block *nb, *next_nb; nb = rcu_dereference(*nl); - while (nb) { + + while (nb && nr_to_call) { next_nb = rcu_dereference(nb->next); ret = nb->notifier_call(nb, val, v); + + if (nr_calls) + (*nr_calls)++; + if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) break; nb = next_nb; + nr_to_call--; } return ret; } @@ -205,10 +225,12 @@ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); /** - * atomic_notifier_call_chain - Call functions in an atomic notifier chain + * __atomic_notifier_call_chain - Call functions in an atomic notifier chain * @nh: Pointer to head of the atomic notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function + * @nr_to_call: See the comment for notifier_call_chain. + * @nr_calls: See the comment for notifier_call_chain. * * Calls each function in a notifier chain in turn. The functions * run in an atomic context, so they must not block. @@ -222,19 +244,27 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); * of the last notifier function called. */ -int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, - unsigned long val, void *v) +int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) { int ret; rcu_read_lock(); - ret = notifier_call_chain(&nh->head, val, v); + ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); rcu_read_unlock(); return ret; } -EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); +EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); + +int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v) +{ + return __atomic_notifier_call_chain(nh, val, v, -1, NULL); +} +EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); /* * Blocking notifier chain routines. All access to the chain is * synchronized by an rwsem. 
@@ -304,10 +334,12 @@ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); /** - * blocking_notifier_call_chain - Call functions in a blocking notifier chain + * __blocking_notifier_call_chain - Call functions in a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function + * @nr_to_call: See comment for notifier_call_chain. + * @nr_calls: See comment for notifier_call_chain. * * Calls each function in a notifier chain in turn. The functions * run in a process context, so they are allowed to block. @@ -320,8 +352,9 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); * of the last notifier function called. */ -int blocking_notifier_call_chain(struct blocking_notifier_head *nh, - unsigned long val, void *v) +int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) { int ret = NOTIFY_DONE; @@ -332,12 +365,19 @@ int blocking_notifier_call_chain(struct blocking_notifier_head *nh, */ if (rcu_dereference(nh->head)) { down_read(&nh->rwsem); - ret = notifier_call_chain(&nh->head, val, v); + ret = notifier_call_chain(&nh->head, val, v, nr_to_call, + nr_calls); up_read(&nh->rwsem); } return ret; } +EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain); +int blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v) +{ + return __blocking_notifier_call_chain(nh, val, v, -1, NULL); +} EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); /* @@ -383,10 +423,12 @@ int raw_notifier_chain_unregister(struct raw_notifier_head *nh, EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); /** - * raw_notifier_call_chain - Call functions in a raw notifier chain + * __raw_notifier_call_chain - Call functions in a raw notifier chain * @nh: Pointer to head of the raw notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function + * @nr_to_call: See comment for notifier_call_chain. + * @nr_calls: See comment for notifier_call_chain * * Calls each function in a notifier chain in turn. The functions * run in an undefined context. @@ -400,10 +442,19 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); * of the last notifier function called. */ +int __raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) +{ + return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); +} + +EXPORT_SYMBOL_GPL(__raw_notifier_call_chain); + int raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v) { - return notifier_call_chain(&nh->head, val, v); + return __raw_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(raw_notifier_call_chain); @@ -478,10 +529,12 @@ int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); /** - * srcu_notifier_call_chain - Call functions in an SRCU notifier chain + * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain * @nh: Pointer to head of the SRCU notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function + * @nr_to_call: See comment for notifier_call_chain. + * @nr_calls: See comment for notifier_call_chain * * Calls each function in a notifier chain in turn. 
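As a sketch of the intended usage (the mutex and callback names below are hypothetical; the pattern mirrors what [PATCH 4/4] does with workqueue_mutex), a cpu-hotplug-aware subsystem would take its own mutex on CPU_LOCK_ACQUIRE and drop it on CPU_LOCK_RELEASE, so that all other hotplug events are handled under that lock:

#include <linux/notifier.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_hotcpu_mutex);      /* hypothetical per-subsystem lock */

static int example_cpu_callback(struct notifier_block *nb,
                                unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_LOCK_ACQUIRE:          /* sent before any pre-hotplug event */
                mutex_lock(&example_hotcpu_mutex);
                break;
        case CPU_LOCK_RELEASE:          /* sent after all post-hotplug events */
                mutex_unlock(&example_hotcpu_mutex);
                break;
        default:
                /* CPU_UP_PREPARE, CPU_DEAD, ... all run under the mutex. */
                break;
        }
        return NOTIFY_OK;
}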
The functions * run in a process context, so they are allowed to block. @@ -494,18 +547,25 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); * of the last notifier function called. */ -int srcu_notifier_call_chain(struct srcu_notifier_head *nh, - unsigned long val, void *v) +int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) { int ret; int idx; idx = srcu_read_lock(&nh->srcu); - ret = notifier_call_chain(&nh->head, val, v); + ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); srcu_read_unlock(&nh->srcu, idx); return ret; } +EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain); +int srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v) +{ + return __srcu_notifier_call_chain(nh, val, v, -1, NULL); +} EXPORT_SYMBOL_GPL(srcu_notifier_call_chain); /** -- cgit v1.2.3 From baaca49f415b25fdbe2a8f3c22b39929e450fbfd Mon Sep 17 00:00:00 2001 From: Gautham R Shenoy Date: Wed, 9 May 2007 02:34:03 -0700 Subject: Define and use new events,CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE This is an attempt to provide an alternate mechanism for postponing a hotplug event instead of using a global mechanism like lock_cpu_hotplug. The proposal is to add two new events namely CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE. The notification for these two events would be sent out before and after a cpu_hotplug event respectively. During the CPU_LOCK_ACQUIRE event, a cpu-hotplug-aware subsystem is supposed to acquire any per-subsystem hotcpu mutex ( Eg. workqueue_mutex in kernel/workqueue.c ). During the CPU_LOCK_RELEASE release event the cpu-hotplug-aware subsystem is supposed to release the per-subsystem hotcpu mutex. The reasons for defining new events as opposed to reusing the existing events like CPU_UP_PREPARE/CPU_UP_FAILED/CPU_ONLINE for locking/unlocking of per-subsystem hotcpu mutexes are as follow: - CPU_LOCK_ACQUIRE: All hotcpu mutexes are taken before subsystems start handling pre-hotplug events like CPU_UP_PREPARE/CPU_DOWN_PREPARE etc, thus ensuring a clean handling of these events. - CPU_LOCK_RELEASE: The hotcpu mutexes will be released only after all subsystems have handled post-hotplug events like CPU_DOWN_FAILED, CPU_DEAD,CPU_ONLINE etc thereby ensuring that there are no subsequent clashes amongst the interdependent subsystems after a cpu hotplugs. This patch also uses __raw_notifier_call chain in _cpu_up to take care of the dependency between the two consequetive calls to raw_notifier_call_chain. 
[akpm@linux-foundation.org: fix a bug] Signed-off-by: Gautham R Shenoy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/notifier.h | 2 ++ kernel/cpu.c | 19 ++++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/notifier.h b/include/linux/notifier.h index e34221bf8946..1903e5490c04 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -194,6 +194,8 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ +#define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */ +#define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */ #endif /* __KERNEL__ */ #endif /* _LINUX_NOTIFIER_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index 36e70845cfc3..48810498b355 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -132,12 +132,15 @@ static int _cpu_down(unsigned int cpu) if (!cpu_online(cpu)) return -EINVAL; + raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, + (void *)(long)cpu); err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, (void *)(long)cpu); if (err == NOTIFY_BAD) { printk("%s: attempt to take down CPU %u failed\n", __FUNCTION__, cpu); - return -EINVAL; + err = -EINVAL; + goto out_release; } /* Ensure that we are not runnable on dying cpu */ @@ -185,6 +188,9 @@ out_thread: err = kthread_stop(p); out_allowed: set_cpus_allowed(current, old_allowed); +out_release: + raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, + (void *)(long)cpu); return err; } @@ -206,13 +212,15 @@ int cpu_down(unsigned int cpu) /* Requires cpu_add_remove_lock to be held */ static int __cpuinit _cpu_up(unsigned int cpu) { - int ret; + int ret, nr_calls = 0; void *hcpu = (void *)(long)cpu; if (cpu_online(cpu) || !cpu_present(cpu)) return -EINVAL; - ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); + raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu); + ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu, + -1, &nr_calls); if (ret == NOTIFY_BAD) { printk("%s: attempt to bring up CPU %u failed\n", __FUNCTION__, cpu); @@ -233,8 +241,9 @@ static int __cpuinit _cpu_up(unsigned int cpu) out_notify: if (ret != 0) - raw_notifier_call_chain(&cpu_chain, - CPU_UP_CANCELED, hcpu); + __raw_notifier_call_chain(&cpu_chain, + CPU_UP_CANCELED, hcpu, nr_calls, NULL); + raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu); return ret; } -- cgit v1.2.3 From 7097a87afe937a5879528d52880c2d95f089e96c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 9 May 2007 02:34:10 -0700 Subject: workqueue: kill run_scheduled_work() Because it has no callers. Actually, I think the whole idea of run_scheduled_work() was not right, not good to mix "unqueue this work and execute its ->func()" in one function. 
Signed-off-by: Oleg Nesterov Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/workqueue.h | 1 - kernel/workqueue.c | 73 ----------------------------------------------- 2 files changed, 74 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 26a70992dec8..2a58f16e1961 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -182,7 +182,6 @@ extern void flush_work(struct workqueue_struct *wq, struct work_struct *work); extern void flush_work_keventd(struct work_struct *work); extern int FASTCALL(schedule_work(struct work_struct *work)); -extern int FASTCALL(run_scheduled_work(struct work_struct *work)); extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a981add58fb9..ea422254f8bf 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -98,79 +98,6 @@ static inline void *get_wq_data(struct work_struct *work) return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); } -static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) -{ - int ret = 0; - unsigned long flags; - - spin_lock_irqsave(&cwq->lock, flags); - /* - * We need to re-validate the work info after we've gotten - * the cpu_workqueue lock. We can run the work now iff: - * - * - the wq_data still matches the cpu_workqueue_struct - * - AND the work is still marked pending - * - AND the work is still on a list (which will be this - * workqueue_struct list) - * - * All these conditions are important, because we - * need to protect against the work being run right - * now on another CPU (all but the last one might be - * true if it's currently running and has not been - * released yet, for example). - */ - if (get_wq_data(work) == cwq - && work_pending(work) - && !list_empty(&work->entry)) { - work_func_t f = work->func; - cwq->current_work = work; - list_del_init(&work->entry); - spin_unlock_irqrestore(&cwq->lock, flags); - - if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work))) - work_release(work); - f(work); - - spin_lock_irqsave(&cwq->lock, flags); - cwq->current_work = NULL; - ret = 1; - } - spin_unlock_irqrestore(&cwq->lock, flags); - return ret; -} - -/** - * run_scheduled_work - run scheduled work synchronously - * @work: work to run - * - * This checks if the work was pending, and runs it - * synchronously if so. It returns a boolean to indicate - * whether it had any scheduled work to run or not. - * - * NOTE! This _only_ works for normal work_structs. You - * CANNOT use this for delayed work, because the wq data - * for delayed work will not point properly to the per- - * CPU workqueue struct, but will change! - */ -int fastcall run_scheduled_work(struct work_struct *work) -{ - for (;;) { - struct cpu_workqueue_struct *cwq; - - if (!work_pending(work)) - return 0; - if (list_empty(&work->entry)) - return 0; - /* NOTE! This depends intimately on __queue_work! 
*/ - cwq = get_wq_data(work); - if (!cwq) - return 0; - if (__run_work(cwq, work)) - return 1; - } -} -EXPORT_SYMBOL(run_scheduled_work); - static void insert_work(struct cpu_workqueue_struct *cwq, struct work_struct *work, int tail) { -- cgit v1.2.3 From 1634c48f8b85dcb05101f1eb2eab9af40b5976da Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 9 May 2007 02:34:18 -0700 Subject: make cancel_rearming_delayed_work() work on any workqueue, not just keventd_wq cancel_rearming_delayed_workqueue(wq, dwork) doesn't need the first parameter. We don't hang on un-queued dwork any longer, and work->data doesn't change its type. This means we can always figure out "wq" from dwork when it is needed. Remove this parameter, and rename the function to cancel_rearming_delayed_work(). Re-create an inline "obsolete" cancel_rearming_delayed_workqueue(wq) which just calls cancel_rearming_delayed_work(). Signed-off-by: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/workqueue.h | 13 ++++++++++--- kernel/workqueue.c | 27 +++++++++------------------ 2 files changed, 19 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 2a58f16e1961..27110c04f21e 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -191,9 +191,6 @@ extern int current_is_keventd(void); extern int keventd_up(void); extern void init_workqueues(void); -void cancel_rearming_delayed_work(struct delayed_work *work); -void cancel_rearming_delayed_workqueue(struct workqueue_struct *, - struct delayed_work *); int execute_in_process_context(work_func_t fn, struct execute_work *); /* @@ -212,4 +209,14 @@ static inline int cancel_delayed_work(struct delayed_work *work) return ret; } +extern void cancel_rearming_delayed_work(struct delayed_work *work); + +/* Obsolete. use cancel_rearming_delayed_work() */ +static inline +void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, + struct delayed_work *work) +{ + cancel_rearming_delayed_work(work); +} + #endif diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 985902e2e071..41eaffd125ca 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -555,32 +555,23 @@ void flush_work_keventd(struct work_struct *work) EXPORT_SYMBOL(flush_work_keventd); /** - * cancel_rearming_delayed_workqueue - kill off a delayed work whose handler rearms the delayed work. - * @wq: the controlling workqueue structure + * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work. * @dwork: the delayed work struct * * Note that the work callback function may still be running on return from * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it. */ -void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, - struct delayed_work *dwork) +void cancel_rearming_delayed_work(struct delayed_work *dwork) { - /* Was it ever queued ? */ - if (!get_wq_data(&dwork->work)) - return; + struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work); - while (!cancel_delayed_work(dwork)) - flush_workqueue(wq); -} -EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); + /* Was it ever queued ? */ + if (cwq != NULL) { + struct workqueue_struct *wq = cwq->wq; -/** - * cancel_rearming_delayed_work - kill off a delayed keventd work whose handler rearms the delayed work. 
- * @dwork: the delayed work struct - */ -void cancel_rearming_delayed_work(struct delayed_work *dwork) -{ - cancel_rearming_delayed_workqueue(keventd_wq, dwork); + while (!cancel_delayed_work(dwork)) + flush_workqueue(wq); + } } EXPORT_SYMBOL(cancel_rearming_delayed_work); -- cgit v1.2.3 From 23b2e5991afde5af91a1a661d7f47ee56120759e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 9 May 2007 02:34:19 -0700 Subject: workqueue: kill NOAUTOREL works We don't have any users, and it is not so trivial to use NOAUTOREL works correctly. It is better to simplify API. Delete NOAUTOREL support and rename work_release to work_clear_pending to avoid a confusion. Signed-off-by: Oleg Nesterov Acked-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/workqueue.h | 64 +++++++---------------------------------------- kernel/workqueue.c | 5 ++-- 2 files changed, 11 insertions(+), 58 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 27110c04f21e..e1581dce5890 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -24,15 +24,13 @@ typedef void (*work_func_t)(struct work_struct *work); struct work_struct { atomic_long_t data; #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ -#define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */ #define WORK_STRUCT_FLAG_MASK (3UL) #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) struct list_head entry; work_func_t func; }; -#define WORK_DATA_INIT(autorelease) \ - ATOMIC_LONG_INIT((autorelease) << WORK_STRUCT_NOAUTOREL) +#define WORK_DATA_INIT() ATOMIC_LONG_INIT(0) struct delayed_work { struct work_struct work; @@ -44,14 +42,8 @@ struct execute_work { }; #define __WORK_INITIALIZER(n, f) { \ - .data = WORK_DATA_INIT(0), \ - .entry = { &(n).entry, &(n).entry }, \ - .func = (f), \ - } - -#define __WORK_INITIALIZER_NAR(n, f) { \ - .data = WORK_DATA_INIT(1), \ - .entry = { &(n).entry, &(n).entry }, \ + .data = WORK_DATA_INIT(), \ + .entry = { &(n).entry, &(n).entry }, \ .func = (f), \ } @@ -60,23 +52,12 @@ struct execute_work { .timer = TIMER_INITIALIZER(NULL, 0, 0), \ } -#define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \ - .work = __WORK_INITIALIZER_NAR((n).work, (f)), \ - .timer = TIMER_INITIALIZER(NULL, 0, 0), \ - } - #define DECLARE_WORK(n, f) \ struct work_struct n = __WORK_INITIALIZER(n, f) -#define DECLARE_WORK_NAR(n, f) \ - struct work_struct n = __WORK_INITIALIZER_NAR(n, f) - #define DECLARE_DELAYED_WORK(n, f) \ struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) -#define DECLARE_DELAYED_WORK_NAR(n, f) \ - struct dwork_struct n = __DELAYED_WORK_INITIALIZER_NAR(n, f) - /* * initialize a work item's function pointer */ @@ -95,16 +76,9 @@ struct execute_work { * assignment of the work data initializer allows the compiler * to generate better code. 
*/ -#define INIT_WORK(_work, _func) \ - do { \ - (_work)->data = (atomic_long_t) WORK_DATA_INIT(0); \ - INIT_LIST_HEAD(&(_work)->entry); \ - PREPARE_WORK((_work), (_func)); \ - } while (0) - -#define INIT_WORK_NAR(_work, _func) \ +#define INIT_WORK(_work, _func) \ do { \ - (_work)->data = (atomic_long_t) WORK_DATA_INIT(1); \ + (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ INIT_LIST_HEAD(&(_work)->entry); \ PREPARE_WORK((_work), (_func)); \ } while (0) @@ -115,12 +89,6 @@ struct execute_work { init_timer(&(_work)->timer); \ } while (0) -#define INIT_DELAYED_WORK_NAR(_work, _func) \ - do { \ - INIT_WORK_NAR(&(_work)->work, (_func)); \ - init_timer(&(_work)->timer); \ - } while (0) - #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ do { \ INIT_WORK(&(_work)->work, (_func)); \ @@ -143,24 +111,10 @@ struct execute_work { work_pending(&(w)->work) /** - * work_release - Release a work item under execution - * @work: The work item to release - * - * This is used to release a work item that has been initialised with automatic - * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work - * function the opportunity to grab auxiliary data from the container of the - * work_struct before clearing the pending bit as the work_struct may be - * subject to deallocation the moment the pending bit is cleared. - * - * In such a case, this should be called in the work function after it has - * fetched any data it may require from the containter of the work_struct. - * After this function has been called, the work_struct may be scheduled for - * further execution or it may be deallocated unless other precautions are - * taken. - * - * This should also be used to release a delayed work item. + * work_clear_pending - for internal use only, mark a work item as not pending + * @work: The work item in question */ -#define work_release(work) \ +#define work_clear_pending(work) \ clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) @@ -205,7 +159,7 @@ static inline int cancel_delayed_work(struct delayed_work *work) ret = del_timer(&work->timer); if (ret) - work_release(&work->work); + work_clear_pending(&work->work); return ret; } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 41eaffd125ca..0611de815a8f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -246,8 +246,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) spin_unlock_irq(&cwq->lock); BUG_ON(get_wq_data(work) != cwq); - if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work))) - work_release(work); + work_clear_pending(work); f(work); if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { @@ -453,7 +452,7 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work) */ spin_lock_irq(&cwq->lock); list_del_init(&work->entry); - work_release(work); + work_clear_pending(work); spin_unlock_irq(&cwq->lock); for_each_cpu_mask(cpu, *cpu_map) -- cgit v1.2.3 From 28e53bddf814485699a4142bc056fd37d4e11dd4 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 9 May 2007 02:34:22 -0700 Subject: unify flush_work/flush_work_keventd and rename it to cancel_work_sync flush_work(wq, work) doesn't need the first parameter, we can use cwq->wq (this was possible from the very beginnig, I missed this). So we can unify flush_work_keventd and flush_work. Also, rename flush_work() to cancel_work_sync() and fix all callers. Perhaps this is not the best name, but "flush_work" is really bad. 
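A hypothetical teardown sketch (example_dev and its fields are not from this patch) of how the renamed helper is meant to be used: once the caller has arranged that the work cannot be requeued, cancel_work_sync() both cancels a pending work item and waits for a running callback to finish, with no workqueue pointer needed.

#include <linux/workqueue.h>

struct example_dev {
        struct work_struct reset_task;
        int shutting_down;
};

static void example_remove(struct example_dev *dev)
{
        dev->shutting_down = 1;                 /* arrange for no re-queueing */
        cancel_work_sync(&dev->reset_task);     /* cancel and wait for callback */
}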
(akpm: this is why the earlier patches bypassed maintainers) Signed-off-by: Oleg Nesterov Cc: Jeff Garzik Cc: "David S. Miller" Cc: Jens Axboe Cc: Tejun Heo Cc: Auke Kok , Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- block/ll_rw_blk.c | 2 +- drivers/ata/libata-core.c | 8 ++++---- drivers/net/e1000/e1000_main.c | 2 +- drivers/net/phy/phy.c | 4 ++-- drivers/net/tg3.c | 2 +- fs/aio.c | 4 ++-- include/linux/workqueue.h | 21 ++++++++++++--------- kernel/workqueue.c | 36 +++++++++++++++++------------------- net/ipv4/ipvs/ip_vs_ctl.c | 2 +- 9 files changed, 41 insertions(+), 40 deletions(-) (limited to 'include/linux') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index c059767c552c..df506571ed60 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3633,7 +3633,7 @@ EXPORT_SYMBOL(kblockd_schedule_work); void kblockd_flush_work(struct work_struct *work) { - flush_work(kblockd_workqueue, work); + cancel_work_sync(work); } EXPORT_SYMBOL(kblockd_flush_work); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b74e56caba6f..fef87dd70d17 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1316,7 +1316,7 @@ void ata_port_flush_task(struct ata_port *ap) spin_unlock_irqrestore(ap->lock, flags); DPRINTK("flush #1\n"); - flush_work(ata_wq, &ap->port_task.work); /* akpm: seems unneeded */ + cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */ /* * At this point, if a task is running, it's guaranteed to see @@ -1327,7 +1327,7 @@ void ata_port_flush_task(struct ata_port *ap) if (ata_msg_ctl(ap)) ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__); - flush_work(ata_wq, &ap->port_task.work); + cancel_work_sync(&ap->port_task.work); } spin_lock_irqsave(ap->lock, flags); @@ -6475,9 +6475,9 @@ void ata_port_detach(struct ata_port *ap) /* Flush hotplug task. The sequence is similar to * ata_port_flush_task(). */ - flush_work(ata_aux_wq, &ap->hotplug_task.work); /* akpm: why? */ + cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */ cancel_delayed_work(&ap->hotplug_task); - flush_work(ata_aux_wq, &ap->hotplug_task.work); + cancel_work_sync(&ap->hotplug_task.work); skip_eh: /* remove the associated SCSI host */ diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 397e25bdbfec..637ae8f68791 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -1214,7 +1214,7 @@ e1000_remove(struct pci_dev *pdev) int i; #endif - flush_work_keventd(&adapter->reset_task); + cancel_work_sync(&adapter->reset_task); e1000_release_manageability(adapter); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index f445c465b14e..f71dab347667 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -663,9 +663,9 @@ int phy_stop_interrupts(struct phy_device *phydev) /* * Finish any pending work; we might have been scheduled to be called - * from keventd ourselves, but flush_work_keventd() handles that. + * from keventd ourselves, but cancel_work_sync() handles that. 
*/ - flush_work_keventd(&phydev->phy_queue); + cancel_work_sync(&phydev->phy_queue); free_irq(phydev->irq, phydev); diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 0c0f9c817321..923b9c725cc3 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -7386,7 +7386,7 @@ static int tg3_close(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); - flush_work_keventd(&tp->reset_task); + cancel_work_sync(&tp->reset_task); netif_stop_queue(dev); diff --git a/fs/aio.c b/fs/aio.c index d18690bb03e9..ac1c1587aa02 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -348,7 +348,7 @@ void fastcall exit_aio(struct mm_struct *mm) /* * Ensure we don't leave the ctx on the aio_wq */ - flush_work(aio_wq, &ctx->wq.work); + cancel_work_sync(&ctx->wq.work); if (1 != atomic_read(&ctx->users)) printk(KERN_DEBUG @@ -371,7 +371,7 @@ void fastcall __put_ioctx(struct kioctx *ctx) BUG_ON(ctx->reqs_active); cancel_delayed_work(&ctx->wq); - flush_work(aio_wq, &ctx->wq.work); + cancel_work_sync(&ctx->wq.work); aio_free_ring(ctx); mmdrop(ctx->mm); ctx->mm = NULL; diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index e1581dce5890..d555f31c0746 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -128,30 +128,33 @@ extern struct workqueue_struct *__create_workqueue(const char *name, extern void destroy_workqueue(struct workqueue_struct *wq); extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); -extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay)); +extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, + struct delayed_work *work, unsigned long delay)); extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, - struct delayed_work *work, unsigned long delay); + struct delayed_work *work, unsigned long delay); + extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); -extern void flush_work(struct workqueue_struct *wq, struct work_struct *work); -extern void flush_work_keventd(struct work_struct *work); +extern void flush_scheduled_work(void); extern int FASTCALL(schedule_work(struct work_struct *work)); -extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); - -extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); +extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, + unsigned long delay)); +extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, + unsigned long delay); extern int schedule_on_each_cpu(work_func_t func); -extern void flush_scheduled_work(void); extern int current_is_keventd(void); extern int keventd_up(void); extern void init_workqueues(void); int execute_in_process_context(work_func_t fn, struct execute_work *); +extern void cancel_work_sync(struct work_struct *work); + /* * Kill off a pending schedule_delayed_work(). Note that the work callback * function may still be running on return from cancel_delayed_work(), unless * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or - * flush_work() or cancel_work_sync() to wait on it. + * cancel_work_sync() to wait on it. 
*/ static inline int cancel_delayed_work(struct delayed_work *work) { diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 63885abf1ba0..c9ab4293904f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -413,23 +413,23 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq, } /** - * flush_work - block until a work_struct's callback has terminated - * @wq: the workqueue on which the work is queued + * cancel_work_sync - block until a work_struct's callback has terminated * @work: the work which is to be flushed * - * flush_work() will attempt to cancel the work if it is queued. If the work's - * callback appears to be running, flush_work() will block until it has - * completed. + * cancel_work_sync() will attempt to cancel the work if it is queued. If the + * work's callback appears to be running, cancel_work_sync() will block until + * it has completed. * - * flush_work() is designed to be used when the caller is tearing down data - * structures which the callback function operates upon. It is expected that, - * prior to calling flush_work(), the caller has arranged for the work to not - * be requeued. + * cancel_work_sync() is designed to be used when the caller is tearing down + * data structures which the callback function operates upon. It is expected + * that, prior to calling cancel_work_sync(), the caller has arranged for the + * work to not be requeued. */ -void flush_work(struct workqueue_struct *wq, struct work_struct *work) +void cancel_work_sync(struct work_struct *work) { - const cpumask_t *cpu_map = wq_cpu_map(wq); struct cpu_workqueue_struct *cwq; + struct workqueue_struct *wq; + const cpumask_t *cpu_map; int cpu; might_sleep(); @@ -448,10 +448,13 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work) work_clear_pending(work); spin_unlock_irq(&cwq->lock); + wq = cwq->wq; + cpu_map = wq_cpu_map(wq); + for_each_cpu_mask(cpu, *cpu_map) wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work); } -EXPORT_SYMBOL_GPL(flush_work); +EXPORT_SYMBOL_GPL(cancel_work_sync); static struct workqueue_struct *keventd_wq; @@ -540,18 +543,13 @@ void flush_scheduled_work(void) } EXPORT_SYMBOL(flush_scheduled_work); -void flush_work_keventd(struct work_struct *work) -{ - flush_work(keventd_wq, work); -} -EXPORT_SYMBOL(flush_work_keventd); - /** * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work. * @dwork: the delayed work struct * * Note that the work callback function may still be running on return from - * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it. + * cancel_delayed_work(). Run flush_workqueue() or cancel_work_sync() to wait + * on it. */ void cancel_rearming_delayed_work(struct delayed_work *dwork) { diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index 342e836677a1..68fe1d4d0210 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c @@ -2387,7 +2387,7 @@ void ip_vs_control_cleanup(void) EnterFunction(2); ip_vs_trash_cleanup(); cancel_rearming_delayed_work(&defense_work); - flush_work_keventd(&defense_work.work); + cancel_work_sync(&defense_work.work); ip_vs_kill_estimator(&ip_vs_stats); unregister_sysctl_table(sysctl_header); proc_net_remove("ip_vs_stats"); -- cgit v1.2.3 From 73c279927f89561ecb45b2dfdf9314bafcfd9f67 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 9 May 2007 02:34:32 -0700 Subject: kthread: don't depend on work queues Currently there is a circular reference between work queue initialization and kthread initialization. 
This prevents the kthread infrastructure from initializing until after work queues have been initialized. We want the properties of tasks created with kthread_create to be as close as possible to the init_task and to not be contaminated by user processes. The later we start our kthreadd that creates these tasks the harder it is to avoid contamination from user processes and the more of a mess we have to clean up because the defaults have changed on us. So this patch modifies the kthread support to not use work queues but to instead use a simple list of structures, and to have kthreadd start from init_task immediately after our kernel thread that execs /sbin/init. By being a true child of init_task we only have to change those process settings that we want to have different from init_task, such as our process name, the cpus that are allowed, blocking all signals and setting SIGCHLD to SIG_IGN so that all of our children are reaped automatically. By being a true child of init_task we also naturally get our ppid set to 0 and do not wind up as a child of PID == 1. Ensuring that tasks generated by kthread_create will not slow down the functioning of the wait family of functions. [akpm@linux-foundation.org: use interruptible sleeps] Signed-off-by: Eric W. Biederman Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kthread.h | 3 ++ init/main.c | 5 ++ kernel/kthread.c | 124 ++++++++++++++++++++++++++---------------------- 3 files changed, 76 insertions(+), 56 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 1c65e7a9f186..00dd957e245b 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -30,4 +30,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu); int kthread_stop(struct task_struct *k); int kthread_should_stop(void); +int kthreadd(void *unused); +extern struct task_struct *kthreadd_task; + #endif /* _LINUX_KTHREAD_H */ diff --git a/init/main.c b/init/main.c index c1537e0ddceb..e8d080cab443 100644 --- a/init/main.c +++ b/init/main.c @@ -54,6 +54,7 @@ #include #include #include +#include #include #include @@ -425,8 +426,12 @@ static void __init setup_command_line(char *command_line) static void noinline rest_init(void) __releases(kernel_lock) { + int pid; + kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); numa_default_policy(); + pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); + kthreadd_task = find_task_by_pid(pid); unlock_kernel(); /* diff --git a/kernel/kthread.c b/kernel/kthread.c index 87c50ccd1d4e..0eb0070a3c57 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -1,7 +1,7 @@ /* Kernel thread helper functions. * Copyright (C) 2004 IBM Corporation, Rusty Russell. * - * Creation is done via keventd, so that we get a clean environment + * Creation is done via kthreadd, so that we get a clean environment * even if we're invoked from userspace (think modprobe, hotplug cpu, * etc.). */ @@ -15,24 +15,22 @@ #include #include -/* - * We dont want to execute off keventd since it might - * hold a semaphore our callers hold too: - */ -static struct workqueue_struct *helper_wq; +static DEFINE_SPINLOCK(kthread_create_lock); +static LIST_HEAD(kthread_create_list); +struct task_struct *kthreadd_task; struct kthread_create_info { - /* Information passed to kthread() from keventd. */ + /* Information passed to kthread() from kthreadd. 
*/ int (*threadfn)(void *data); void *data; struct completion started; - /* Result passed back to kthread_create() from keventd. */ + /* Result passed back to kthread_create() from kthreadd. */ struct task_struct *result; struct completion done; - struct work_struct work; + struct list_head list; }; struct kthread_stop_info @@ -60,42 +58,17 @@ int kthread_should_stop(void) } EXPORT_SYMBOL(kthread_should_stop); -static void kthread_exit_files(void) -{ - struct fs_struct *fs; - struct task_struct *tsk = current; - - exit_fs(tsk); /* current->fs->count--; */ - fs = init_task.fs; - tsk->fs = fs; - atomic_inc(&fs->count); - exit_files(tsk); - current->files = init_task.files; - atomic_inc(&tsk->files->count); -} - static int kthread(void *_create) { struct kthread_create_info *create = _create; int (*threadfn)(void *data); void *data; - sigset_t blocked; int ret = -EINTR; - kthread_exit_files(); - - /* Copy data: it's on keventd's stack */ + /* Copy data: it's on kthread's stack */ threadfn = create->threadfn; data = create->data; - /* Block and flush all signals (in case we're not from keventd). */ - sigfillset(&blocked); - sigprocmask(SIG_BLOCK, &blocked, NULL); - flush_signals(current); - - /* By default we can run anywhere, unlike keventd. */ - set_cpus_allowed(current, CPU_MASK_ALL); - /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_INTERRUPTIBLE); complete(&create->started); @@ -112,11 +85,8 @@ static int kthread(void *_create) return 0; } -/* We are keventd: create a thread. */ -static void keventd_create_kthread(struct work_struct *work) +static void create_kthread(struct kthread_create_info *create) { - struct kthread_create_info *create = - container_of(work, struct kthread_create_info, work); int pid; /* We want our own signal handler (we take no signals by default). */ @@ -162,17 +132,14 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), create.data = data; init_completion(&create.started); init_completion(&create.done); - INIT_WORK(&create.work, keventd_create_kthread); - - /* - * The workqueue needs to start up first: - */ - if (!helper_wq) - create.work.func(&create.work); - else { - queue_work(helper_wq, &create.work); - wait_for_completion(&create.done); - } + + spin_lock(&kthread_create_lock); + list_add_tail(&create.list, &kthread_create_list); + wake_up_process(kthreadd_task); + spin_unlock(&kthread_create_lock); + + wait_for_completion(&create.done); + if (!IS_ERR(create.result)) { va_list args; va_start(args, namefmt); @@ -180,7 +147,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), namefmt, args); va_end(args); } - return create.result; } EXPORT_SYMBOL(kthread_create); @@ -245,12 +211,58 @@ int kthread_stop(struct task_struct *k) } EXPORT_SYMBOL(kthread_stop); -static __init int helper_init(void) + +static __init void kthreadd_setup(void) { - helper_wq = create_singlethread_workqueue("kthread"); - BUG_ON(!helper_wq); + struct task_struct *tsk = current; + struct k_sigaction sa; + sigset_t blocked; - return 0; + set_task_comm(tsk, "kthreadd"); + + /* Block and flush all signals */ + sigfillset(&blocked); + sigprocmask(SIG_BLOCK, &blocked, NULL); + flush_signals(tsk); + + /* SIG_IGN makes children autoreap: see do_notify_parent(). 
*/ + sa.sa.sa_handler = SIG_IGN; + sa.sa.sa_flags = 0; + siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD)); + do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0); + + set_user_nice(current, -5); + set_cpus_allowed(current, CPU_MASK_ALL); } -core_initcall(helper_init); +int kthreadd(void *unused) +{ + /* Setup a clean context for our children to inherit. */ + kthreadd_setup(); + + current->flags |= PF_NOFREEZE; + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (list_empty(&kthread_create_list)) + schedule(); + __set_current_state(TASK_RUNNING); + + spin_lock(&kthread_create_lock); + while (!list_empty(&kthread_create_list)) { + struct kthread_create_info *create; + + create = list_entry(kthread_create_list.next, + struct kthread_create_info, list); + list_del_init(&create->list); + spin_unlock(&kthread_create_lock); + + create_kthread(create); + + spin_lock(&kthread_create_lock); + } + spin_unlock(&kthread_create_lock); + } + + return 0; +} -- cgit v1.2.3 From 10ab825bdef8df510f99c703a5a2d9b13a4e31a5 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 9 May 2007 02:34:37 -0700 Subject: change kernel threads to ignore signals instead of blocking them Currently kernel threads use sigprocmask(SIG_BLOCK) to protect against signals. This doesn't prevent the signal delivery, this only blocks signal_wake_up(). Every "killall -33 kthreadd" means a "struct siginfo" leak. Change kthreadd_setup() to set all handlers to SIG_IGN instead of blocking them (make a new helper ignore_signals() for that). If the kernel thread needs some signal, it should use allow_signal() anyway, and in that case it should not use CLONE_SIGHAND. Note that we can't change daemonize() (should die!) in the same way, because it can be used along with CLONE_SIGHAND. This means that allow_signal() still should unblock the signal to work correctly with daemonize()ed threads. However, disallow_signal() doesn't block the signal any longer but ignores it. NOTE: with or without this patch the kernel threads are not protected from handle_stop_signal(), this seems harmless, but not good. Signed-off-by: Oleg Nesterov Acked-by: "Eric W. 
Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 1 + kernel/exit.c | 2 +- kernel/kthread.c | 17 +++-------------- kernel/signal.c | 10 ++++++++++ 4 files changed, 15 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 3d95c480f58d..28000b1658f9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1317,6 +1317,7 @@ extern int in_egroup_p(gid_t); extern void proc_caches_init(void); extern void flush_signals(struct task_struct *); +extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); diff --git a/kernel/exit.c b/kernel/exit.c index bc982cd72743..b0c6f0c3a2df 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -347,7 +347,7 @@ int disallow_signal(int sig) return -EINVAL; spin_lock_irq(¤t->sighand->siglock); - sigaddset(¤t->blocked, sig); + current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN; recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); return 0; diff --git a/kernel/kthread.c b/kernel/kthread.c index 0eb0070a3c57..df8a8e8f6ca4 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -215,24 +215,13 @@ EXPORT_SYMBOL(kthread_stop); static __init void kthreadd_setup(void) { struct task_struct *tsk = current; - struct k_sigaction sa; - sigset_t blocked; set_task_comm(tsk, "kthreadd"); - /* Block and flush all signals */ - sigfillset(&blocked); - sigprocmask(SIG_BLOCK, &blocked, NULL); - flush_signals(tsk); + ignore_signals(tsk); - /* SIG_IGN makes children autoreap: see do_notify_parent(). */ - sa.sa.sa_handler = SIG_IGN; - sa.sa.sa_flags = 0; - siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD)); - do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0); - - set_user_nice(current, -5); - set_cpus_allowed(current, CPU_MASK_ALL); + set_user_nice(tsk, -5); + set_cpus_allowed(tsk, CPU_MASK_ALL); } int kthreadd(void *unused) diff --git a/kernel/signal.c b/kernel/signal.c index 23ae6d62fc41..2ac3a668d9dd 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -209,6 +209,16 @@ void flush_signals(struct task_struct *t) spin_unlock_irqrestore(&t->sighand->siglock, flags); } +void ignore_signals(struct task_struct *t) +{ + int i; + + for (i = 0; i < _NSIG; ++i) + t->sighand->action[i].sa.sa_handler = SIG_IGN; + + flush_signals(t); +} + /* * Flush all handlers for a task. */ -- cgit v1.2.3 From 8842c9655b2b7f0e8e6c50a773b649e5d8a57678 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Wed, 9 May 2007 02:34:46 -0700 Subject: remove nfs4_acl_add_ace() nfs4_acl_add_ace() can now be removed. Signed-off-by: Adrian Bunk Acked-by: Neil Brown Acked-by: J. 
Bruce Fields Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nfsd/nfs4acl.c | 17 ----------------- include/linux/nfs4_acl.h | 1 - 2 files changed, 18 deletions(-) (limited to 'include/linux') diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index 673a53c014a3..cc3b7badd486 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c @@ -137,7 +137,6 @@ struct ace_container { static short ace2type(struct nfs4_ace *); static void _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *, unsigned int); -void nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t); struct nfs4_acl * nfs4_acl_posix_to_nfsv4(struct posix_acl *pacl, struct posix_acl *dpacl, @@ -785,21 +784,6 @@ nfs4_acl_new(int n) return acl; } -void -nfs4_acl_add_ace(struct nfs4_acl *acl, u32 type, u32 flag, u32 access_mask, - int whotype, uid_t who) -{ - struct nfs4_ace *ace = acl->aces + acl->naces; - - ace->type = type; - ace->flag = flag; - ace->access_mask = access_mask; - ace->whotype = whotype; - ace->who = who; - - acl->naces++; -} - static struct { char *string; int stringlen; @@ -851,6 +835,5 @@ nfs4_acl_write_who(int who, char *p) } EXPORT_SYMBOL(nfs4_acl_new); -EXPORT_SYMBOL(nfs4_acl_add_ace); EXPORT_SYMBOL(nfs4_acl_get_whotype); EXPORT_SYMBOL(nfs4_acl_write_who); diff --git a/include/linux/nfs4_acl.h b/include/linux/nfs4_acl.h index 409b6e02f337..c9c05a78e9bb 100644 --- a/include/linux/nfs4_acl.h +++ b/include/linux/nfs4_acl.h @@ -44,7 +44,6 @@ #define NFS4_ACL_MAX 170 struct nfs4_acl *nfs4_acl_new(int); -void nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t); int nfs4_acl_get_whotype(char *, u32); int nfs4_acl_write_who(int who, char *p); int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group, -- cgit v1.2.3 From 7ac1bea5507218da03f6005d228789da5a831c3f Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 9 May 2007 02:34:48 -0700 Subject: knfsd: rename sk_defer_lock to sk_lock Now that sk_defer_lock protects two different things, make the name more generic. Also don't bother with disabling _bh as the lock is only ever taken from process context. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sunrpc/svcsock.h | 3 ++- net/sunrpc/svcauth_unix.c | 10 +++++----- net/sunrpc/svcsock.c | 13 +++++++------ 3 files changed, 14 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 7909687557bf..e21dd93ac4b7 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -37,7 +37,8 @@ struct svc_sock { atomic_t sk_reserved; /* space on outq that is reserved */ - spinlock_t sk_defer_lock; /* protects sk_deferred */ + spinlock_t sk_lock; /* protects sk_deferred and + * sk_info_authunix */ struct list_head sk_deferred; /* deferred requests that need to * be revisted */ struct mutex sk_mutex; /* to serialize sending data */ diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 2bd23ea2aa8b..07dcd20cbee4 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -385,7 +385,7 @@ ip_map_cached_get(struct svc_rqst *rqstp) { struct ip_map *ipm; struct svc_sock *svsk = rqstp->rq_sock; - spin_lock_bh(&svsk->sk_defer_lock); + spin_lock(&svsk->sk_lock); ipm = svsk->sk_info_authunix; if (ipm != NULL) { if (!cache_valid(&ipm->h)) { @@ -395,13 +395,13 @@ ip_map_cached_get(struct svc_rqst *rqstp) * same IP address. 
*/ svsk->sk_info_authunix = NULL; - spin_unlock_bh(&svsk->sk_defer_lock); + spin_unlock(&svsk->sk_lock); cache_put(&ipm->h, &ip_map_cache); return NULL; } cache_get(&ipm->h); } - spin_unlock_bh(&svsk->sk_defer_lock); + spin_unlock(&svsk->sk_lock); return ipm; } @@ -410,14 +410,14 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm) { struct svc_sock *svsk = rqstp->rq_sock; - spin_lock_bh(&svsk->sk_defer_lock); + spin_lock(&svsk->sk_lock); if (svsk->sk_sock->type == SOCK_STREAM && svsk->sk_info_authunix == NULL) { /* newly cached, keep the reference */ svsk->sk_info_authunix = ipm; ipm = NULL; } - spin_unlock_bh(&svsk->sk_defer_lock); + spin_unlock(&svsk->sk_lock); if (ipm) cache_put(&ipm->h, &ip_map_cache); } diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 22f61aee4824..fdb1386f5dcd 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -53,7 +53,8 @@ * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt. * when both need to be taken (rare), svc_serv->sv_lock is first. * BKL protects svc_serv->sv_nrthread. - * svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list + * svc_sock->sk_lock protects the svc_sock->sk_deferred list + * and the ->sk_info_authunix cache. * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply. * * Some flags can be set to certain values at any time @@ -1633,7 +1634,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, svsk->sk_server = serv; atomic_set(&svsk->sk_inuse, 1); svsk->sk_lastrecv = get_seconds(); - spin_lock_init(&svsk->sk_defer_lock); + spin_lock_init(&svsk->sk_lock); INIT_LIST_HEAD(&svsk->sk_deferred); INIT_LIST_HEAD(&svsk->sk_ready); mutex_init(&svsk->sk_mutex); @@ -1857,9 +1858,9 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many) dprintk("revisit queued\n"); svsk = dr->svsk; dr->svsk = NULL; - spin_lock_bh(&svsk->sk_defer_lock); + spin_lock(&svsk->sk_lock); list_add(&dr->handle.recent, &svsk->sk_deferred); - spin_unlock_bh(&svsk->sk_defer_lock); + spin_unlock(&svsk->sk_lock); set_bit(SK_DEFERRED, &svsk->sk_flags); svc_sock_enqueue(svsk); svc_sock_put(svsk); @@ -1925,7 +1926,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk) if (!test_bit(SK_DEFERRED, &svsk->sk_flags)) return NULL; - spin_lock_bh(&svsk->sk_defer_lock); + spin_lock(&svsk->sk_lock); clear_bit(SK_DEFERRED, &svsk->sk_flags); if (!list_empty(&svsk->sk_deferred)) { dr = list_entry(svsk->sk_deferred.next, @@ -1934,6 +1935,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk) list_del_init(&dr->handle.recent); set_bit(SK_DEFERRED, &svsk->sk_flags); } - spin_unlock_bh(&svsk->sk_defer_lock); + spin_unlock(&svsk->sk_lock); return dr; } -- cgit v1.2.3 From cd123012d99fde4759500fee611e724e4f3016e3 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 9 May 2007 02:34:50 -0700 Subject: RPC: add wrapper for svc_reserve to account for checksum When the kernel calls svc_reserve to downsize the expected size of an RPC reply, it fails to account for the possibility of a checksum at the end of the packet. 
If a client mounts a NFSv2/3 with sec=krb5i/p, and does I/O then you'll generally see messages similar to this in the server's ring buffer: RPC request reserved 164 but used 208 While I was never able to verify it, I suspect that this problem is also the root cause of some oopses I've seen under these conditions: https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=227726 This is probably also a problem for other sec= types and for NFSv4. The large reserved size for NFSv4 compound packets seems to generally paper over the problem, however. This patch adds a wrapper for svc_reserve that accounts for the possibility of a checksum. It also fixes up the appropriate callers of svc_reserve to call the wrapper. For now, it just uses a hardcoded value that I determined via testing. That value may need to be revised upward as things change, or we may want to eventually add a new auth_op that attempts to calculate this somehow. Unfortunately, there doesn't seem to be a good way to reliably determine the expected checksum length prior to actually calculating it, particularly with schemes like spkm3. Signed-off-by: Jeff Layton Acked-by: Neil Brown Cc: Trond Myklebust Acked-by: J. Bruce Fields Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nfsd/nfs3proc.c | 2 +- fs/nfsd/nfsproc.c | 2 +- include/linux/sunrpc/svc.h | 19 +++++++++++++++++++ net/sunrpc/svc.c | 2 +- 4 files changed, 22 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 7f5bad0393b1..eac82830bfd7 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -177,7 +177,7 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp, if (max_blocksize < resp->count) resp->count = max_blocksize; - svc_reserve(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4); + svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4); fh_copy(&resp->fh, &argp->fh); nfserr = nfsd_read(rqstp, &resp->fh, NULL, diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 5cc2eec981b8..b2c7147aa921 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -155,7 +155,7 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp, argp->count); argp->count = NFSSVC_MAXBLKSIZE_V2; } - svc_reserve(rqstp, (19<<2) + argp->count + 4); + svc_reserve_auth(rqstp, (19<<2) + argp->count + 4); resp->count = argp->count; nfserr = nfsd_read(rqstp, fh_copy(&resp->fh, &argp->fh), NULL, diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 35fa4d5aadd0..4a7ae8ab6eb8 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -396,4 +396,23 @@ char * svc_print_addr(struct svc_rqst *, char *, size_t); #define RPC_MAX_ADDRBUFLEN (63U) +/* + * When we want to reduce the size of the reserved space in the response + * buffer, we need to take into account the size of any checksum data that + * may be at the end of the packet. This is difficult to determine exactly + * for all cases without actually generating the checksum, so we just use a + * static value. 
+ */ +static inline void +svc_reserve_auth(struct svc_rqst *rqstp, int space) +{ + int added_space = 0; + + switch(rqstp->rq_authop->flavour) { + case RPC_AUTH_GSS: + added_space = RPC_MAX_AUTH_SIZE; + } + return svc_reserve(rqstp, space + added_space); +} + #endif /* SUNRPC_SVC_H */ diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index b7503c103ae8..e673ef993904 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -907,7 +907,7 @@ svc_process(struct svc_rqst *rqstp) * better idea of reply size */ if (procp->pc_xdrressize) - svc_reserve(rqstp, procp->pc_xdrressize<<2); + svc_reserve_auth(rqstp, procp->pc_xdrressize<<2); /* Call the function that processes the request. */ if (!versp->vs_dispatch) { -- cgit v1.2.3 From b8522ead3534c6cd06752b47a3bc380956191a2a Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 9 May 2007 02:34:58 -0700 Subject: aio is unlikely Stick an unlikely() around is_aio(): I assert that most IO is synchronous. Cc: Suparna Bhattacharya Cc: Ingo Molnar Cc: Benjamin LaHaise Cc: Zach Brown Cc: Ulrich Drepper Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/aio.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/aio.h b/include/linux/aio.h index a30ef13c9e62..43dc2ebfaa0e 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -226,7 +226,8 @@ int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, __put_ioctx(kioctx); \ } while (0) -#define in_aio() !is_sync_wait(current->io_wait) +#define in_aio() (unlikely(!is_sync_wait(current->io_wait))) + /* may be used for debugging */ #define warn_if_async() \ do { \ -- cgit v1.2.3 From f34c506b0385b43abd25c490335036ecbb173aed Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 9 May 2007 02:34:59 -0700 Subject: declare struct ktime Some smarty went and inflicted ktime_t as a typedef upon us, so we cannot forward declare it. Create a new `union ktime', map ktime_t onto that. Now we need to kill off this ktime_t thing. Cc: Ingo Molnar Cc: Thomas Gleixner Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ktime.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 81bb9c7a4eb3..c762954bda14 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -43,7 +43,7 @@ * plain scalar nanosecond based representation can be selected by the * config switch CONFIG_KTIME_SCALAR. */ -typedef union { +union ktime { s64 tv64; #if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR) struct { @@ -54,7 +54,9 @@ typedef union { # endif } tv; #endif -} ktime_t; +}; + +typedef union ktime ktime_t; /* Kill this */ #define KTIME_MAX ((s64)~((u64)1 << 63)) #if (BITS_PER_LONG == 64) -- cgit v1.2.3 From c19384b5b296905d4988c7c684ff540a0f9d65be Mon Sep 17 00:00:00 2001 From: Pierre Peiffer Date: Wed, 9 May 2007 02:35:02 -0700 Subject: Make futex_wait() use an hrtimer for timeout This patch modifies futex_wait() to use an hrtimer + schedule() in place of schedule_timeout(). schedule_timeout() is tick based, therefore the timeout granularity is the tick (1 ms, 4 ms or 10 ms depending on HZ). By using a high resolution timer for timeout wakeup, we can attain a much finer timeout granularity (in the microsecond range). This parallels what is already done for futex_lock_pi(). 
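For illustration, the core of the change condensed from the hunks below (variable names simplified, error handling trimmed): sys_futex() turns the user-supplied relative timespec into an absolute ktime_t, and futex_wait() then sleeps on an hrtimer sleeper instead of schedule_timeout().

	/* sys_futex(): a relative timespec becomes an absolute ktime_t */
	t = timespec_to_ktime(ts);
	if (op == FUTEX_WAIT)
		t = ktime_add(ktime_get(), t);

	/* futex_wait(): block until wakeup or hrtimer expiry */
	struct hrtimer_sleeper sl;

	hrtimer_init(&sl.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_init_sleeper(&sl, current);
	sl.timer.expires = *abs_time;
	hrtimer_start(&sl.timer, sl.timer.expires, HRTIMER_MODE_ABS);

	if (likely(sl.task))		/* the timer may already have fired */
		schedule();

	hrtimer_cancel(&sl.timer);
	rem = (sl.task == NULL);	/* a NULL task here means the timeout expired */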
The timeout passed to the syscall is no longer converted to jiffies and is therefore passed to do_futex() and futex_wait() as an absolute ktime_t therefore keeping nanosecond resolution. Also this removes the need to pass the nanoseconds timeout part to futex_lock_pi() in val2. In futex_wait(), if there is no timeout then a regular schedule() is performed. Otherwise, an hrtimer is fired before schedule() is called. [akpm@linux-foundation.org: fix `make headers_check'] Signed-off-by: Sebastien Dugue Signed-off-by: Pierre Peiffer Cc: Ingo Molnar Cc: Ulrich Drepper Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/futex.h | 4 ++- kernel/futex.c | 89 ++++++++++++++++++++++++++------------------------- kernel/futex_compat.c | 19 ++++++----- 3 files changed, 57 insertions(+), 55 deletions(-) (limited to 'include/linux') diff --git a/include/linux/futex.h b/include/linux/futex.h index 820125c628c1..34e54f2b8997 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -3,6 +3,8 @@ #include +union ktime; + /* Second argument to futex syscall */ @@ -94,7 +96,7 @@ struct robust_list_head { #define ROBUST_LIST_LIMIT 2048 #ifdef __KERNEL__ -long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, +long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, u32 __user *uaddr2, u32 val2, u32 val3); extern int diff --git a/kernel/futex.c b/kernel/futex.c index 685ee2362a5e..e1246ccbf89a 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1001,16 +1001,16 @@ static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb) } static long futex_wait_restart(struct restart_block *restart); -static int futex_wait_abstime(u32 __user *uaddr, u32 val, - int timed, unsigned long abs_time) +static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) { struct task_struct *curr = current; DECLARE_WAITQUEUE(wait, curr); struct futex_hash_bucket *hb; struct futex_q q; - unsigned long time_left = 0; u32 uval; int ret; + struct hrtimer_sleeper t; + int rem = 0; q.pi_state = NULL; retry: @@ -1088,20 +1088,29 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val, * !plist_node_empty() is safe here without any lock. * q.lock_ptr != 0 is not safe, because of ordering against wakeup. */ - time_left = 0; if (likely(!plist_node_empty(&q.list))) { - unsigned long rel_time; - - if (timed) { - unsigned long now = jiffies; - if (time_after(now, abs_time)) - rel_time = 0; - else - rel_time = abs_time - now; - } else - rel_time = MAX_SCHEDULE_TIMEOUT; + if (!abs_time) + schedule(); + else { + hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + hrtimer_init_sleeper(&t, current); + t.timer.expires = *abs_time; + + hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS); + + /* + * the timer could have already expired, in which + * case current would be flagged for rescheduling. + * Don't bother calling schedule. + */ + if (likely(t.task)) + schedule(); + + hrtimer_cancel(&t.timer); - time_left = schedule_timeout(rel_time); + /* Flag if a timeout occured */ + rem = (t.task == NULL); + } } __set_current_state(TASK_RUNNING); @@ -1113,14 +1122,14 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val, /* If we were woken (and unqueued), we succeeded, whatever. */ if (!unqueue_me(&q)) return 0; - if (time_left == 0) + if (rem) return -ETIMEDOUT; /* * We expect signal_pending(current), but another thread may * have handled it for us already. 
*/ - if (time_left == MAX_SCHEDULE_TIMEOUT) + if (!abs_time) return -ERESTARTSYS; else { struct restart_block *restart; @@ -1128,8 +1137,7 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val, restart->fn = futex_wait_restart; restart->arg0 = (unsigned long)uaddr; restart->arg1 = (unsigned long)val; - restart->arg2 = (unsigned long)timed; - restart->arg3 = abs_time; + restart->arg2 = (unsigned long)abs_time; return -ERESTART_RESTARTBLOCK; } @@ -1141,21 +1149,15 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val, return ret; } -static int futex_wait(u32 __user *uaddr, u32 val, unsigned long rel_time) -{ - int timed = (rel_time != MAX_SCHEDULE_TIMEOUT); - return futex_wait_abstime(uaddr, val, timed, jiffies+rel_time); -} static long futex_wait_restart(struct restart_block *restart) { u32 __user *uaddr = (u32 __user *)restart->arg0; u32 val = (u32)restart->arg1; - int timed = (int)restart->arg2; - unsigned long abs_time = restart->arg3; + ktime_t *abs_time = (ktime_t *)restart->arg2; restart->fn = do_no_restart_syscall; - return (long)futex_wait_abstime(uaddr, val, timed, abs_time); + return (long)futex_wait(uaddr, val, abs_time); } @@ -1165,8 +1167,8 @@ static long futex_wait_restart(struct restart_block *restart) * if there are waiters then it will block, it does PI, etc. (Due to * races the kernel might see a 0 value of the futex too.) */ -static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec, - long nsec, int trylock) +static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, + int trylock) { struct hrtimer_sleeper timeout, *to = NULL; struct task_struct *curr = current; @@ -1178,11 +1180,11 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec, if (refill_pi_state_cache()) return -ENOMEM; - if (sec != MAX_SCHEDULE_TIMEOUT) { + if (time) { to = &timeout; hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); - to->timer.expires = ktime_set(sec, nsec); + to->timer.expires = *time; } q.pi_state = NULL; @@ -1818,7 +1820,7 @@ void exit_robust_list(struct task_struct *curr) } } -long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, +long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) { int ret; @@ -1844,13 +1846,13 @@ long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, ret = futex_wake_op(uaddr, uaddr2, val, val2, val3); break; case FUTEX_LOCK_PI: - ret = futex_lock_pi(uaddr, val, timeout, val2, 0); + ret = futex_lock_pi(uaddr, val, timeout, 0); break; case FUTEX_UNLOCK_PI: ret = futex_unlock_pi(uaddr); break; case FUTEX_TRYLOCK_PI: - ret = futex_lock_pi(uaddr, 0, timeout, val2, 1); + ret = futex_lock_pi(uaddr, 0, timeout, 1); break; default: ret = -ENOSYS; @@ -1863,21 +1865,20 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, struct timespec __user *utime, u32 __user *uaddr2, u32 val3) { - struct timespec t; - unsigned long timeout = MAX_SCHEDULE_TIMEOUT; + struct timespec ts; + ktime_t t, *tp = NULL; u32 val2 = 0; if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) { - if (copy_from_user(&t, utime, sizeof(t)) != 0) + if (copy_from_user(&ts, utime, sizeof(ts)) != 0) return -EFAULT; - if (!timespec_valid(&t)) + if (!timespec_valid(&ts)) return -EINVAL; + + t = timespec_to_ktime(ts); if (op == FUTEX_WAIT) - timeout = timespec_to_jiffies(&t) + 1; - else { - timeout = t.tv_sec; - val2 = t.tv_nsec; - } + t = ktime_add(ktime_get(), t); + tp = &t; } /* * requeue parameter in 
'utime' if op == FUTEX_REQUEUE. @@ -1885,7 +1886,7 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE) val2 = (u32) (unsigned long) utime; - return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3); + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } static int futexfs_get_sb(struct file_system_type *fs_type, diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 50f24eea6cd0..dff27c471ea6 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -141,24 +141,23 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, struct compat_timespec __user *utime, u32 __user *uaddr2, u32 val3) { - struct timespec t; - unsigned long timeout = MAX_SCHEDULE_TIMEOUT; + struct timespec ts; + ktime_t t, *tp = NULL; int val2 = 0; if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) { - if (get_compat_timespec(&t, utime)) + if (get_compat_timespec(&ts, utime)) return -EFAULT; - if (!timespec_valid(&t)) + if (!timespec_valid(&ts)) return -EINVAL; + + t = timespec_to_ktime(ts); if (op == FUTEX_WAIT) - timeout = timespec_to_jiffies(&t) + 1; - else { - timeout = t.tv_sec; - val2 = t.tv_nsec; - } + t = ktime_add(ktime_get(), t); + tp = &t; } if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE) val2 = (int) (unsigned long) utime; - return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3); + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } -- cgit v1.2.3 From d0aa7a70bf03b9de9e995ab272293be1f7937822 Mon Sep 17 00:00:00 2001 From: Pierre Peiffer Date: Wed, 9 May 2007 02:35:02 -0700 Subject: futex_requeue_pi optimization This patch provides the futex_requeue_pi functionality, which allows some threads waiting on a normal futex to be requeued on the wait-queue of a PI-futex. This provides an optimization, already used for (normal) futexes, to be used with the PI-futexes. This optimization is currently used by the glibc in pthread_broadcast, when using "normal" mutexes. With futex_requeue_pi, it can be used with PRIO_INHERIT mutexes too. Signed-off-by: Pierre Peiffer Cc: Ingo Molnar Cc: Ulrich Drepper Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/futex.h | 9 +- kernel/futex.c | 541 +++++++++++++++++++++++++++++++++++++++++++----- kernel/futex_compat.c | 3 +- kernel/rtmutex.c | 41 +--- kernel/rtmutex_common.h | 34 +++ 5 files changed, 540 insertions(+), 88 deletions(-) (limited to 'include/linux') diff --git a/include/linux/futex.h b/include/linux/futex.h index 34e54f2b8997..1bd8dfcb037b 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -17,6 +17,7 @@ union ktime; #define FUTEX_LOCK_PI 6 #define FUTEX_UNLOCK_PI 7 #define FUTEX_TRYLOCK_PI 8 +#define FUTEX_CMP_REQUEUE_PI 9 /* * Support for robust futexes: the kernel cleans up held futexes at @@ -84,10 +85,15 @@ struct robust_list_head { */ #define FUTEX_OWNER_DIED 0x40000000 +/* + * Some processes have been requeued on this PI-futex + */ +#define FUTEX_WAITER_REQUEUED 0x20000000 + /* * The rest of the robust-futex field is for the TID: */ -#define FUTEX_TID_MASK 0x3fffffff +#define FUTEX_TID_MASK 0x0fffffff /* * This limit protects against a deliberately circular list. @@ -111,6 +117,7 @@ handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); * We set bit 0 to indicate if it's an inode-based key. 
*/ union futex_key { + u32 __user *uaddr; struct { unsigned long pgoff; struct inode *inode; diff --git a/kernel/futex.c b/kernel/futex.c index e1246ccbf89a..4a60ef55dab4 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -53,6 +53,12 @@ #include "rtmutex_common.h" +#ifdef CONFIG_DEBUG_RT_MUTEXES +# include "rtmutex-debug.h" +#else +# include "rtmutex.h" +#endif + #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) /* @@ -102,6 +108,12 @@ struct futex_q { /* Optional priority inheritance state: */ struct futex_pi_state *pi_state; struct task_struct *task; + + /* + * This waiter is used in case of requeue from a + * normal futex to a PI-futex + */ + struct rt_mutex_waiter waiter; }; /* @@ -180,6 +192,9 @@ int get_futex_key(u32 __user *uaddr, union futex_key *key) if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ)) return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES; + /* Save the user address in the ley */ + key->uaddr = uaddr; + /* * Private mappings are handled in a simple way. * @@ -439,7 +454,8 @@ void exit_pi_state_list(struct task_struct *curr) } static int -lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) +lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, + union futex_key *key, struct futex_pi_state **ps) { struct futex_pi_state *pi_state = NULL; struct futex_q *this, *next; @@ -450,7 +466,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) head = &hb->chain; plist_for_each_entry_safe(this, next, head, list) { - if (match_futex(&this->key, &me->key)) { + if (match_futex(&this->key, key)) { /* * Another waiter already exists - bump up * the refcount and return its pi_state: @@ -465,7 +481,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) WARN_ON(!atomic_read(&pi_state->refcount)); atomic_inc(&pi_state->refcount); - me->pi_state = pi_state; + *ps = pi_state; return 0; } @@ -492,7 +508,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); /* Store the key for possible exit cleanups: */ - pi_state->key = me->key; + pi_state->key = *key; spin_lock_irq(&p->pi_lock); WARN_ON(!list_empty(&pi_state->list)); @@ -502,7 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) put_task_struct(p); - me->pi_state = pi_state; + *ps = pi_state; return 0; } @@ -562,6 +578,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) */ if (!(uval & FUTEX_OWNER_DIED)) { newval = FUTEX_WAITERS | new_owner->pid; + /* Keep the FUTEX_WAITER_REQUEUED flag if it was set */ + newval |= (uval & FUTEX_WAITER_REQUEUED); pagefault_disable(); curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); @@ -665,6 +683,254 @@ out: return ret; } +/* + * Called from futex_requeue_pi. + * Set FUTEX_WAITERS and FUTEX_WAITER_REQUEUED flags on the + * PI-futex value; search its associated pi_state if an owner exist + * or create a new one without owner. + */ +static inline int +lookup_pi_state_for_requeue(u32 __user *uaddr, struct futex_hash_bucket *hb, + union futex_key *key, + struct futex_pi_state **pi_state) +{ + u32 curval, uval, newval; + +retry: + /* + * We can't handle a fault cleanly because we can't + * release the locks here. Simply return the fault. 
+ */ + if (get_futex_value_locked(&curval, uaddr)) + return -EFAULT; + + /* set the flags FUTEX_WAITERS and FUTEX_WAITER_REQUEUED */ + if ((curval & (FUTEX_WAITERS | FUTEX_WAITER_REQUEUED)) + != (FUTEX_WAITERS | FUTEX_WAITER_REQUEUED)) { + /* + * No waiters yet, we prepare the futex to have some waiters. + */ + + uval = curval; + newval = uval | FUTEX_WAITERS | FUTEX_WAITER_REQUEUED; + + pagefault_disable(); + curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); + pagefault_enable(); + + if (unlikely(curval == -EFAULT)) + return -EFAULT; + if (unlikely(curval != uval)) + goto retry; + } + + if (!(curval & FUTEX_TID_MASK) + || lookup_pi_state(curval, hb, key, pi_state)) { + /* the futex has no owner (yet) or the lookup failed: + allocate one pi_state without owner */ + + *pi_state = alloc_pi_state(); + + /* Already stores the key: */ + (*pi_state)->key = *key; + + /* init the mutex without owner */ + __rt_mutex_init(&(*pi_state)->pi_mutex, NULL); + } + + return 0; +} + +/* + * Keep the first nr_wake waiter from futex1, wake up one, + * and requeue the next nr_requeue waiters following hashed on + * one physical page to another physical page (PI-futex uaddr2) + */ +static int futex_requeue_pi(u32 __user *uaddr1, u32 __user *uaddr2, + int nr_wake, int nr_requeue, u32 *cmpval) +{ + union futex_key key1, key2; + struct futex_hash_bucket *hb1, *hb2; + struct plist_head *head1; + struct futex_q *this, *next; + struct futex_pi_state *pi_state2 = NULL; + struct rt_mutex_waiter *waiter, *top_waiter = NULL; + struct rt_mutex *lock2 = NULL; + int ret, drop_count = 0; + + if (refill_pi_state_cache()) + return -ENOMEM; + +retry: + /* + * First take all the futex related locks: + */ + down_read(¤t->mm->mmap_sem); + + ret = get_futex_key(uaddr1, &key1); + if (unlikely(ret != 0)) + goto out; + ret = get_futex_key(uaddr2, &key2); + if (unlikely(ret != 0)) + goto out; + + hb1 = hash_futex(&key1); + hb2 = hash_futex(&key2); + + double_lock_hb(hb1, hb2); + + if (likely(cmpval != NULL)) { + u32 curval; + + ret = get_futex_value_locked(&curval, uaddr1); + + if (unlikely(ret)) { + spin_unlock(&hb1->lock); + if (hb1 != hb2) + spin_unlock(&hb2->lock); + + /* + * If we would have faulted, release mmap_sem, fault + * it in and start all over again. + */ + up_read(¤t->mm->mmap_sem); + + ret = get_user(curval, uaddr1); + + if (!ret) + goto retry; + + return ret; + } + if (curval != *cmpval) { + ret = -EAGAIN; + goto out_unlock; + } + } + + head1 = &hb1->chain; + plist_for_each_entry_safe(this, next, head1, list) { + if (!match_futex (&this->key, &key1)) + continue; + if (++ret <= nr_wake) { + wake_futex(this); + } else { + /* + * FIRST: get and set the pi_state + */ + if (!pi_state2) { + int s; + /* do this only the first time we requeue someone */ + s = lookup_pi_state_for_requeue(uaddr2, hb2, + &key2, &pi_state2); + if (s) { + ret = s; + goto out_unlock; + } + + lock2 = &pi_state2->pi_mutex; + spin_lock(&lock2->wait_lock); + + /* Save the top waiter of the wait_list */ + if (rt_mutex_has_waiters(lock2)) + top_waiter = rt_mutex_top_waiter(lock2); + } else + atomic_inc(&pi_state2->refcount); + + + this->pi_state = pi_state2; + + /* + * SECOND: requeue futex_q to the correct hashbucket + */ + + /* + * If key1 and key2 hash to the same bucket, no need to + * requeue. 
+ */ + if (likely(head1 != &hb2->chain)) { + plist_del(&this->list, &hb1->chain); + plist_add(&this->list, &hb2->chain); + this->lock_ptr = &hb2->lock; +#ifdef CONFIG_DEBUG_PI_LIST + this->list.plist.lock = &hb2->lock; +#endif + } + this->key = key2; + get_futex_key_refs(&key2); + drop_count++; + + + /* + * THIRD: queue it to lock2 + */ + spin_lock_irq(&this->task->pi_lock); + waiter = &this->waiter; + waiter->task = this->task; + waiter->lock = lock2; + plist_node_init(&waiter->list_entry, this->task->prio); + plist_node_init(&waiter->pi_list_entry, this->task->prio); + plist_add(&waiter->list_entry, &lock2->wait_list); + this->task->pi_blocked_on = waiter; + spin_unlock_irq(&this->task->pi_lock); + + if (ret - nr_wake >= nr_requeue) + break; + } + } + + /* If we've requeued some tasks and the top_waiter of the rt_mutex + has changed, we must adjust the priority of the owner, if any */ + if (drop_count) { + struct task_struct *owner = rt_mutex_owner(lock2); + if (owner && + (top_waiter != (waiter = rt_mutex_top_waiter(lock2)))) { + int chain_walk = 0; + + spin_lock_irq(&owner->pi_lock); + if (top_waiter) + plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); + else + /* + * There was no waiters before the requeue, + * the flag must be updated + */ + mark_rt_mutex_waiters(lock2); + + plist_add(&waiter->pi_list_entry, &owner->pi_waiters); + __rt_mutex_adjust_prio(owner); + if (owner->pi_blocked_on) { + chain_walk = 1; + get_task_struct(owner); + } + + spin_unlock_irq(&owner->pi_lock); + spin_unlock(&lock2->wait_lock); + + if (chain_walk) + rt_mutex_adjust_prio_chain(owner, 0, lock2, NULL, + current); + } else { + /* No owner or the top_waiter does not change */ + mark_rt_mutex_waiters(lock2); + spin_unlock(&lock2->wait_lock); + } + } + +out_unlock: + spin_unlock(&hb1->lock); + if (hb1 != hb2) + spin_unlock(&hb2->lock); + + /* drop_futex_key_refs() must be called outside the spinlocks. */ + while (--drop_count >= 0) + drop_futex_key_refs(&key1); + +out: + up_read(¤t->mm->mmap_sem); + return ret; +} + /* * Wake up all waiters hashed on the physical page that is mapped * to this virtual address: @@ -984,9 +1250,10 @@ static int unqueue_me(struct futex_q *q) /* * PI futexes can not be requeued and must remove themself from the - * hash bucket. The hash bucket lock is held on entry and dropped here. + * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry + * and dropped here. */ -static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb) +static void unqueue_me_pi(struct futex_q *q) { WARN_ON(plist_node_empty(&q->list)); plist_del(&q->list, &q->list.plist); @@ -995,11 +1262,65 @@ static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb) free_pi_state(q->pi_state); q->pi_state = NULL; - spin_unlock(&hb->lock); + spin_unlock(q->lock_ptr); drop_futex_key_refs(&q->key); } +/* + * Fixup the pi_state owner with current. + * + * The cur->mm semaphore must be held, it is released at return of this + * function. + */ +static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + struct futex_hash_bucket *hb, + struct task_struct *curr) +{ + u32 newtid = curr->pid | FUTEX_WAITERS; + struct futex_pi_state *pi_state = q->pi_state; + u32 uval, curval, newval; + int ret; + + /* Owner died? 
*/ + if (pi_state->owner != NULL) { + spin_lock_irq(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + spin_unlock_irq(&pi_state->owner->pi_lock); + } else + newtid |= FUTEX_OWNER_DIED; + + pi_state->owner = curr; + + spin_lock_irq(&curr->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &curr->pi_state_list); + spin_unlock_irq(&curr->pi_lock); + + /* Unqueue and drop the lock */ + unqueue_me_pi(q); + up_read(&curr->mm->mmap_sem); + /* + * We own it, so we have to replace the pending owner + * TID. This must be atomic as we have preserve the + * owner died bit here. + */ + ret = get_user(uval, uaddr); + while (!ret) { + newval = (uval & FUTEX_OWNER_DIED) | newtid; + newval |= (uval & FUTEX_WAITER_REQUEUED); + curval = futex_atomic_cmpxchg_inatomic(uaddr, + uval, newval); + if (curval == -EFAULT) + ret = -EFAULT; + if (curval == uval) + break; + uval = curval; + } + return ret; +} + static long futex_wait_restart(struct restart_block *restart); static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) { @@ -1009,7 +1330,7 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) struct futex_q q; u32 uval; int ret; - struct hrtimer_sleeper t; + struct hrtimer_sleeper t, *to = NULL; int rem = 0; q.pi_state = NULL; @@ -1063,6 +1384,14 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) if (uval != val) goto out_unlock_release_sem; + /* + * This rt_mutex_waiter structure is prepared here and will + * be used only if this task is requeued from a normal futex to + * a PI-futex with futex_requeue_pi. + */ + debug_rt_mutex_init_waiter(&q.waiter); + q.waiter.task = NULL; + /* Only actually queue if *uaddr contained val. */ __queue_me(&q, hb); @@ -1092,6 +1421,7 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) if (!abs_time) schedule(); else { + to = &t; hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(&t, current); t.timer.expires = *abs_time; @@ -1119,6 +1449,66 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) * we are the only user of it. */ + if (q.pi_state) { + /* + * We were woken but have been requeued on a PI-futex. + * We have to complete the lock acquisition by taking + * the rtmutex. + */ + + struct rt_mutex *lock = &q.pi_state->pi_mutex; + + spin_lock(&lock->wait_lock); + if (unlikely(q.waiter.task)) { + remove_waiter(lock, &q.waiter); + } + spin_unlock(&lock->wait_lock); + + if (rem) + ret = -ETIMEDOUT; + else + ret = rt_mutex_timed_lock(lock, to, 1); + + down_read(&curr->mm->mmap_sem); + spin_lock(q.lock_ptr); + + /* + * Got the lock. We might not be the anticipated owner if we + * did a lock-steal - fix up the PI-state in that case. + */ + if (!ret && q.pi_state->owner != curr) { + /* + * We MUST play with the futex we were requeued on, + * NOT the current futex. + * We can retrieve it from the key of the pi_state + */ + uaddr = q.pi_state->key.uaddr; + + /* mmap_sem and hash_bucket lock are unlocked at + return of this function */ + ret = fixup_pi_state_owner(uaddr, &q, hb, curr); + } else { + /* + * Catch the rare case, where the lock was released + * when we were on the way back before we locked + * the hash bucket. 
+ */ + if (ret && q.pi_state->owner == curr) { + if (rt_mutex_trylock(&q.pi_state->pi_mutex)) + ret = 0; + } + /* Unqueue and drop the lock */ + unqueue_me_pi(&q); + up_read(&curr->mm->mmap_sem); + } + + debug_rt_mutex_free_waiter(&q.waiter); + + return ret; + } + + debug_rt_mutex_free_waiter(&q.waiter); + /* If we were woken (and unqueued), we succeeded, whatever. */ if (!unqueue_me(&q)) return 0; @@ -1161,6 +1551,51 @@ static long futex_wait_restart(struct restart_block *restart) } +static void set_pi_futex_owner(struct futex_hash_bucket *hb, + union futex_key *key, struct task_struct *p) +{ + struct plist_head *head; + struct futex_q *this, *next; + struct futex_pi_state *pi_state = NULL; + struct rt_mutex *lock; + + /* Search a waiter that should already exists */ + + head = &hb->chain; + + plist_for_each_entry_safe(this, next, head, list) { + if (match_futex (&this->key, key)) { + pi_state = this->pi_state; + break; + } + } + + BUG_ON(!pi_state); + + /* set p as pi_state's owner */ + lock = &pi_state->pi_mutex; + + spin_lock(&lock->wait_lock); + spin_lock_irq(&p->pi_lock); + + list_add(&pi_state->list, &p->pi_state_list); + pi_state->owner = p; + + + /* set p as pi_mutex's owner */ + debug_rt_mutex_proxy_lock(lock, p); + WARN_ON(rt_mutex_owner(lock)); + rt_mutex_set_owner(lock, p, 0); + rt_mutex_deadlock_account_lock(lock, p); + + plist_add(&rt_mutex_top_waiter(lock)->pi_list_entry, + &p->pi_waiters); + __rt_mutex_adjust_prio(p); + + spin_unlock_irq(&p->pi_lock); + spin_unlock(&lock->wait_lock); +} + /* * Userspace tried a 0 -> TID atomic transition of the futex value * and failed. The kernel side here does the whole locking operation: @@ -1175,7 +1610,7 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, struct futex_hash_bucket *hb; u32 uval, newval, curval; struct futex_q q; - int ret, attempt = 0; + int ret, lock_held, attempt = 0; if (refill_pi_state_cache()) return -ENOMEM; @@ -1198,6 +1633,8 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, hb = queue_lock(&q, -1, NULL); retry_locked: + lock_held = 0; + /* * To avoid races, we attempt to take the lock here again * (by doing a 0 -> TID atomic cmpxchg), while holding all @@ -1216,7 +1653,16 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) { if (!detect && 0) force_sig(SIGKILL, current); - ret = -EDEADLK; + /* + * Normally, this check is done in user space. + * In case of requeue, the owner may attempt to lock this futex, + * even if the ownership has already been given by the previous + * waker. + * In the usual case, this is a case of deadlock, but not in case + * of REQUEUE_PI. + */ + if (!(curval & FUTEX_WAITER_REQUEUED)) + ret = -EDEADLK; goto out_unlock_release_sem; } @@ -1228,7 +1674,18 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, goto out_unlock_release_sem; uval = curval; - newval = uval | FUTEX_WAITERS; + /* + * In case of a requeue, check if there already is an owner + * If not, just take the futex. 
+ */ + if ((curval & FUTEX_WAITER_REQUEUED) && !(curval & FUTEX_TID_MASK)) { + /* set current as futex owner */ + newval = curval | current->pid; + lock_held = 1; + } else + /* Set the WAITERS flag, so the owner will know it has someone + to wake at next unlock */ + newval = curval | FUTEX_WAITERS; pagefault_disable(); curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); @@ -1239,11 +1696,16 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, if (unlikely(curval != uval)) goto retry_locked; + if (lock_held) { + set_pi_futex_owner(hb, &q.key, curr); + goto out_unlock_release_sem; + } + /* * We dont have the lock. Look up the PI state (or create it if * we are the first waiter): */ - ret = lookup_pi_state(uval, hb, &q); + ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state); if (unlikely(ret)) { /* @@ -1306,45 +1768,10 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case. */ - if (!ret && q.pi_state->owner != curr) { - u32 newtid = current->pid | FUTEX_WAITERS; - - /* Owner died? */ - if (q.pi_state->owner != NULL) { - spin_lock_irq(&q.pi_state->owner->pi_lock); - WARN_ON(list_empty(&q.pi_state->list)); - list_del_init(&q.pi_state->list); - spin_unlock_irq(&q.pi_state->owner->pi_lock); - } else - newtid |= FUTEX_OWNER_DIED; - - q.pi_state->owner = current; - - spin_lock_irq(¤t->pi_lock); - WARN_ON(!list_empty(&q.pi_state->list)); - list_add(&q.pi_state->list, ¤t->pi_state_list); - spin_unlock_irq(¤t->pi_lock); - - /* Unqueue and drop the lock */ - unqueue_me_pi(&q, hb); - up_read(&curr->mm->mmap_sem); - /* - * We own it, so we have to replace the pending owner - * TID. This must be atomic as we have preserve the - * owner died bit here. - */ - ret = get_user(uval, uaddr); - while (!ret) { - newval = (uval & FUTEX_OWNER_DIED) | newtid; - curval = futex_atomic_cmpxchg_inatomic(uaddr, - uval, newval); - if (curval == -EFAULT) - ret = -EFAULT; - if (curval == uval) - break; - uval = curval; - } - } else { + if (!ret && q.pi_state->owner != curr) + /* mmap_sem is unlocked at return of this function */ + ret = fixup_pi_state_owner(uaddr, &q, hb, curr); + else { /* * Catch the rare case, where the lock was released * when we were on the way back before we locked @@ -1355,7 +1782,7 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, ret = 0; } /* Unqueue and drop the lock */ - unqueue_me_pi(&q, hb); + unqueue_me_pi(&q); up_read(&curr->mm->mmap_sem); } @@ -1724,6 +2151,8 @@ retry: * userspace. */ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; + /* Also keep the FUTEX_WAITER_REQUEUED flag if set */ + mval |= (uval & FUTEX_WAITER_REQUEUED); nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); if (nval == -EFAULT) @@ -1854,6 +2283,9 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, case FUTEX_TRYLOCK_PI: ret = futex_lock_pi(uaddr, 0, timeout, 1); break; + case FUTEX_CMP_REQUEUE_PI: + ret = futex_requeue_pi(uaddr, uaddr2, val, val2, &val3); + break; default: ret = -ENOSYS; } @@ -1883,7 +2315,8 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, /* * requeue parameter in 'utime' if op == FUTEX_REQUEUE. 
*/ - if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE) + if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE + || op == FUTEX_CMP_REQUEUE_PI) val2 = (u32) (unsigned long) utime; return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index dff27c471ea6..338a9b489fbc 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -156,7 +156,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, t = ktime_add(ktime_get(), t); tp = &t; } - if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE) + if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE + || op == FUTEX_CMP_REQUEUE_PI) val2 = (int) (unsigned long) utime; return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 180978cb2f75..12879f6c1ec3 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -56,7 +56,7 @@ * state. */ -static void +void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner, unsigned long mask) { @@ -80,29 +80,6 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) clear_rt_mutex_waiters(lock); } -/* - * We can speed up the acquire/release, if the architecture - * supports cmpxchg and if there's no debugging state to be set up - */ -#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES) -# define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c) -static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) -{ - unsigned long owner, *p = (unsigned long *) &lock->owner; - - do { - owner = *p; - } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner); -} -#else -# define rt_mutex_cmpxchg(l,c,n) (0) -static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) -{ - lock->owner = (struct task_struct *) - ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); -} -#endif - /* * Calculate task priority from the waiter list priority * @@ -123,7 +100,7 @@ int rt_mutex_getprio(struct task_struct *task) * * This can be both boosting and unboosting. task->pi_lock must be held. */ -static void __rt_mutex_adjust_prio(struct task_struct *task) +void __rt_mutex_adjust_prio(struct task_struct *task) { int prio = rt_mutex_getprio(task); @@ -159,11 +136,11 @@ int max_lock_depth = 1024; * Decreases task's usage by one - may thus free the task. * Returns 0 or -EDEADLK. 
*/ -static int rt_mutex_adjust_prio_chain(struct task_struct *task, - int deadlock_detect, - struct rt_mutex *orig_lock, - struct rt_mutex_waiter *orig_waiter, - struct task_struct *top_task) +int rt_mutex_adjust_prio_chain(struct task_struct *task, + int deadlock_detect, + struct rt_mutex *orig_lock, + struct rt_mutex_waiter *orig_waiter, + struct task_struct *top_task) { struct rt_mutex *lock; struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; @@ -524,8 +501,8 @@ static void wakeup_next_waiter(struct rt_mutex *lock) * * Must be called with lock->wait_lock held */ -static void remove_waiter(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter) +void remove_waiter(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter) { int first = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h index 9c75856e791e..242ec7ee740b 100644 --- a/kernel/rtmutex_common.h +++ b/kernel/rtmutex_common.h @@ -112,6 +112,29 @@ static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock) return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING; } +/* + * We can speed up the acquire/release, if the architecture + * supports cmpxchg and if there's no debugging state to be set up + */ +#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES) +# define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c) +static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) +{ + unsigned long owner, *p = (unsigned long *) &lock->owner; + + do { + owner = *p; + } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner); +} +#else +# define rt_mutex_cmpxchg(l,c,n) (0) +static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) +{ + lock->owner = (struct task_struct *) + ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); +} +#endif + /* * PI-futex support (proxy locking functions, etc.): */ @@ -120,4 +143,15 @@ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, struct task_struct *proxy_owner); + +extern void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner, + unsigned long mask); +extern void __rt_mutex_adjust_prio(struct task_struct *task); +extern int rt_mutex_adjust_prio_chain(struct task_struct *task, + int deadlock_detect, + struct rt_mutex *orig_lock, + struct rt_mutex_waiter *orig_waiter, + struct task_struct *top_task); +extern void remove_waiter(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter); #endif -- cgit v1.2.3 From 34f01cc1f512fa783302982776895c73714ebbc2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 May 2007 02:35:04 -0700 Subject: FUTEX: new PRIVATE futexes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Analysis of current linux futex code : -------------------------------------- A central hash table futex_queues[] holds all contexts (futex_q) of waiting threads. Each futex_wait()/futex_wait() has to obtain a spinlock on a hash slot to perform lookups or insert/deletion of a futex_q. When a futex_wait() is done, calling thread has to : 1) - Obtain a read lock on mmap_sem to be able to validate the user pointer (calling find_vma()). 
This validation tells us if the futex uses an inode based store (mapped file), or mm based store (anonymous mem) 2) - compute a hash key 3) - Atomic increment of reference counter on an inode or a mm_struct 4) - lock part of futex_queues[] hash table 5) - perform the test on value of futex. (rollback is value != expected_value, returns EWOULDBLOCK) (various loops if test triggers mm faults) 6) queue the context into hash table, release the lock got in 4) 7) - release the read_lock on mmap_sem 8) Eventually unqueue the context (but rarely, as this part  may be done by the futex_wake()) Futexes were designed to improve scalability but current implementation has various problems : - Central hashtable : This means scalability problems if many processes/threads want to use futexes at the same time. This means NUMA unbalance because this hashtable is located on one node. - Using mmap_sem on every futex() syscall : Even if mmap_sem is a rw_semaphore, up_read()/down_read() are doing atomic ops on mmap_sem, dirtying cache line : - lot of cache line ping pongs on SMP configurations. mmap_sem is also extensively used by mm code (page faults, mmap()/munmap()) Highly threaded processes might suffer from mmap_sem contention. mmap_sem is also used by oprofile code. Enabling oprofile hurts threaded programs because of contention on the mmap_sem cache line. - Using an atomic_inc()/atomic_dec() on inode ref counter or mm ref counter: It's also a cache line ping pong on SMP. It also increases mmap_sem hold time because of cache misses. Most of these scalability problems come from the fact that futexes are in one global namespace. As we use a central hash table, we must make sure they are all using the same reference (given by the mm subsystem). We chose to force all futexes be 'shared'. This has a cost. But fact is POSIX defined PRIVATE and SHARED, allowing clear separation, and optimal performance if carefuly implemented. Time has come for linux to have better threading performance. The goal is to permit new futex commands to avoid : - Taking the mmap_sem semaphore, conflicting with other subsystems. - Modifying a ref_count on mm or an inode, still conflicting with mm or fs. This is possible because, for one process using PTHREAD_PROCESS_PRIVATE futexes, we only need to distinguish futexes by their virtual address, no matter the underlying mm storage is. If glibc wants to exploit this new infrastructure, it should use new _PRIVATE futex subcommands for PTHREAD_PROCESS_PRIVATE futexes. And be prepared to fallback on old subcommands for old kernels. Using one global variable with the FUTEX_PRIVATE_FLAG or 0 value should be OK. PTHREAD_PROCESS_SHARED futexes should still use the old subcommands. Compatibility with old applications is preserved, they still hit the scalability problems, but new applications can fly :) Note : the same SHARED futex (mapped on a file) can be used by old binaries *and* new binaries, because both binaries will use the old subcommands. Note : Vast majority of futexes should be using PROCESS_PRIVATE semantic, as this is the default semantic. 
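For illustration only, a userspace sketch (not part of this patch) of the fallback scheme described above: try the _PRIVATE subcommand first and clear one global flag if the kernel is too old. The wrapper name and the ENOSYS-based detection are assumptions; the FUTEX_* constants come from the updated linux/futex.h below, and an unknown subcommand falls into do_futex()'s default case, which returns -ENOSYS.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>

static int futex_private_flag = FUTEX_PRIVATE_FLAG;	/* cleared once on old kernels */

static long my_futex_wait(int *uaddr, int val)
{
	long ret = syscall(SYS_futex, uaddr,
			   FUTEX_WAIT | futex_private_flag, val, NULL);

	if (ret == -1 && errno == ENOSYS && futex_private_flag) {
		/* Old kernel: unknown subcommand, fall back to the shared op. */
		futex_private_flag = 0;
		ret = syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL);
	}
	return ret;
}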
Almost all applications should benefit of this changes (new kernel and updated libc) Some bench results on a Pentium M 1.6 GHz (SMP kernel on a UP machine) /* calling futex_wait(addr, value) with value != *addr */ 433 cycles per futex(FUTEX_WAIT) call (mixing 2 futexes) 424 cycles per futex(FUTEX_WAIT) call (using one futex) 334 cycles per futex(FUTEX_WAIT_PRIVATE) call (mixing 2 futexes) 334 cycles per futex(FUTEX_WAIT_PRIVATE) call (using one futex) For reference : 187 cycles per getppid() call 188 cycles per umask() call 181 cycles per ni_syscall() call Signed-off-by: Eric Dumazet Pierre Peiffer Cc: "Ulrich Drepper" Cc: "Nick Piggin" Cc: "Ingo Molnar" Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/futex.h | 29 ++++- kernel/futex.c | 324 ++++++++++++++++++++++++++++++++------------------ 2 files changed, 236 insertions(+), 117 deletions(-) (limited to 'include/linux') diff --git a/include/linux/futex.h b/include/linux/futex.h index 1bd8dfcb037b..899fc7f20edd 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -19,6 +19,18 @@ union ktime; #define FUTEX_TRYLOCK_PI 8 #define FUTEX_CMP_REQUEUE_PI 9 +#define FUTEX_PRIVATE_FLAG 128 +#define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG + +#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG) +#define FUTEX_REQUEUE_PRIVATE (FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG) +#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG) +#define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG) +#define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG) +#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) + /* * Support for robust futexes: the kernel cleans up held futexes at * thread exit time. @@ -114,8 +126,18 @@ handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); * Don't rearrange members without looking at hash_futex(). * * offset is aligned to a multiple of sizeof(u32) (== 4) by definition. - * We set bit 0 to indicate if it's an inode-based key. 
- */ + * We use the two low order bits of offset to tell what is the kind of key : + * 00 : Private process futex (PTHREAD_PROCESS_PRIVATE) + * (no reference on an inode or mm) + * 01 : Shared futex (PTHREAD_PROCESS_SHARED) + * mapped on a file (reference on the underlying inode) + * 10 : Shared futex (PTHREAD_PROCESS_SHARED) + * (but private mapping on an mm, and reference taken on it) +*/ + +#define FUT_OFF_INODE 1 /* We set bit 0 if key has a reference on inode */ +#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */ + union futex_key { u32 __user *uaddr; struct { @@ -134,7 +156,8 @@ union futex_key { int offset; } both; }; -int get_futex_key(u32 __user *uaddr, union futex_key *key); +int get_futex_key(u32 __user *uaddr, struct rw_semaphore *shared, + union futex_key *key); void get_futex_key_refs(union futex_key *key); void drop_futex_key_refs(union futex_key *key); diff --git a/kernel/futex.c b/kernel/futex.c index 4a60ef55dab4..b7ce15c67e32 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -16,6 +16,9 @@ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar * Copyright (C) 2006 Timesys Corp., Thomas Gleixner * + * PRIVATE futexes by Eric Dumazet + * Copyright (C) 2007 Eric Dumazet + * * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly * enough at me, Linus for the original (flawed) idea, Matthew * Kirkwood for proof-of-concept implementation. @@ -150,19 +153,26 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2) && key1->both.offset == key2->both.offset); } -/* - * Get parameters which are the keys for a futex. +/** + * get_futex_key - Get parameters which are the keys for a futex. + * @uaddr: virtual address of the futex + * @shared: NULL for a PROCESS_PRIVATE futex, + * ¤t->mm->mmap_sem for a PROCESS_SHARED futex + * @key: address where result is stored. + * + * Returns a negative error code or 0 + * The key words are stored in *key on success. * * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode, * offset_within_page). For private mappings, it's (uaddr, current->mm). * We can usually work out the index without swapping in the page. * - * Returns: 0, or negative error code. - * The key words are stored in *key on success. - * - * Should be called with ¤t->mm->mmap_sem but NOT any spinlocks. + * fshared is NULL for PROCESS_PRIVATE futexes + * For other futexes, it points to ¤t->mm->mmap_sem and + * caller must have taken the reader lock. but NOT any spinlocks. */ -int get_futex_key(u32 __user *uaddr, union futex_key *key) +int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared, + union futex_key *key) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; @@ -174,10 +184,24 @@ int get_futex_key(u32 __user *uaddr, union futex_key *key) * The futex address must be "naturally" aligned. */ key->both.offset = address % PAGE_SIZE; - if (unlikely((key->both.offset % sizeof(u32)) != 0)) + if (unlikely((address % sizeof(u32)) != 0)) return -EINVAL; address -= key->both.offset; + /* + * PROCESS_PRIVATE futexes are fast. + * As the mm cannot disappear under us and the 'key' only needs + * virtual address, we dont even have to find the underlying vma. 
+ * Note : We do have to check 'uaddr' is a valid user address, + * but access_ok() should be faster than find_vma() + */ + if (!fshared) { + if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) + return -EFAULT; + key->private.mm = mm; + key->private.address = address; + return 0; + } /* * The futex is hashed differently depending on whether * it's in a shared or private mapping. So check vma first. @@ -205,6 +229,7 @@ int get_futex_key(u32 __user *uaddr, union futex_key *key) * mappings of _writable_ handles. */ if (likely(!(vma->vm_flags & VM_MAYSHARE))) { + key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */ key->private.mm = mm; key->private.address = address; return 0; @@ -214,7 +239,7 @@ int get_futex_key(u32 __user *uaddr, union futex_key *key) * Linear file mappings are also simple. */ key->shared.inode = vma->vm_file->f_path.dentry->d_inode; - key->both.offset++; /* Bit 0 of offset indicates inode-based key. */ + key->both.offset |= FUT_OFF_INODE; /* inode-based key. */ if (likely(!(vma->vm_flags & VM_NONLINEAR))) { key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff); @@ -242,16 +267,18 @@ EXPORT_SYMBOL_GPL(get_futex_key); * Take a reference to the resource addressed by a key. * Can be called while holding spinlocks. * - * NOTE: mmap_sem MUST be held between get_futex_key() and calling this - * function, if it is called at all. mmap_sem keeps key->shared.inode valid. */ inline void get_futex_key_refs(union futex_key *key) { - if (key->both.ptr != 0) { - if (key->both.offset & 1) + if (key->both.ptr == 0) + return; + switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { + case FUT_OFF_INODE: atomic_inc(&key->shared.inode->i_count); - else + break; + case FUT_OFF_MMSHARED: atomic_inc(&key->private.mm->mm_count); + break; } } EXPORT_SYMBOL_GPL(get_futex_key_refs); @@ -262,11 +289,15 @@ EXPORT_SYMBOL_GPL(get_futex_key_refs); */ void drop_futex_key_refs(union futex_key *key) { - if (key->both.ptr != 0) { - if (key->both.offset & 1) + if (key->both.ptr == 0) + return; + switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { + case FUT_OFF_INODE: iput(key->shared.inode); - else + break; + case FUT_OFF_MMSHARED: mmdrop(key->private.mm); + break; } } EXPORT_SYMBOL_GPL(drop_futex_key_refs); @@ -283,28 +314,38 @@ static inline int get_futex_value_locked(u32 *dest, u32 __user *from) } /* - * Fault handling. Called with current->mm->mmap_sem held. + * Fault handling. 
+ * if fshared is non NULL, current->mm->mmap_sem is already held */ -static int futex_handle_fault(unsigned long address, int attempt) +static int futex_handle_fault(unsigned long address, + struct rw_semaphore *fshared, int attempt) { struct vm_area_struct * vma; struct mm_struct *mm = current->mm; + int ret = -EFAULT; - if (attempt > 2 || !(vma = find_vma(mm, address)) || - vma->vm_start > address || !(vma->vm_flags & VM_WRITE)) - return -EFAULT; + if (attempt > 2) + return ret; - switch (handle_mm_fault(mm, vma, address, 1)) { - case VM_FAULT_MINOR: - current->min_flt++; - break; - case VM_FAULT_MAJOR: - current->maj_flt++; - break; - default: - return -EFAULT; + if (!fshared) + down_read(&mm->mmap_sem); + vma = find_vma(mm, address); + if (vma && address >= vma->vm_start && + (vma->vm_flags & VM_WRITE)) { + switch (handle_mm_fault(mm, vma, address, 1)) { + case VM_FAULT_MINOR: + ret = 0; + current->min_flt++; + break; + case VM_FAULT_MAJOR: + ret = 0; + current->maj_flt++; + break; + } } - return 0; + if (!fshared) + up_read(&mm->mmap_sem); + return ret; } /* @@ -647,7 +688,8 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) * Wake up all waiters hashed on the physical page that is mapped * to this virtual address: */ -static int futex_wake(u32 __user *uaddr, int nr_wake) +static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared, + int nr_wake) { struct futex_hash_bucket *hb; struct futex_q *this, *next; @@ -655,9 +697,10 @@ static int futex_wake(u32 __user *uaddr, int nr_wake) union futex_key key; int ret; - down_read(¤t->mm->mmap_sem); + if (fshared) + down_read(fshared); - ret = get_futex_key(uaddr, &key); + ret = get_futex_key(uaddr, fshared, &key); if (unlikely(ret != 0)) goto out; @@ -679,7 +722,8 @@ static int futex_wake(u32 __user *uaddr, int nr_wake) spin_unlock(&hb->lock); out: - up_read(¤t->mm->mmap_sem); + if (fshared) + up_read(fshared); return ret; } @@ -746,7 +790,9 @@ retry: * and requeue the next nr_requeue waiters following hashed on * one physical page to another physical page (PI-futex uaddr2) */ -static int futex_requeue_pi(u32 __user *uaddr1, u32 __user *uaddr2, +static int futex_requeue_pi(u32 __user *uaddr1, + struct rw_semaphore *fshared, + u32 __user *uaddr2, int nr_wake, int nr_requeue, u32 *cmpval) { union futex_key key1, key2; @@ -765,12 +811,13 @@ retry: /* * First take all the futex related locks: */ - down_read(¤t->mm->mmap_sem); + if (fshared) + down_read(fshared); - ret = get_futex_key(uaddr1, &key1); + ret = get_futex_key(uaddr1, fshared, &key1); if (unlikely(ret != 0)) goto out; - ret = get_futex_key(uaddr2, &key2); + ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out; @@ -793,7 +840,8 @@ retry: * If we would have faulted, release mmap_sem, fault * it in and start all over again. 
*/ - up_read(¤t->mm->mmap_sem); + if (fshared) + up_read(fshared); ret = get_user(curval, uaddr1); @@ -927,7 +975,8 @@ out_unlock: drop_futex_key_refs(&key1); out: - up_read(¤t->mm->mmap_sem); + if (fshared) + up_read(fshared); return ret; } @@ -936,7 +985,8 @@ out: * to this virtual address: */ static int -futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2, +futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared, + u32 __user *uaddr2, int nr_wake, int nr_wake2, int op) { union futex_key key1, key2; @@ -946,12 +996,13 @@ futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2, int ret, op_ret, attempt = 0; retryfull: - down_read(¤t->mm->mmap_sem); + if (fshared) + down_read(fshared); - ret = get_futex_key(uaddr1, &key1); + ret = get_futex_key(uaddr1, fshared, &key1); if (unlikely(ret != 0)) goto out; - ret = get_futex_key(uaddr2, &key2); + ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out; @@ -991,11 +1042,10 @@ retry: * still holding the mmap_sem. */ if (attempt++) { - if (futex_handle_fault((unsigned long)uaddr2, - attempt)) { - ret = -EFAULT; + ret = futex_handle_fault((unsigned long)uaddr2, + fshared, attempt); + if (ret) goto out; - } goto retry; } @@ -1003,7 +1053,8 @@ retry: * If we would have faulted, release mmap_sem, * fault it in and start all over again. */ - up_read(¤t->mm->mmap_sem); + if (fshared) + up_read(fshared); ret = get_user(dummy, uaddr2); if (ret) @@ -1040,7 +1091,8 @@ retry: if (hb1 != hb2) spin_unlock(&hb2->lock); out: - up_read(¤t->mm->mmap_sem); + if (fshared) + up_read(fshared); return ret; } @@ -1048,7 +1100,8 @@ out: * Requeue all waiters hashed on one physical page to another * physical page. */ -static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2, +static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared, + u32 __user *uaddr2, int nr_wake, int nr_requeue, u32 *cmpval) { union futex_key key1, key2; @@ -1058,12 +1111,13 @@ static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2, int ret, drop_count = 0; retry: - down_read(¤t->mm->mmap_sem); + if (fshared) + down_read(fshared); - ret = get_futex_key(uaddr1, &key1); + ret = get_futex_key(uaddr1, fshared, &key1); if (unlikely(ret != 0)) goto out; - ret = get_futex_key(uaddr2, &key2); + ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out; @@ -1086,7 +1140,8 @@ static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2, * If we would have faulted, release mmap_sem, fault * it in and start all over again. */ - up_read(¤t->mm->mmap_sem); + if (fshared) + up_read(fshared); ret = get_user(curval, uaddr1); @@ -1139,7 +1194,8 @@ out_unlock: drop_futex_key_refs(&key1); out: - up_read(¤t->mm->mmap_sem); + if (fshared) + up_read(fshared); return ret; } @@ -1273,7 +1329,8 @@ static void unqueue_me_pi(struct futex_q *q) * The cur->mm semaphore must be held, it is released at return of this * function. */ -static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, +static int fixup_pi_state_owner(u32 __user *uaddr, struct rw_semaphore *fshared, + struct futex_q *q, struct futex_hash_bucket *hb, struct task_struct *curr) { @@ -1300,7 +1357,8 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, /* Unqueue and drop the lock */ unqueue_me_pi(q); - up_read(&curr->mm->mmap_sem); + if (fshared) + up_read(fshared); /* * We own it, so we have to replace the pending owner * TID. 
This must be atomic as we have preserve the @@ -1321,8 +1379,15 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, return ret; } +/* + * In case we must use restart_block to restart a futex_wait, + * we encode in the 'arg3' shared capability + */ +#define ARG3_SHARED 1 + static long futex_wait_restart(struct restart_block *restart); -static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) +static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, + u32 val, ktime_t *abs_time) { struct task_struct *curr = current; DECLARE_WAITQUEUE(wait, curr); @@ -1335,9 +1400,10 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) q.pi_state = NULL; retry: - down_read(&curr->mm->mmap_sem); + if (fshared) + down_read(fshared); - ret = get_futex_key(uaddr, &q.key); + ret = get_futex_key(uaddr, fshared, &q.key); if (unlikely(ret != 0)) goto out_release_sem; @@ -1360,8 +1426,8 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) * a wakeup when *uaddr != val on entry to the syscall. This is * rare, but normal. * - * We hold the mmap semaphore, so the mapping cannot have changed - * since we looked it up in get_futex_key. + * for shared futexes, we hold the mmap semaphore, so the mapping + * cannot have changed since we looked it up in get_futex_key. */ ret = get_futex_value_locked(&uval, uaddr); @@ -1372,7 +1438,8 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) * If we would have faulted, release mmap_sem, fault it in and * start all over again. */ - up_read(&curr->mm->mmap_sem); + if (fshared) + up_read(fshared); ret = get_user(uval, uaddr); @@ -1399,7 +1466,8 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) * Now the futex is queued and we have checked the data, we * don't want to hold mmap_sem while we sleep. 
*/ - up_read(&curr->mm->mmap_sem); + if (fshared) + up_read(fshared); /* * There might have been scheduling since the queue_me(), as we @@ -1469,7 +1537,8 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) else ret = rt_mutex_timed_lock(lock, to, 1); - down_read(&curr->mm->mmap_sem); + if (fshared) + down_read(fshared); spin_lock(q.lock_ptr); /* @@ -1486,7 +1555,8 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) /* mmap_sem and hash_bucket lock are unlocked at return of this function */ - ret = fixup_pi_state_owner(uaddr, &q, hb, curr); + ret = fixup_pi_state_owner(uaddr, fshared, + &q, hb, curr); } else { /* * Catch the rare case, where the lock was released @@ -1499,7 +1569,8 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) } /* Unqueue and drop the lock */ unqueue_me_pi(&q); - up_read(&curr->mm->mmap_sem); + if (fshared) + up_read(fshared); } debug_rt_mutex_free_waiter(&q.waiter); @@ -1528,6 +1599,9 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) restart->arg0 = (unsigned long)uaddr; restart->arg1 = (unsigned long)val; restart->arg2 = (unsigned long)abs_time; + restart->arg3 = 0; + if (fshared) + restart->arg3 |= ARG3_SHARED; return -ERESTART_RESTARTBLOCK; } @@ -1535,7 +1609,8 @@ static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time) queue_unlock(&q, hb); out_release_sem: - up_read(&curr->mm->mmap_sem); + if (fshared) + up_read(fshared); return ret; } @@ -1545,9 +1620,12 @@ static long futex_wait_restart(struct restart_block *restart) u32 __user *uaddr = (u32 __user *)restart->arg0; u32 val = (u32)restart->arg1; ktime_t *abs_time = (ktime_t *)restart->arg2; + struct rw_semaphore *fshared = NULL; restart->fn = do_no_restart_syscall; - return (long)futex_wait(uaddr, val, abs_time); + if (restart->arg3 & ARG3_SHARED) + fshared = &current->mm->mmap_sem; + return (long)futex_wait(uaddr, fshared, val, abs_time); } @@ -1602,8 +1680,8 @@ static void set_pi_futex_owner(struct futex_hash_bucket *hb, * if there are waiters then it will block, it does PI, etc. (Due to * races the kernel might see a 0 value of the futex too.) */ -static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, - int trylock) +static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, + int detect, ktime_t *time, int trylock) { struct hrtimer_sleeper timeout, *to = NULL; struct task_struct *curr = current; @@ -1624,9 +1702,10 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, q.pi_state = NULL; retry: - down_read(&curr->mm->mmap_sem); + if (fshared) + down_read(fshared); - ret = get_futex_key(uaddr, &q.key); + ret = get_futex_key(uaddr, fshared, &q.key); if (unlikely(ret != 0)) goto out_release_sem; @@ -1747,7 +1826,8 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, * Now the futex is queued and we have checked the data, we * don't want to hold mmap_sem while we sleep. */ - up_read(&curr->mm->mmap_sem); + if (fshared) + up_read(fshared); WARN_ON(!q.pi_state); /* @@ -1761,7 +1841,8 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, ret = ret ?
0 : -EWOULDBLOCK; } - down_read(&curr->mm->mmap_sem); + if (fshared) + down_read(fshared); spin_lock(q.lock_ptr); /* @@ -1770,7 +1851,7 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, */ if (!ret && q.pi_state->owner != curr) /* mmap_sem is unlocked at return of this function */ - ret = fixup_pi_state_owner(uaddr, &q, hb, curr); + ret = fixup_pi_state_owner(uaddr, fshared, &q, hb, curr); else { /* * Catch the rare case, where the lock was released @@ -1783,7 +1864,8 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, } /* Unqueue and drop the lock */ unqueue_me_pi(&q); - up_read(&curr->mm->mmap_sem); + if (fshared) + up_read(fshared); } if (!detect && ret == -EDEADLK && 0) @@ -1795,7 +1877,8 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, queue_unlock(&q, hb); out_release_sem: - up_read(&curr->mm->mmap_sem); + if (fshared) + up_read(fshared); return ret; uaddr_faulted: @@ -1806,15 +1889,16 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, * still holding the mmap_sem. */ if (attempt++) { - if (futex_handle_fault((unsigned long)uaddr, attempt)) { - ret = -EFAULT; + ret = futex_handle_fault((unsigned long)uaddr, fshared, + attempt); + if (ret) goto out_unlock_release_sem; - } goto retry_locked; } queue_unlock(&q, hb); - up_read(&curr->mm->mmap_sem); + if (fshared) + up_read(fshared); ret = get_user(uval, uaddr); if (!ret && (uval != -EFAULT)) @@ -1828,7 +1912,7 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time, * This is the in-kernel slowpath: we look up the PI state (if any), * and do the rt-mutex unlock. */ -static int futex_unlock_pi(u32 __user *uaddr) +static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared) { struct futex_hash_bucket *hb; struct futex_q *this, *next; @@ -1848,9 +1932,10 @@ retry: /* * First take all the futex related locks: */ - down_read(&current->mm->mmap_sem); + if (fshared) + down_read(fshared); - ret = get_futex_key(uaddr, &key); + ret = get_futex_key(uaddr, fshared, &key); if (unlikely(ret != 0)) goto out; @@ -1909,7 +1994,8 @@ retry_locked: out_unlock: spin_unlock(&hb->lock); out: - up_read(&current->mm->mmap_sem); + if (fshared) + up_read(fshared); return ret; @@ -1921,15 +2007,16 @@ pi_faulted: * still holding the mmap_sem. 
*/ if (attempt++) { - if (futex_handle_fault((unsigned long)uaddr, attempt)) { - ret = -EFAULT; + ret = futex_handle_fault((unsigned long)uaddr, fshared, + attempt); + if (ret) goto out_unlock; - } goto retry_locked; } spin_unlock(&hb->lock); - up_read(&current->mm->mmap_sem); + if (fshared) + up_read(fshared); ret = get_user(uval, uaddr); if (!ret && (uval != -EFAULT)) @@ -1981,6 +2068,7 @@ static int futex_fd(u32 __user *uaddr, int signal) struct futex_q *q; struct file *filp; int ret, err; + struct rw_semaphore *fshared; static unsigned long printk_interval; if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) { @@ -2022,11 +2110,12 @@ static int futex_fd(u32 __user *uaddr, int signal) } q->pi_state = NULL; - down_read(&current->mm->mmap_sem); - err = get_futex_key(uaddr, &q->key); + fshared = &current->mm->mmap_sem; + down_read(fshared); + err = get_futex_key(uaddr, fshared, &q->key); if (unlikely(err != 0)) { - up_read(&current->mm->mmap_sem); + up_read(fshared); kfree(q); goto error; } @@ -2038,7 +2127,7 @@ static int futex_fd(u32 __user *uaddr, int signal) filp->private_data = q; queue_me(q, ret, filp); - up_read(&current->mm->mmap_sem); + up_read(fshared); /* Now we map fd to filp, so userspace can access it */ fd_install(ret, filp); @@ -2167,7 +2256,7 @@ retry: */ if (!pi) { if (uval & FUTEX_WAITERS) - futex_wake(uaddr, 1); + futex_wake(uaddr, &curr->mm->mmap_sem, 1); } } return 0; @@ -2223,7 +2312,8 @@ void exit_robust_list(struct task_struct *curr) return; if (pending) - handle_futex_death((void __user *)pending + futex_offset, curr, pip); + handle_futex_death((void __user *)pending + futex_offset, + curr, pip); while (entry != &head->list) { /* @@ -2253,38 +2343,43 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) { int ret; + int cmd = op & FUTEX_CMD_MASK; + struct rw_semaphore *fshared = NULL; + + if (!(op & FUTEX_PRIVATE_FLAG)) + fshared = &current->mm->mmap_sem; - switch (op) { + switch (cmd) { case FUTEX_WAIT: - ret = futex_wait(uaddr, val, timeout); + ret = futex_wait(uaddr, fshared, val, timeout); break; case FUTEX_WAKE: - ret = futex_wake(uaddr, val); + ret = futex_wake(uaddr, fshared, val); break; case FUTEX_FD: /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */ ret = futex_fd(uaddr, val); break; case FUTEX_REQUEUE: - ret = futex_requeue(uaddr, uaddr2, val, val2, NULL); + ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL); break; case FUTEX_CMP_REQUEUE: - ret = futex_requeue(uaddr, uaddr2, val, val2, &val3); + ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3); break; case FUTEX_WAKE_OP: - ret = futex_wake_op(uaddr, uaddr2, val, val2, val3); + ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3); break; case FUTEX_LOCK_PI: - ret = futex_lock_pi(uaddr, val, timeout, 0); + ret = futex_lock_pi(uaddr, fshared, val, timeout, 0); break; case FUTEX_UNLOCK_PI: - ret = futex_unlock_pi(uaddr); + ret = futex_unlock_pi(uaddr, fshared); break; case FUTEX_TRYLOCK_PI: - ret = futex_lock_pi(uaddr, 0, timeout, 1); + ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1); break; case FUTEX_CMP_REQUEUE_PI: - ret = futex_requeue_pi(uaddr, uaddr2, val, val2, &val3); + ret = futex_requeue_pi(uaddr, fshared, uaddr2, val, val2, &val3); break; default: ret = -ENOSYS; @@ -2300,23 +2395,24 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, struct timespec ts; ktime_t t, *tp = NULL; u32 val2 = 0; + int cmd = op & FUTEX_CMD_MASK; - if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) { + if (utime && (cmd ==
FUTEX_WAIT || cmd == FUTEX_LOCK_PI)) { if (copy_from_user(&ts, utime, sizeof(ts)) != 0) return -EFAULT; if (!timespec_valid(&ts)) return -EINVAL; t = timespec_to_ktime(ts); - if (op == FUTEX_WAIT) + if (cmd == FUTEX_WAIT) t = ktime_add(ktime_get(), t); tp = &t; } /* - * requeue parameter in 'utime' if op == FUTEX_REQUEUE. + * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE. */ - if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE - || op == FUTEX_CMP_REQUEUE_PI) + if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE + || cmd == FUTEX_CMP_REQUEUE_PI) val2 = (u32) (unsigned long) utime; return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); -- cgit v1.2.3 From 01f2705daf5a36208e69d7cf95db9c330f843af6 Mon Sep 17 00:00:00 2001 From: Nate Diller Date: Wed, 9 May 2007 02:35:07 -0700 Subject: fs: convert core functions to zero_user_page It's very common for file systems to need to zero part or all of a page, the simplist way is just to use kmap_atomic() and memset(). There's actually a library function in include/linux/highmem.h that does exactly that, but it's confusingly named memclear_highpage_flush(), which is descriptive of *how* it does the work rather than what the *purpose* is. So this patchset renames the function to zero_user_page(), and calls it from the various places that currently open code it. This first patch introduces the new function call, and converts all the core kernel callsites, both the open-coded ones and the old memclear_highpage_flush() ones. Following this patch is a series of conversions for each file system individually, per AKPM, and finally a patch deprecating the old call. The diffstat below shows the entire patchset. [akpm@linux-foundation.org: fix a few things] Signed-off-by: Nate Diller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/loop.c | 6 +----- fs/buffer.c | 56 +++++++++++-------------------------------------- fs/direct-io.c | 8 ++----- fs/mpage.c | 15 +++++-------- include/linux/highmem.h | 28 +++++++++++++++++-------- mm/filemap_xip.c | 7 +------ mm/truncate.c | 3 ++- 7 files changed, 42 insertions(+), 81 deletions(-) (limited to 'include/linux') diff --git a/drivers/block/loop.c b/drivers/block/loop.c index af6d7274a7cc..18cdd8c77626 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -243,17 +243,13 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, transfer_result = lo_do_transfer(lo, WRITE, page, offset, bvec->bv_page, bv_offs, size, IV); if (unlikely(transfer_result)) { - char *kaddr; - /* * The transfer failed, but we still write the data to * keep prepare/commit calls balanced. 
*/ printk(KERN_ERR "loop: transfer error block %llu\n", (unsigned long long)index); - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, size); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, offset, size, KM_USER0); } flush_dcache_page(page); ret = aops->commit_write(file, page, offset, diff --git a/fs/buffer.c b/fs/buffer.c index eb820b82a636..fc2d763a8d78 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1846,13 +1846,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page, if (block_start >= to) break; if (buffer_new(bh)) { - void *kaddr; - clear_buffer_new(bh); - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr+block_start, 0, bh->b_size); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, block_start, bh->b_size, KM_USER0); set_buffer_uptodate(bh); mark_buffer_dirty(bh); } @@ -1940,10 +1935,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block) SetPageError(page); } if (!buffer_mapped(bh)) { - void *kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + i * blocksize, 0, blocksize); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, i * blocksize, blocksize, + KM_USER0); if (!err) set_buffer_uptodate(bh); continue; @@ -2086,7 +2079,6 @@ int cont_prepare_write(struct page *page, unsigned offset, long status; unsigned zerofrom; unsigned blocksize = 1 << inode->i_blkbits; - void *kaddr; while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) { status = -ENOMEM; @@ -2108,10 +2100,8 @@ int cont_prepare_write(struct page *page, unsigned offset, PAGE_CACHE_SIZE, get_block); if (status) goto out_unmap; - kaddr = kmap_atomic(new_page, KM_USER0); - memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom); - flush_dcache_page(new_page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, zerofrom, PAGE_CACHE_SIZE - zerofrom, + KM_USER0); generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE); unlock_page(new_page); page_cache_release(new_page); @@ -2138,10 +2128,7 @@ int cont_prepare_write(struct page *page, unsigned offset, if (status) goto out1; if (zerofrom < offset) { - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr+zerofrom, 0, offset-zerofrom); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0); __block_commit_write(inode, page, zerofrom, offset); } return 0; @@ -2340,10 +2327,7 @@ failed: * Error recovery is pretty slack. Clear the page and mark it dirty * so we'll later zero out any blocks which _were_ allocated. */ - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr, 0, PAGE_CACHE_SIZE); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); SetPageUptodate(page); set_page_dirty(page); return ret; @@ -2382,7 +2366,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block, loff_t i_size = i_size_read(inode); const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; unsigned offset; - void *kaddr; int ret; /* Is the page fully inside i_size? */ @@ -2413,10 +2396,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." 
*/ - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); out: ret = mpage_writepage(page, get_block, wbc); if (ret == -EAGAIN) @@ -2437,7 +2417,6 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from) unsigned to; struct page *page; const struct address_space_operations *a_ops = mapping->a_ops; - char *kaddr; int ret = 0; if ((offset & (blocksize - 1)) == 0) @@ -2451,10 +2430,8 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from) to = (offset + blocksize) & ~(blocksize - 1); ret = a_ops->prepare_write(NULL, page, offset, to); if (ret == 0) { - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, + KM_USER0); /* * It would be more correct to call aops->commit_write() * here, but this is more efficient. @@ -2480,7 +2457,6 @@ int block_truncate_page(struct address_space *mapping, struct inode *inode = mapping->host; struct page *page; struct buffer_head *bh; - void *kaddr; int err; blocksize = 1 << inode->i_blkbits; @@ -2534,11 +2510,7 @@ int block_truncate_page(struct address_space *mapping, goto unlock; } - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, length); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); - + zero_user_page(page, offset, length, KM_USER0); mark_buffer_dirty(bh); err = 0; @@ -2559,7 +2531,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block, loff_t i_size = i_size_read(inode); const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; unsigned offset; - void *kaddr; /* Is the page fully inside i_size? */ if (page->index < end_index) @@ -2585,10 +2556,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." 
*/ - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); return __block_write_full_page(inode, page, get_block, wbc); } diff --git a/fs/direct-io.c b/fs/direct-io.c index d9d0833444f5..8aa2d8b04ef1 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -867,7 +867,6 @@ static int do_direct_IO(struct dio *dio) do_holes: /* Handle holes */ if (!buffer_mapped(map_bh)) { - char *kaddr; loff_t i_size_aligned; /* AKPM: eargh, -ENOTBLK is a hack */ @@ -888,11 +887,8 @@ do_holes: page_cache_release(page); goto out; } - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + (block_in_page << blkbits), - 0, 1 << blkbits); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, block_in_page << blkbits, + 1 << blkbits, KM_USER0); dio->block_in_file++; block_in_page++; goto next_block; diff --git a/fs/mpage.c b/fs/mpage.c index fa2441f57b41..0fb914fc2ee0 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -284,11 +284,9 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, } if (first_hole != blocks_per_page) { - char *kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + (first_hole << blkbits), 0, - PAGE_CACHE_SIZE - (first_hole << blkbits)); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, first_hole << blkbits, + PAGE_CACHE_SIZE - (first_hole << blkbits), + KM_USER0); if (first_hole == 0) { SetPageUptodate(page); unlock_page(page); @@ -576,14 +574,11 @@ page_is_mapped: * written out to the file." */ unsigned offset = i_size & (PAGE_CACHE_SIZE - 1); - char *kaddr; if (page->index > end_index || !offset) goto confused; - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, + KM_USER0); } /* diff --git a/include/linux/highmem.h b/include/linux/highmem.h index a515eb0afdfb..b5f2ab42d984 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -94,17 +94,27 @@ static inline void clear_highpage(struct page *page) /* * Same but also flushes aliased cache contents to RAM. 
+ * + * This must be a macro because KM_USER0 and friends aren't defined if + * !CONFIG_HIGHMEM */ -static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) +#define zero_user_page(page, offset, size, km_type) \ + do { \ + void *kaddr; \ + \ + BUG_ON((offset) + (size) > PAGE_SIZE); \ + \ + kaddr = kmap_atomic(page, km_type); \ + memset((char *)kaddr + (offset), 0, (size)); \ + flush_dcache_page(page); \ + kunmap_atomic(kaddr, (km_type)); \ + } while (0) + + +static inline void memclear_highpage_flush(struct page *page, + unsigned int offset, unsigned int size) { - void *kaddr; - - BUG_ON(offset + size > PAGE_SIZE); - - kaddr = kmap_atomic(page, KM_USER0); - memset((char *)kaddr + offset, 0, size); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_page(page, offset, size, KM_USER0); } #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index cbb335813ec0..1b49dab9b25d 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -434,7 +434,6 @@ xip_truncate_page(struct address_space *mapping, loff_t from) unsigned blocksize; unsigned length; struct page *page; - void *kaddr; BUG_ON(!mapping->a_ops->get_xip_page); @@ -458,11 +457,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from) else return PTR_ERR(page); } - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, length); - kunmap_atomic(kaddr, KM_USER0); - - flush_dcache_page(page); + zero_user_page(page, offset, length, KM_USER0); return 0; } EXPORT_SYMBOL_GPL(xip_truncate_page); diff --git a/mm/truncate.c b/mm/truncate.c index 0f4b6d18ab0e..4fbe1a2da5fb 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include /* grr. try_to_release_page, @@ -46,7 +47,7 @@ void do_invalidatepage(struct page *page, unsigned long offset) static inline void truncate_partial_page(struct page *page, unsigned partial) { - memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial); + zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0); if (PagePrivate(page)) do_invalidatepage(page, partial); } -- cgit v1.2.3 From f37bc2712b54ec641e0c0c8634f1a4b61d9956c0 Mon Sep 17 00:00:00 2001 From: Nate Diller Date: Wed, 9 May 2007 02:35:09 -0700 Subject: fs: deprecate memclear_highpage_flush Now that all the in-tree users are converted over to zero_user_page(), deprecate the old memclear_highpage_flush() call. Signed-off-by: Nate Diller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/highmem.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/highmem.h b/include/linux/highmem.h index b5f2ab42d984..98e2cce996a4 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -110,8 +110,7 @@ static inline void clear_highpage(struct page *page) kunmap_atomic(kaddr, (km_type)); \ } while (0) - -static inline void memclear_highpage_flush(struct page *page, +static inline void __deprecated memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) { zero_user_page(page, offset, size, KM_USER0); -- cgit v1.2.3 From 8bb7844286fb8c9fce6f65d8288aeb09d03a5e0d Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Wed, 9 May 2007 02:35:10 -0700 Subject: Add suspend-related notifications for CPU hotplug Since nonboot CPUs are now disabled after tasks and devices have been frozen and the CPU hotplug infrastructure is used for this purpose, we need special CPU hotplug notifications that will help the CPU-hotplug-aware subsystems distinguish normal CPU hotplug events from CPU hotplug events related to a system-wide suspend or resume operation in progress. This patch introduces such notifications and causes them to be used during suspend and resume transitions. It also changes all of the CPU-hotplug-aware subsystems to take these notifications into consideration (for now they are handled in the same way as the corresponding "normal" ones). [oleg@tv-sign.ru: cleanups] Signed-off-by: Rafael J. Wysocki Cc: Gautham R Shenoy Cc: Pavel Machek Signed-off-by: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/cpu-hotplug.txt | 9 ++++++-- arch/i386/kernel/cpu/intel_cacheinfo.c | 2 ++ arch/i386/kernel/cpu/mcheck/therm_throt.c | 2 ++ arch/i386/kernel/cpuid.c | 2 ++ arch/i386/kernel/microcode.c | 3 +++ arch/i386/kernel/msr.c | 2 ++ arch/ia64/kernel/err_inject.c | 2 ++ arch/ia64/kernel/palinfo.c | 2 ++ arch/ia64/kernel/salinfo.c | 2 ++ arch/ia64/kernel/topology.c | 2 ++ arch/powerpc/kernel/sysfs.c | 2 ++ arch/powerpc/mm/numa.c | 3 +++ arch/s390/appldata/appldata_base.c | 2 ++ arch/s390/kernel/smp.c | 2 ++ arch/x86_64/kernel/mce.c | 2 ++ arch/x86_64/kernel/mce_amd.c | 2 ++ arch/x86_64/kernel/vsyscall.c | 2 +- block/ll_rw_blk.c | 2 +- drivers/base/topology.c | 3 +++ drivers/cpufreq/cpufreq.c | 3 +++ drivers/cpufreq/cpufreq_stats.c | 2 ++ drivers/hwmon/coretemp.c | 2 ++ drivers/infiniband/hw/ehca/ehca_irq.c | 6 ++++++ drivers/kvm/kvm_main.c | 3 +++ fs/buffer.c | 2 +- fs/xfs/xfs_mount.c | 3 +++ include/linux/notifier.h | 12 +++++++++++ kernel/cpu.c | 34 ++++++++++++++++--------------- kernel/hrtimer.c | 2 ++ kernel/profile.c | 4 ++++ kernel/rcupdate.c | 2 ++ kernel/relay.c | 2 ++ kernel/sched.c | 10 +++++++++ kernel/softirq.c | 4 ++++ kernel/softlockup.c | 4 ++++ kernel/timer.c | 2 ++ kernel/workqueue.c | 2 ++ lib/radix-tree.c | 2 +- mm/page_alloc.c | 5 ++++- mm/slab.c | 6 ++++++ mm/slub.c | 2 ++ mm/swap.c | 2 +- mm/vmscan.c | 2 +- mm/vmstat.c | 3 +++ net/core/dev.c | 2 +- net/core/flow.c | 2 +- net/iucv/iucv.c | 6 ++++++ 47 files changed, 152 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt index cc60d29b954c..b6d24c22274b 100644 --- a/Documentation/cpu-hotplug.txt +++ b/Documentation/cpu-hotplug.txt @@ -217,14 +217,17 @@ Q: What happens when a CPU is being logically offlined? A: The following happen, listed in no particular order :-) - A notification is sent to in-kernel registered modules by sending an event - CPU_DOWN_PREPARE + CPU_DOWN_PREPARE or CPU_DOWN_PREPARE_FROZEN, depending on whether or not the + CPU is being offlined while tasks are frozen due to a suspend operation in + progress - All process is migrated away from this outgoing CPU to a new CPU - All interrupts targeted to this CPU is migrated to a new CPU - timers/bottom half/task lets are also migrated to a new CPU - Once all services are migrated, kernel calls an arch specific routine __cpu_disable() to perform arch specific cleanup. - Once this is successful, an event for successful cleanup is sent by an event - CPU_DEAD. 
+ CPU_DEAD (or CPU_DEAD_FROZEN if tasks are frozen due to a suspend while the + CPU is being offlined). "It is expected that each service cleans up when the CPU_DOWN_PREPARE notifier is called, when CPU_DEAD is called its expected there is nothing @@ -242,9 +245,11 @@ A: This is what you would need in your kernel code to receive notifications. switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: foobar_online_action(cpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: foobar_dead_action(cpu); break; } diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index 80b4c5d421b1..e5be819492ef 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -733,9 +733,11 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, sys_dev = get_cpu_sysdev(cpu); switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: cache_add_dev(sys_dev); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: cache_remove_dev(sys_dev); break; } diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c index 065005c3f168..5b0a040213c2 100644 --- a/arch/i386/kernel/cpu/mcheck/therm_throt.c +++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c @@ -137,10 +137,12 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, mutex_lock(&therm_cpu_lock); switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: err = thermal_throttle_add_dev(sys_dev); WARN_ON(err); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: thermal_throttle_remove_dev(sys_dev); break; } diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c index eeae0d992337..5c2faa10e9fa 100644 --- a/arch/i386/kernel/cpuid.c +++ b/arch/i386/kernel/cpuid.c @@ -169,9 +169,11 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long ac switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: cpuid_device_create(cpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); break; } diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c index cbe7ec8dbb9f..7d934e493e74 100644 --- a/arch/i386/kernel/microcode.c +++ b/arch/i386/kernel/microcode.c @@ -775,10 +775,13 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) sys_dev = get_cpu_sysdev(cpu); switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: mc_sysdev_add(sys_dev); break; case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: mc_sysdev_remove(sys_dev); break; } diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c index 8cd0a91ce107..0c1069b8d638 100644 --- a/arch/i386/kernel/msr.c +++ b/arch/i386/kernel/msr.c @@ -153,9 +153,11 @@ static int msr_class_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: msr_device_create(cpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); break; } diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c index d3e9f33e8bdd..6a49600cf337 100644 --- a/arch/ia64/kernel/err_inject.c +++ b/arch/ia64/kernel/err_inject.c @@ -236,9 +236,11 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb, sys_dev = get_cpu_sysdev(cpu); switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: err_inject_add_dev(sys_dev); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: err_inject_remove_dev(sys_dev); break; } diff --git 
a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index a71df9ae0397..85829e27785c 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -975,9 +975,11 @@ static int palinfo_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: create_palinfo_proc_entries(hotcpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: remove_palinfo_proc_entries(hotcpu); break; } diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index a51f1d0bfb70..89f6b138a62c 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -582,6 +582,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu struct salinfo_data *data; switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: spin_lock_irqsave(&data_saved_lock, flags); for (i = 0, data = salinfo_data; i < ARRAY_SIZE(salinfo_data); @@ -592,6 +593,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu spin_unlock_irqrestore(&data_saved_lock, flags); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: spin_lock_irqsave(&data_saved_lock, flags); for (i = 0, data = salinfo_data; i < ARRAY_SIZE(salinfo_data); diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 687500ddb4b8..94ae3c87d828 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -412,9 +412,11 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb, sys_dev = get_cpu_sysdev(cpu); switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: cache_add_dev(sys_dev); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: cache_remove_dev(sys_dev); break; } diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index cae39d9dfe48..68991c2d4a1b 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -342,10 +342,12 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: register_cpu_online(cpu); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_DEAD: + case CPU_DEAD_FROZEN: unregister_cpu_online(cpu); break; #endif diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index b3a592b25ab3..de45aa82d97b 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -252,12 +252,15 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb, switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: numa_setup_cpu(lcpu); ret = NOTIFY_OK; break; #ifdef CONFIG_HOTPLUG_CPU case CPU_DEAD: + case CPU_DEAD_FROZEN: case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: unmap_cpu_from_node(lcpu); break; ret = NOTIFY_OK; diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index ee89b33145d5..81a2b92ab0c2 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -567,9 +567,11 @@ appldata_cpu_notify(struct notifier_block *self, { switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: appldata_online_cpu((long) hcpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: appldata_offline_cpu((long) hcpu); break; default: diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index b7977027a28f..09f028a3266b 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -789,10 +789,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self, switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: if (sysdev_create_file(s, &attr_capability)) return NOTIFY_BAD; break; case CPU_DEAD: + case 
CPU_DEAD_FROZEN: sysdev_remove_file(s, &attr_capability); break; } diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c index 442169640e45..a14375dd5425 100644 --- a/arch/x86_64/kernel/mce.c +++ b/arch/x86_64/kernel/mce.c @@ -720,9 +720,11 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: mce_create_device(cpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: mce_remove_device(cpu); break; } diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c index d0bd5d66e103..03356e64f9c8 100644 --- a/arch/x86_64/kernel/mce_amd.c +++ b/arch/x86_64/kernel/mce_amd.c @@ -654,9 +654,11 @@ static int threshold_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: threshold_create_device(cpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: threshold_remove_device(cpu); break; default: diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c index dc32cef96195..51d4c6fa88c8 100644 --- a/arch/x86_64/kernel/vsyscall.c +++ b/arch/x86_64/kernel/vsyscall.c @@ -327,7 +327,7 @@ static int __cpuinit cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) { long cpu = (long)arg; - if (action == CPU_ONLINE) + if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); return NOTIFY_DONE; } diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index df506571ed60..cd54672da99f 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3507,7 +3507,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action, * If a CPU goes away, splice its entries to the current CPU * and trigger a run of the softirq */ - if (action == CPU_DEAD) { + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { int cpu = (unsigned long) hcpu; local_irq_disable(); diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 067a9e8bc377..8d8cdfec6529 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -126,10 +126,13 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: rc = topology_add_dev(cpu); break; case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: + case CPU_DEAD_FROZEN: topology_remove_dev(cpu); break; } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 893dbaf386fb..eb37fba9b7ef 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1685,9 +1685,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, if (sys_dev) { switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: cpufreq_add_dev(sys_dev); break; case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: if (unlikely(lock_policy_rwsem_write(cpu))) BUG(); @@ -1699,6 +1701,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, __cpufreq_remove_dev(sys_dev); break; case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: cpufreq_add_dev(sys_dev); break; } diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index d1c7cac9316c..d2f0cbd8b8f3 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -313,9 +313,11 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: cpufreq_update_policy(cpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: cpufreq_stats_free_table(cpu); break; } diff 
--git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 03b1f650d1c4..75e3911810a3 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -309,9 +309,11 @@ static int coretemp_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: coretemp_device_add(cpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: coretemp_device_remove(cpu); break; } diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index f284be1c9166..82dda2faf4d0 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c @@ -745,6 +745,7 @@ static int comp_pool_callback(struct notifier_block *nfb, switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); if(!create_comp_task(pool, cpu)) { ehca_gen_err("Can't create comp_task for cpu: %x", cpu); @@ -752,24 +753,29 @@ static int comp_pool_callback(struct notifier_block *nfb, } break; case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu); cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); kthread_bind(cct->task, any_online_cpu(cpu_online_map)); destroy_comp_task(pool, cpu); break; case CPU_ONLINE: + case CPU_ONLINE_FROZEN: ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu); cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); kthread_bind(cct->task, cpu); wake_up_process(cct->task); break; case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu); break; case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu); destroy_comp_task(pool, cpu); take_over_work(pool, cpu); diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index c8b8cfa332bb..0d892600ff00 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -2889,7 +2889,9 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, switch (val) { case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", cpu); decache_vcpus_on_cpu(cpu); @@ -2897,6 +2899,7 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, NULL, 0, 1); break; case CPU_ONLINE: + case CPU_ONLINE_FROZEN: printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", cpu); smp_call_function_single(cpu, kvm_arch_ops->hardware_enable, diff --git a/fs/buffer.c b/fs/buffer.c index fc2d763a8d78..aecd057cd0e0 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2946,7 +2946,7 @@ static void buffer_exit_cpu(int cpu) static int buffer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { - if (action == CPU_DEAD) + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) buffer_exit_cpu((unsigned long)hcpu); return NOTIFY_OK; } diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index f5aa3ef855fb..a96bde6df96d 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1734,11 +1734,13 @@ xfs_icsb_cpu_notify( per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu); switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: /* Easy Case - initialize the area and locks, and * then rebalance when online does everything else for us. 
*/ memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); break; case CPU_ONLINE: + case CPU_ONLINE_FROZEN: xfs_icsb_lock(mp); xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0); xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0); @@ -1746,6 +1748,7 @@ xfs_icsb_cpu_notify( xfs_icsb_unlock(mp); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: /* Disable all the counters, then fold the dead cpu's * count into the total on the global superblock and * re-enable the counters. */ diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 1903e5490c04..9431101bf876 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -197,5 +197,17 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, #define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */ #define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */ +/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend + * operation in progress + */ +#define CPU_TASKS_FROZEN 0x0010 + +#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN) +#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN) +#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN) +#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) +#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) +#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) + #endif /* __KERNEL__ */ #endif /* _LINUX_NOTIFIER_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index 28cb6c71a47a..369d2892687d 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -120,12 +120,13 @@ static int take_cpu_down(void *unused) } /* Requires cpu_add_remove_lock to be held */ -static int _cpu_down(unsigned int cpu) +static int _cpu_down(unsigned int cpu, int tasks_frozen) { int err, nr_calls = 0; struct task_struct *p; cpumask_t old_allowed, tmp; void *hcpu = (void *)(long)cpu; + unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; if (num_online_cpus() == 1) return -EBUSY; @@ -134,11 +135,11 @@ static int _cpu_down(unsigned int cpu) return -EINVAL; raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu); - err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, + err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err == NOTIFY_BAD) { - __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu, - nr_calls, NULL); + __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, + hcpu, nr_calls, NULL); printk("%s: attempt to take down CPU %u failed\n", __FUNCTION__, cpu); err = -EINVAL; @@ -157,7 +158,7 @@ static int _cpu_down(unsigned int cpu) if (IS_ERR(p) || cpu_online(cpu)) { /* CPU didn't die: tell everyone. Can't complain. */ - if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, + if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, hcpu) == NOTIFY_BAD) BUG(); @@ -176,7 +177,8 @@ static int _cpu_down(unsigned int cpu) __cpu_die(cpu); /* CPU is completely dead: tell everyone. Too late to complain. 
*/ - if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu) == NOTIFY_BAD) + if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod, + hcpu) == NOTIFY_BAD) BUG(); check_for_tasks(cpu); @@ -186,8 +188,7 @@ out_thread: out_allowed: set_cpus_allowed(current, old_allowed); out_release: - raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, - (void *)(long)cpu); + raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu); return err; } @@ -199,7 +200,7 @@ int cpu_down(unsigned int cpu) if (cpu_hotplug_disabled) err = -EBUSY; else - err = _cpu_down(cpu); + err = _cpu_down(cpu, 0); mutex_unlock(&cpu_add_remove_lock); return err; @@ -207,16 +208,17 @@ int cpu_down(unsigned int cpu) #endif /*CONFIG_HOTPLUG_CPU*/ /* Requires cpu_add_remove_lock to be held */ -static int __cpuinit _cpu_up(unsigned int cpu) +static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) { int ret, nr_calls = 0; void *hcpu = (void *)(long)cpu; + unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; if (cpu_online(cpu) || !cpu_present(cpu)) return -EINVAL; raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu); - ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu, + ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls); if (ret == NOTIFY_BAD) { printk("%s: attempt to bring up CPU %u failed\n", @@ -234,12 +236,12 @@ static int __cpuinit _cpu_up(unsigned int cpu) BUG_ON(!cpu_online(cpu)); /* Now call notifier in preparation. */ - raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu); + raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu); out_notify: if (ret != 0) __raw_notifier_call_chain(&cpu_chain, - CPU_UP_CANCELED, hcpu, nr_calls, NULL); + CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL); raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu); return ret; @@ -253,7 +255,7 @@ int __cpuinit cpu_up(unsigned int cpu) if (cpu_hotplug_disabled) err = -EBUSY; else - err = _cpu_up(cpu); + err = _cpu_up(cpu, 0); mutex_unlock(&cpu_add_remove_lock); return err; @@ -283,7 +285,7 @@ int disable_nonboot_cpus(void) for_each_online_cpu(cpu) { if (cpu == first_cpu) continue; - error = _cpu_down(cpu); + error = _cpu_down(cpu, 1); if (!error) { cpu_set(cpu, frozen_cpus); printk("CPU%d is down\n", cpu); @@ -318,7 +320,7 @@ void enable_nonboot_cpus(void) suspend_cpu_hotplug = 1; printk("Enabling non-boot CPUs ...\n"); for_each_cpu_mask(cpu, frozen_cpus) { - error = _cpu_up(cpu); + error = _cpu_up(cpu, 1); if (!error) { printk("CPU%d is up\n", cpu); continue; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index c9f4f044a8a8..23c03f43e196 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1411,11 +1411,13 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: init_hrtimers_cpu(cpu); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_DEAD: + case CPU_DEAD_FROZEN: clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu); migrate_hrtimers(cpu); break; diff --git a/kernel/profile.c b/kernel/profile.c index 9bfadb248dd8..cc91b9bf759d 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -340,6 +340,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info, switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: node = cpu_to_node(cpu); per_cpu(cpu_profile_flip, cpu) = 0; if (!per_cpu(cpu_profile_hits, cpu)[1]) { @@ -365,10 +366,13 @@ static int __devinit profile_cpu_callback(struct notifier_block *info, __free_page(page); return NOTIFY_BAD; case CPU_ONLINE: + case 
CPU_ONLINE_FROZEN: cpu_set(cpu, prof_cpu_mask); break; case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: + case CPU_DEAD_FROZEN: cpu_clear(cpu, prof_cpu_mask); if (per_cpu(cpu_profile_hits, cpu)[0]) { page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 3554b76da84c..2c2dd8410dc4 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -558,9 +558,11 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, long cpu = (long)hcpu; switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: rcu_online_cpu(cpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: rcu_offline_cpu(cpu); break; default: diff --git a/kernel/relay.c b/kernel/relay.c index e804589c863c..61a504900eaa 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -484,6 +484,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb, switch(action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: mutex_lock(&relay_channels_mutex); list_for_each_entry(chan, &relay_channels, list) { if (chan->buf[hotcpu]) @@ -500,6 +501,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb, mutex_unlock(&relay_channels_mutex); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: /* No need to flush the cpu : will be flushed upon * final relay_flush() call. */ break; diff --git a/kernel/sched.c b/kernel/sched.c index fe1a9c2b855a..799d23b4e35d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -5394,6 +5394,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) break; case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: p = kthread_create(migration_thread, hcpu, "migration/%d",cpu); if (IS_ERR(p)) return NOTIFY_BAD; @@ -5407,12 +5408,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) break; case CPU_ONLINE: + case CPU_ONLINE_FROZEN: /* Strictly unneccessary, as first user will wake it. */ wake_up_process(cpu_rq(cpu)->migration_thread); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: if (!cpu_rq(cpu)->migration_thread) break; /* Unbind it from offline cpu so it can run. Fall thru. */ @@ -5423,6 +5426,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) break; case CPU_DEAD: + case CPU_DEAD_FROZEN: migrate_live_tasks(cpu); rq = cpu_rq(cpu); kthread_stop(rq->migration_thread); @@ -6912,14 +6916,20 @@ static int update_sched_domains(struct notifier_block *nfb, { switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: detach_destroy_domains(&cpu_online_map); return NOTIFY_OK; case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: case CPU_ONLINE: + case CPU_ONLINE_FROZEN: case CPU_DEAD: + case CPU_DEAD_FROZEN: /* * Fall through and re-initialise the domains. 
*/ diff --git a/kernel/softirq.c b/kernel/softirq.c index 8b75008e2bd8..0b9886a00e74 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -593,6 +593,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); if (IS_ERR(p)) { printk("ksoftirqd for %i failed\n", hotcpu); @@ -602,16 +603,19 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, per_cpu(ksoftirqd, hotcpu) = p; break; case CPU_ONLINE: + case CPU_ONLINE_FROZEN: wake_up_process(per_cpu(ksoftirqd, hotcpu)); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: if (!per_cpu(ksoftirqd, hotcpu)) break; /* Unbind so it can run. Fall thru. */ kthread_bind(per_cpu(ksoftirqd, hotcpu), any_online_cpu(cpu_online_map)); case CPU_DEAD: + case CPU_DEAD_FROZEN: p = per_cpu(ksoftirqd, hotcpu); per_cpu(ksoftirqd, hotcpu) = NULL; kthread_stop(p); diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 8fa7040247ad..0131e296ffb4 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -146,6 +146,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: BUG_ON(per_cpu(watchdog_task, hotcpu)); p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu); if (IS_ERR(p)) { @@ -157,16 +158,19 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) kthread_bind(p, hotcpu); break; case CPU_ONLINE: + case CPU_ONLINE_FROZEN: wake_up_process(per_cpu(watchdog_task, hotcpu)); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: if (!per_cpu(watchdog_task, hotcpu)) break; /* Unbind so it can run. Fall thru. 
*/ kthread_bind(per_cpu(watchdog_task, hotcpu), any_online_cpu(cpu_online_map)); case CPU_DEAD: + case CPU_DEAD_FROZEN: p = per_cpu(watchdog_task, hotcpu); per_cpu(watchdog_task, hotcpu) = NULL; kthread_stop(p); diff --git a/kernel/timer.c b/kernel/timer.c index 58f6dd07c80b..de85f8491c1d 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1293,11 +1293,13 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self, long cpu = (long)hcpu; switch(action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: if (init_timers_cpu(cpu) < 0) return NOTIFY_BAD; break; #ifdef CONFIG_HOTPLUG_CPU case CPU_DEAD: + case CPU_DEAD_FROZEN: migrate_timers(cpu); break; #endif diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b976ed87dd37..fb56fedd5c02 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -799,6 +799,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, struct cpu_workqueue_struct *cwq; struct workqueue_struct *wq; + action &= ~CPU_TASKS_FROZEN; + switch (action) { case CPU_LOCK_ACQUIRE: mutex_lock(&workqueue_mutex); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index d69ddbe43865..402eb4eb6b23 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1004,7 +1004,7 @@ static int radix_tree_callback(struct notifier_block *nfb, struct radix_tree_preload *rtp; /* Free per-cpu pool of perloaded nodes */ - if (action == CPU_DEAD) { + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { rtp = &per_cpu(radix_tree_preloads, cpu); while (rtp->nr) { kmem_cache_free(radix_tree_node_cachep, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6fd0b7455b0b..d53cbf8acb8e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2148,11 +2148,14 @@ static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: if (process_zones(cpu)) ret = NOTIFY_BAD; break; case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: + case CPU_DEAD_FROZEN: free_zone_pagesets(cpu); break; default: @@ -3012,7 +3015,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self, { int cpu = (unsigned long)hcpu; - if (action == CPU_DEAD) { + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { local_irq_disable(); __drain_pages(cpu); vm_events_fold_cpu(cpu); diff --git a/mm/slab.c b/mm/slab.c index 1a7a10de2a4d..6f3d6e240c61 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1190,6 +1190,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, mutex_lock(&cache_chain_mutex); break; case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: /* * We need to do this right in the beginning since * alloc_arraycache's are going to use this list. @@ -1276,10 +1277,12 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, } break; case CPU_ONLINE: + case CPU_ONLINE_FROZEN: start_cpu_timer(cpu); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: /* * Shutdown cache reaper. Note that the cache_chain_mutex is * held so that if cache_reap() is invoked it cannot do @@ -1291,9 +1294,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, per_cpu(reap_work, cpu).work.func = NULL; break; case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: start_cpu_timer(cpu); break; case CPU_DEAD: + case CPU_DEAD_FROZEN: /* * Even if all the cpus of a node are down, we don't free the * kmem_list3 of any cache. 
This to avoid a race between @@ -1305,6 +1310,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, /* fall thru */ #endif case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: list_for_each_entry(cachep, &cache_chain, next) { struct array_cache *nc; struct array_cache *shared; diff --git a/mm/slub.c b/mm/slub.c index f7c120b93c41..a581fa8ae11a 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2514,7 +2514,9 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, switch (action) { case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: + case CPU_DEAD_FROZEN: for_all_slabs(__flush_cpu_slab, cpu); break; default: diff --git a/mm/swap.c b/mm/swap.c index 218c52a24a21..d3cb966fe992 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -488,7 +488,7 @@ static int cpu_swap_callback(struct notifier_block *nfb, long *committed; committed = &per_cpu(committed_space, (long)hcpu); - if (action == CPU_DEAD) { + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { atomic_add(*committed, &vm_committed_space); *committed = 0; __lru_add_drain((long)hcpu); diff --git a/mm/vmscan.c b/mm/vmscan.c index 1c8e75a1cfcd..1be5a6376ef0 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1528,7 +1528,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb, pg_data_t *pgdat; cpumask_t mask; - if (action == CPU_ONLINE) { + if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { for_each_online_pgdat(pgdat) { mask = node_to_cpumask(pgdat->node_id); if (any_online_cpu(mask) != NR_CPUS) diff --git a/mm/vmstat.c b/mm/vmstat.c index 6c488d6ac425..9a66dc4aed43 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -650,8 +650,11 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, { switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: + case CPU_DEAD_FROZEN: refresh_zone_stat_thresholds(); break; default: diff --git a/net/core/dev.c b/net/core/dev.c index 4317c1be4d3f..8301e2ac747f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3450,7 +3450,7 @@ static int dev_cpu_callback(struct notifier_block *nfb, unsigned int cpu, oldcpu = (unsigned long)ocpu; struct softnet_data *sd, *oldsd; - if (action != CPU_DEAD) + if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) return NOTIFY_OK; local_irq_disable(); diff --git a/net/core/flow.c b/net/core/flow.c index 5d25697920b1..051430545a05 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -338,7 +338,7 @@ static int flow_cache_cpu(struct notifier_block *nfb, unsigned long action, void *hcpu) { - if (action == CPU_DEAD) + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) __flow_cache_shrink((unsigned long)hcpu, 0); return NOTIFY_OK; } diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index fb3faf72e850..b7333061016d 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -556,6 +556,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: if (!percpu_populate(iucv_irq_data, sizeof(struct iucv_irq_data), GFP_KERNEL|GFP_DMA, cpu)) @@ -567,15 +568,20 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, } break; case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: + case CPU_DEAD_FROZEN: percpu_depopulate(iucv_param, cpu); percpu_depopulate(iucv_irq_data, cpu); break; case CPU_ONLINE: + case CPU_ONLINE_FROZEN: case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu); break; case 
CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: cpumask = iucv_buffer_cpumask; cpu_clear(cpu, cpumask); if (cpus_empty(cpumask)) -- cgit v1.2.3 From d1187ed21026fd512b87851d0ca26d9ae16f9059 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 9 May 2007 02:35:12 -0700 Subject: vmstat: use our own timer events vmstat is currently using the cache reaper to periodically bring the statistics up to date. The cache reaper does only exists in SLUB as a way to provide compatibility with SLAB. This patch removes the vmstat calls from the slab allocators and provides its own handling. The advantage is also that we can use a different frequency for the updates. Refreshing vm stats is a pretty fast job so we can run this every second and stagger this by only one tick. This will lead to some overlap in large systems. F.e a system running at 250 HZ with 1024 processors will have 4 vm updates occurring at once. However, the vm stats update only accesses per node information. It is only necessary to stagger the vm statistics updates per processor in each node. Vm counter updates occurring on distant nodes will not cause cacheline contention. We could implement an alternate approach that runs the first processor on each node at the second and then each of the other processor on a node on a subsequent tick. That may be useful to keep a large amount of the second free of timer activity. Maybe the timer folks will have some feedback on this one? [jirislaby@gmail.com: add missing break] Cc: Arjan van de Ven Signed-off-by: Christoph Lameter Signed-off-by: Jiri Slaby Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmstat.h | 3 --- mm/slab.c | 1 - mm/slub.c | 1 - mm/vmstat.c | 40 ++++++++++++++++++++++++++++++++++++---- 4 files changed, 36 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index acb1f105870c..d9325cf8a134 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -212,8 +212,6 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item); extern void __dec_zone_state(struct zone *, enum zone_stat_item); void refresh_cpu_vm_stats(int); -void refresh_vm_stats(void); - #else /* CONFIG_SMP */ /* @@ -260,7 +258,6 @@ static inline void __dec_zone_page_state(struct page *page, #define mod_zone_page_state __mod_zone_page_state static inline void refresh_cpu_vm_stats(int cpu) { } -static inline void refresh_vm_stats(void) { } #endif #endif /* _LINUX_VMSTAT_H */ diff --git a/mm/slab.c b/mm/slab.c index 6f3d6e240c61..e50908b2bfac 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4156,7 +4156,6 @@ next: check_irq_on(); mutex_unlock(&cache_chain_mutex); next_reap_node(); - refresh_cpu_vm_stats(smp_processor_id()); out: /* Set up the next iteration */ schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC)); diff --git a/mm/slub.c b/mm/slub.c index a581fa8ae11a..dbb206503a8d 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2580,7 +2580,6 @@ static DEFINE_PER_CPU(struct delayed_work, reap_work); static void cache_reap(struct work_struct *unused) { next_reap_node(); - refresh_cpu_vm_stats(smp_processor_id()); schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); } diff --git a/mm/vmstat.c b/mm/vmstat.c index 9a66dc4aed43..9d824643a22f 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -640,6 +640,22 @@ const struct seq_operations vmstat_op = { #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_SMP +static DEFINE_PER_CPU(struct delayed_work, vmstat_work); + +static void 
vmstat_update(struct work_struct *w) +{ + refresh_cpu_vm_stats(smp_processor_id()); + schedule_delayed_work(&__get_cpu_var(vmstat_work), HZ); +} + +static void __devinit start_cpu_timer(int cpu) +{ + struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu); + + INIT_DELAYED_WORK(vmstat_work, vmstat_update); + schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu); +} + /* * Use the cpu notifier to insure that the thresholds are recalculated * when necessary. @@ -648,11 +664,22 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { + long cpu = (long)hcpu; + switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + start_cpu_timer(cpu); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu)); + per_cpu(vmstat_work, cpu).work.func = NULL; + break; + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: + start_cpu_timer(cpu); + break; case CPU_DEAD: case CPU_DEAD_FROZEN: refresh_zone_stat_thresholds(); @@ -668,8 +695,13 @@ static struct notifier_block __cpuinitdata vmstat_notifier = int __init setup_vmstat(void) { + int cpu; + refresh_zone_stat_thresholds(); register_cpu_notifier(&vmstat_notifier); + + for_each_online_cpu(cpu) + start_cpu_timer(cpu); return 0; } module_init(setup_vmstat) -- cgit v1.2.3 From 4037d452202e34214e8a939fa5621b2b3bbb45b7 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 9 May 2007 02:35:14 -0700 Subject: Move remote node draining out of slab allocators Currently the slab allocators contain callbacks into the page allocator to perform the draining of pagesets on remote nodes. This requires SLUB to have a whole subsystem in order to be compatible with SLAB. Moving node draining out of the slab allocators avoids a section of code in SLUB. Move the node draining so that it is done when the vm statistics are updated. At that point we are already touching all the cachelines with the pagesets of a processor. Add an expire counter there. If we have to update per zone or global vm statistics then assume that the pageset will require subsequent draining. The expire counter will be decremented on each vm stats update pass until it reaches zero. Then we will drain one batch from the pageset. The draining will cause vm counter updates which will then cause another expiration until the pcp is empty. So we will drain a batch every 3 seconds. Note that remote node draining is a somewhat esoteric feature that is required on large NUMA systems because otherwise significant portions of system memory can become trapped in pcp queues. The number of pcps is determined by the number of processors and nodes in a system. A system with 4 processors and 2 nodes has 8 pcps which is okay. But a system with 1024 processors and 512 nodes has 512k pcps with a high potential for a large amount of memory being caught in them.
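To make the expire/drain interplay described above concrete, here is a small self-contained C model (illustrative only; the struct and function names below are invented for the sketch and are not kernel API -- the real logic lives in refresh_cpu_vm_stats() in the vmstat.c hunk further down):

	/* Userspace model of the pageset expire logic -- NOT kernel code. */
	#include <stdio.h>

	struct pageset {
		int stat_diff;	/* pending per-cpu counter updates */
		int count;	/* pages cached for a remote zone */
		int batch;	/* pages freed per drain */
		int expire;	/* update passes left before draining */
	};

	/* One vm statistics update pass over a single remote pageset. */
	static void vmstat_pass(struct pageset *p)
	{
		/* Fold pending counter diffs; activity re-arms the expire counter. */
		if (p->stat_diff) {
			p->stat_diff = 0;
			p->expire = 3;		/* "3 seconds idle till flush" */
		}

		if (!p->count || !p->expire)
			return;			/* nothing cached, or not armed */

		if (--p->expire)
			return;			/* still counting down */

		/* Drain one batch; freeing pages is itself a vm counter update,
		 * so stat_diff re-arms expire and another batch follows later. */
		int drained = p->count >= p->batch ? p->batch : p->count;
		p->count -= drained;
		p->stat_diff += drained;
	}

	int main(void)
	{
		struct pageset p = { .stat_diff = 1, .count = 100, .batch = 16, .expire = 0 };
		int pass;

		for (pass = 0; pass < 20 && p.count; pass++) {
			vmstat_pass(&p);
			printf("pass %2d: expire=%d count=%d\n", pass, p.expire, p.count);
		}
		return 0;
	}

Run standalone, this drains one batch roughly every third pass once allocator activity stops, which matches the "drain a batch every 3 seconds" behaviour described above.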
Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 6 +--- include/linux/mmzone.h | 3 ++ mm/page_alloc.c | 45 +++++++++------------------ mm/slab.c | 6 ---- mm/slub.c | 84 -------------------------------------------------- mm/vmstat.c | 54 +++++++++++++++++++++++++++++--- 6 files changed, 67 insertions(+), 131 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 97a36c3d96e2..0d2ef0b082a6 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -176,10 +176,6 @@ extern void FASTCALL(free_cold_page(struct page *page)); #define free_page(addr) free_pages((addr),0) void page_alloc_init(void); -#ifdef CONFIG_NUMA -void drain_node_pages(int node); -#else -static inline void drain_node_pages(int node) { }; -#endif +void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); #endif /* __LINUX_GFP_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 2f1544e83042..d09b1345a3a1 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -83,6 +83,9 @@ struct per_cpu_pages { struct per_cpu_pageset { struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */ +#ifdef CONFIG_NUMA + s8 expire; +#endif #ifdef CONFIG_SMP s8 stat_threshold; s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d53cbf8acb8e..f9b5d6d5f4d6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -691,43 +691,26 @@ static void __init setup_nr_node_ids(void) {} #ifdef CONFIG_NUMA /* - * Called from the slab reaper to drain pagesets on a particular node that - * belongs to the currently executing processor. + * Called from the vmstat counter updater to drain pagesets of this + * currently executing processor on remote nodes after they have + * expired. + * * Note that this function must be called with the thread pinned to * a single processor. 
*/ -void drain_node_pages(int nodeid) +void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { - int i; - enum zone_type z; unsigned long flags; + int to_drain; - for (z = 0; z < MAX_NR_ZONES; z++) { - struct zone *zone = NODE_DATA(nodeid)->node_zones + z; - struct per_cpu_pageset *pset; - - if (!populated_zone(zone)) - continue; - - pset = zone_pcp(zone, smp_processor_id()); - for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { - struct per_cpu_pages *pcp; - - pcp = &pset->pcp[i]; - if (pcp->count) { - int to_drain; - - local_irq_save(flags); - if (pcp->count >= pcp->batch) - to_drain = pcp->batch; - else - to_drain = pcp->count; - free_pages_bulk(zone, to_drain, &pcp->list, 0); - pcp->count -= to_drain; - local_irq_restore(flags); - } - } - } + local_irq_save(flags); + if (pcp->count >= pcp->batch) + to_drain = pcp->batch; + else + to_drain = pcp->count; + free_pages_bulk(zone, to_drain, &pcp->list, 0); + pcp->count -= to_drain; + local_irq_restore(flags); } #endif diff --git a/mm/slab.c b/mm/slab.c index e50908b2bfac..944b20581f8c 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -928,12 +928,6 @@ static void next_reap_node(void) { int node = __get_cpu_var(reap_node); - /* - * Also drain per cpu pages on remote zones - */ - if (node != numa_node_id()) - drain_node_pages(node); - node = next_node(node, node_online_map); if (unlikely(node >= MAX_NUMNODES)) node = first_node(node_online_map); diff --git a/mm/slub.c b/mm/slub.c index dbb206503a8d..bd2efae02bcd 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2530,90 +2530,6 @@ static struct notifier_block __cpuinitdata slab_notifier = #endif -#ifdef CONFIG_NUMA - -/***************************************************************** - * Generic reaper used to support the page allocator - * (the cpu slabs are reaped by a per slab workqueue). - * - * Maybe move this to the page allocator? - ****************************************************************/ - -static DEFINE_PER_CPU(unsigned long, reap_node); - -static void init_reap_node(int cpu) -{ - int node; - - node = next_node(cpu_to_node(cpu), node_online_map); - if (node == MAX_NUMNODES) - node = first_node(node_online_map); - - __get_cpu_var(reap_node) = node; -} - -static void next_reap_node(void) -{ - int node = __get_cpu_var(reap_node); - - /* - * Also drain per cpu pages on remote zones - */ - if (node != numa_node_id()) - drain_node_pages(node); - - node = next_node(node, node_online_map); - if (unlikely(node >= MAX_NUMNODES)) - node = first_node(node_online_map); - __get_cpu_var(reap_node) = node; -} -#else -#define init_reap_node(cpu) do { } while (0) -#define next_reap_node(void) do { } while (0) -#endif - -#define REAPTIMEOUT_CPUC (2*HZ) - -#ifdef CONFIG_SMP -static DEFINE_PER_CPU(struct delayed_work, reap_work); - -static void cache_reap(struct work_struct *unused) -{ - next_reap_node(); - schedule_delayed_work(&__get_cpu_var(reap_work), - REAPTIMEOUT_CPUC); -} - -static void __devinit start_cpu_timer(int cpu) -{ - struct delayed_work *reap_work = &per_cpu(reap_work, cpu); - - /* - * When this gets called from do_initcalls via cpucache_init(), - * init_workqueues() has already run, so keventd will be setup - * at that time. 
- */ - if (keventd_up() && reap_work->work.func == NULL) { - init_reap_node(cpu); - INIT_DELAYED_WORK(reap_work, cache_reap); - schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); - } -} - -static int __init cpucache_init(void) -{ - int cpu; - - /* - * Register the timers that drain pcp pages and update vm statistics - */ - for_each_online_cpu(cpu) - start_cpu_timer(cpu); - return 0; -} -__initcall(cpucache_init); -#endif - void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) { struct kmem_cache *s = get_slab(size, gfpflags); diff --git a/mm/vmstat.c b/mm/vmstat.c index 006eb7621869..9832d9a41d8c 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -281,6 +281,17 @@ EXPORT_SYMBOL(dec_zone_page_state); /* * Update the zone counters for one cpu. + * + * Note that refresh_cpu_vm_stats strives to only access + * node local memory. The per cpu pagesets on remote zones are placed + * in the memory local to the processor using that pageset. So the + * loop over all zones will access a series of cachelines local to + * the processor. + * + * The call to zone_page_state_add updates the cachelines with the + * statistics in the remote zone struct as well as the global cachelines + * with the global counters. These could cause remote node cache line + * bouncing and will have to be only done when necessary. */ void refresh_cpu_vm_stats(int cpu) { @@ -289,21 +300,54 @@ void refresh_cpu_vm_stats(int cpu) unsigned long flags; for_each_zone(zone) { - struct per_cpu_pageset *pcp; + struct per_cpu_pageset *p; if (!populated_zone(zone)) continue; - pcp = zone_pcp(zone, cpu); + p = zone_pcp(zone, cpu); for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) - if (pcp->vm_stat_diff[i]) { + if (p->vm_stat_diff[i]) { local_irq_save(flags); - zone_page_state_add(pcp->vm_stat_diff[i], + zone_page_state_add(p->vm_stat_diff[i], zone, i); - pcp->vm_stat_diff[i] = 0; + p->vm_stat_diff[i] = 0; +#ifdef CONFIG_NUMA + /* 3 seconds idle till flush */ + p->expire = 3; +#endif local_irq_restore(flags); } +#ifdef CONFIG_NUMA + /* + * Deal with draining the remote pageset of this + * processor + * + * Check if there are pages remaining in this pageset + * if not then there is nothing to expire. + */ + if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count)) + continue; + + /* + * We never drain zones local to this processor. + */ + if (zone_to_nid(zone) == numa_node_id()) { + p->expire = 0; + continue; + } + + p->expire--; + if (p->expire) + continue; + + if (p->pcp[0].count) + drain_zone_pages(zone, p->pcp + 0); + + if (p->pcp[1].count) + drain_zone_pages(zone, p->pcp + 1); +#endif } } -- cgit v1.2.3 From b52f52a093bb1e841e014c2087b5bee7162da413 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 May 2007 02:35:15 -0700 Subject: clocksource: fix resume logic We need to make sure that the clocksources are resumed, when timekeeping is resumed. The current resume logic does not guarantee this. Add a resume function pointer to the clocksource struct, so clocksource drivers which need to reinitialize the clocksource can provide a resume function. Add a resume function, which calls the maybe available clocksource resume functions and resets the watchdog function, so a stable TSC can be used accross suspend/resume. 
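The resume path being added is easiest to see in isolation. The sketch below is a plain userspace C model, not kernel code: the cut-down clocksource type and the example tsc_resume() body are stand-ins, but the control flow mirrors clocksource_resume() and clocksource_resume_watchdog() in the hunks that follow.

	#include <stdio.h>
	#include <stddef.h>

	/* Cut-down stand-ins for the real structures -- illustration only. */
	struct clocksource {
		const char *name;
		void (*resume)(void);	/* optional; NULL when nothing to do */
	};

	static int watchdog_resumed;

	static void tsc_resume(void)
	{
		printf("tsc: reinitialised after suspend\n");
	}

	static struct clocksource sources[] = {
		{ .name = "tsc",     .resume = tsc_resume },
		{ .name = "jiffies", .resume = NULL },
	};

	static void clocksource_resume(void)
	{
		size_t i;

		/* Walk all registered sources and call the optional hook. */
		for (i = 0; i < sizeof(sources) / sizeof(sources[0]); i++)
			if (sources[i].resume)
				sources[i].resume();

		/* Tell the watchdog to re-base instead of reporting a huge delta. */
		watchdog_resumed = 1;
	}

	int main(void)
	{
		clocksource_resume();
		printf("watchdog_resumed = %d\n", watchdog_resumed);
		return 0;
	}

Only sources that registered a hook are touched, and on the next watchdog run the flag makes it re-read its baseline rather than flag a spurious instability.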
Signed-off-by: Thomas Gleixner Cc: john stultz Cc: Andi Kleen Cc: Ingo Molnar Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/clocksource.h | 3 +++ kernel/time/clocksource.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ kernel/timer.c | 2 ++ 3 files changed, 50 insertions(+) (limited to 'include/linux') diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 2665ca04cf8f..bf297b03a4e4 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -49,6 +49,7 @@ struct clocksource; * @shift: cycle to nanosecond divisor (power of two) * @flags: flags describing special properties * @vread: vsyscall based read + * @resume: resume function for the clocksource, if necessary * @cycle_interval: Used internally by timekeeping core, please ignore. * @xtime_interval: Used internally by timekeeping core, please ignore. */ @@ -65,6 +66,7 @@ struct clocksource { u32 shift; unsigned long flags; cycle_t (*vread)(void); + void (*resume)(void); /* timekeeping specific data, ignore */ cycle_t cycle_interval; @@ -209,6 +211,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c, extern int clocksource_register(struct clocksource*); extern struct clocksource* clocksource_get_next(void); extern void clocksource_change_rating(struct clocksource *cs, int rating); +extern void clocksource_resume(void); #ifdef CONFIG_GENERIC_TIME_VSYSCALL extern void update_vsyscall(struct timespec *ts, struct clocksource *c); diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index db0c725de5ea..3db5c3c460d7 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -74,6 +74,8 @@ static struct clocksource *watchdog; static struct timer_list watchdog_timer; static DEFINE_SPINLOCK(watchdog_lock); static cycle_t watchdog_last; +static int watchdog_resumed; + /* * Interval: 0.5sec Threshold: 0.0625s */ @@ -98,15 +100,26 @@ static void clocksource_watchdog(unsigned long data) struct clocksource *cs, *tmp; cycle_t csnow, wdnow; int64_t wd_nsec, cs_nsec; + int resumed; spin_lock(&watchdog_lock); + resumed = watchdog_resumed; + if (unlikely(resumed)) + watchdog_resumed = 0; + wdnow = watchdog->read(); wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); watchdog_last = wdnow; list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { csnow = cs->read(); + + if (unlikely(resumed)) { + cs->wd_last = csnow; + continue; + } + /* Initialized ? 
*/ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && @@ -136,6 +149,13 @@ static void clocksource_watchdog(unsigned long data) } spin_unlock(&watchdog_lock); } +static void clocksource_resume_watchdog(void) +{ + spin_lock(&watchdog_lock); + watchdog_resumed = 1; + spin_unlock(&watchdog_lock); +} + static void clocksource_check_watchdog(struct clocksource *cs) { struct clocksource *cse; @@ -182,8 +202,33 @@ static void clocksource_check_watchdog(struct clocksource *cs) if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; } + +static inline void clocksource_resume_watchdog(void) { } #endif +/** + * clocksource_resume - resume the clocksource(s) + */ +void clocksource_resume(void) +{ + struct list_head *tmp; + unsigned long flags; + + spin_lock_irqsave(&clocksource_lock, flags); + + list_for_each(tmp, &clocksource_list) { + struct clocksource *cs; + + cs = list_entry(tmp, struct clocksource, list); + if (cs->resume) + cs->resume(); + } + + clocksource_resume_watchdog(); + + spin_unlock_irqrestore(&clocksource_lock, flags); +} + /** * clocksource_get_next - Returns the selected clocksource * diff --git a/kernel/timer.c b/kernel/timer.c index de85f8491c1d..59a28b1752f8 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1499,6 +1499,8 @@ unregister_time_interpolator(struct time_interpolator *ti) prev = &curr->next; } + clocksource_resume(); + write_seqlock_irqsave(&xtime_lock, flags); if (ti == time_interpolator) { /* we lost the best time-interpolator: */ -- cgit v1.2.3 From e61a1c1c4f240cec61300c8f27518c3e47570fd4 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Wed, 9 May 2007 02:35:15 -0700 Subject: Allow arch to initialize arch field of the module structure This will later allow an arch to add module specific information via linker generated tables instead of poking directly in the module object structure. Signed-off-by: Roman Zippel Signed-off-by: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/module.h | 3 +++ scripts/mod/modpost.c | 1 + 2 files changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index 6d3dc9c4ff96..792d483c9af7 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -356,6 +356,9 @@ struct module keeping pointers to this stuff */ char *args; }; +#ifndef MODULE_ARCH_INIT +#define MODULE_ARCH_INIT {} +#endif /* FIXME: It'd be nice to isolate modules during init, too, so they aren't used before they (may) fail. But presently too much code diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 480e18b00aa6..113dc77b9f60 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -1343,6 +1343,7 @@ static void add_header(struct buffer *b, struct module *mod) buf_printf(b, "#ifdef CONFIG_MODULE_UNLOAD\n" " .exit = cleanup_module,\n" "#endif\n"); + buf_printf(b, " .arch = MODULE_ARCH_INIT,\n"); buf_printf(b, "};\n"); } -- cgit v1.2.3 From f7e4217b007d1f73e7e3cf10ba4fea4a608c603f Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Wed, 9 May 2007 02:35:17 -0700 Subject: rename thread_info to stack This finally renames the thread_info field in task structure to stack, so that the assumptions about this field are gone and archs have more freedom about placing the thread_info structure. Nonbroken archs which have a proper thread pointer can do the access to both current thread and task structure via a single pointer. 
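Concretely, generic code now reaches the thread_info through two accessors instead of a named struct member. The toy program below (userspace C, not kernel code; the fake 8 KB "stack page" is only for illustration) mirrors the generic definitions added to include/linux/sched.h in this patch:

	#include <stdio.h>
	#include <stdlib.h>

	struct thread_info { int cpu; int preempt_count; };

	struct task_struct {
		long state;
		void *stack;		/* was: struct thread_info *thread_info */
	};

	/* Same shape as the generic accessors added to sched.h by this patch. */
	#define task_thread_info(task)	((struct thread_info *)(task)->stack)
	#define task_stack_page(task)	((task)->stack)

	int main(void)
	{
		void *stack_page = calloc(1, 8192);	/* stand-in for the kernel stack */
		struct task_struct tsk = { .state = 0, .stack = stack_page };

		/* thread_info lives at the bottom of the stack page, as before */
		task_thread_info(&tsk)->cpu = 3;
		printf("cpu=%d, stack page at %p\n",
		       task_thread_info(&tsk)->cpu, task_stack_page(&tsk));

		free(stack_page);
		return 0;
	}

Architectures that define __HAVE_THREAD_FUNCTIONS (such as m68k in the hunks below) remain free to implement these accessors differently, which is the point of turning the field into an opaque void *stack.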
It'll allow for a few more cleanups of the fork code, from which e.g. ia64 could benefit. Signed-off-by: Roman Zippel [akpm@linux-foundation.org: build fix] Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Russell King Cc: Ian Molton Cc: Haavard Skinnemoen Cc: Mikael Starvik Cc: David Howells Cc: Yoshinori Sato Cc: "Luck, Tony" Cc: Hirokazu Takata Cc: Geert Uytterhoeven Cc: Roman Zippel Cc: Greg Ungerer Cc: Ralf Baechle Cc: Ralf Baechle Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Paul Mundt Cc: Kazumoto Kojima Cc: Richard Curnow Cc: William Lee Irwin III Cc: "David S. Miller" Cc: Jeff Dike Cc: Paolo 'Blaisorblade' Giarrusso Cc: Miles Bader Cc: Andi Kleen Cc: Chris Zankel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/blackfin/kernel/ptrace.c | 6 +++--- arch/h8300/kernel/asm-offsets.c | 2 +- arch/ia64/kernel/mca.c | 2 +- arch/mips/kernel/asm-offsets.c | 2 +- arch/parisc/kernel/asm-offsets.c | 2 +- arch/powerpc/kernel/asm-offsets.c | 2 +- arch/ppc/kernel/asm-offsets.c | 2 +- arch/s390/kernel/asm-offsets.c | 2 +- arch/sparc/kernel/asm-offsets.c | 2 +- arch/v850/kernel/asm-offsets.c | 2 +- arch/xtensa/kernel/asm-offsets.c | 2 +- include/asm-alpha/thread_info.h | 8 ++++---- include/asm-blackfin/processor.h | 6 +++--- include/asm-blackfin/system.h | 4 ++-- include/asm-m68k/thread_info.h | 6 +++--- include/asm-x86_64/system.h | 2 +- include/linux/init_task.h | 2 +- include/linux/sched.h | 8 ++++---- kernel/fork.c | 4 ++-- 19 files changed, 33 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index d7c8e514cb92..e718bb4a1ef0 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c @@ -73,7 +73,7 @@ static inline struct pt_regs *get_user_regs(struct task_struct *task) { return (struct pt_regs *) - ((unsigned long)task->thread_info + + ((unsigned long)task_stack_page(task) + (THREAD_SIZE - sizeof(struct pt_regs))); } @@ -99,7 +99,7 @@ static inline long get_reg(struct task_struct *task, int regno) unsigned char *reg_ptr; struct pt_regs *regs = - (struct pt_regs *)((unsigned long)task->thread_info + + (struct pt_regs *)((unsigned long)task_stack_page(task) + (THREAD_SIZE - sizeof(struct pt_regs))); reg_ptr = (char *)regs; @@ -125,7 +125,7 @@ put_reg(struct task_struct *task, int regno, unsigned long data) char * reg_ptr; struct pt_regs *regs = - (struct pt_regs *)((unsigned long)task->thread_info + + (struct pt_regs *)((unsigned long)task_stack_page(task) + (THREAD_SIZE - sizeof(struct pt_regs))); reg_ptr = (char *)regs; diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c index b78b82ad28a3..fc30b4fd0914 100644 --- a/arch/h8300/kernel/asm-offsets.c +++ b/arch/h8300/kernel/asm-offsets.c @@ -30,7 +30,7 @@ int main(void) DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); - DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info)); + DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack)); DEFINE(TASK_MM, offsetof(struct task_struct, mm)); DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 1d7cc7e2ce32..f8ae709de0b5 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1689,7 +1689,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, ti->preempt_count = 1; 
ti->task = p; ti->cpu = cpu; - p->thread_info = ti; + p->stack = ti; p->state = TASK_UNINTERRUPTIBLE; cpu_set(cpu, p->cpus_allowed); INIT_LIST_HEAD(&p->tasks); diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 761a779d5c4f..3b27309d54b1 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -82,7 +82,7 @@ void output_task_defines(void) { text("/* MIPS task_struct offsets. */"); offset("#define TASK_STATE ", struct task_struct, state); - offset("#define TASK_THREAD_INFO ", struct task_struct, thread_info); + offset("#define TASK_THREAD_INFO ", struct task_struct, stack); offset("#define TASK_FLAGS ", struct task_struct, flags); offset("#define TASK_MM ", struct task_struct, mm); offset("#define TASK_PID ", struct task_struct, pid); diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 54fdb959149c..d3b7917a87cb 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -54,7 +54,7 @@ int main(void) { - DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info)); + DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack)); DEFINE(TASK_STATE, offsetof(struct task_struct, state)); DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending)); diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 8f48560b7ee2..37bc35e69dbe 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -58,7 +58,7 @@ int main(void) #ifdef CONFIG_PPC64 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); #else - DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); + DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); DEFINE(PTRACE, offsetof(struct task_struct, ptrace)); #endif /* CONFIG_PPC64 */ diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c index c5850a272650..e8e94321b59e 100644 --- a/arch/ppc/kernel/asm-offsets.c +++ b/arch/ppc/kernel/asm-offsets.c @@ -35,7 +35,7 @@ int main(void) { DEFINE(THREAD, offsetof(struct task_struct, thread)); - DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); + DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); DEFINE(MM, offsetof(struct task_struct, mm)); DEFINE(PTRACE, offsetof(struct task_struct, ptrace)); DEFINE(KSP, offsetof(struct thread_struct, ksp)); diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index ec514fe5ccd0..1375f8a4469e 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -15,7 +15,7 @@ int main(void) { - DEFINE(__THREAD_info, offsetof(struct task_struct, thread_info),); + DEFINE(__THREAD_info, offsetof(struct task_struct, stack),); DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),); DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),); DEFINE(__THREAD_mm_segment, diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c index 29d7cfd1c970..6773ed76e414 100644 --- a/arch/sparc/kernel/asm-offsets.c +++ b/arch/sparc/kernel/asm-offsets.c @@ -28,7 +28,7 @@ int foo(void) DEFINE(AOFF_task_gid, offsetof(struct task_struct, gid)); DEFINE(AOFF_task_euid, offsetof(struct task_struct, euid)); DEFINE(AOFF_task_egid, offsetof(struct task_struct, egid)); - /* DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); */ + /* DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); */ DEFINE(ASIZ_task_uid, sizeof(current->uid)); 
DEFINE(ASIZ_task_gid, sizeof(current->gid)); DEFINE(ASIZ_task_euid, sizeof(current->euid)); diff --git a/arch/v850/kernel/asm-offsets.c b/arch/v850/kernel/asm-offsets.c index 24f291369070..cee5c3142d41 100644 --- a/arch/v850/kernel/asm-offsets.c +++ b/arch/v850/kernel/asm-offsets.c @@ -29,7 +29,7 @@ int main (void) DEFINE (TASK_PTRACE, offsetof (struct task_struct, ptrace)); DEFINE (TASK_BLOCKED, offsetof (struct task_struct, blocked)); DEFINE (TASK_THREAD, offsetof (struct task_struct, thread)); - DEFINE (TASK_THREAD_INFO, offsetof (struct task_struct, thread_info)); + DEFINE (TASK_THREAD_INFO, offsetof (struct task_struct, stack)); DEFINE (TASK_MM, offsetof (struct task_struct, mm)); DEFINE (TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm)); DEFINE (TASK_PID, offsetof (struct task_struct, pid)); diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index b256cfbef344..698079b3a336 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -70,7 +70,7 @@ int main(void) DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm)); DEFINE(TASK_PID, offsetof (struct task_struct, pid)); DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); - DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info)); + DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); BLANK(); diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h index eeb3bef91e11..f4defc2bd3fb 100644 --- a/include/asm-alpha/thread_info.h +++ b/include/asm-alpha/thread_info.h @@ -97,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); 1 << TIF_UAC_SIGBUS) #define SET_UNALIGN_CTL(task,value) ({ \ - (task)->thread_info->flags = (((task)->thread_info->flags & \ + task_thread_info(task)->flags = ((task_thread_info(task)->flags & \ ~ALPHA_UAC_MASK) \ | (((value) << ALPHA_UAC_SHIFT) & (1<thread_info->flags & (1 << TIF_UAC_NOPRINT)) \ + put_user((task_thread_info(task)->flags & (1 << TIF_UAC_NOPRINT))\ >> ALPHA_UAC_SHIFT \ - | ((task)->thread_info->flags & (1 << TIF_UAC_SIGBUS)) \ + | (task_thread_info(task)->flags & (1 << TIF_UAC_SIGBUS))\ >> (ALPHA_UAC_SHIFT + 1) \ - | ((task)->thread_info->flags & (1 << TIF_UAC_NOFIX)) \ + | (task_thread_info(task)->flags & (1 << TIF_UAC_NOFIX))\ >> (ALPHA_UAC_SHIFT - 1), \ (int __user *)(value)); \ }) diff --git a/include/asm-blackfin/processor.h b/include/asm-blackfin/processor.h index 997465c93e82..0336ff132c16 100644 --- a/include/asm-blackfin/processor.h +++ b/include/asm-blackfin/processor.h @@ -58,10 +58,10 @@ do { \ (_regs)->pc = (_pc); \ if (current->mm) \ (_regs)->p5 = current->mm->start_data; \ - current->thread_info->l1_task_info.stack_start \ + task_thread_info(current)->l1_task_info.stack_start \ = (void *)current->mm->context.stack_start; \ - current->thread_info->l1_task_info.lowest_sp = (void *)(_usp); \ - memcpy(L1_SCRATCH_TASK_INFO, ¤t->thread_info->l1_task_info, \ + task_thread_info(current)->l1_task_info.lowest_sp = (void *)(_usp); \ + memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info, \ sizeof(*L1_SCRATCH_TASK_INFO)); \ wrusp(_usp); \ } while(0) diff --git a/include/asm-blackfin/system.h b/include/asm-blackfin/system.h index b5bf6e7cb5e8..5e5f1a0566c0 100644 --- a/include/asm-blackfin/system.h +++ b/include/asm-blackfin/system.h @@ -239,9 +239,9 @@ asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_stru #define switch_to(prev,next,last) \ do { \ - 
memcpy (&prev->thread_info->l1_task_info, L1_SCRATCH_TASK_INFO, \ + memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \ sizeof *L1_SCRATCH_TASK_INFO); \ - memcpy (L1_SCRATCH_TASK_INFO, &next->thread_info->l1_task_info, \ + memcpy (L1_SCRATCH_TASK_INFO, &task_thread_info(next)->l1_task_info, \ sizeof *L1_SCRATCH_TASK_INFO); \ (last) = resume (prev, next); \ } while (0) diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h index c4d622a57dfb..d635a3752488 100644 --- a/include/asm-m68k/thread_info.h +++ b/include/asm-m68k/thread_info.h @@ -37,17 +37,17 @@ struct thread_info { #define init_stack (init_thread_union.stack) #define task_thread_info(tsk) (&(tsk)->thread.info) -#define task_stack_page(tsk) ((void *)(tsk)->thread_info) +#define task_stack_page(tsk) ((tsk)->stack) #define current_thread_info() task_thread_info(current) #define __HAVE_THREAD_FUNCTIONS #define setup_thread_stack(p, org) ({ \ - *(struct task_struct **)(p)->thread_info = (p); \ + *(struct task_struct **)(p)->stack = (p); \ task_thread_info(p)->task = (p); \ }) -#define end_of_stack(p) ((unsigned long *)(p)->thread_info + 1) +#define end_of_stack(p) ((unsigned long *)(p)->stack + 1) /* entry.S relies on these definitions! * bits 0-7 are tested at every exception exit diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index b7b8021e8c43..ead9f9a56234 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h @@ -39,7 +39,7 @@ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \ [ti_flags] "i" (offsetof(struct thread_info, flags)),\ [tif_fork] "i" (TIF_FORK), \ - [thread_info] "i" (offsetof(struct task_struct, thread_info)), \ + [thread_info] "i" (offsetof(struct task_struct, stack)), \ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ : "memory", "cc" __EXTRA_CLOBBER) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 795102309bf1..45170b2fa253 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -95,7 +95,7 @@ extern struct group_info init_groups; #define INIT_TASK(tsk) \ { \ .state = 0, \ - .thread_info = &init_thread_info, \ + .stack = &init_thread_info, \ .usage = ATOMIC_INIT(2), \ .flags = 0, \ .lock_depth = -1, \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 28000b1658f9..17b72d88c4cb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -817,7 +817,7 @@ struct prio_array; struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ - struct thread_info *thread_info; + void *stack; atomic_t usage; unsigned int flags; /* per process flags, defined below */ unsigned int ptrace; @@ -1513,8 +1513,8 @@ static inline void unlock_task_sighand(struct task_struct *tsk, #ifndef __HAVE_THREAD_FUNCTIONS -#define task_thread_info(task) (task)->thread_info -#define task_stack_page(task) ((void*)((task)->thread_info)) +#define task_thread_info(task) ((struct thread_info *)(task)->stack) +#define task_stack_page(task) ((task)->stack) static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) { @@ -1524,7 +1524,7 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct static inline unsigned long *end_of_stack(struct task_struct *p) { - return (unsigned long *)(p->thread_info + 1); + return (unsigned long *)(task_thread_info(p) + 1); } #endif diff --git a/kernel/fork.c b/kernel/fork.c index a8dd75d4992b..5dd3979747f5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -105,7 +105,7 @@ static 
struct kmem_cache *mm_cachep; void free_task(struct task_struct *tsk) { - free_thread_info(tsk->thread_info); + free_thread_info(tsk->stack); rt_mutex_debug_task_free(tsk); free_task_struct(tsk); } @@ -175,7 +175,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) } *tsk = *orig; - tsk->thread_info = ti; + tsk->stack = ti; setup_thread_stack(tsk, orig); #ifdef CONFIG_CC_STACKPROTECTOR -- cgit v1.2.3 From 0d7ebbbc6eaa5539f78ab20ed6ff1725a4e332ef Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Wed, 9 May 2007 02:35:27 -0700 Subject: compiler: introduce __used and __maybe_unused __used is defined to be __attribute__((unused)) for all pre-3.3 gcc compilers to suppress warnings for unused functions because perhaps they are referenced only in inline assembly. It is defined to be __attribute__((used)) for gcc 3.3 and later so that the code is still emitted for such functions. __maybe_unused is defined to be __attribute__((unused)) for both function and variable use if it could possibly be unreferenced due to the evaluation of preprocessor macros. Function prototypes shall be marked with __maybe_unused if the actual definition of the function is dependant on preprocessor macros. No update to compiler-intel.h is necessary because ICC supports both __attribute__((used)) and __attribute__((unused)) as specified by the gcc manual. __attribute_used__ is deprecated and will be removed once all current code is converted to using __used. Cc: Rusty Russell Cc: Adrian Bunk Signed-off-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compiler-gcc.h | 1 + include/linux/compiler-gcc3.h | 6 ++++-- include/linux/compiler-gcc4.h | 3 ++- include/linux/compiler.h | 21 ++++++++++++++++++--- 4 files changed, 25 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index a9f794716a81..03ec2311fb29 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -40,3 +40,4 @@ #define noinline __attribute__((noinline)) #define __attribute_pure__ __attribute__((pure)) #define __attribute_const__ __attribute__((__const__)) +#define __maybe_unused __attribute__((unused)) diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h index ecd621fd27d2..a9e2863c2dbf 100644 --- a/include/linux/compiler-gcc3.h +++ b/include/linux/compiler-gcc3.h @@ -4,9 +4,11 @@ #include #if __GNUC_MINOR__ >= 3 -# define __attribute_used__ __attribute__((__used__)) +# define __used __attribute__((__used__)) +# define __attribute_used__ __used /* deprecated */ #else -# define __attribute_used__ __attribute__((__unused__)) +# define __used __attribute__((__unused__)) +# define __attribute_used__ __used /* deprecated */ #endif #if __GNUC_MINOR__ >= 4 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index fd0cc7c4a636..a03e9398a6c2 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -12,7 +12,8 @@ # define __inline __inline __attribute__((always_inline)) #endif -#define __attribute_used__ __attribute__((__used__)) +#define __used __attribute__((__used__)) +#define __attribute_used__ __used /* deprecated */ #define __must_check __attribute__((warn_unused_result)) #define __compiler_offsetof(a,b) __builtin_offsetof(a,b) #define __always_inline inline __attribute__((always_inline)) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 3b6949b41745..498c35920762 100644 --- a/include/linux/compiler.h +++ 
b/include/linux/compiler.h @@ -108,15 +108,30 @@ extern void __chk_io_ptr(const void __iomem *); * Allow us to avoid 'defined but not used' warnings on functions and data, * as well as force them to be emitted to the assembly file. * - * As of gcc 3.3, static functions that are not marked with attribute((used)) - * may be elided from the assembly file. As of gcc 3.3, static data not so + * As of gcc 3.4, static functions that are not marked with attribute((used)) + * may be elided from the assembly file. As of gcc 3.4, static data not so * marked will not be elided, but this may change in a future gcc version. * + * NOTE: Because distributions shipped with a backported unit-at-a-time + * compiler in gcc 3.3, we must define __used to be __attribute__((used)) + * for gcc >=3.3 instead of 3.4. + * * In prior versions of gcc, such functions and data would be emitted, but * would be warned about except with attribute((unused)). + * + * Mark functions that are referenced only in inline assembly as __used so + * the code is emitted even though it appears to be unreferenced. */ #ifndef __attribute_used__ -# define __attribute_used__ /* unimplemented */ +# define __attribute_used__ /* deprecated */ +#endif + +#ifndef __used +# define __used /* unimplemented */ +#endif + +#ifndef __maybe_unused +# define __maybe_unused /* unimplemented */ #endif /* -- cgit v1.2.3 From 5a87ede94595f58934000e26e8b13398e63868b5 Mon Sep 17 00:00:00 2001 From: "Antonino A. Daplas" Date: Wed, 9 May 2007 02:35:32 -0700 Subject: svgalib: move fb_get_caps to svgalib Move fb_get_caps() method to svgalib.c as svga_get_caps() so it can be used by s3fb, arkfb and vt8623fb. Signed-off-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/arkfb.c | 1 + drivers/video/s3fb.c | 19 +------------------ drivers/video/svgalib.c | 17 +++++++++++++++++ drivers/video/vt8623fb.c | 1 + include/linux/svga.h | 2 ++ 5 files changed, 22 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c index 9288dd11dcef..ba6fede5c466 100644 --- a/drivers/video/arkfb.c +++ b/drivers/video/arkfb.c @@ -919,6 +919,7 @@ static struct fb_ops arkfb_ops = { .fb_fillrect = arkfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = arkfb_imageblit, + .fb_get_caps = svga_get_caps, }; diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 756fafb41d78..d11735895a01 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c @@ -796,23 +796,6 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) return 0; } -/* Get capabilities of accelerator based on the mode */ - -static void s3fb_get_caps(struct fb_info *info, struct fb_blit_caps *caps, - struct fb_var_screeninfo *var) -{ - if (var->bits_per_pixel == 0) { - /* can only support 256 8x16 bitmap */ - caps->x = 1 << (8 - 1); - caps->y = 1 << (16 - 1); - caps->len = 256; - } else { - caps->x = ~(u32)0; - caps->y = ~(u32)0; - caps->len = ~(u32)0; - } -} - /* ------------------------------------------------------------------------- */ /* Frame buffer operations */ @@ -829,7 +812,7 @@ static struct fb_ops s3fb_ops = { .fb_fillrect = s3fb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = s3fb_imageblit, - .fb_get_caps = s3fb_get_caps, + .fb_get_caps = svga_get_caps, }; /* ------------------------------------------------------------------------- */ diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c index 079cdc911e48..25df928d37d8 100644 --- 
a/drivers/video/svgalib.c +++ b/drivers/video/svgalib.c @@ -347,6 +347,23 @@ int svga_get_tilemax(struct fb_info *info) return 256; } +/* Get capabilities of accelerator based on the mode */ + +void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps, + struct fb_var_screeninfo *var) +{ + if (var->bits_per_pixel == 0) { + /* can only support 256 8x16 bitmap */ + caps->x = 1 << (8 - 1); + caps->y = 1 << (16 - 1); + caps->len = 256; + } else { + caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0; + caps->y = ~(u32)0; + caps->len = ~(u32)0; + } +} +EXPORT_SYMBOL(svga_get_caps); /* ------------------------------------------------------------------------- */ diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c index 8883b6f36a7a..5e9755e464a1 100644 --- a/drivers/video/vt8623fb.c +++ b/drivers/video/vt8623fb.c @@ -635,6 +635,7 @@ static struct fb_ops vt8623fb_ops = { .fb_fillrect = vt8623fb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = vt8623fb_imageblit, + .fb_get_caps = svga_get_caps, }; diff --git a/include/linux/svga.h b/include/linux/svga.h index e1cc552e04fe..13ad0b82ac28 100644 --- a/include/linux/svga.h +++ b/include/linux/svga.h @@ -113,6 +113,8 @@ void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect); void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit); void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor); int svga_get_tilemax(struct fb_info *info); +void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps, + struct fb_var_screeninfo *var); int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node); int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node); -- cgit v1.2.3 From 880169dd2edc4297b7811a0542be9766ca6945bc Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Wed, 9 May 2007 02:35:33 -0700 Subject: fbdev: add support for AVR32 Provide framebuffer page protection flags and definitions of fb_readl/fb_writel for AVR32. Signed-off-by: Haavard Skinnemoen Cc: "Antonino A. 
Daplas" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/fbmem.c | 4 ++++ include/linux/fb.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 08d4e11d9121..38c2e2558f5e 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1236,6 +1236,10 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; #elif defined(__arm__) || defined(__sh__) || defined(__m32r__) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); +#elif defined(__avr32__) + vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot) + & ~_PAGE_CACHABLE) + | (_PAGE_BUFFER | _PAGE_DIRTY)); #elif defined(__ia64__) if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); diff --git a/include/linux/fb.h b/include/linux/fb.h index dff7a728948c..c654d0e9ce33 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -868,7 +868,7 @@ struct fb_info { #define fb_writeq sbus_writeq #define fb_memset sbus_memset_io -#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__) +#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__) || defined(__avr32__) #define fb_readb __raw_readb #define fb_readw __raw_readw -- cgit v1.2.3 From 5b479c91da90eef605f851508744bfe8269591a0 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 9 May 2007 02:35:39 -0700 Subject: md: improve partition detection in md array md currently uses ->media_changed to make sure rescan_partitions is call on md array after they are assembled. However that doesn't happen until the array is opened, which is later than some people would like. So use blkdev_ioctl to do the rescan immediately that the array has been assembled. This means we can remove all the ->change infrastructure as it was only used to trigger a partition rescan. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 26 ++++++++------------------ drivers/md/raid1.c | 1 - drivers/md/raid5.c | 2 -- include/linux/raid/md_k.h | 1 - 4 files changed, 8 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/md.c b/drivers/md/md.c index 65814b0340cb..2901d0c0ee9e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3104,6 +3104,7 @@ static int do_md_run(mddev_t * mddev) struct gendisk *disk; struct mdk_personality *pers; char b[BDEVNAME_SIZE]; + struct block_device *bdev; if (list_empty(&mddev->disks)) /* cannot run an array with no devices.. 
*/ @@ -3331,7 +3332,13 @@ static int do_md_run(mddev_t * mddev) md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ - mddev->changed = 1; + bdev = bdget_disk(mddev->gendisk, 0); + if (bdev) { + bd_set_size(bdev, mddev->array_size << 1); + blkdev_ioctl(bdev->bd_inode, NULL, BLKRRPART, 0); + bdput(bdev); + } + md_new_event(mddev); kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE); return 0; @@ -3453,7 +3460,6 @@ static int do_md_stop(mddev_t * mddev, int mode) mddev->pers = NULL; set_capacity(disk, 0); - mddev->changed = 1; if (mddev->ro) mddev->ro = 0; @@ -4593,20 +4599,6 @@ static int md_release(struct inode *inode, struct file * file) return 0; } -static int md_media_changed(struct gendisk *disk) -{ - mddev_t *mddev = disk->private_data; - - return mddev->changed; -} - -static int md_revalidate(struct gendisk *disk) -{ - mddev_t *mddev = disk->private_data; - - mddev->changed = 0; - return 0; -} static struct block_device_operations md_fops = { .owner = THIS_MODULE, @@ -4614,8 +4606,6 @@ static struct block_device_operations md_fops = .release = md_release, .ioctl = md_ioctl, .getgeo = md_getgeo, - .media_changed = md_media_changed, - .revalidate_disk= md_revalidate, }; static int md_thread(void * arg) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 97ee870b265d..1b7130cad21f 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2063,7 +2063,6 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors) */ mddev->array_size = sectors>>1; set_capacity(mddev->gendisk, mddev->array_size << 1); - mddev->changed = 1; if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) { mddev->recovery_cp = mddev->size << 1; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 061375ee6592..a72e70ad0975 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3864,7 +3864,6 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) sectors &= ~((sector_t)mddev->chunk_size/512 - 1); mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; set_capacity(mddev->gendisk, mddev->array_size << 1); - mddev->changed = 1; if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { mddev->recovery_cp = mddev->size << 1; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); @@ -3999,7 +3998,6 @@ static void end_reshape(raid5_conf_t *conf) conf->mddev->array_size = conf->mddev->size * (conf->raid_disks - conf->max_degraded); set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); - conf->mddev->changed = 1; bdev = bdget_disk(conf->mddev->gendisk, 0); if (bdev) { diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index de72c49747c8..a121f36f4437 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -201,7 +201,6 @@ struct mddev_s struct mutex reconfig_mutex; atomic_t active; - int changed; /* true if we might need to reread partition info */ int degraded; /* whether md should consider * adding a spare */ -- cgit v1.2.3 From 18137207236285989dfc0ee7f929b954199228f3 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 10 May 2007 00:01:07 +0200 Subject: ide: fix UDMA/MWDMA/SWDMA masks (v3) * use 0x00 instead of 0x80 to disable ->{ultra,mwdma,swdma}_mask * add udma_mask field to ide_pci_device_t and use it to initialize ->ultra_mask in aec62xx, cmd64x, pdc202xx_{new,old} and piix drivers * fix UDMA masks to match with chipset specific *_ratemask() (alim15x3, hpt366, serverworks and siimage drivers need 
UDMA mask filtering method - done in the next patch) v2: * piix: fix cable detection for 82801AA_1 and 82372FB_1 [ Noticed by Sergei Shtylyov . ] * cmd64x: use hwif->cds->udma_mask [ Suggested by Sergei Shtylyov . ] * aec62xx: fix newly introduced bug - check DMA status not command register [ Noticed by Sergei Shtylyov . ] v3: * piix: use hwif->cds->udma_mask [ Suggested by Sergei Shtylyov . ] Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide.c | 3 -- drivers/ide/pci/aec62xx.c | 19 +++++++++-- drivers/ide/pci/alim15x3.c | 13 ++++++-- drivers/ide/pci/cmd64x.c | 17 +++++----- drivers/ide/pci/pdc202xx_new.c | 10 +++++- drivers/ide/pci/pdc202xx_old.c | 7 +++- drivers/ide/pci/piix.c | 73 ++++++++++++++++++------------------------ drivers/ide/pci/sis5513.c | 5 ++- include/linux/ide.h | 1 + 9 files changed, 88 insertions(+), 60 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index ae5bf2be6f52..c06424fe647b 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -216,9 +216,6 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index) hwif->bus_state = BUSSTATE_ON; hwif->atapi_dma = 0; /* disable all atapi dma */ - hwif->ultra_mask = 0x80; /* disable all ultra */ - hwif->mwdma_mask = 0x80; /* disable all mwdma */ - hwif->swdma_mask = 0x80; /* disable all swdma */ init_completion(&hwif->gendev_rel_comp); diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c index 73bdf64dbbfc..abe0b1bb55ff 100644 --- a/drivers/ide/pci/aec62xx.c +++ b/drivers/ide/pci/aec62xx.c @@ -261,11 +261,13 @@ static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const ch static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif) { + struct pci_dev *dev = hwif->pci_dev; + hwif->autodma = 0; hwif->tuneproc = &aec62xx_tune_drive; hwif->speedproc = &aec62xx_tune_chipset; - if (hwif->pci_dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) + if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) hwif->serialized = hwif->channel; if (hwif->mate) @@ -277,7 +279,15 @@ static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif) return; } - hwif->ultra_mask = 0x7f; + hwif->ultra_mask = hwif->cds->udma_mask; + + /* atp865 and atp865r */ + if (hwif->ultra_mask == 0x3f) { + /* check bit 0x10 of DMA status register */ + if (inb(pci_resource_start(dev, 4) + 2) & 0x10) + hwif->ultra_mask = 0x7f; /* udma0-6 */ + } + hwif->mwdma_mask = 0x07; hwif->ide_dma_check = &aec62xx_config_drive_xfer_rate; @@ -344,6 +354,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = { .autodma = AUTODMA, .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, .bootable = OFF_BOARD, + .udma_mask = 0x07, /* udma0-2 */ },{ /* 1 */ .name = "AEC6260", .init_setup = init_setup_aec62xx, @@ -353,6 +364,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = { .channels = 2, .autodma = NOAUTODMA, .bootable = OFF_BOARD, + .udma_mask = 0x1f, /* udma0-4 */ },{ /* 2 */ .name = "AEC6260R", .init_setup = init_setup_aec62xx, @@ -363,6 +375,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = { .autodma = AUTODMA, .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, .bootable = NEVER_BOARD, + .udma_mask = 0x1f, /* udma0-4 */ },{ /* 3 */ .name = "AEC6X80", .init_setup = init_setup_aec6x80, @@ -372,6 +385,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = { .channels = 2, .autodma = AUTODMA, .bootable = OFF_BOARD, + .udma_mask = 0x3f, /* udma0-5 */ },{ /* 4 */ .name = "AEC6X80R", .init_setup = init_setup_aec6x80, @@ -382,6 +396,7 @@ static ide_pci_device_t 
aec62xx_chipsets[] __devinitdata = { .autodma = AUTODMA, .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, .bootable = OFF_BOARD, + .udma_mask = 0x3f, /* udma0-5 */ } }; diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c index 946a12746cb5..26c27d946c67 100644 --- a/drivers/ide/pci/alim15x3.c +++ b/drivers/ide/pci/alim15x3.c @@ -783,8 +783,17 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif) hwif->atapi_dma = 1; - if (m5229_revision > 0x20) - hwif->ultra_mask = 0x7f; + if (m5229_revision <= 0x20) + hwif->ultra_mask = 0x00; /* no udma */ + else if (m5229_revision < 0xC2) + hwif->ultra_mask = 0x07; /* udma0-2 */ + else if (m5229_revision == 0xC2 || m5229_revision == 0xC3) + hwif->ultra_mask = 0x1f; /* udma0-4 */ + else if (m5229_revision == 0xC4) + hwif->ultra_mask = 0x3f; /* udma0-5 */ + else + hwif->ultra_mask = 0x7f; /* udma0-6 */ + hwif->mwdma_mask = 0x07; hwif->swdma_mask = 0x07; diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c index 77f51ab6d439..610c45f7b4e2 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/pci/cmd64x.c @@ -644,15 +644,12 @@ static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif) hwif->atapi_dma = 1; - hwif->ultra_mask = 0x3f; - hwif->mwdma_mask = 0x07; + hwif->ultra_mask = hwif->cds->udma_mask; + + if (dev->device == PCI_DEVICE_ID_CMD_646 && class_rev < 5) + hwif->ultra_mask = 0x00; - if (dev->device == PCI_DEVICE_ID_CMD_643) - hwif->ultra_mask = 0x80; - if (dev->device == PCI_DEVICE_ID_CMD_646) - hwif->ultra_mask = (class_rev > 0x04) ? 0x07 : 0x80; - if (dev->device == PCI_DEVICE_ID_CMD_648) - hwif->ultra_mask = 0x1f; + hwif->mwdma_mask = 0x07; hwif->ide_dma_check = &cmd64x_config_drive_for_dma; if (!(hwif->udma_four)) @@ -716,6 +713,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = { .autodma = AUTODMA, .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}}, .bootable = ON_BOARD, + .udma_mask = 0x00, /* no udma */ },{ /* 1 */ .name = "CMD646", .init_setup = init_setup_cmd646, @@ -725,6 +723,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = { .autodma = AUTODMA, .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, .bootable = ON_BOARD, + .udma_mask = 0x07, /* udma0-2 */ },{ /* 2 */ .name = "CMD648", .init_setup = init_setup_cmd64x, @@ -734,6 +733,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = { .autodma = AUTODMA, .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, .bootable = ON_BOARD, + .udma_mask = 0x1f, /* udma0-4 */ },{ /* 3 */ .name = "CMD649", .init_setup = init_setup_cmd64x, @@ -743,6 +743,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = { .autodma = AUTODMA, .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, .bootable = ON_BOARD, + .udma_mask = 0x3f, /* udma0-5 */ } }; diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c index 2cdd629c653d..a23853445af9 100644 --- a/drivers/ide/pci/pdc202xx_new.c +++ b/drivers/ide/pci/pdc202xx_new.c @@ -543,7 +543,8 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif) hwif->drives[0].autotune = hwif->drives[1].autotune = 1; hwif->atapi_dma = 1; - hwif->ultra_mask = 0x7f; + + hwif->ultra_mask = hwif->cds->udma_mask; hwif->mwdma_mask = 0x07; hwif->err_stops_fifo = 1; @@ -619,6 +620,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = { .channels = 2, .autodma = AUTODMA, .bootable = OFF_BOARD, + .udma_mask = 0x3f, /* udma0-5 */ },{ /* 1 */ .name = "PDC20269", .init_setup = init_setup_pdcnew, @@ -627,6 +629,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = { 
.channels = 2, .autodma = AUTODMA, .bootable = OFF_BOARD, + .udma_mask = 0x7f, /* udma0-6*/ },{ /* 2 */ .name = "PDC20270", .init_setup = init_setup_pdc20270, @@ -635,6 +638,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = { .channels = 2, .autodma = AUTODMA, .bootable = OFF_BOARD, + .udma_mask = 0x3f, /* udma0-5 */ },{ /* 3 */ .name = "PDC20271", .init_setup = init_setup_pdcnew, @@ -643,6 +647,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = { .channels = 2, .autodma = AUTODMA, .bootable = OFF_BOARD, + .udma_mask = 0x7f, /* udma0-6*/ },{ /* 4 */ .name = "PDC20275", .init_setup = init_setup_pdcnew, @@ -651,6 +656,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = { .channels = 2, .autodma = AUTODMA, .bootable = OFF_BOARD, + .udma_mask = 0x7f, /* udma0-6*/ },{ /* 5 */ .name = "PDC20276", .init_setup = init_setup_pdc20276, @@ -659,6 +665,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = { .channels = 2, .autodma = AUTODMA, .bootable = OFF_BOARD, + .udma_mask = 0x7f, /* udma0-6*/ },{ /* 6 */ .name = "PDC20277", .init_setup = init_setup_pdcnew, @@ -667,6 +674,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = { .channels = 2, .autodma = AUTODMA, .bootable = OFF_BOARD, + .udma_mask = 0x7f, /* udma0-6*/ } }; diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c index a7a639fe1eaf..d7a38067fb34 100644 --- a/drivers/ide/pci/pdc202xx_old.c +++ b/drivers/ide/pci/pdc202xx_old.c @@ -478,7 +478,7 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif) hwif->drives[0].autotune = hwif->drives[1].autotune = 1; - hwif->ultra_mask = 0x3f; + hwif->ultra_mask = hwif->cds->udma_mask; hwif->mwdma_mask = 0x07; hwif->swdma_mask = 0x07; hwif->atapi_dma = 1; @@ -587,6 +587,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = { .autodma = AUTODMA, .bootable = OFF_BOARD, .extra = 16, + .udma_mask = 0x07, /* udma0-2 */ },{ /* 1 */ .name = "PDC20262", .init_setup = init_setup_pdc202ata4, @@ -597,6 +598,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = { .autodma = AUTODMA, .bootable = OFF_BOARD, .extra = 48, + .udma_mask = 0x1f, /* udma0-4 */ },{ /* 2 */ .name = "PDC20263", .init_setup = init_setup_pdc202ata4, @@ -607,6 +609,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = { .autodma = AUTODMA, .bootable = OFF_BOARD, .extra = 48, + .udma_mask = 0x1f, /* udma0-4 */ },{ /* 3 */ .name = "PDC20265", .init_setup = init_setup_pdc20265, @@ -617,6 +620,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = { .autodma = AUTODMA, .bootable = OFF_BOARD, .extra = 48, + .udma_mask = 0x3f, /* udma0-5 */ },{ /* 4 */ .name = "PDC20267", .init_setup = init_setup_pdc202xx, @@ -627,6 +631,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = { .autodma = AUTODMA, .bootable = OFF_BOARD, .extra = 48, + .udma_mask = 0x3f, /* udma0-5 */ } }; diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c index 061d300ab8be..17ea44edb4eb 100644 --- a/drivers/ide/pci/piix.c +++ b/drivers/ide/pci/piix.c @@ -524,26 +524,14 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif) hwif->ide_dma_clear_irq = &piix_dma_clear_irq; hwif->atapi_dma = 1; - hwif->ultra_mask = 0x3f; + + hwif->ultra_mask = hwif->cds->udma_mask; hwif->mwdma_mask = 0x06; hwif->swdma_mask = 0x04; - switch(hwif->pci_dev->device) { - case PCI_DEVICE_ID_INTEL_82371FB_0: - case PCI_DEVICE_ID_INTEL_82371FB_1: - case PCI_DEVICE_ID_INTEL_82371SB_1: - hwif->ultra_mask = 0x80; - break; - case PCI_DEVICE_ID_INTEL_82371AB: - case 
PCI_DEVICE_ID_INTEL_82443MX_1: - case PCI_DEVICE_ID_INTEL_82451NX: - case PCI_DEVICE_ID_INTEL_82801AB_1: - hwif->ultra_mask = 0x07; - break; - default: - if (!hwif->udma_four) - hwif->udma_four = piix_cable_detect(hwif); - break; + if (hwif->ultra_mask & 0x78) { + if (!hwif->udma_four) + hwif->udma_four = piix_cable_detect(hwif); } if (no_piix_dma) @@ -557,7 +545,7 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif) hwif->drives[0].autodma = hwif->autodma; } -#define DECLARE_PIIX_DEV(name_str) \ +#define DECLARE_PIIX_DEV(name_str, udma) \ { \ .name = name_str, \ .init_chipset = init_chipset_piix, \ @@ -566,11 +554,12 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif) .autodma = AUTODMA, \ .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ .bootable = ON_BOARD, \ + .udma_mask = udma, \ } static ide_pci_device_t piix_pci_info[] __devinitdata = { - /* 0 */ DECLARE_PIIX_DEV("PIIXa"), - /* 1 */ DECLARE_PIIX_DEV("PIIXb"), + /* 0 */ DECLARE_PIIX_DEV("PIIXa", 0x00), /* no udma */ + /* 1 */ DECLARE_PIIX_DEV("PIIXb", 0x00), /* no udma */ /* 2 */ { /* @@ -587,28 +576,28 @@ static ide_pci_device_t piix_pci_info[] __devinitdata = { .flags = IDEPCI_FLAG_ISA_PORTS }, - /* 3 */ DECLARE_PIIX_DEV("PIIX3"), - /* 4 */ DECLARE_PIIX_DEV("PIIX4"), - /* 5 */ DECLARE_PIIX_DEV("ICH0"), - /* 6 */ DECLARE_PIIX_DEV("PIIX4"), - /* 7 */ DECLARE_PIIX_DEV("ICH"), - /* 8 */ DECLARE_PIIX_DEV("PIIX4"), - /* 9 */ DECLARE_PIIX_DEV("PIIX4"), - /* 10 */ DECLARE_PIIX_DEV("ICH2"), - /* 11 */ DECLARE_PIIX_DEV("ICH2M"), - /* 12 */ DECLARE_PIIX_DEV("ICH3M"), - /* 13 */ DECLARE_PIIX_DEV("ICH3"), - /* 14 */ DECLARE_PIIX_DEV("ICH4"), - /* 15 */ DECLARE_PIIX_DEV("ICH5"), - /* 16 */ DECLARE_PIIX_DEV("C-ICH"), - /* 17 */ DECLARE_PIIX_DEV("ICH4"), - /* 18 */ DECLARE_PIIX_DEV("ICH5-SATA"), - /* 19 */ DECLARE_PIIX_DEV("ICH5"), - /* 20 */ DECLARE_PIIX_DEV("ICH6"), - /* 21 */ DECLARE_PIIX_DEV("ICH7"), - /* 22 */ DECLARE_PIIX_DEV("ICH4"), - /* 23 */ DECLARE_PIIX_DEV("ESB2"), - /* 24 */ DECLARE_PIIX_DEV("ICH8M"), + /* 3 */ DECLARE_PIIX_DEV("PIIX3", 0x00), /* no udma */ + /* 4 */ DECLARE_PIIX_DEV("PIIX4", 0x07), /* udma0-2 */ + /* 5 */ DECLARE_PIIX_DEV("ICH0", 0x07), /* udma0-2 */ + /* 6 */ DECLARE_PIIX_DEV("PIIX4", 0x07), /* udma0-2 */ + /* 7 */ DECLARE_PIIX_DEV("ICH", 0x1f), /* udma0-4 */ + /* 8 */ DECLARE_PIIX_DEV("PIIX4", 0x1f), /* udma0-4 */ + /* 9 */ DECLARE_PIIX_DEV("PIIX4", 0x07), /* udma0-2 */ + /* 10 */ DECLARE_PIIX_DEV("ICH2", 0x3f), /* udma0-5 */ + /* 11 */ DECLARE_PIIX_DEV("ICH2M", 0x3f), /* udma0-5 */ + /* 12 */ DECLARE_PIIX_DEV("ICH3M", 0x3f), /* udma0-5 */ + /* 13 */ DECLARE_PIIX_DEV("ICH3", 0x3f), /* udma0-5 */ + /* 14 */ DECLARE_PIIX_DEV("ICH4", 0x3f), /* udma0-5 */ + /* 15 */ DECLARE_PIIX_DEV("ICH5", 0x3f), /* udma0-5 */ + /* 16 */ DECLARE_PIIX_DEV("C-ICH", 0x3f), /* udma0-5 */ + /* 17 */ DECLARE_PIIX_DEV("ICH4", 0x3f), /* udma0-5 */ + /* 18 */ DECLARE_PIIX_DEV("ICH5-SATA", 0x3f), /* udma0-5 */ + /* 19 */ DECLARE_PIIX_DEV("ICH5", 0x3f), /* udma0-5 */ + /* 20 */ DECLARE_PIIX_DEV("ICH6", 0x3f), /* udma0-5 */ + /* 21 */ DECLARE_PIIX_DEV("ICH7", 0x3f), /* udma0-5 */ + /* 22 */ DECLARE_PIIX_DEV("ICH4", 0x3f), /* udma0-5 */ + /* 23 */ DECLARE_PIIX_DEV("ESB2", 0x3f), /* udma0-5 */ + /* 24 */ DECLARE_PIIX_DEV("ICH8M", 0x3f), /* udma0-5 */ }; /** diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index 2ba0669f36a1..6fe4ecb6c06c 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c @@ -858,6 +858,8 @@ static unsigned int __devinit ata66_sis5513 (ide_hwif_t *hwif) static void __devinit 
init_hwif_sis5513 (ide_hwif_t *hwif) { + u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f }; + hwif->autodma = 0; if (!hwif->irq) @@ -873,7 +875,8 @@ static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif) } hwif->atapi_dma = 1; - hwif->ultra_mask = 0x7f; + + hwif->ultra_mask = udma_rates[chipset_family]; hwif->mwdma_mask = 0x07; hwif->swdma_mask = 0x07; diff --git a/include/linux/ide.h b/include/linux/ide.h index 418dfb5adadd..c9375c863584 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1257,6 +1257,7 @@ typedef struct ide_pci_device_s { unsigned int extra; struct ide_pci_device_s *next; u8 flags; + u8 udma_mask; } ide_pci_device_t; extern int ide_setup_pci_device(struct pci_dev *, ide_pci_device_t *); -- cgit v1.2.3 From 2d5eaa6dd744a641e75503232a01f52d0768884c Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 10 May 2007 00:01:08 +0200 Subject: ide: rework the code for selecting the best DMA transfer mode (v3) Depends on the "ide: fix UDMA/MWDMA/SWDMA masks" patch. * add ide_hwif_t.udma_filter hook for filtering UDMA mask (use it in alim15x3, hpt366, siimage and serverworks drivers) * add ide_max_dma_mode() for finding best DMA mode for the device (loosely based on some older libata-core.c code) * convert ide_dma_speed() users to use ide_max_dma_mode() * make ide_rate_filter() take "ide_drive_t *drive" as an argument instead of "u8 mode" and teach it to how to use UDMA mask to do filtering * use ide_rate_filter() in hpt366 driver * remove no longer needed ide_dma_speed() and *_ratemask() * unexport eighty_ninty_three() v2: * rename ->filter_udma_mask to ->udma_filter [ Suggested by Sergei Shtylyov . ] v3: * updated for scc_pata driver (fixes XFER_UDMA_6 filtering for user-space originated transfer mode change requests when 100MHz clock is used) Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/arm/icside.c | 2 +- drivers/ide/cris/ide-cris.c | 2 +- drivers/ide/ide-dma.c | 74 ++++++++++++++++++++++++ drivers/ide/ide-iops.c | 2 - drivers/ide/ide-lib.c | 125 ++++++----------------------------------- drivers/ide/ide.c | 1 + drivers/ide/pci/aec62xx.c | 32 +---------- drivers/ide/pci/alim15x3.c | 76 ++++++------------------- drivers/ide/pci/atiixp.c | 20 +------ drivers/ide/pci/cmd64x.c | 65 +++++---------------- drivers/ide/pci/cs5535.c | 20 +------ drivers/ide/pci/hpt34x.c | 9 +-- drivers/ide/pci/hpt366.c | 67 +++++++++++----------- drivers/ide/pci/it8213.c | 20 +------ drivers/ide/pci/it821x.c | 20 +------ drivers/ide/pci/jmicron.c | 21 +------ drivers/ide/pci/pdc202xx_new.c | 14 +---- drivers/ide/pci/pdc202xx_old.c | 27 +-------- drivers/ide/pci/piix.c | 66 +--------------------- drivers/ide/pci/scc_pata.c | 21 +------ drivers/ide/pci/serverworks.c | 31 ++++++---- drivers/ide/pci/siimage.c | 45 +++++++-------- drivers/ide/pci/sis5513.c | 14 +---- drivers/ide/pci/slc90e66.c | 13 +---- drivers/ide/pci/tc86c001.c | 9 +-- drivers/ide/pci/triflex.c | 4 +- include/linux/ide.h | 10 ++-- 27 files changed, 236 insertions(+), 574 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c index e2953fc1fafb..f383ace2a087 100644 --- a/drivers/ide/arm/icside.c +++ b/drivers/ide/arm/icside.c @@ -342,7 +342,7 @@ static int icside_dma_check(ide_drive_t *drive) * Enable DMA on any drive that has multiword DMA */ if (id->field_valid & 2) { - xfer_mode = ide_dma_speed(drive, 0); + xfer_mode = ide_max_dma_mode(drive); goto out; } diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c 
index 5e8efc89255a..9691d089fce8 100644 --- a/drivers/ide/cris/ide-cris.c +++ b/drivers/ide/cris/ide-cris.c @@ -1004,7 +1004,7 @@ static int cris_ide_build_dmatable (ide_drive_t *drive) static int cris_config_drive_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, 1); + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index fd213088b06b..f28fabb791fe 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c @@ -705,6 +705,80 @@ int ide_use_dma(ide_drive_t *drive) EXPORT_SYMBOL_GPL(ide_use_dma); +static const u8 xfer_mode_bases[] = { + XFER_UDMA_0, + XFER_MW_DMA_0, + XFER_SW_DMA_0, +}; + +static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base) +{ + struct hd_driveid *id = drive->id; + ide_hwif_t *hwif = drive->hwif; + unsigned int mask = 0; + + switch(base) { + case XFER_UDMA_0: + if ((id->field_valid & 4) == 0) + break; + + mask = id->dma_ultra & hwif->ultra_mask; + + if (hwif->udma_filter) + mask &= hwif->udma_filter(drive); + + if ((mask & 0x78) && (eighty_ninty_three(drive) == 0)) + mask &= 0x07; + break; + case XFER_MW_DMA_0: + mask = id->dma_mword & hwif->mwdma_mask; + break; + case XFER_SW_DMA_0: + mask = id->dma_1word & hwif->swdma_mask; + break; + default: + BUG(); + break; + } + + return mask; +} + +/** + * ide_max_dma_mode - compute DMA speed + * @drive: IDE device + * + * Checks the drive capabilities and returns the speed to use + * for the DMA transfer. Returns 0 if the drive is incapable + * of DMA transfers. + */ + +u8 ide_max_dma_mode(ide_drive_t *drive) +{ + ide_hwif_t *hwif = drive->hwif; + unsigned int mask; + int x, i; + u8 mode = 0; + + if (drive->media != ide_disk && hwif->atapi_dma == 0) + return 0; + + for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) { + mask = ide_get_mode_mask(drive, xfer_mode_bases[i]); + x = fls(mask) - 1; + if (x >= 0) { + mode = xfer_mode_bases[i] + x; + break; + } + } + + printk(KERN_DEBUG "%s: selected mode 0x%x\n", drive->name, mode); + + return mode; +} + +EXPORT_SYMBOL_GPL(ide_max_dma_mode); + void ide_dma_verbose(ide_drive_t *drive) { struct hd_driveid *id = drive->id; diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 3caa176b3155..ed6128f6cd98 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -592,8 +592,6 @@ u8 eighty_ninty_three (ide_drive_t *drive) return 1; } -EXPORT_SYMBOL(eighty_ninty_three); - int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) { if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) && diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index 68719314df3f..4557fc5a3ea3 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c @@ -69,123 +69,34 @@ char *ide_xfer_verbose (u8 xfer_rate) EXPORT_SYMBOL(ide_xfer_verbose); /** - * ide_dma_speed - compute DMA speed - * @drive: drive - * @mode: modes available - * - * Checks the drive capabilities and returns the speed to use - * for the DMA transfer. Returns 0 if the drive is incapable - * of DMA transfers. - */ - -u8 ide_dma_speed(ide_drive_t *drive, u8 mode) -{ - struct hd_driveid *id = drive->id; - ide_hwif_t *hwif = HWIF(drive); - u8 ultra_mask, mwdma_mask, swdma_mask; - u8 speed = 0; - - if (drive->media != ide_disk && hwif->atapi_dma == 0) - return 0; - - /* Capable of UltraDMA modes? 
*/ - ultra_mask = id->dma_ultra & hwif->ultra_mask; - - if (!(id->field_valid & 4)) - mode = 0; /* fallback to MW/SW DMA if no UltraDMA */ - - switch (mode) { - case 4: - if (ultra_mask & 0x40) { - speed = XFER_UDMA_6; - break; - } - case 3: - if (ultra_mask & 0x20) { - speed = XFER_UDMA_5; - break; - } - case 2: - if (ultra_mask & 0x10) { - speed = XFER_UDMA_4; - break; - } - if (ultra_mask & 0x08) { - speed = XFER_UDMA_3; - break; - } - case 1: - if (ultra_mask & 0x04) { - speed = XFER_UDMA_2; - break; - } - if (ultra_mask & 0x02) { - speed = XFER_UDMA_1; - break; - } - if (ultra_mask & 0x01) { - speed = XFER_UDMA_0; - break; - } - case 0: - mwdma_mask = id->dma_mword & hwif->mwdma_mask; - - if (mwdma_mask & 0x04) { - speed = XFER_MW_DMA_2; - break; - } - if (mwdma_mask & 0x02) { - speed = XFER_MW_DMA_1; - break; - } - if (mwdma_mask & 0x01) { - speed = XFER_MW_DMA_0; - break; - } - - swdma_mask = id->dma_1word & hwif->swdma_mask; - - if (swdma_mask & 0x04) { - speed = XFER_SW_DMA_2; - break; - } - if (swdma_mask & 0x02) { - speed = XFER_SW_DMA_1; - break; - } - if (swdma_mask & 0x01) { - speed = XFER_SW_DMA_0; - break; - } - } - - return speed; -} -EXPORT_SYMBOL(ide_dma_speed); - - -/** - * ide_rate_filter - return best speed for mode - * @mode: modes available + * ide_rate_filter - filter transfer mode + * @drive: IDE device * @speed: desired speed * - * Given the available DMA/UDMA mode this function returns + * Given the available transfer modes this function returns * the best available speed at or below the speed requested. + * + * FIXME: filter also PIO/SWDMA/MWDMA modes */ -u8 ide_rate_filter (u8 mode, u8 speed) +u8 ide_rate_filter(ide_drive_t *drive, u8 speed) { #ifdef CONFIG_BLK_DEV_IDEDMA - static u8 speed_max[] = { - XFER_MW_DMA_2, XFER_UDMA_2, XFER_UDMA_4, - XFER_UDMA_5, XFER_UDMA_6 - }; + ide_hwif_t *hwif = drive->hwif; + u8 mask = hwif->ultra_mask, mode = XFER_MW_DMA_2; + + if (hwif->udma_filter) + mask = hwif->udma_filter(drive); + + if ((mask & 0x78) && (eighty_ninty_three(drive) == 0)) + mask &= 0x07; + + if (mask) + mode = fls(mask) - 1 + XFER_UDMA_0; // printk("%s: mode 0x%02x, speed 0x%02x\n", __FUNCTION__, mode, speed); - /* So that we remember to update this if new modes appear */ - BUG_ON(mode > 4); - return min(speed, speed_max[mode]); + return min(speed, mode); #else /* !CONFIG_BLK_DEV_IDEDMA */ return min(speed, (u8)XFER_PIO_4); #endif /* CONFIG_BLK_DEV_IDEDMA */ diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index c06424fe647b..73f752142f9e 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -477,6 +477,7 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif) hwif->tuneproc = tmp_hwif->tuneproc; hwif->speedproc = tmp_hwif->speedproc; + hwif->udma_filter = tmp_hwif->udma_filter; hwif->selectproc = tmp_hwif->selectproc; hwif->reset_poll = tmp_hwif->reset_poll; hwif->pre_reset = tmp_hwif->pre_reset; diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c index abe0b1bb55ff..099539e8c7a3 100644 --- a/drivers/ide/pci/aec62xx.c +++ b/drivers/ide/pci/aec62xx.c @@ -87,38 +87,12 @@ static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entr return chipset_table->ultra_settings; } -static u8 aec62xx_ratemask (ide_drive_t *drive) -{ - ide_hwif_t *hwif = HWIF(drive); - u8 mode; - - switch(hwif->pci_dev->device) { - case PCI_DEVICE_ID_ARTOP_ATP865: - case PCI_DEVICE_ID_ARTOP_ATP865R: - mode = (inb(hwif->channel ? - hwif->mate->dma_status : - hwif->dma_status) & 0x10) ? 
4 : 3; - break; - case PCI_DEVICE_ID_ARTOP_ATP860: - case PCI_DEVICE_ID_ARTOP_ATP860R: - mode = 2; - break; - case PCI_DEVICE_ID_ARTOP_ATP850UF: - default: - return 1; - } - - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); - return mode; -} - static int aec6210_tune_chipset (ide_drive_t *drive, u8 xferspeed) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; u16 d_conf = 0; - u8 speed = ide_rate_filter(aec62xx_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); u8 ultra = 0, ultra_conf = 0; u8 tmp0 = 0, tmp1 = 0, tmp2 = 0; unsigned long flags; @@ -145,7 +119,7 @@ static int aec6260_tune_chipset (ide_drive_t *drive, u8 xferspeed) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; - u8 speed = ide_rate_filter(aec62xx_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); u8 unit = (drive->select.b.unit & 0x01); u8 tmp1 = 0, tmp2 = 0; u8 ultra = 0, drive_conf = 0, ultra_conf = 0; @@ -183,7 +157,7 @@ static int aec62xx_tune_chipset (ide_drive_t *drive, u8 speed) static int config_chipset_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, aec62xx_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!(speed)) return 0; diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c index 26c27d946c67..76643b626bdd 100644 --- a/drivers/ide/pci/alim15x3.c +++ b/drivers/ide/pci/alim15x3.c @@ -378,74 +378,31 @@ static void ali15x3_tune_drive (ide_drive_t *drive, u8 pio) } /** - * ali15x3_can_ultra - check for ultra DMA support - * @drive: drive to do the check + * ali_udma_filter - compute UDMA mask + * @drive: IDE device * - * Check the drive and controller revisions. Return 0 if UDMA is - * not available, or 1 if UDMA can be used. The actual rules for - * the ALi are + * Return available UDMA modes. + * + * The actual rules for the ALi are: * No UDMA on revisions <= 0x20 * Disk only for revisions < 0xC2 * Not WDC drives for revisions < 0xC2 * * FIXME: WDC ifdef needs to die */ - -static u8 ali15x3_can_ultra (ide_drive_t *drive) -{ -#ifndef CONFIG_WDC_ALI15X3 - struct hd_driveid *id = drive->id; -#endif /* CONFIG_WDC_ALI15X3 */ - - if (m5229_revision <= 0x20) { - return 0; - } else if ((m5229_revision < 0xC2) && -#ifndef CONFIG_WDC_ALI15X3 - ((chip_is_1543c_e && strstr(id->model, "WDC ")) || - (drive->media!=ide_disk))) { -#else /* CONFIG_WDC_ALI15X3 */ - (drive->media!=ide_disk)) { -#endif /* CONFIG_WDC_ALI15X3 */ - return 0; - } else { - return 1; - } -} -/** - * ali15x3_ratemask - generate DMA mode list - * @drive: drive to compute against - * - * Generate a list of the available DMA modes for the drive. - * FIXME: this function contains lots of bogus masking we can dump - * - * Return the highest available mode (UDMA33, UDMA66, UDMA100,..) 
- */ - -static u8 ali15x3_ratemask (ide_drive_t *drive) +static u8 ali_udma_filter(ide_drive_t *drive) { - u8 mode = 0, can_ultra = ali15x3_can_ultra(drive); - - if (m5229_revision > 0xC4 && can_ultra) { - mode = 4; - } else if (m5229_revision == 0xC4 && can_ultra) { - mode = 3; - } else if (m5229_revision >= 0xC2 && can_ultra) { - mode = 2; - } else if (can_ultra) { - return 1; - } else { - return 0; + if (m5229_revision > 0x20 && m5229_revision < 0xC2) { + if (drive->media != ide_disk) + return 0; +#ifndef CONFIG_WDC_ALI15X3 + if (chip_is_1543c_e && strstr(drive->id->model, "WDC ")) + return 0; +#endif } - /* - * If the drive sees no suitable cable then UDMA 33 - * is the highest permitted mode - */ - - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); - return mode; + return drive->hwif->ultra_mask; } /** @@ -461,7 +418,7 @@ static int ali15x3_tune_chipset (ide_drive_t *drive, u8 xferspeed) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; - u8 speed = ide_rate_filter(ali15x3_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); u8 speed1 = speed; u8 unit = (drive->select.b.unit & 0x01); u8 tmpbyte = 0x00; @@ -511,7 +468,7 @@ static int ali15x3_tune_chipset (ide_drive_t *drive, u8 xferspeed) static int config_chipset_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, ali15x3_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!(speed)) return 0; @@ -771,6 +728,7 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif) hwif->autodma = 0; hwif->tuneproc = &ali15x3_tune_drive; hwif->speedproc = &ali15x3_tune_chipset; + hwif->udma_filter = &ali_udma_filter; /* don't use LBA48 DMA on ALi devices before rev 0xC5 */ hwif->no_lba48_dma = (m5229_revision <= 0xC4) ? 1 : 0; diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c index 2d48af32e3f4..f7e80d6076d7 100644 --- a/drivers/ide/pci/atiixp.c +++ b/drivers/ide/pci/atiixp.c @@ -48,22 +48,6 @@ static int save_mdma_mode[4]; static DEFINE_SPINLOCK(atiixp_lock); -/** - * atiixp_ratemask - compute rate mask for ATIIXP IDE - * @drive: IDE drive to compute for - * - * Returns the available modes for the ATIIXP IDE controller. 
- */ - -static u8 atiixp_ratemask(ide_drive_t *drive) -{ - u8 mode = 3; - - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); - return mode; -} - /** * atiixp_dma_2_pio - return the PIO mode matching DMA * @xfer_rate: transfer speed @@ -189,7 +173,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed) u16 tmp16; u8 speed, pio; - speed = ide_rate_filter(atiixp_ratemask(drive), xferspeed); + speed = ide_rate_filter(drive, xferspeed); spin_lock_irqsave(&atiixp_lock, flags); @@ -233,7 +217,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed) static int atiixp_config_drive_for_dma(ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, atiixp_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c index 610c45f7b4e2..19f5ac1f866c 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/pci/cmd64x.c @@ -292,55 +292,6 @@ static void cmd64x_tune_drive (ide_drive_t *drive, u8 pio) (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio); } -static u8 cmd64x_ratemask (ide_drive_t *drive) -{ - struct pci_dev *dev = HWIF(drive)->pci_dev; - u8 mode = 0; - - switch(dev->device) { - case PCI_DEVICE_ID_CMD_649: - mode = 3; - break; - case PCI_DEVICE_ID_CMD_648: - mode = 2; - break; - case PCI_DEVICE_ID_CMD_643: - return 0; - - case PCI_DEVICE_ID_CMD_646: - { - unsigned int class_rev = 0; - pci_read_config_dword(dev, - PCI_CLASS_REVISION, &class_rev); - class_rev &= 0xff; - /* - * UltraDMA only supported on PCI646U and PCI646U2, which - * correspond to revisions 0x03, 0x05 and 0x07 respectively. - * Actually, although the CMD tech support people won't - * tell me the details, the 0x03 revision cannot support - * UDMA correctly without hardware modifications, and even - * then it only works with Quantum disks due to some - * hold time assumptions in the 646U part which are fixed - * in the 646U2. - * - * So we only do UltraDMA on revision 0x05 and 0x07 chipsets. - */ - switch(class_rev) { - case 0x07: - case 0x05: - return 1; - case 0x03: - case 0x01: - default: - return 0; - } - } - } - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); - return mode; -} - static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed) { ide_hwif_t *hwif = HWIF(drive); @@ -348,7 +299,7 @@ static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed) u8 unit = drive->dn & 0x01; u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0; - speed = ide_rate_filter(cmd64x_ratemask(drive), speed); + speed = ide_rate_filter(drive, speed); if (speed >= XFER_SW_DMA_0) { (void) pci_read_config_byte(dev, pciU, ®U); @@ -403,7 +354,7 @@ static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed) static int config_chipset_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, cmd64x_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; @@ -646,6 +597,18 @@ static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif) hwif->ultra_mask = hwif->cds->udma_mask; + /* + * UltraDMA only supported on PCI646U and PCI646U2, which + * correspond to revisions 0x03, 0x05 and 0x07 respectively. + * Actually, although the CMD tech support people won't + * tell me the details, the 0x03 revision cannot support + * UDMA correctly without hardware modifications, and even + * then it only works with Quantum disks due to some + * hold time assumptions in the 646U part which are fixed + * in the 646U2. + * + * So we only do UltraDMA on revision 0x05 and 0x07 chipsets. 
+ */ if (dev->device == PCI_DEVICE_ID_CMD_646 && class_rev < 5) hwif->ultra_mask = 0x00; diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c index 45f43efbf92c..66a101e470d0 100644 --- a/drivers/ide/pci/cs5535.c +++ b/drivers/ide/pci/cs5535.c @@ -127,20 +127,6 @@ static void cs5535_set_speed(ide_drive_t *drive, u8 speed) } } -static u8 cs5535_ratemask(ide_drive_t *drive) -{ - /* eighty93 will return 1 if it's 80core and capable of - exceeding udma2, 0 otherwise. we need ratemask to set - the max speed and if we can > udma2 then we return 2 - which selects speed_max as udma4 which is the 5535's max - speed, and 1 selects udma2 which is the max for 40c */ - if (!eighty_ninty_three(drive)) - return 1; - - return 2; -} - - /**** * cs5535_set_drive - Configure the drive to the new speed * @drive: Drive to set up @@ -151,7 +137,7 @@ static u8 cs5535_ratemask(ide_drive_t *drive) */ static int cs5535_set_drive(ide_drive_t *drive, u8 speed) { - speed = ide_rate_filter(cs5535_ratemask(drive), speed); + speed = ide_rate_filter(drive, speed); ide_config_drive_speed(drive, speed); cs5535_set_speed(drive, speed); @@ -180,9 +166,7 @@ static void cs5535_tuneproc(ide_drive_t *drive, u8 xferspeed) static int cs5535_config_drive_for_dma(ide_drive_t *drive) { - u8 speed; - - speed = ide_dma_speed(drive, cs5535_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); /* If no DMA speed was available then let dma_check hit pio */ if (!speed) { diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c index 924eaa3a5708..473e1b33dbf7 100644 --- a/drivers/ide/pci/hpt34x.c +++ b/drivers/ide/pci/hpt34x.c @@ -43,15 +43,10 @@ #define HPT343_DEBUG_DRIVE_INFO 0 -static u8 hpt34x_ratemask (ide_drive_t *drive) -{ - return 1; -} - static int hpt34x_tune_chipset (ide_drive_t *drive, u8 xferspeed) { struct pci_dev *dev = HWIF(drive)->pci_dev; - u8 speed = ide_rate_filter(hpt34x_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); u32 reg1= 0, tmp1 = 0, reg2 = 0, tmp2 = 0; u8 hi_speed, lo_speed; @@ -98,7 +93,7 @@ static void hpt34x_tune_drive (ide_drive_t *drive, u8 pio) static int config_chipset_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, hpt34x_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!(speed)) return 0; diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index cf9d344d19f8..de5ad9c35dc6 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c @@ -514,43 +514,31 @@ static int check_in_drive_list(ide_drive_t *drive, const char **list) return 0; } -static u8 hpt3xx_ratemask(ide_drive_t *drive) -{ - struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev); - u8 mode = info->max_mode; - - if (!eighty_ninty_three(drive) && mode) - mode = min(mode, (u8)1); - return mode; -} - /* * Note for the future; the SATA hpt37x we must set * either PIO or UDMA modes 0,4,5 */ - -static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed) + +static u8 hpt3xx_udma_filter(ide_drive_t *drive) { struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev); u8 chip_type = info->chip_type; - u8 mode = hpt3xx_ratemask(drive); - - if (drive->media != ide_disk) - return min(speed, (u8)XFER_PIO_4); + u8 mode = info->max_mode; + u8 mask; switch (mode) { case 0x04: - speed = min_t(u8, speed, XFER_UDMA_6); + mask = 0x7f; break; case 0x03: - speed = min_t(u8, speed, XFER_UDMA_5); + mask = 0x3f; if (chip_type >= HPT374) break; if (!check_in_drive_list(drive, bad_ata100_5)) goto check_bad_ata33; /* fall thru */ case 0x02: - speed = min_t(u8, 
speed, XFER_UDMA_4); + mask = 0x1f; /* * CHECK ME, Does this need to be changed to HPT374 ?? @@ -561,13 +549,13 @@ static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed) !check_in_drive_list(drive, bad_ata66_4)) goto check_bad_ata33; - speed = min_t(u8, speed, XFER_UDMA_3); + mask = 0x0f; if (HPT366_ALLOW_ATA66_3 && !check_in_drive_list(drive, bad_ata66_3)) goto check_bad_ata33; /* fall thru */ case 0x01: - speed = min_t(u8, speed, XFER_UDMA_2); + mask = 0x07; check_bad_ata33: if (chip_type >= HPT370A) @@ -577,10 +565,10 @@ static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed) /* fall thru */ case 0x00: default: - speed = min_t(u8, speed, XFER_MW_DMA_2); + mask = 0x00; break; } - return speed; + return mask; } static u32 get_speed_setting(u8 speed, struct hpt_info *info) @@ -608,12 +596,19 @@ static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed) ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; struct hpt_info *info = pci_get_drvdata(dev); - u8 speed = hpt3xx_ratefilter(drive, xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); u8 itr_addr = drive->dn ? 0x44 : 0x40; - u32 itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 : - (speed < XFER_UDMA_0 ? 0xc0070000 : 0xc03800ff); - u32 new_itr = get_speed_setting(speed, info); u32 old_itr = 0; + u32 itr_mask, new_itr; + + /* TODO: move this to ide_rate_filter() [ check ->atapi_dma ] */ + if (drive->media != ide_disk) + speed = min_t(u8, speed, XFER_PIO_4); + + itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 : + (speed < XFER_UDMA_0 ? 0xc0070000 : 0xc03800ff); + + new_itr = get_speed_setting(speed, info); /* * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well) @@ -633,12 +628,19 @@ static int hpt37x_tune_chipset(ide_drive_t *drive, u8 xferspeed) ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; struct hpt_info *info = pci_get_drvdata(dev); - u8 speed = hpt3xx_ratefilter(drive, xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); u8 itr_addr = 0x40 + (drive->dn * 4); - u32 itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 : - (speed < XFER_UDMA_0 ? 0xc03c0000 : 0xc1c001ff); - u32 new_itr = get_speed_setting(speed, info); u32 old_itr = 0; + u32 itr_mask, new_itr; + + /* TODO: move this to ide_rate_filter() [ check ->atapi_dma ] */ + if (drive->media != ide_disk) + speed = min_t(u8, speed, XFER_PIO_4); + + itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 : + (speed < XFER_UDMA_0 ? 0xc03c0000 : 0xc1c001ff); + + new_itr = get_speed_setting(speed, info); pci_read_config_dword(dev, itr_addr, &old_itr); new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask); @@ -676,7 +678,7 @@ static void hpt3xx_tune_drive(ide_drive_t *drive, u8 pio) */ static int config_chipset_for_dma(ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, hpt3xx_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; @@ -1271,6 +1273,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif) hwif->intrproc = &hpt3xx_intrproc; hwif->maskproc = &hpt3xx_maskproc; hwif->busproc = &hpt3xx_busproc; + hwif->udma_filter = &hpt3xx_udma_filter; /* * HPT3xxN chips have some complications: diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c index 424f00bb160d..02b56cb7bb1b 100644 --- a/drivers/ide/pci/it8213.c +++ b/drivers/ide/pci/it8213.c @@ -17,22 +17,6 @@ #include -/* - * it8213_ratemask - Compute available modes - * @drive: IDE drive - * - * Compute the available speeds for the devices on the interface. This - * is all modes to ATA133 clipped by drive cable setup. 
- */ - -static u8 it8213_ratemask (ide_drive_t *drive) -{ - u8 mode = 4; - if (!eighty_ninty_three(drive)) - mode = min_t(u8, mode, 1); - return mode; -} - /** * it8213_dma_2_pio - return the PIO mode matching DMA * @xfer_rate: transfer speed @@ -145,7 +129,7 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed) ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; u8 maslave = 0x40; - u8 speed = ide_rate_filter(it8213_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); int a_speed = 3 << (drive->dn * 4); int u_flag = 1 << drive->dn; int v_flag = 0x01 << drive->dn; @@ -222,7 +206,7 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed) static int config_chipset_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, it8213_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c index 4e1254813ee0..442f658c6ae7 100644 --- a/drivers/ide/pci/it821x.c +++ b/drivers/ide/pci/it821x.c @@ -228,22 +228,6 @@ static void it821x_clock_strategy(ide_drive_t *drive) } } -/** - * it821x_ratemask - Compute available modes - * @drive: IDE drive - * - * Compute the available speeds for the devices on the interface. This - * is all modes to ATA133 clipped by drive cable setup. - */ - -static u8 it821x_ratemask (ide_drive_t *drive) -{ - u8 mode = 4; - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); - return mode; -} - /** * it821x_tunepio - tune a drive * @drive: drive to tune @@ -438,7 +422,7 @@ static int it821x_tune_chipset (ide_drive_t *drive, byte xferspeed) ide_hwif_t *hwif = drive->hwif; struct it821x_dev *itdev = ide_get_hwifdata(hwif); - u8 speed = ide_rate_filter(it821x_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); switch (speed) { case XFER_PIO_4: @@ -488,7 +472,7 @@ static int it821x_tune_chipset (ide_drive_t *drive, byte xferspeed) static int config_chipset_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, it821x_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (speed == 0) return 0; diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c index be4fc96c29e0..dbb3c199cba9 100644 --- a/drivers/ide/pci/jmicron.c +++ b/drivers/ide/pci/jmicron.c @@ -21,22 +21,6 @@ typedef enum { PORT_SATA = 2, } port_type; -/** - * jmicron_ratemask - Compute available modes - * @drive: IDE drive - * - * Compute the available speeds for the devices on the interface. This - * is all modes to ATA133 clipped by drive cable setup. 
- */ - -static u8 jmicron_ratemask(ide_drive_t *drive) -{ - u8 mode = 4; - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); - return mode; -} - /** * ata66_jmicron - Cable check * @hwif: IDE port @@ -129,8 +113,7 @@ static void config_jmicron_chipset_for_pio (ide_drive_t *drive, byte set_speed) static int jmicron_tune_chipset (ide_drive_t *drive, byte xferspeed) { - - u8 speed = ide_rate_filter(jmicron_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); return ide_config_drive_speed(drive, speed); } @@ -145,7 +128,7 @@ static int jmicron_tune_chipset (ide_drive_t *drive, byte xferspeed) static int config_chipset_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, jmicron_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c index a23853445af9..772ca4007de4 100644 --- a/drivers/ide/pci/pdc202xx_new.c +++ b/drivers/ide/pci/pdc202xx_new.c @@ -82,16 +82,6 @@ static u8 max_dma_rate(struct pci_dev *pdev) return mode; } -static u8 pdcnew_ratemask(ide_drive_t *drive) -{ - u8 mode = max_dma_rate(HWIF(drive)->pci_dev); - - if (!eighty_ninty_three(drive)) - mode = min_t(u8, mode, 1); - - return mode; -} - /** * get_indexed_reg - Get indexed register * @hwif: for the port address @@ -164,7 +154,7 @@ static int pdcnew_tune_chipset(ide_drive_t *drive, u8 speed) u8 adj = (drive->dn & 1) ? 0x08 : 0x00; int err; - speed = ide_rate_filter(pdcnew_ratemask(drive), speed); + speed = ide_rate_filter(drive, speed); /* * Issue SETFEATURES_XFER to the drive first. PDC202xx hardware will @@ -267,7 +257,7 @@ static int config_chipset_for_dma(ide_drive_t *drive) set_indexed_reg(hwif, 0x13 + adj, tmp | 0x03); } - speed = ide_dma_speed(drive, pdcnew_ratemask(drive)); + speed = ide_max_dma_mode(drive); if (!speed) return 0; diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c index d7a38067fb34..207a6191fac7 100644 --- a/drivers/ide/pci/pdc202xx_old.c +++ b/drivers/ide/pci/pdc202xx_old.c @@ -101,35 +101,12 @@ static const char *pdc_quirk_drives[] = { #define MC1 0x02 /* DMA"C" timing */ #define MC0 0x01 /* DMA"C" timing */ -static u8 pdc202xx_ratemask (ide_drive_t *drive) -{ - u8 mode; - - switch(HWIF(drive)->pci_dev->device) { - case PCI_DEVICE_ID_PROMISE_20267: - case PCI_DEVICE_ID_PROMISE_20265: - mode = 3; - break; - case PCI_DEVICE_ID_PROMISE_20263: - case PCI_DEVICE_ID_PROMISE_20262: - mode = 2; - break; - case PCI_DEVICE_ID_PROMISE_20246: - return 1; - default: - return 0; - } - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); - return mode; -} - static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; u8 drive_pci = 0x60 + (drive->dn << 2); - u8 speed = ide_rate_filter(pdc202xx_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); u32 drive_conf; u8 AP, BP, CP, DP; @@ -308,7 +285,7 @@ chipset_is_set: if (drive->media == ide_disk) /* PREFETCH_EN */ pci_write_config_byte(dev, (drive_pci), AP|PREFETCH_EN); - speed = ide_dma_speed(drive, pdc202xx_ratemask(drive)); + speed = ide_max_dma_mode(drive); if (!(speed)) { /* restore original pci-config space */ diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c index 17ea44edb4eb..84d3938a430a 100644 --- a/drivers/ide/pci/piix.c +++ b/drivers/ide/pci/piix.c @@ -105,68 +105,6 @@ static int no_piix_dma; -/** - * piix_ratemask - compute rate mask for PIIX IDE - * @drive: IDE drive to 
compute for - * - * Returns the available modes for the PIIX IDE controller. - */ - -static u8 piix_ratemask (ide_drive_t *drive) -{ - struct pci_dev *dev = HWIF(drive)->pci_dev; - u8 mode; - - switch(dev->device) { - case PCI_DEVICE_ID_INTEL_82801EB_1: - mode = 3; - break; - /* UDMA 100 capable */ - case PCI_DEVICE_ID_INTEL_82801BA_8: - case PCI_DEVICE_ID_INTEL_82801BA_9: - case PCI_DEVICE_ID_INTEL_82801CA_10: - case PCI_DEVICE_ID_INTEL_82801CA_11: - case PCI_DEVICE_ID_INTEL_82801E_11: - case PCI_DEVICE_ID_INTEL_82801DB_1: - case PCI_DEVICE_ID_INTEL_82801DB_10: - case PCI_DEVICE_ID_INTEL_82801DB_11: - case PCI_DEVICE_ID_INTEL_82801EB_11: - case PCI_DEVICE_ID_INTEL_ESB_2: - case PCI_DEVICE_ID_INTEL_ICH6_19: - case PCI_DEVICE_ID_INTEL_ICH7_21: - case PCI_DEVICE_ID_INTEL_ESB2_18: - case PCI_DEVICE_ID_INTEL_ICH8_6: - mode = 3; - break; - /* UDMA 66 capable */ - case PCI_DEVICE_ID_INTEL_82801AA_1: - case PCI_DEVICE_ID_INTEL_82372FB_1: - mode = 2; - break; - /* UDMA 33 capable */ - case PCI_DEVICE_ID_INTEL_82371AB: - case PCI_DEVICE_ID_INTEL_82443MX_1: - case PCI_DEVICE_ID_INTEL_82451NX: - case PCI_DEVICE_ID_INTEL_82801AB_1: - return 1; - /* Non UDMA capable (MWDMA2) */ - case PCI_DEVICE_ID_INTEL_82371SB_1: - case PCI_DEVICE_ID_INTEL_82371FB_1: - case PCI_DEVICE_ID_INTEL_82371FB_0: - case PCI_DEVICE_ID_INTEL_82371MX: - default: - return 0; - } - - /* - * If we are UDMA66 capable fall back to UDMA33 - * if the drive cannot see an 80pin cable. - */ - if (!eighty_ninty_three(drive)) - mode = min_t(u8, mode, 1); - return mode; -} - /** * piix_dma_2_pio - return the PIO mode matching DMA * @xfer_rate: transfer speed @@ -301,7 +239,7 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed) ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; u8 maslave = hwif->channel ? 0x42 : 0x40; - u8 speed = ide_rate_filter(piix_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); int a_speed = 3 << (drive->dn * 4); int u_flag = 1 << drive->dn; int v_flag = 0x01 << drive->dn; @@ -376,7 +314,7 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed) static int piix_config_drive_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, piix_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); /* * If no DMA speed was available or the chipset has DMA bugs diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c index f84bf791f72e..cbf936325355 100644 --- a/drivers/ide/pci/scc_pata.c +++ b/drivers/ide/pci/scc_pata.c @@ -189,23 +189,6 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count) } } -/** - * scc_ratemask - Compute available modes - * @drive: IDE drive - * - * Compute the available speeds for the devices on the interface. - * Enforce UDMA33 as a limit if there is no 80pin cable present. 
- */ - -static u8 scc_ratemask(ide_drive_t *drive) -{ - u8 mode = 4; - - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); - return mode; -} - /** * scc_tuneproc - tune a drive PIO mode * @drive: drive to tune @@ -273,7 +256,7 @@ static void scc_tuneproc(ide_drive_t *drive, byte mode_wanted) static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed) { ide_hwif_t *hwif = HWIF(drive); - u8 speed = ide_rate_filter(scc_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); struct scc_ports *ports = ide_get_hwifdata(hwif); unsigned long ctl_base = ports->ctl; unsigned long cckctrl_port = ctl_base + 0xff0; @@ -347,7 +330,7 @@ static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed) static int scc_config_chipset_for_dma(ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, scc_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c index dbcd37a0c652..2fa6d92d16cc 100644 --- a/drivers/ide/pci/serverworks.c +++ b/drivers/ide/pci/serverworks.c @@ -65,16 +65,16 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list) return 0; } -static u8 svwks_ratemask (ide_drive_t *drive) +static u8 svwks_udma_filter(ide_drive_t *drive) { struct pci_dev *dev = HWIF(drive)->pci_dev; - u8 mode = 0; + u8 mask = 0; if (!svwks_revision) pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision); if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) - return 2; + return 0x1f; if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) { u32 reg = 0; if (isa_dev) @@ -86,25 +86,31 @@ static u8 svwks_ratemask (ide_drive_t *drive) if(drive->media == ide_disk) return 0; /* Check the OSB4 DMA33 enable bit */ - return ((reg & 0x00004000) == 0x00004000) ? 1 : 0; + return ((reg & 0x00004000) == 0x00004000) ? 0x07 : 0; } else if (svwks_revision < SVWKS_CSB5_REVISION_NEW) { - return 1; + return 0x07; } else if (svwks_revision >= SVWKS_CSB5_REVISION_NEW) { - u8 btr = 0; + u8 btr = 0, mode; pci_read_config_byte(dev, 0x5A, &btr); mode = btr & 0x3; - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); + /* If someone decides to do UDMA133 on CSB5 the same issue will bite so be inclusive */ if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100)) mode = 2; + + switch(mode) { + case 2: mask = 0x1f; break; + case 1: mask = 0x07; break; + default: mask = 0x00; break; + } } if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) || (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) && (!(PCI_FUNC(dev->devfn) & 1))) - mode = 2; - return mode; + mask = 0x1f; + + return mask; } static u8 svwks_csb_check (struct pci_dev *dev) @@ -141,7 +147,7 @@ static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed) if (xferspeed == 255) /* PIO auto-tuning */ speed = XFER_PIO_0 + pio; else - speed = ide_rate_filter(svwks_ratemask(drive), xferspeed); + speed = ide_rate_filter(drive, xferspeed); /* If we are about to put a disk into UDMA mode we screwed up. 
Our code assumes we never _ever_ do this on an OSB4 */ @@ -304,7 +310,7 @@ static void svwks_tune_drive (ide_drive_t *drive, u8 pio) static int config_chipset_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, svwks_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!(speed)) speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL); @@ -500,6 +506,7 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif) hwif->tuneproc = &svwks_tune_drive; hwif->speedproc = &svwks_tune_chipset; + hwif->udma_filter = &svwks_udma_filter; hwif->atapi_dma = 1; diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c index c0188de3cc66..5314ec913724 100644 --- a/drivers/ide/pci/siimage.c +++ b/drivers/ide/pci/siimage.c @@ -122,45 +122,41 @@ static inline unsigned long siimage_seldev(ide_drive_t *drive, int r) } /** - * siimage_ratemask - Compute available modes - * @drive: IDE drive + * sil_udma_filter - compute UDMA mask + * @drive: IDE device + * + * Compute the available UDMA speeds for the device on the interface. * - * Compute the available speeds for the devices on the interface. * For the CMD680 this depends on the clocking mode (scsc), for the - * SI3312 SATA controller life is a bit simpler. Enforce UDMA33 - * as a limit if there is no 80pin cable present. + * SI3112 SATA controller life is a bit simpler. */ - -static byte siimage_ratemask (ide_drive_t *drive) + +static u8 sil_udma_filter(ide_drive_t *drive) { - ide_hwif_t *hwif = HWIF(drive); - u8 mode = 0, scsc = 0; + ide_hwif_t *hwif = drive->hwif; unsigned long base = (unsigned long) hwif->hwif_data; + u8 mask = 0, scsc = 0; if (hwif->mmio) scsc = hwif->INB(base + 0x4A); else pci_read_config_byte(hwif->pci_dev, 0x8A, &scsc); - if(is_sata(hwif)) - { - if(strstr(drive->id->model, "Maxtor")) - return 3; - return 4; + if (is_sata(hwif)) { + mask = strstr(drive->id->model, "Maxtor") ? 0x3f : 0x7f; + goto out; } - + if ((scsc & 0x30) == 0x10) /* 133 */ - mode = 4; + mask = 0x7f; else if ((scsc & 0x30) == 0x20) /* 2xPCI */ - mode = 4; + mask = 0x7f; else if ((scsc & 0x30) == 0x00) /* 100 */ - mode = 3; + mask = 0x3f; else /* Disabled ? */ BUG(); - - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); - return mode; +out: + return mask; } /** @@ -306,7 +302,7 @@ static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed) ide_hwif_t *hwif = HWIF(drive); u16 ultra = 0, multi = 0; u8 mode = 0, unit = drive->select.b.unit; - u8 speed = ide_rate_filter(siimage_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); unsigned long base = (unsigned long)hwif->hwif_data; u8 scsc = 0, addr_mask = ((hwif->channel) ? ((hwif->mmio) ? 
0xF4 : 0x84) : @@ -389,7 +385,7 @@ static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed) static int config_chipset_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, siimage_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; @@ -989,6 +985,7 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif) hwif->tuneproc = &siimage_tuneproc; hwif->reset_poll = &siimage_reset_poll; hwif->pre_reset = &siimage_pre_reset; + hwif->udma_filter = &sil_udma_filter; if(is_sata(hwif)) { static int first = 1; diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index 6fe4ecb6c06c..83c80ed73e99 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c @@ -428,16 +428,6 @@ static int sis_get_info (char *buffer, char **addr, off_t offset, int count) } #endif /* defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS) */ -static u8 sis5513_ratemask (ide_drive_t *drive) -{ - u8 rates[] = { 0, 0, 1, 2, 3, 3, 4, 4 }; - u8 mode = rates[chipset_family]; - - if (!eighty_ninty_three(drive)) - mode = min(mode, (u8)1); - return mode; -} - /* * Configuration functions */ @@ -563,7 +553,7 @@ static int sis5513_tune_chipset (ide_drive_t *drive, u8 xferspeed) u8 drive_pci, reg, speed; u32 regdw; - speed = ide_rate_filter(sis5513_ratemask(drive), xferspeed); + speed = ide_rate_filter(drive, xferspeed); /* See config_art_rwp_pio for drive pci config registers */ drive_pci = 0x40; @@ -653,7 +643,7 @@ static void sis5513_tune_drive (ide_drive_t *drive, u8 pio) */ static int config_chipset_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, sis5513_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); #ifdef DEBUG printk("SIS5513: config_chipset_for_dma, drive %d, ultra %x\n", diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c index 852ccb36da1d..9e95a5cbf984 100644 --- a/drivers/ide/pci/slc90e66.c +++ b/drivers/ide/pci/slc90e66.c @@ -21,15 +21,6 @@ #include -static u8 slc90e66_ratemask (ide_drive_t *drive) -{ - u8 mode = 2; - - if (!eighty_ninty_three(drive)) - mode = min_t(u8, mode, 1); - return mode; -} - static u8 slc90e66_dma_2_pio (u8 xfer_rate) { switch(xfer_rate) { case XFER_UDMA_4: @@ -122,7 +113,7 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed) ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; u8 maslave = hwif->channel ? 0x42 : 0x40; - u8 speed = ide_rate_filter(slc90e66_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); int sitre = 0, a_speed = 7 << (drive->dn * 4); int u_speed = 0, u_flag = 1 << drive->dn; u16 reg4042, reg44, reg48, reg4a; @@ -171,7 +162,7 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed) static int slc90e66_config_drive_for_dma (ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, slc90e66_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c index 0b6d81d6ce48..168f035caa3f 100644 --- a/drivers/ide/pci/tc86c001.c +++ b/drivers/ide/pci/tc86c001.c @@ -13,18 +13,13 @@ #include #include -static inline u8 tc86c001_ratemask(ide_drive_t *drive) -{ - return eighty_ninty_three(drive) ? 2 : 1; -} - static int tc86c001_tune_chipset(ide_drive_t *drive, u8 speed) { ide_hwif_t *hwif = HWIF(drive); unsigned long scr_port = hwif->config_data + (drive->dn ? 
0x02 : 0x00); u16 mode, scr = hwif->INW(scr_port); - speed = ide_rate_filter(tc86c001_ratemask(drive), speed); + speed = ide_rate_filter(drive, speed); switch (speed) { case XFER_UDMA_4: mode = 0x00c0; break; @@ -174,7 +169,7 @@ static int tc86c001_busproc(ide_drive_t *drive, int state) static int config_chipset_for_dma(ide_drive_t *drive) { - u8 speed = ide_dma_speed(drive, tc86c001_ratemask(drive)); + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c index 5e06179c3469..8a877235b949 100644 --- a/drivers/ide/pci/triflex.c +++ b/drivers/ide/pci/triflex.c @@ -48,7 +48,7 @@ static int triflex_tune_chipset(ide_drive_t *drive, u8 xferspeed) u16 timing = 0; u32 triflex_timings = 0; u8 unit = (drive->select.b.unit & 0x01); - u8 speed = ide_rate_filter(0, xferspeed); + u8 speed = ide_rate_filter(drive, xferspeed); pci_read_config_dword(dev, channel_offset, &triflex_timings); @@ -102,7 +102,7 @@ static void triflex_tune_drive(ide_drive_t *drive, u8 pio) static int triflex_config_drive_for_dma(ide_drive_t *drive) { - int speed = ide_dma_speed(drive, 0); /* No ultra speeds */ + u8 speed = ide_max_dma_mode(drive); if (!speed) return 0; diff --git a/include/linux/ide.h b/include/linux/ide.h index c9375c863584..23ab4dc05009 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -717,11 +717,8 @@ typedef struct hwif_s { int (*quirkproc)(ide_drive_t *); /* driver soft-power interface */ int (*busproc)(ide_drive_t *, int); -// /* host rate limiter */ -// u8 (*ratemask)(ide_drive_t *); -// /* device rate limiter */ -// u8 (*ratefilter)(ide_drive_t *, u8); #endif + u8 (*udma_filter)(ide_drive_t *); void (*ata_input_data)(ide_drive_t *, void *, u32); void (*ata_output_data)(ide_drive_t *, void *, u32); @@ -1279,6 +1276,7 @@ int ide_in_drive_list(struct hd_driveid *, const struct drive_list_entry *); int __ide_dma_bad_drive(ide_drive_t *); int __ide_dma_good_drive(ide_drive_t *); int ide_use_dma(ide_drive_t *); +u8 ide_max_dma_mode(ide_drive_t *); void ide_dma_off(ide_drive_t *); void ide_dma_verbose(ide_drive_t *); int ide_set_dma(ide_drive_t *); @@ -1305,6 +1303,7 @@ extern int __ide_dma_timeout(ide_drive_t *); #else static inline int ide_use_dma(ide_drive_t *drive) { return 0; } +static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; } static inline void ide_dma_off(ide_drive_t *drive) { ; } static inline void ide_dma_verbose(ide_drive_t *drive) { ; } static inline int ide_set_dma(ide_drive_t *drive) { return 1; } @@ -1349,8 +1348,7 @@ static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data) } /* ide-lib.c */ -extern u8 ide_dma_speed(ide_drive_t *drive, u8 mode); -extern u8 ide_rate_filter(u8 mode, u8 speed); +u8 ide_rate_filter(ide_drive_t *, u8); extern int ide_dma_enable(ide_drive_t *drive); extern char *ide_xfer_verbose(u8 xfer_rate); extern void ide_toggle_bounce(ide_drive_t *drive, int on); -- cgit v1.2.3 From 29e744d088e3555f4efbdf390f01088dd66993b6 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 10 May 2007 00:01:09 +0200 Subject: ide: add ide_tune_dma() helper After reworking the code responsible for selecting the best DMA transfer mode it is now possible to add generic ide_tune_dma() helper. Convert some IDE PCI host drivers to use it (the ones left need more work). 
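For reference, the shape a converted host driver ends up with after this series is roughly the following sketch; the "foo" names are purely illustrative and do not appear in these patches, only the hooks and helpers they wire up (->ultra_mask, ->udma_filter, ide_tune_dma()) do:

	/* illustrative sketch only, not part of this patch */
	static u8 foo_udma_filter(ide_drive_t *drive)
	{
		/* trim the port's UDMA mask per device; bit n == UDMA mode n,
		   so e.g. 0x3f means UDMA0-5 */
		if (drive->media != ide_disk)
			return 0;

		return drive->hwif->ultra_mask;
	}

	static int foo_config_drive_xfer_rate(ide_drive_t *drive)
	{
		/* ide_tune_dma() == ide_use_dma() + ide_max_dma_mode()
		   + ->speedproc() + ide_dma_enable() */
		if (ide_tune_dma(drive))
			return 0;

		/* otherwise fall back to PIO tuning */
		return -1;
	}

	static void __devinit foo_init_hwif(ide_hwif_t *hwif)
	{
		hwif->ultra_mask    = hwif->cds->udma_mask;
		hwif->udma_filter   = &foo_udma_filter;
		hwif->ide_dma_check = &foo_config_drive_xfer_rate;
	}
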
Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-dma.c | 20 ++++++++++++++++++++ drivers/ide/pci/aec62xx.c | 13 +------------ drivers/ide/pci/atiixp.c | 22 +--------------------- drivers/ide/pci/cs5535.c | 15 +-------------- drivers/ide/pci/hpt34x.c | 20 +------------------- drivers/ide/pci/hpt366.c | 20 +------------------- drivers/ide/pci/it8213.c | 21 +-------------------- drivers/ide/pci/jmicron.c | 21 +-------------------- drivers/ide/pci/piix.c | 26 +------------------------- drivers/ide/pci/sis5513.c | 21 +-------------------- drivers/ide/pci/slc90e66.c | 13 +------------ drivers/ide/pci/tc86c001.c | 13 +------------ drivers/ide/pci/triflex.c | 13 +------------ include/linux/ide.h | 2 ++ 14 files changed, 34 insertions(+), 206 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index f28fabb791fe..5fe85191d49c 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c @@ -779,6 +779,26 @@ u8 ide_max_dma_mode(ide_drive_t *drive) EXPORT_SYMBOL_GPL(ide_max_dma_mode); +int ide_tune_dma(ide_drive_t *drive) +{ + u8 speed; + + /* TODO: use only ide_max_dma_mode() */ + if (!ide_use_dma(drive)) + return 0; + + speed = ide_max_dma_mode(drive); + + if (!speed) + return 0; + + drive->hwif->speedproc(drive, speed); + + return ide_dma_enable(drive); +} + +EXPORT_SYMBOL_GPL(ide_tune_dma); + void ide_dma_verbose(ide_drive_t *drive) { struct hd_driveid *id = drive->id; diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c index 099539e8c7a3..b173bc66ce1e 100644 --- a/drivers/ide/pci/aec62xx.c +++ b/drivers/ide/pci/aec62xx.c @@ -155,17 +155,6 @@ static int aec62xx_tune_chipset (ide_drive_t *drive, u8 speed) } } -static int config_chipset_for_dma (ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - if (!(speed)) - return 0; - - (void) aec62xx_tune_chipset(drive, speed); - return ide_dma_enable(drive); -} - static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio) { pio = ide_get_best_pio_mode(drive, pio, 4, NULL); @@ -174,7 +163,7 @@ static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio) static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive) { - if (ide_use_dma(drive) && config_chipset_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; if (ide_use_fast_pio(drive)) diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c index f7e80d6076d7..0e52ad722a72 100644 --- a/drivers/ide/pci/atiixp.c +++ b/drivers/ide/pci/atiixp.c @@ -206,26 +206,6 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed) return ide_config_drive_speed(drive, speed); } -/** - * atiixp_config_drive_for_dma - configure drive for DMA - * @drive: IDE drive to configure - * - * Set up a ATIIXP interface channel for the best available speed. - * We prefer UDMA if it is available and then MWDMA. If DMA is - * not available we switch to PIO and return 0. 
- */ - -static int atiixp_config_drive_for_dma(ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - if (!speed) - return 0; - - (void) atiixp_speedproc(drive, speed); - return ide_dma_enable(drive); -} - /** * atiixp_dma_check - set up an IDE device * @drive: IDE drive to configure @@ -240,7 +220,7 @@ static int atiixp_dma_check(ide_drive_t *drive) drive->init_speed = 0; - if (ide_use_dma(drive) && atiixp_config_drive_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; if (ide_use_fast_pio(drive)) { diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c index 66a101e470d0..41925c47ef05 100644 --- a/drivers/ide/pci/cs5535.c +++ b/drivers/ide/pci/cs5535.c @@ -164,26 +164,13 @@ static void cs5535_tuneproc(ide_drive_t *drive, u8 xferspeed) cs5535_set_speed(drive, xferspeed); } -static int cs5535_config_drive_for_dma(ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - /* If no DMA speed was available then let dma_check hit pio */ - if (!speed) { - return 0; - } - - cs5535_set_drive(drive, speed); - return ide_dma_enable(drive); -} - static int cs5535_dma_check(ide_drive_t *drive) { u8 speed; drive->init_speed = 0; - if (ide_use_dma(drive) && cs5535_config_drive_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; if (ide_use_fast_pio(drive)) { diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c index 473e1b33dbf7..2c24c3de8846 100644 --- a/drivers/ide/pci/hpt34x.c +++ b/drivers/ide/pci/hpt34x.c @@ -84,29 +84,11 @@ static void hpt34x_tune_drive (ide_drive_t *drive, u8 pio) (void) hpt34x_tune_chipset(drive, (XFER_PIO_0 + pio)); } -/* - * This allows the configuration of ide_pci chipset registers - * for cards that learn about the drive's UDMA, DMA, PIO capabilities - * after the drive is reported by the OS. Initially for designed for - * HPT343 UDMA chipset by HighPoint|Triones Technologies, Inc. - */ - -static int config_chipset_for_dma (ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - if (!(speed)) - return 0; - - (void) hpt34x_tune_chipset(drive, speed); - return ide_dma_enable(drive); -} - static int hpt34x_config_drive_xfer_rate (ide_drive_t *drive) { drive->init_speed = 0; - if (ide_use_dma(drive) && config_chipset_for_dma(drive)) + if (ide_tune_dma(drive)) #ifndef CONFIG_HPT34X_AUTODMA return -1; #else diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index de5ad9c35dc6..fcbc5605b38e 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c @@ -669,24 +669,6 @@ static void hpt3xx_tune_drive(ide_drive_t *drive, u8 pio) (void) hpt3xx_tune_chipset (drive, XFER_PIO_0 + pio); } -/* - * This allows the configuration of ide_pci chipset registers - * for cards that learn about the drive's UDMA, DMA, PIO capabilities - * after the drive is reported by the OS. Initially designed for - * HPT366 UDMA chipset by HighPoint|Triones Technologies, Inc. 
- * - */ -static int config_chipset_for_dma(ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - if (!speed) - return 0; - - (void) hpt3xx_tune_chipset(drive, speed); - return ide_dma_enable(drive); -} - static int hpt3xx_quirkproc(ide_drive_t *drive) { struct hd_driveid *id = drive->id; @@ -741,7 +723,7 @@ static int hpt366_config_drive_xfer_rate(ide_drive_t *drive) { drive->init_speed = 0; - if (ide_use_dma(drive) && config_chipset_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; if (ide_use_fast_pio(drive)) diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c index 02b56cb7bb1b..c04a02687b95 100644 --- a/drivers/ide/pci/it8213.c +++ b/drivers/ide/pci/it8213.c @@ -197,25 +197,6 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed) return ide_config_drive_speed(drive, speed); } -/* - * config_chipset_for_dma - configure for DMA - * @drive: drive to configure - * - * Called by the IDE layer when it wants the timings set up. - */ - -static int config_chipset_for_dma (ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - if (!speed) - return 0; - - it8213_tune_chipset(drive, speed); - - return ide_dma_enable(drive); -} - /** * it8213_configure_drive_for_dma - set up for DMA transfers * @drive: drive we are going to set up @@ -230,7 +211,7 @@ static int it8213_config_drive_for_dma (ide_drive_t *drive) { u8 pio; - if (ide_use_dma(drive) && config_chipset_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; pio = ide_get_best_pio_mode(drive, 255, 4, NULL); diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c index dbb3c199cba9..76ed25147229 100644 --- a/drivers/ide/pci/jmicron.c +++ b/drivers/ide/pci/jmicron.c @@ -118,25 +118,6 @@ static int jmicron_tune_chipset (ide_drive_t *drive, byte xferspeed) return ide_config_drive_speed(drive, speed); } -/** - * config_chipset_for_dma - configure for DMA - * @drive: drive to configure - * - * As the JMicron snoops for timings all we actually need to do is - * make sure we don't set an invalid mode. - */ - -static int config_chipset_for_dma (ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - if (!speed) - return 0; - - jmicron_tune_chipset(drive, speed); - return ide_dma_enable(drive); -} - /** * jmicron_configure_drive_for_dma - set up for DMA transfers * @drive: drive we are going to set up @@ -147,7 +128,7 @@ static int config_chipset_for_dma (ide_drive_t *drive) static int jmicron_config_drive_for_dma (ide_drive_t *drive) { - if (ide_use_dma(drive) && config_chipset_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; config_jmicron_chipset_for_pio(drive, 1); diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c index 84d3938a430a..8b219dd63024 100644 --- a/drivers/ide/pci/piix.c +++ b/drivers/ide/pci/piix.c @@ -303,30 +303,6 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed) return ide_config_drive_speed(drive, speed); } -/** - * piix_config_drive_for_dma - configure drive for DMA - * @drive: IDE drive to configure - * - * Set up a PIIX interface channel for the best available speed. - * We prefer UDMA if it is available and then MWDMA. If DMA is - * not available we switch to PIO and return 0. 
- */ - -static int piix_config_drive_for_dma (ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - /* - * If no DMA speed was available or the chipset has DMA bugs - * then disable DMA and use PIO - */ - if (!speed) - return 0; - - (void) piix_tune_chipset(drive, speed); - return ide_dma_enable(drive); -} - /** * piix_config_drive_xfer_rate - set up an IDE device * @drive: IDE drive to configure @@ -339,7 +315,7 @@ static int piix_config_drive_xfer_rate (ide_drive_t *drive) { drive->init_speed = 0; - if (ide_use_dma(drive) && piix_config_drive_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; if (ide_use_fast_pio(drive)) diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index 83c80ed73e99..41953fe4fa6b 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c @@ -638,32 +638,13 @@ static void sis5513_tune_drive (ide_drive_t *drive, u8 pio) (void) config_chipset_for_pio(drive, pio); } -/* - * ((id->hw_config & 0x4000|0x2000) && (HWIF(drive)->udma_four)) - */ -static int config_chipset_for_dma (ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - -#ifdef DEBUG - printk("SIS5513: config_chipset_for_dma, drive %d, ultra %x\n", - drive->dn, drive->id->dma_ultra); -#endif - - if (!(speed)) - return 0; - - sis5513_tune_chipset(drive, speed); - return ide_dma_enable(drive); -} - static int sis5513_config_xfer_rate(ide_drive_t *drive) { config_art_rwp_pio(drive, 5); drive->init_speed = 0; - if (ide_use_dma(drive) && config_chipset_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; if (ide_use_fast_pio(drive)) diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c index 9e95a5cbf984..c40f291f91e0 100644 --- a/drivers/ide/pci/slc90e66.c +++ b/drivers/ide/pci/slc90e66.c @@ -160,22 +160,11 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed) return ide_config_drive_speed(drive, speed); } -static int slc90e66_config_drive_for_dma (ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - if (!speed) - return 0; - - (void) slc90e66_tune_chipset(drive, speed); - return ide_dma_enable(drive); -} - static int slc90e66_config_drive_xfer_rate (ide_drive_t *drive) { drive->init_speed = 0; - if (ide_use_dma(drive) && slc90e66_config_drive_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; if (ide_use_fast_pio(drive)) diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c index 168f035caa3f..cee619bb2eaf 100644 --- a/drivers/ide/pci/tc86c001.c +++ b/drivers/ide/pci/tc86c001.c @@ -167,20 +167,9 @@ static int tc86c001_busproc(ide_drive_t *drive, int state) return 0; } -static int config_chipset_for_dma(ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - if (!speed) - return 0; - - (void) tc86c001_tune_chipset(drive, speed); - return ide_dma_enable(drive); -} - static int tc86c001_config_drive_xfer_rate(ide_drive_t *drive) { - if (ide_use_dma(drive) && config_chipset_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; if (ide_use_fast_pio(drive)) diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c index 8a877235b949..35e8c612638f 100644 --- a/drivers/ide/pci/triflex.c +++ b/drivers/ide/pci/triflex.c @@ -100,20 +100,9 @@ static void triflex_tune_drive(ide_drive_t *drive, u8 pio) (void) triflex_tune_chipset(drive, (XFER_PIO_0 + use_pio)); } -static int triflex_config_drive_for_dma(ide_drive_t *drive) -{ - u8 speed = ide_max_dma_mode(drive); - - if (!speed) - return 0; - - (void) triflex_tune_chipset(drive, speed); - return ide_dma_enable(drive); -} - static int 
triflex_config_drive_xfer_rate(ide_drive_t *drive) { - if (ide_use_dma(drive) && triflex_config_drive_for_dma(drive)) + if (ide_tune_dma(drive)) return 0; triflex_tune_drive(drive, 255); diff --git a/include/linux/ide.h b/include/linux/ide.h index 23ab4dc05009..d03fa2d5d75a 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1277,6 +1277,7 @@ int __ide_dma_bad_drive(ide_drive_t *); int __ide_dma_good_drive(ide_drive_t *); int ide_use_dma(ide_drive_t *); u8 ide_max_dma_mode(ide_drive_t *); +int ide_tune_dma(ide_drive_t *); void ide_dma_off(ide_drive_t *); void ide_dma_verbose(ide_drive_t *); int ide_set_dma(ide_drive_t *); @@ -1304,6 +1305,7 @@ extern int __ide_dma_timeout(ide_drive_t *); #else static inline int ide_use_dma(ide_drive_t *drive) { return 0; } static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; } +static inline int ide_tune_dma(ide_drive_t *drive) { return 0; } static inline void ide_dma_off(ide_drive_t *drive) { ; } static inline void ide_dma_verbose(ide_drive_t *drive) { ; } static inline int ide_set_dma(ide_drive_t *drive) { return 1; } -- cgit v1.2.3 From ecfd80e4a514123070b4cfb674b817ba75055df2 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 10 May 2007 00:01:09 +0200 Subject: ide: make /proc/ide/ optional All important information/features should be already available through sysfs and ioctl interfaces. Add CONFIG_IDE_PROC_FS (CONFIG_SCSI_PROC_FS rip-off) config option, disabling it makes IDE driver ~5 kB smaller (on x86-32). While at it add CONFIG_PROC_FS=n versions of proc_ide_{create,destroy}() and remove no longer needed #ifdefs. Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/Kconfig | 11 +++++++++++ drivers/ide/Makefile | 2 +- drivers/ide/ide-cd.c | 4 ++-- drivers/ide/ide-disk.c | 4 ++-- drivers/ide/ide-floppy.c | 4 ++-- drivers/ide/ide-tape.c | 2 +- drivers/ide/ide.c | 15 ++++++--------- drivers/ide/pci/alim15x3.c | 8 ++++---- drivers/ide/pci/amd74xx.c | 6 +++--- drivers/ide/pci/cmd64x.c | 8 ++++---- drivers/ide/pci/sis5513.c | 6 +++--- drivers/scsi/ide-scsi.c | 2 +- include/linux/ide.h | 14 ++++++++------ 13 files changed, 48 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 5bdf64b77913..a678bbecb489 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -291,6 +291,17 @@ config IDE_TASK_IOCTL If you are unsure, say N here. +config IDE_PROC_FS + bool "legacy /proc/ide/ support" + depends on IDE && PROC_FS + default y + help + This option enables support for the various files in + /proc/ide. In Linux 2.6 this has been superseded by + files in sysfs but many legacy applications rely on this. + + If unsure say Y. 
+ comment "IDE chipset support/bugfixes" config IDE_GENERIC diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile index d9f029e8ff74..75dc6969e0a7 100644 --- a/drivers/ide/Makefile +++ b/drivers/ide/Makefile @@ -20,7 +20,7 @@ ide-core-$(CONFIG_BLK_DEV_CMD640) += pci/cmd640.o # Core IDE code - must come before legacy ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o -ide-core-$(CONFIG_PROC_FS) += ide-proc.o +ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o ide-core-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 638becda81c6..c3a0079789fd 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -3274,7 +3274,7 @@ int ide_cdrom_setup (ide_drive_t *drive) return 0; } -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS static sector_t ide_cdrom_capacity (ide_drive_t *drive) { @@ -3321,7 +3321,7 @@ static void ide_cd_release(struct kref *kref) static int ide_cd_probe(ide_drive_t *); -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS static int proc_idecd_read_capacity (char *page, char **start, off_t off, int count, int *eof, void *data) { diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 37aa6ddd9702..e7bc4d35c40c 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -559,7 +559,7 @@ static sector_t idedisk_capacity (ide_drive_t *drive) return drive->capacity64 - drive->sect0; } -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS static int smart_enable(ide_drive_t *drive) { @@ -683,7 +683,7 @@ static ide_proc_entry_t idedisk_proc[] = { #define idedisk_proc NULL -#endif /* CONFIG_PROC_FS */ +#endif /* CONFIG_IDE_PROC_FS */ static void idedisk_prepare_flush(request_queue_t *q, struct request *rq) { diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 57cd21c5b2c1..84d398f71e0f 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -1892,7 +1892,7 @@ static void ide_floppy_release(struct kref *kref) kfree(floppy); } -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS static int proc_idefloppy_read_capacity (char *page, char **start, off_t off, int count, int *eof, void *data) @@ -1914,7 +1914,7 @@ static ide_proc_entry_t idefloppy_proc[] = { #define idefloppy_proc NULL -#endif /* CONFIG_PROC_FS */ +#endif /* CONFIG_IDE_PROC_FS */ static int ide_floppy_probe(ide_drive_t *); diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 4e59239fef75..ad306ae24e24 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -4730,7 +4730,7 @@ static void ide_tape_release(struct kref *kref) kfree(tape); } -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS static int proc_idetape_read_name (char *page, char **start, off_t off, int count, int *eof, void *data) diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 73f752142f9e..7a96f6f07494 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -350,7 +350,7 @@ static int ide_system_bus_speed(void) return system_bus_speed; } -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS struct proc_dir_entry *proc_ide_root; #endif @@ -1892,7 +1892,7 @@ static void __init probe_for_hwifs (void) void ide_register_subdriver(ide_drive_t *drive, ide_driver_t *driver) { -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS ide_add_proc_entries(drive->proc, driver->proc, drive); #endif } @@ -1914,8 +1914,8 @@ EXPORT_SYMBOL(ide_register_subdriver); void ide_unregister_subdriver(ide_drive_t *drive, ide_driver_t *driver) { unsigned long flags; - -#ifdef 
CONFIG_PROC_FS + +#ifdef CONFIG_IDE_PROC_FS ide_remove_proc_entries(drive->proc, driver->proc); #endif down(&ide_setting_sem); @@ -2069,7 +2069,7 @@ static int __init ide_init(void) init_ide_data(); -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS proc_ide_root = proc_mkdir("ide", NULL); #endif @@ -2099,9 +2099,8 @@ static int __init ide_init(void) probe_for_hwifs(); initializing = 0; -#ifdef CONFIG_PROC_FS proc_ide_create(); -#endif + return 0; } @@ -2141,9 +2140,7 @@ void __exit cleanup_module (void) pnpide_exit(); #endif -#ifdef CONFIG_PROC_FS proc_ide_destroy(); -#endif bus_unregister(&ide_bus_type); } diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c index 76643b626bdd..428efdae0c7b 100644 --- a/drivers/ide/pci/alim15x3.c +++ b/drivers/ide/pci/alim15x3.c @@ -50,7 +50,7 @@ static u8 m5229_revision; static u8 chip_is_1543c_e; static struct pci_dev *isa_dev; -#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) +#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) #include #include @@ -278,7 +278,7 @@ static int ali_get_info (char *buffer, char **addr, off_t offset, int count) return p-buffer; /* => must be less than 4k! */ } -#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) */ +#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */ /** * ali15x3_tune_pio - set up chipset for PIO mode @@ -566,13 +566,13 @@ static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const c isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); -#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) +#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) if (!ali_proc) { ali_proc = 1; bmide_dev = dev; ide_pci_create_host_proc("ali", ali_get_info); } -#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) */ +#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */ local_irq_save(flags); diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c index 7989bdd842a2..becb1a5648b0 100644 --- a/drivers/ide/pci/amd74xx.c +++ b/drivers/ide/pci/amd74xx.c @@ -92,7 +92,7 @@ static unsigned char amd_cyc2udma[] = { 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3 * AMD /proc entry. */ -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS #include #include @@ -402,14 +402,14 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch * Register /proc/ide/amd74xx entry */ -#if defined(DISPLAY_AMD_TIMINGS) && defined(CONFIG_PROC_FS) +#if defined(DISPLAY_AMD_TIMINGS) && defined(CONFIG_IDE_PROC_FS) if (!amd74xx_proc) { amd_base = pci_resource_start(dev, 4); bmide_dev = dev; ide_pci_create_host_proc("amd74xx", amd74xx_get_info); amd74xx_proc = 1; } -#endif /* DISPLAY_AMD_TIMINGS && CONFIG_PROC_FS */ +#endif /* DISPLAY_AMD_TIMINGS && CONFIG_IDE_PROC_FS */ return dev->irq; } diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c index 19f5ac1f866c..61ea96b5555c 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/pci/cmd64x.c @@ -74,7 +74,7 @@ #define UDIDETCR1 0x7B #define DTPR1 0x7C -#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_PROC_FS) +#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) #include #include @@ -165,7 +165,7 @@ static int cmd64x_get_info (char *buffer, char **addr, off_t offset, int count) return p-buffer; /* => must be less than 4k! 
*/ } -#endif /* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_PROC_FS) */ +#endif /* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */ static u8 quantize_timing(int timing, int quant) { @@ -548,7 +548,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha (void) pci_write_config_byte(dev, UDIDETCR0, 0xf0); #endif /* CONFIG_PPC */ -#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_PROC_FS) +#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) cmd_devs[n_cmd_devs++] = dev; @@ -556,7 +556,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha cmd64x_proc = 1; ide_pci_create_host_proc("cmd64x", cmd64x_get_info); } -#endif /* DISPLAY_CMD64X_TIMINGS && CONFIG_PROC_FS */ +#endif /* DISPLAY_CMD64X_TIMINGS && CONFIG_IDE_PROC_FS */ return 0; } diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index 41953fe4fa6b..2bde1b92784a 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c @@ -191,7 +191,7 @@ static char* chipset_capability[] = { "ATA 133 (1st gen)", "ATA 133 (2nd gen)" }; -#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS) +#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_IDE_PROC_FS) #include #include @@ -426,7 +426,7 @@ static int sis_get_info (char *buffer, char **addr, off_t offset, int count) return len > count ? count : len; } -#endif /* defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS) */ +#endif /* defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */ /* * Configuration functions @@ -797,7 +797,7 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c break; } -#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS) +#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_IDE_PROC_FS) if (!sis_proc) { sis_proc = 1; bmide_dev = dev; diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index 2b5b8a93bc10..531df3a29c47 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c @@ -770,7 +770,7 @@ static void ide_scsi_remove(ide_drive_t *drive) static int ide_scsi_probe(ide_drive_t *); -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS static ide_proc_entry_t idescsi_proc[] = { { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL }, { NULL, 0, NULL, NULL } diff --git a/include/linux/ide.h b/include/linux/ide.h index d03fa2d5d75a..697c39dd66a1 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -912,15 +912,15 @@ typedef struct { write_proc_t *write_proc; } ide_proc_entry_t; -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_IDE_PROC_FS extern struct proc_dir_entry *proc_ide_root; -extern void proc_ide_create(void); -extern void proc_ide_destroy(void); -extern void create_proc_ide_interfaces(void); +void proc_ide_create(void); +void proc_ide_destroy(void); +void create_proc_ide_interfaces(void); void destroy_proc_ide_interface(ide_hwif_t *); -extern void ide_add_proc_entries(struct proc_dir_entry *, ide_proc_entry_t *, void *); -extern void ide_remove_proc_entries(struct proc_dir_entry *, ide_proc_entry_t *); +void ide_add_proc_entries(struct proc_dir_entry *, ide_proc_entry_t *, void *); +void ide_remove_proc_entries(struct proc_dir_entry *, ide_proc_entry_t *); read_proc_t proc_ide_read_capacity; read_proc_t proc_ide_read_geometry; @@ -944,6 +944,8 @@ void ide_pci_create_host_proc(const char *, get_info_t *); return len; \ } #else +static inline void proc_ide_create(void) { ; } +static inline void proc_ide_destroy(void) { ; } static inline void create_proc_ide_interfaces(void) { ; } 
static inline void destroy_proc_ide_interface(ide_hwif_t *hwif) { ; } #define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) return 0; -- cgit v1.2.3 From 1497943ee692aa7519fa972d0e3a339649bf3a96 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 10 May 2007 00:01:10 +0200 Subject: ide: split off ioctl handling from IDE settings (v2) * do write permission and min/max checks in ide_procset_t functions * ide-disk.c: drive->id is always available so cleanup "multcount" setting accordingly * ide-disk.c: "address" setting was incorrectly defined as type TYPE_INTA, fix it by using type TYPE_BYTE and updating ide_drive_t->adressing field, the bug didn't trigger because this IDE setting uses custom ->set function * ide.c: add set_ksettings() for handling HDIO_SET_KEEPSETTINGS ioctl * ide.c: add set_unmaskirq() for handling HDIO_SET_UNMASKINTR ioctl * handle ioctls directly in generic_ide_ioclt() and idedisk_ioctl() instead of using IDE settings to deal with them * remove no longer needed ide_find_setting_by_ioctl() and {read,write}_ioctl fields from ide_settings_t, also remove now unused TYPE_INTA handling v2: * add missing EXPORT_SYMBOL_GPL(ide_setting_sem) needed now for ide-disk Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-cd.c | 2 +- drivers/ide/ide-disk.c | 81 ++++++++++++++++++---- drivers/ide/ide-floppy.c | 10 +-- drivers/ide/ide-tape.c | 30 ++++---- drivers/ide/ide.c | 176 ++++++++++++++++++++++++++++------------------- drivers/scsi/ide-scsi.c | 12 ++-- include/linux/ide.h | 16 ++--- 7 files changed, 203 insertions(+), 124 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index c3a0079789fd..29408cfd9869 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -3061,7 +3061,7 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive) static void ide_cdrom_add_settings(ide_drive_t *drive) { - ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL); + ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL); } /* diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index e7bc4d35c40c..fb162cb3ebf5 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -737,6 +737,9 @@ static int set_multcount(ide_drive_t *drive, int arg) { struct request rq; + if (arg < 0 || arg > drive->id->max_multsect) + return -EINVAL; + if (drive->special.b.set_multmode) return -EBUSY; ide_init_drive_cmd (&rq); @@ -749,6 +752,9 @@ static int set_multcount(ide_drive_t *drive, int arg) static int set_nowerr(ide_drive_t *drive, int arg) { + if (arg < 0 || arg > 1) + return -EINVAL; + if (ide_spin_wait_hwgroup(drive)) return -EBUSY; drive->nowerr = arg; @@ -800,6 +806,9 @@ static int write_cache(ide_drive_t *drive, int arg) ide_task_t args; int err = 1; + if (arg < 0 || arg > 1) + return -EINVAL; + if (ide_id_has_flush_cache(drive->id)) { memset(&args, 0, sizeof(ide_task_t)); args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? @@ -835,6 +844,9 @@ static int set_acoustic (ide_drive_t *drive, int arg) { ide_task_t args; + if (arg < 0 || arg > 254) + return -EINVAL; + memset(&args, 0, sizeof(ide_task_t)); args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? 
SETFEATURES_EN_AAM : SETFEATURES_DIS_AAM; @@ -855,6 +867,9 @@ static int set_acoustic (ide_drive_t *drive, int arg) */ static int set_lba_addressing(ide_drive_t *drive, int arg) { + if (arg < 0 || arg > 2) + return -EINVAL; + drive->addressing = 0; if (HWIF(drive)->no_lba48) @@ -870,18 +885,18 @@ static void idedisk_add_settings(ide_drive_t *drive) { struct hd_driveid *id = drive->id; - ide_add_setting(drive, "bios_cyl", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->bios_cyl, NULL); - ide_add_setting(drive, "bios_head", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); - ide_add_setting(drive, "bios_sect", SETTING_RW, -1, -1, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); - ide_add_setting(drive, "address", SETTING_RW, HDIO_GET_ADDRESS, HDIO_SET_ADDRESS, TYPE_INTA, 0, 2, 1, 1, &drive->addressing, set_lba_addressing); - ide_add_setting(drive, "bswap", SETTING_READ, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->bswap, NULL); - ide_add_setting(drive, "multcount", id ? SETTING_RW : SETTING_READ, HDIO_GET_MULTCOUNT, HDIO_SET_MULTCOUNT, TYPE_BYTE, 0, id ? id->max_multsect : 0, 1, 1, &drive->mult_count, set_multcount); - ide_add_setting(drive, "nowerr", SETTING_RW, HDIO_GET_NOWERR, HDIO_SET_NOWERR, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr); - ide_add_setting(drive, "lun", SETTING_RW, -1, -1, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL); - ide_add_setting(drive, "wcache", SETTING_RW, HDIO_GET_WCACHE, HDIO_SET_WCACHE, TYPE_BYTE, 0, 1, 1, 1, &drive->wcache, write_cache); - ide_add_setting(drive, "acoustic", SETTING_RW, HDIO_GET_ACOUSTIC, HDIO_SET_ACOUSTIC, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic); - ide_add_setting(drive, "failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL); - ide_add_setting(drive, "max_failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL); + ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->bios_cyl, NULL); + ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); + ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); + ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1, &drive->addressing, set_lba_addressing); + ide_add_setting(drive, "bswap", SETTING_READ, TYPE_BYTE, 0, 1, 1, 1, &drive->bswap, NULL); + ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0, id->max_multsect, 1, 1, &drive->mult_count, set_multcount); + ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr); + ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL); + ide_add_setting(drive, "wcache", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->wcache, write_cache); + ide_add_setting(drive, "acoustic", SETTING_RW, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic); + ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL); + ide_add_setting(drive, "max_failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL); } static void idedisk_setup (ide_drive_t *drive) @@ -1140,9 +1155,49 @@ static int idedisk_getgeo(struct block_device *bdev, struct hd_geometry *geo) static int idedisk_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { + unsigned long flags; struct block_device *bdev = inode->i_bdev; struct ide_disk_obj *idkp = ide_disk_g(bdev->bd_disk); - return generic_ide_ioctl(idkp->drive, file, bdev, cmd, arg); + 
ide_drive_t *drive = idkp->drive; + int err, (*setfunc)(ide_drive_t *, int); + u8 *val; + + switch (cmd) { + case HDIO_GET_ADDRESS: val = &drive->addressing; goto read_val; + case HDIO_GET_MULTCOUNT: val = &drive->mult_count; goto read_val; + case HDIO_GET_NOWERR: val = &drive->nowerr; goto read_val; + case HDIO_GET_WCACHE: val = &drive->wcache; goto read_val; + case HDIO_GET_ACOUSTIC: val = &drive->acoustic; goto read_val; + case HDIO_SET_ADDRESS: setfunc = set_lba_addressing; goto set_val; + case HDIO_SET_MULTCOUNT: setfunc = set_multcount; goto set_val; + case HDIO_SET_NOWERR: setfunc = set_nowerr; goto set_val; + case HDIO_SET_WCACHE: setfunc = write_cache; goto set_val; + case HDIO_SET_ACOUSTIC: setfunc = set_acoustic; goto set_val; + } + + return generic_ide_ioctl(drive, file, bdev, cmd, arg); + +read_val: + down(&ide_setting_sem); + spin_lock_irqsave(&ide_lock, flags); + err = *val; + spin_unlock_irqrestore(&ide_lock, flags); + up(&ide_setting_sem); + return err >= 0 ? put_user(err, (long __user *)arg) : err; + +set_val: + if (bdev != bdev->bd_contains) + err = -EINVAL; + else { + if (!capable(CAP_SYS_ADMIN)) + err = -EACCES; + else { + down(&ide_setting_sem); + err = setfunc(drive, arg); + up(&ide_setting_sem); + } + } + return err; } static int idedisk_media_changed(struct gendisk *disk) diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 84d398f71e0f..38fe45cf4019 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -1816,12 +1816,12 @@ static void idefloppy_add_settings(ide_drive_t *drive) idefloppy_floppy_t *floppy = drive->driver_data; /* - * drive setting name read/write ioctl ioctl data type min max mul_factor div_factor data pointer set function + * drive setting name read/write data type min max mul_factor div_factor data pointer set function */ - ide_add_setting(drive, "bios_cyl", SETTING_RW, -1, -1, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL); - ide_add_setting(drive, "bios_head", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); - ide_add_setting(drive, "bios_sect", SETTING_RW, -1, -1, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); - ide_add_setting(drive, "ticks", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &floppy->ticks, NULL); + ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL); + ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); + ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); + ide_add_setting(drive, "ticks", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &floppy->ticks, NULL); } /* diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index ad306ae24e24..fa0d22de37a7 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -4566,22 +4566,22 @@ static void idetape_add_settings (ide_drive_t *drive) idetape_tape_t *tape = drive->driver_data; /* - * drive setting name read/write ioctl ioctl data type min max mul_factor div_factor data pointer set function + * drive setting name read/write data type min max mul_factor div_factor data pointer set function */ - ide_add_setting(drive, "buffer", SETTING_READ, -1, -1, TYPE_SHORT, 0, 0xffff, 1, 2, &tape->capabilities.buffer_size, NULL); - ide_add_setting(drive, "pipeline_min", SETTING_RW, -1, -1, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->min_pipeline, NULL); - ide_add_setting(drive, "pipeline", SETTING_RW, -1, -1, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_stages, NULL); - 
ide_add_setting(drive, "pipeline_max", SETTING_RW, -1, -1, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_pipeline, NULL); - ide_add_setting(drive, "pipeline_used",SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages, NULL); - ide_add_setting(drive, "pipeline_pending",SETTING_READ,-1, -1, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_pending_stages, NULL); - ide_add_setting(drive, "speed", SETTING_READ, -1, -1, TYPE_SHORT, 0, 0xffff, 1, 1, &tape->capabilities.speed, NULL); - ide_add_setting(drive, "stage", SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1024, &tape->stage_size, NULL); - ide_add_setting(drive, "tdsc", SETTING_RW, -1, -1, TYPE_INT, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_frequency, NULL); - ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL); - ide_add_setting(drive, "pipeline_head_speed_c",SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed, NULL); - ide_add_setting(drive, "pipeline_head_speed_u",SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->uncontrolled_pipeline_head_speed, NULL); - ide_add_setting(drive, "avg_speed", SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->avg_speed, NULL); - ide_add_setting(drive, "debug_level",SETTING_RW, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->debug_level, NULL); + ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff, 1, 2, &tape->capabilities.buffer_size, NULL); + ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->min_pipeline, NULL); + ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_stages, NULL); + ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_pipeline, NULL); + ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages, NULL); + ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_pending_stages, NULL); + ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff, 1, 1, &tape->capabilities.speed, NULL); + ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1024, &tape->stage_size, NULL); + ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_frequency, NULL); + ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL); + ide_add_setting(drive, "pipeline_head_speed_c",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed, NULL); + ide_add_setting(drive, "pipeline_head_speed_u",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->uncontrolled_pipeline_head_speed,NULL); + ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->avg_speed, NULL); + ide_add_setting(drive, "debug_level", SETTING_RW, TYPE_INT, 0, 0xffff, 1, 1, &tape->debug_level, NULL); } /* diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 7a96f6f07494..8793a960210c 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -823,13 +823,13 @@ EXPORT_SYMBOL(ide_register_hw); DECLARE_MUTEX(ide_setting_sem); +EXPORT_SYMBOL_GPL(ide_setting_sem); + /** * __ide_add_setting - add an ide setting option * @drive: drive to use * @name: setting name * @rw: true if the function is read write - * 
@read_ioctl: function to call on read - * @write_ioctl: function to call on write * @data_type: type of data * @min: range minimum * @max: range maximum @@ -850,7 +850,7 @@ DECLARE_MUTEX(ide_setting_sem); * remove. */ -static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set, int auto_remove) +static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set, int auto_remove) { ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL; @@ -863,8 +863,6 @@ static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int r goto abort; strcpy(setting->name, name); setting->rw = rw; - setting->read_ioctl = read_ioctl; - setting->write_ioctl = write_ioctl; setting->data_type = data_type; setting->min = min; setting->max = max; @@ -885,9 +883,9 @@ abort: return -1; } -int ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set) +int ide_add_setting(ide_drive_t *drive, const char *name, int rw, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set) { - return __ide_add_setting(drive, name, rw, read_ioctl, write_ioctl, data_type, min, max, mul_factor, div_factor, data, set, 1); + return __ide_add_setting(drive, name, rw, data_type, min, max, mul_factor, div_factor, data, set, 1); } EXPORT_SYMBOL(ide_add_setting); @@ -918,29 +916,6 @@ static void __ide_remove_setting (ide_drive_t *drive, char *name) kfree(setting); } -/** - * ide_find_setting_by_ioctl - find a drive specific ioctl - * @drive: drive to scan - * @cmd: ioctl command to handle - * - * Scan's the device setting table for a matching entry and returns - * this or NULL if no entry is found. 
The caller must hold the - * setting semaphore - */ - -static ide_settings_t *ide_find_setting_by_ioctl (ide_drive_t *drive, int cmd) -{ - ide_settings_t *setting = drive->settings; - - while (setting) { - if (setting->read_ioctl == cmd || setting->write_ioctl == cmd) - break; - setting = setting->next; - } - - return setting; -} - /** * ide_find_setting_by_name - find a drive specific setting * @drive: drive to scan @@ -1014,7 +989,6 @@ int ide_read_setting (ide_drive_t *drive, ide_settings_t *setting) val = *((u16 *) setting->data); break; case TYPE_INT: - case TYPE_INTA: val = *((u32 *) setting->data); break; } @@ -1076,17 +1050,14 @@ EXPORT_SYMBOL(ide_spin_wait_hwgroup); int ide_write_setting (ide_drive_t *drive, ide_settings_t *setting, int val) { - int i; - u32 *p; - if (!capable(CAP_SYS_ADMIN)) return -EACCES; + if (setting->set) + return setting->set(drive, val); if (!(setting->rw & SETTING_WRITE)) return -EPERM; if (val < setting->min || val > setting->max) return -EINVAL; - if (setting->set) - return setting->set(drive, val); if (ide_spin_wait_hwgroup(drive)) return -EBUSY; switch (setting->data_type) { @@ -1099,11 +1070,6 @@ int ide_write_setting (ide_drive_t *drive, ide_settings_t *setting, int val) case TYPE_INT: *((u32 *) setting->data) = val; break; - case TYPE_INTA: - p = (u32 *) setting->data; - for (i = 0; i < 1 << PARTN_BITS; i++, p++) - *p = val; - break; } spin_unlock_irq(&ide_lock); return 0; @@ -1111,6 +1077,12 @@ int ide_write_setting (ide_drive_t *drive, ide_settings_t *setting, int val) static int set_io_32bit(ide_drive_t *drive, int arg) { + if (drive->no_io_32bit) + return -EPERM; + + if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1)) + return -EINVAL; + drive->io_32bit = arg; #ifdef CONFIG_BLK_DEV_DTC2278 if (HWIF(drive)->chipset == ide_dtc2278) @@ -1119,12 +1091,28 @@ static int set_io_32bit(ide_drive_t *drive, int arg) return 0; } +static int set_ksettings(ide_drive_t *drive, int arg) +{ + if (arg < 0 || arg > 1) + return -EINVAL; + + if (ide_spin_wait_hwgroup(drive)) + return -EBUSY; + drive->keep_settings = arg; + spin_unlock_irq(&ide_lock); + + return 0; +} + static int set_using_dma (ide_drive_t *drive, int arg) { #ifdef CONFIG_BLK_DEV_IDEDMA ide_hwif_t *hwif = drive->hwif; int err = -EPERM; + if (arg < 0 || arg > 1) + return -EINVAL; + if (!drive->id || !(drive->id->capability & 1)) goto out; @@ -1157,6 +1145,9 @@ static int set_using_dma (ide_drive_t *drive, int arg) out: return err; #else + if (arg < 0 || arg > 1) + return -EINVAL; + return -EPERM; #endif } @@ -1165,6 +1156,9 @@ static int set_pio_mode (ide_drive_t *drive, int arg) { struct request rq; + if (arg < 0 || arg > 255) + return -EINVAL; + if (!HWIF(drive)->tuneproc) return -ENOSYS; if (drive->special.b.set_tune) @@ -1176,9 +1170,30 @@ static int set_pio_mode (ide_drive_t *drive, int arg) return 0; } +static int set_unmaskirq(ide_drive_t *drive, int arg) +{ + if (drive->no_unmask) + return -EPERM; + + if (arg < 0 || arg > 1) + return -EINVAL; + + if (ide_spin_wait_hwgroup(drive)) + return -EBUSY; + drive->unmask = arg; + spin_unlock_irq(&ide_lock); + + return 0; +} + static int set_xfer_rate (ide_drive_t *drive, int arg) { - int err = ide_wait_cmd(drive, + int err; + + if (arg < 0 || arg > 70) + return -EINVAL; + + err = ide_wait_cmd(drive, WIN_SETFEATURES, (u8) arg, SETFEATURES_XFER, 0, NULL); @@ -1193,25 +1208,24 @@ static int set_xfer_rate (ide_drive_t *drive, int arg) * ide_add_generic_settings - generic ide settings * @drive: drive being configured * - * Add the generic parts of the 
system settings to the /proc files and - * ioctls for this IDE device. The caller must not be holding the - * ide_setting_sem. + * Add the generic parts of the system settings to the /proc files. + * The caller must not be holding the ide_setting_sem. */ void ide_add_generic_settings (ide_drive_t *drive) { /* - * drive setting name read/write access read ioctl write ioctl data type min max mul_factor div_factor data pointer set function + * drive setting name read/write access data type min max mul_factor div_factor data pointer set function */ - __ide_add_setting(drive, "io_32bit", drive->no_io_32bit ? SETTING_READ : SETTING_RW, HDIO_GET_32BIT, HDIO_SET_32BIT, TYPE_BYTE, 0, 1 + (SUPPORT_VLB_SYNC << 1), 1, 1, &drive->io_32bit, set_io_32bit, 0); - __ide_add_setting(drive, "keepsettings", SETTING_RW, HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS, TYPE_BYTE, 0, 1, 1, 1, &drive->keep_settings, NULL, 0); - __ide_add_setting(drive, "nice1", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->nice1, NULL, 0); - __ide_add_setting(drive, "pio_mode", SETTING_WRITE, -1, HDIO_SET_PIO_MODE, TYPE_BYTE, 0, 255, 1, 1, NULL, set_pio_mode, 0); - __ide_add_setting(drive, "unmaskirq", drive->no_unmask ? SETTING_READ : SETTING_RW, HDIO_GET_UNMASKINTR, HDIO_SET_UNMASKINTR, TYPE_BYTE, 0, 1, 1, 1, &drive->unmask, NULL, 0); - __ide_add_setting(drive, "using_dma", SETTING_RW, HDIO_GET_DMA, HDIO_SET_DMA, TYPE_BYTE, 0, 1, 1, 1, &drive->using_dma, set_using_dma, 0); - __ide_add_setting(drive, "init_speed", SETTING_RW, -1, -1, TYPE_BYTE, 0, 70, 1, 1, &drive->init_speed, NULL, 0); - __ide_add_setting(drive, "current_speed", SETTING_RW, -1, -1, TYPE_BYTE, 0, 70, 1, 1, &drive->current_speed, set_xfer_rate, 0); - __ide_add_setting(drive, "number", SETTING_RW, -1, -1, TYPE_BYTE, 0, 3, 1, 1, &drive->dn, NULL, 0); + __ide_add_setting(drive, "io_32bit", drive->no_io_32bit ? SETTING_READ : SETTING_RW, TYPE_BYTE, 0, 1 + (SUPPORT_VLB_SYNC << 1), 1, 1, &drive->io_32bit, set_io_32bit, 0); + __ide_add_setting(drive, "keepsettings", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->keep_settings, NULL, 0); + __ide_add_setting(drive, "nice1", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nice1, NULL, 0); + __ide_add_setting(drive, "pio_mode", SETTING_WRITE, TYPE_BYTE, 0, 255, 1, 1, NULL, set_pio_mode, 0); + __ide_add_setting(drive, "unmaskirq", drive->no_unmask ? SETTING_READ : SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->unmask, NULL, 0); + __ide_add_setting(drive, "using_dma", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->using_dma, set_using_dma, 0); + __ide_add_setting(drive, "init_speed", SETTING_RW, TYPE_BYTE, 0, 70, 1, 1, &drive->init_speed, NULL, 0); + __ide_add_setting(drive, "current_speed", SETTING_RW, TYPE_BYTE, 0, 70, 1, 1, &drive->current_speed, set_xfer_rate, 0); + __ide_add_setting(drive, "number", SETTING_RW, TYPE_BYTE, 0, 3, 1, 1, &drive->dn, NULL, 0); } /** @@ -1283,27 +1297,23 @@ static int generic_ide_resume(struct device *dev) int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device *bdev, unsigned int cmd, unsigned long arg) { - ide_settings_t *setting; + unsigned long flags; ide_driver_t *drv; - int err = 0; void __user *p = (void __user *)arg; + int err = 0, (*setfunc)(ide_drive_t *, int); + u8 *val; - down(&ide_setting_sem); - if ((setting = ide_find_setting_by_ioctl(drive, cmd)) != NULL) { - if (cmd == setting->read_ioctl) { - err = ide_read_setting(drive, setting); - up(&ide_setting_sem); - return err >= 0 ? 
put_user(err, (long __user *)arg) : err; - } else { - if (bdev != bdev->bd_contains) - err = -EINVAL; - else - err = ide_write_setting(drive, setting, arg); - up(&ide_setting_sem); - return err; - } + switch (cmd) { + case HDIO_GET_32BIT: val = &drive->io_32bit; goto read_val; + case HDIO_GET_KEEPSETTINGS: val = &drive->keep_settings; goto read_val; + case HDIO_GET_UNMASKINTR: val = &drive->unmask; goto read_val; + case HDIO_GET_DMA: val = &drive->using_dma; goto read_val; + case HDIO_SET_32BIT: setfunc = set_io_32bit; goto set_val; + case HDIO_SET_KEEPSETTINGS: setfunc = set_ksettings; goto set_val; + case HDIO_SET_PIO_MODE: setfunc = set_pio_mode; goto set_val; + case HDIO_SET_UNMASKINTR: setfunc = set_unmaskirq; goto set_val; + case HDIO_SET_DMA: setfunc = set_using_dma; goto set_val; } - up(&ide_setting_sem); switch (cmd) { case HDIO_OBSOLETE_IDENTITY: @@ -1432,6 +1442,28 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device default: return -EINVAL; } + +read_val: + down(&ide_setting_sem); + spin_lock_irqsave(&ide_lock, flags); + err = *val; + spin_unlock_irqrestore(&ide_lock, flags); + up(&ide_setting_sem); + return err >= 0 ? put_user(err, (long __user *)arg) : err; + +set_val: + if (bdev != bdev->bd_contains) + err = -EINVAL; + else { + if (!capable(CAP_SYS_ADMIN)) + err = -EACCES; + else { + down(&ide_setting_sem); + err = setfunc(drive, arg); + up(&ide_setting_sem); + } + } + return err; } EXPORT_SYMBOL(generic_ide_ioctl); diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index 531df3a29c47..4388b8ab69a1 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c @@ -726,13 +726,13 @@ static void idescsi_add_settings(ide_drive_t *drive) idescsi_scsi_t *scsi = drive_to_idescsi(drive); /* - * drive setting name read/write ioctl ioctl data type min max mul_factor div_factor data pointer set function + * drive setting name read/write data type min max mul_factor div_factor data pointer set function */ - ide_add_setting(drive, "bios_cyl", SETTING_RW, -1, -1, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL); - ide_add_setting(drive, "bios_head", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); - ide_add_setting(drive, "bios_sect", SETTING_RW, -1, -1, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); - ide_add_setting(drive, "transform", SETTING_RW, -1, -1, TYPE_INT, 0, 3, 1, 1, &scsi->transform, NULL); - ide_add_setting(drive, "log", SETTING_RW, -1, -1, TYPE_INT, 0, 1, 1, 1, &scsi->log, NULL); + ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL); + ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); + ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); + ide_add_setting(drive, "transform", SETTING_RW, TYPE_INT, 0, 3, 1, 1, &scsi->transform, NULL); + ide_add_setting(drive, "log", SETTING_RW, TYPE_INT, 0, 1, 1, 1, &scsi->log, NULL); } /* diff --git a/include/linux/ide.h b/include/linux/ide.h index 697c39dd66a1..591a0b55e31c 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -601,16 +601,11 @@ typedef struct ide_drive_s { unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */ - unsigned addressing; /* : 3; - * 0=28-bit - * 1=48-bit - * 2=48-bit doing 28-bit - * 3=64-bit - */ unsigned scsi : 1; /* 
0=default, 1=ide-scsi emulation */ unsigned sleeping : 1; /* 1=sleeping & sleep field valid */ unsigned post_reset : 1; + u8 addressing; /* 0=28-bit, 1=48-bit, 2=48-bit doing 28-bit */ u8 quirk_list; /* considered quirky, set for a specific host */ u8 init_speed; /* transfer rate set at boot */ u8 current_speed; /* current transfer rate set */ @@ -870,9 +865,8 @@ typedef struct hwgroup_s { */ #define TYPE_INT 0 -#define TYPE_INTA 1 -#define TYPE_BYTE 2 -#define TYPE_SHORT 3 +#define TYPE_BYTE 1 +#define TYPE_SHORT 2 #define SETTING_READ (1 << 0) #define SETTING_WRITE (1 << 1) @@ -882,8 +876,6 @@ typedef int (ide_procset_t)(ide_drive_t *, int); typedef struct ide_settings_s { char *name; int rw; - int read_ioctl; - int write_ioctl; int data_type; int min; int max; @@ -896,7 +888,7 @@ typedef struct ide_settings_s { } ide_settings_t; extern struct semaphore ide_setting_sem; -extern int ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set); +int ide_add_setting(ide_drive_t *, const char *, int, int, int, int, int, int, void *, ide_procset_t *set); extern ide_settings_t *ide_find_setting_by_name(ide_drive_t *drive, char *name); extern int ide_read_setting(ide_drive_t *t, ide_settings_t *setting); extern int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int val); -- cgit v1.2.3 From 7662d046df09e80680b77b68de896beab45e675e Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 10 May 2007 00:01:10 +0200 Subject: ide: move IDE settings handling to ide-proc.c * move __ide_add_setting() ide_add_setting() __ide_remove_setting() auto_remove_settings() ide_find_setting_by_name() ide_read_setting() ide_write_setting() set_xfer_rate() ide_add_generic_settings() ide_register_subdriver() ide_unregister_subdriver() from ide.c to ide-proc.c * set_{io_32bit,pio_mode,using_dma}() cannot be marked static now, fix it * rename ide_[un]register_subdriver() to ide_proc_[un]register_driver(), update device drivers to use new names * add CONFIG_IDE_PROC_FS=n versions of ide_proc_[un]register_driver() and ide_add_generic_settings() * make ide_find_setting_by_name(), ide_{read,write}_setting() and ide_{add,remove}_proc_entries() static * cover IDE settings code in device drivers with CONFIG_IDE_PROC_FS #ifdef, also while at it cover with CONFIG_IDE_PROC_FS #ifdef ide_driver_t.proc * remove bogus comment from ide.h * cover with CONFIG_IDE_PROC_FS #ifdef .proc and .settings in ide_drive_t Besides saner code this patch results in the IDE core smaller by ~2 kB (on x86-32) and IDE disk driver by ~1 kB (ditto) when CONFIG_IDE_PROC_FS=n. 
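Illustrative only, not part of the patch: the foo_* names below are placeholders and the driver struct is abbreviated, but the entry points and CONFIG_IDE_PROC_FS guards mirror the conversions in the diff. After this change a device driver's /proc glue looks roughly like this and still builds with CONFIG_IDE_PROC_FS=n because the core provides empty stubs:

	#ifdef CONFIG_IDE_PROC_FS
	static ide_proc_entry_t foo_proc[] = {
		{ "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
		{ NULL, 0, NULL, NULL }
	};
	#endif

	static ide_driver_t foo_driver = {
		/* ... other driver methods ... */
	#ifdef CONFIG_IDE_PROC_FS
		.proc	= foo_proc,
	#endif
	};

	static int foo_probe(ide_drive_t *drive)
	{
		/* a no-op when CONFIG_IDE_PROC_FS=n */
		ide_proc_register_driver(drive, &foo_driver);
		/* ... rest of probe ... */
		return 0;
	}
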
Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-cd.c | 14 ++- drivers/ide/ide-disk.c | 16 +-- drivers/ide/ide-floppy.c | 16 +-- drivers/ide/ide-proc.c | 310 +++++++++++++++++++++++++++++++++++++++++++++- drivers/ide/ide-tape.c | 17 +-- drivers/ide/ide.c | 313 +---------------------------------------------- drivers/scsi/ide-scsi.c | 16 ++- include/linux/ide.h | 39 +++--- 8 files changed, 378 insertions(+), 363 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 29408cfd9869..252ab8295edf 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -3059,10 +3059,14 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive) return nslots; } +#ifdef CONFIG_IDE_PROC_FS static void ide_cdrom_add_settings(ide_drive_t *drive) { ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL); } +#else +static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; } +#endif /* * standard prep_rq_fn that builds 10 byte cmds @@ -3291,7 +3295,7 @@ static void ide_cd_remove(ide_drive_t *drive) { struct cdrom_info *info = drive->driver_data; - ide_unregister_subdriver(drive, info->driver); + ide_proc_unregister_driver(drive, info->driver); del_gendisk(info->disk); @@ -3336,8 +3340,6 @@ static ide_proc_entry_t idecd_proc[] = { { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL }, { NULL, 0, NULL, NULL } }; -#else -# define idecd_proc NULL #endif static ide_driver_t ide_cdrom_driver = { @@ -3355,7 +3357,9 @@ static ide_driver_t ide_cdrom_driver = { .end_request = ide_end_request, .error = __ide_error, .abort = __ide_abort, +#ifdef CONFIG_IDE_PROC_FS .proc = idecd_proc, +#endif }; static int idecd_open(struct inode * inode, struct file * file) @@ -3517,7 +3521,7 @@ static int ide_cd_probe(ide_drive_t *drive) ide_init_disk(g, drive); - ide_register_subdriver(drive, &ide_cdrom_driver); + ide_proc_register_driver(drive, &ide_cdrom_driver); kref_init(&info->kref); @@ -3534,7 +3538,7 @@ static int ide_cd_probe(ide_drive_t *drive) g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE; if (ide_cdrom_setup(drive)) { struct cdrom_device_info *devinfo = &info->devinfo; - ide_unregister_subdriver(drive, &ide_cdrom_driver); + ide_proc_unregister_driver(drive, &ide_cdrom_driver); kfree(info->buffer); kfree(info->toc); kfree(info->changer_info); diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index fb162cb3ebf5..7fff773f2df7 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -560,7 +560,6 @@ static sector_t idedisk_capacity (ide_drive_t *drive) } #ifdef CONFIG_IDE_PROC_FS - static int smart_enable(ide_drive_t *drive) { ide_task_t args; @@ -678,11 +677,6 @@ static ide_proc_entry_t idedisk_proc[] = { { "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_smart_thresholds, NULL }, { NULL, 0, NULL, NULL } }; - -#else - -#define idedisk_proc NULL - #endif /* CONFIG_IDE_PROC_FS */ static void idedisk_prepare_flush(request_queue_t *q, struct request *rq) @@ -881,6 +875,7 @@ static int set_lba_addressing(ide_drive_t *drive, int arg) return 0; } +#ifdef CONFIG_IDE_PROC_FS static void idedisk_add_settings(ide_drive_t *drive) { struct hd_driveid *id = drive->id; @@ -898,6 +893,9 @@ static void idedisk_add_settings(ide_drive_t *drive) ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL); ide_add_setting(drive, "max_failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL); } +#else +static inline void 
idedisk_add_settings(ide_drive_t *drive) { ; } +#endif static void idedisk_setup (ide_drive_t *drive) { @@ -1016,7 +1014,7 @@ static void ide_disk_remove(ide_drive_t *drive) struct ide_disk_obj *idkp = drive->driver_data; struct gendisk *g = idkp->disk; - ide_unregister_subdriver(drive, idkp->driver); + ide_proc_unregister_driver(drive, idkp->driver); del_gendisk(g); @@ -1081,7 +1079,9 @@ static ide_driver_t idedisk_driver = { .end_request = ide_end_request, .error = __ide_error, .abort = __ide_abort, +#ifdef CONFIG_IDE_PROC_FS .proc = idedisk_proc, +#endif }; static int idedisk_open(struct inode *inode, struct file *filp) @@ -1257,7 +1257,7 @@ static int ide_disk_probe(ide_drive_t *drive) ide_init_disk(g, drive); - ide_register_subdriver(drive, &idedisk_driver); + ide_proc_register_driver(drive, &idedisk_driver); kref_init(&idkp->kref); diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 38fe45cf4019..f429be88c4f9 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -1811,6 +1811,7 @@ static int idefloppy_identify_device (ide_drive_t *drive,struct hd_driveid *id) return 0; } +#ifdef CONFIG_IDE_PROC_FS static void idefloppy_add_settings(ide_drive_t *drive) { idefloppy_floppy_t *floppy = drive->driver_data; @@ -1823,6 +1824,9 @@ static void idefloppy_add_settings(ide_drive_t *drive) ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); ide_add_setting(drive, "ticks", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &floppy->ticks, NULL); } +#else +static inline void idefloppy_add_settings(ide_drive_t *drive) { ; } +#endif /* * Driver initialization. @@ -1873,7 +1877,7 @@ static void ide_floppy_remove(ide_drive_t *drive) idefloppy_floppy_t *floppy = drive->driver_data; struct gendisk *g = floppy->disk; - ide_unregister_subdriver(drive, floppy->driver); + ide_proc_unregister_driver(drive, floppy->driver); del_gendisk(g); @@ -1893,7 +1897,6 @@ static void ide_floppy_release(struct kref *kref) } #ifdef CONFIG_IDE_PROC_FS - static int proc_idefloppy_read_capacity (char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -1909,11 +1912,6 @@ static ide_proc_entry_t idefloppy_proc[] = { { "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL }, { NULL, 0, NULL, NULL } }; - -#else - -#define idefloppy_proc NULL - #endif /* CONFIG_IDE_PROC_FS */ static int ide_floppy_probe(ide_drive_t *); @@ -1933,7 +1931,9 @@ static ide_driver_t idefloppy_driver = { .end_request = idefloppy_do_end_request, .error = __ide_error, .abort = __ide_abort, +#ifdef CONFIG_IDE_PROC_FS .proc = idefloppy_proc, +#endif }; static int idefloppy_open(struct inode *inode, struct file *filp) @@ -2159,7 +2159,7 @@ static int ide_floppy_probe(ide_drive_t *drive) ide_init_disk(g, drive); - ide_register_subdriver(drive, &idefloppy_driver); + ide_proc_register_driver(drive, &idefloppy_driver); kref_init(&floppy->kref); diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index a9e0b30fb1f2..949a6f609d84 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c @@ -3,6 +3,8 @@ * * Copyright (C) 1997-1998 Mark Lord * Copyright (C) 2003 Red Hat + * + * Some code was moved here from ide.c, see it for original copyrights. 
*/ /* @@ -121,6 +123,265 @@ static int proc_ide_read_identify PROC_IDE_READ_RETURN(page,start,off,count,eof,len); } +/** + * __ide_add_setting - add an ide setting option + * @drive: drive to use + * @name: setting name + * @rw: true if the function is read write + * @data_type: type of data + * @min: range minimum + * @max: range maximum + * @mul_factor: multiplication scale + * @div_factor: divison scale + * @data: private data field + * @set: setting + * @auto_remove: setting auto removal flag + * + * Removes the setting named from the device if it is present. + * The function takes the settings_lock to protect against + * parallel changes. This function must not be called from IRQ + * context. Returns 0 on success or -1 on failure. + * + * BUGS: This code is seriously over-engineered. There is also + * magic about how the driver specific features are setup. If + * a driver is attached we assume the driver settings are auto + * remove. + */ + +static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set, int auto_remove) +{ + ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL; + + down(&ide_setting_sem); + while ((*p) && strcmp((*p)->name, name) < 0) + p = &((*p)->next); + if ((setting = kzalloc(sizeof(*setting), GFP_KERNEL)) == NULL) + goto abort; + if ((setting->name = kmalloc(strlen(name) + 1, GFP_KERNEL)) == NULL) + goto abort; + strcpy(setting->name, name); + setting->rw = rw; + setting->data_type = data_type; + setting->min = min; + setting->max = max; + setting->mul_factor = mul_factor; + setting->div_factor = div_factor; + setting->data = data; + setting->set = set; + + setting->next = *p; + if (auto_remove) + setting->auto_remove = 1; + *p = setting; + up(&ide_setting_sem); + return 0; +abort: + up(&ide_setting_sem); + kfree(setting); + return -1; +} + +int ide_add_setting(ide_drive_t *drive, const char *name, int rw, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set) +{ + return __ide_add_setting(drive, name, rw, data_type, min, max, mul_factor, div_factor, data, set, 1); +} + +EXPORT_SYMBOL(ide_add_setting); + +/** + * __ide_remove_setting - remove an ide setting option + * @drive: drive to use + * @name: setting name + * + * Removes the setting named from the device if it is present. + * The caller must hold the setting semaphore. + */ + +static void __ide_remove_setting (ide_drive_t *drive, char *name) +{ + ide_settings_t **p, *setting; + + p = (ide_settings_t **) &drive->settings; + + while ((*p) && strcmp((*p)->name, name)) + p = &((*p)->next); + if ((setting = (*p)) == NULL) + return; + + (*p) = setting->next; + + kfree(setting->name); + kfree(setting); +} + +/** + * auto_remove_settings - remove driver specific settings + * @drive: drive + * + * Automatically remove all the driver specific settings for this + * drive. This function may not be called from IRQ context. The + * caller must hold ide_setting_sem. 
+ */ + +static void auto_remove_settings (ide_drive_t *drive) +{ + ide_settings_t *setting; +repeat: + setting = drive->settings; + while (setting) { + if (setting->auto_remove) { + __ide_remove_setting(drive, setting->name); + goto repeat; + } + setting = setting->next; + } +} + +/** + * ide_find_setting_by_name - find a drive specific setting + * @drive: drive to scan + * @name: setting name + * + * Scan's the device setting table for a matching entry and returns + * this or NULL if no entry is found. The caller must hold the + * setting semaphore + */ + +static ide_settings_t *ide_find_setting_by_name(ide_drive_t *drive, char *name) +{ + ide_settings_t *setting = drive->settings; + + while (setting) { + if (strcmp(setting->name, name) == 0) + break; + setting = setting->next; + } + return setting; +} + +/** + * ide_read_setting - read an IDE setting + * @drive: drive to read from + * @setting: drive setting + * + * Read a drive setting and return the value. The caller + * must hold the ide_setting_sem when making this call. + * + * BUGS: the data return and error are the same return value + * so an error -EINVAL and true return of the same value cannot + * be told apart + */ + +static int ide_read_setting(ide_drive_t *drive, ide_settings_t *setting) +{ + int val = -EINVAL; + unsigned long flags; + + if ((setting->rw & SETTING_READ)) { + spin_lock_irqsave(&ide_lock, flags); + switch(setting->data_type) { + case TYPE_BYTE: + val = *((u8 *) setting->data); + break; + case TYPE_SHORT: + val = *((u16 *) setting->data); + break; + case TYPE_INT: + val = *((u32 *) setting->data); + break; + } + spin_unlock_irqrestore(&ide_lock, flags); + } + return val; +} + +/** + * ide_write_setting - read an IDE setting + * @drive: drive to read from + * @setting: drive setting + * @val: value + * + * Write a drive setting if it is possible. The caller + * must hold the ide_setting_sem when making this call. + * + * BUGS: the data return and error are the same return value + * so an error -EINVAL and true return of the same value cannot + * be told apart + * + * FIXME: This should be changed to enqueue a special request + * to the driver to change settings, and then wait on a sema for completion. + * The current scheme of polling is kludgy, though safe enough. + */ + +static int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int val) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (setting->set) + return setting->set(drive, val); + if (!(setting->rw & SETTING_WRITE)) + return -EPERM; + if (val < setting->min || val > setting->max) + return -EINVAL; + if (ide_spin_wait_hwgroup(drive)) + return -EBUSY; + switch (setting->data_type) { + case TYPE_BYTE: + *((u8 *) setting->data) = val; + break; + case TYPE_SHORT: + *((u16 *) setting->data) = val; + break; + case TYPE_INT: + *((u32 *) setting->data) = val; + break; + } + spin_unlock_irq(&ide_lock); + return 0; +} + +static int set_xfer_rate (ide_drive_t *drive, int arg) +{ + int err; + + if (arg < 0 || arg > 70) + return -EINVAL; + + err = ide_wait_cmd(drive, + WIN_SETFEATURES, (u8) arg, + SETFEATURES_XFER, 0, NULL); + + if (!err && arg) { + ide_set_xfer_rate(drive, (u8) arg); + ide_driveid_update(drive); + } + return err; +} + +/** + * ide_add_generic_settings - generic ide settings + * @drive: drive being configured + * + * Add the generic parts of the system settings to the /proc files. + * The caller must not be holding the ide_setting_sem. 
+ */ + +void ide_add_generic_settings (ide_drive_t *drive) +{ +/* + * drive setting name read/write access data type min max mul_factor div_factor data pointer set function + */ + __ide_add_setting(drive, "io_32bit", drive->no_io_32bit ? SETTING_READ : SETTING_RW, TYPE_BYTE, 0, 1 + (SUPPORT_VLB_SYNC << 1), 1, 1, &drive->io_32bit, set_io_32bit, 0); + __ide_add_setting(drive, "keepsettings", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->keep_settings, NULL, 0); + __ide_add_setting(drive, "nice1", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nice1, NULL, 0); + __ide_add_setting(drive, "pio_mode", SETTING_WRITE, TYPE_BYTE, 0, 255, 1, 1, NULL, set_pio_mode, 0); + __ide_add_setting(drive, "unmaskirq", drive->no_unmask ? SETTING_READ : SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->unmask, NULL, 0); + __ide_add_setting(drive, "using_dma", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->using_dma, set_using_dma, 0); + __ide_add_setting(drive, "init_speed", SETTING_RW, TYPE_BYTE, 0, 70, 1, 1, &drive->init_speed, NULL, 0); + __ide_add_setting(drive, "current_speed", SETTING_RW, TYPE_BYTE, 0, 70, 1, 1, &drive->current_speed, set_xfer_rate, 0); + __ide_add_setting(drive, "number", SETTING_RW, TYPE_BYTE, 0, 3, 1, 1, &drive->dn, NULL, 0); +} + static void proc_ide_settings_warn(void) { static int warned = 0; @@ -399,7 +660,7 @@ static ide_proc_entry_t generic_drive_entries[] = { { NULL, 0, NULL, NULL } }; -void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data) +static void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data) { struct proc_dir_entry *ent; @@ -415,7 +676,7 @@ void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void } } -void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p) +static void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p) { if (!dir || !p) return; @@ -425,6 +686,51 @@ void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p) } } +void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) +{ + ide_add_proc_entries(drive->proc, driver->proc, drive); +} + +EXPORT_SYMBOL(ide_proc_register_driver); + +/** + * ide_proc_unregister_driver - remove driver specific data + * @drive: drive + * @driver: driver + * + * Clean up the driver specific /proc files and IDE settings + * for a given drive. + * + * Takes ide_setting_sem and ide_lock. + * Caller must hold none of the locks. + */ + +void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) +{ + unsigned long flags; + + ide_remove_proc_entries(drive->proc, driver->proc); + + down(&ide_setting_sem); + spin_lock_irqsave(&ide_lock, flags); + /* + * ide_setting_sem protects the settings list + * ide_lock protects the use of settings + * + * so we need to hold both, ide_settings_sem because we want to + * modify the settings list, and ide_lock because we cannot take + * a setting out that is being used. + * + * OTOH both ide_{read,write}_setting are only ever used under + * ide_setting_sem. 
+ */ + auto_remove_settings(drive); + spin_unlock_irqrestore(&ide_lock, flags); + up(&ide_setting_sem); +} + +EXPORT_SYMBOL(ide_proc_unregister_driver); + static void create_proc_ide_drives(ide_hwif_t *hwif) { int d; diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index fa0d22de37a7..e82bfa5e0ab8 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -4561,6 +4561,8 @@ static void idetape_get_blocksize_from_block_descriptor(ide_drive_t *drive) printk(KERN_INFO "ide-tape: Adjusted block size - %d\n", tape->tape_block_size); #endif /* IDETAPE_DEBUG_INFO */ } + +#ifdef CONFIG_IDE_PROC_FS static void idetape_add_settings (ide_drive_t *drive) { idetape_tape_t *tape = drive->driver_data; @@ -4583,6 +4585,9 @@ static void idetape_add_settings (ide_drive_t *drive) ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->avg_speed, NULL); ide_add_setting(drive, "debug_level", SETTING_RW, TYPE_INT, 0, 0xffff, 1, 1, &tape->debug_level, NULL); } +#else +static inline void idetape_add_settings(ide_drive_t *drive) { ; } +#endif /* * ide_setup is called to: @@ -4703,7 +4708,7 @@ static void ide_tape_remove(ide_drive_t *drive) { idetape_tape_t *tape = drive->driver_data; - ide_unregister_subdriver(drive, tape->driver); + ide_proc_unregister_driver(drive, tape->driver); ide_unregister_region(tape->disk); @@ -4731,7 +4736,6 @@ static void ide_tape_release(struct kref *kref) } #ifdef CONFIG_IDE_PROC_FS - static int proc_idetape_read_name (char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -4749,11 +4753,6 @@ static ide_proc_entry_t idetape_proc[] = { { "name", S_IFREG|S_IRUGO, proc_idetape_read_name, NULL }, { NULL, 0, NULL, NULL } }; - -#else - -#define idetape_proc NULL - #endif static int ide_tape_probe(ide_drive_t *); @@ -4773,7 +4772,9 @@ static ide_driver_t idetape_driver = { .end_request = idetape_end_request, .error = __ide_error, .abort = __ide_abort, +#ifdef CONFIG_IDE_PROC_FS .proc = idetape_proc, +#endif }; /* @@ -4864,7 +4865,7 @@ static int ide_tape_probe(ide_drive_t *drive) ide_init_disk(g, drive); - ide_register_subdriver(drive, &idetape_driver); + ide_proc_register_driver(drive, &idetape_driver); kref_init(&tape->kref); diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 8793a960210c..614c5fd43cd2 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -825,178 +825,6 @@ DECLARE_MUTEX(ide_setting_sem); EXPORT_SYMBOL_GPL(ide_setting_sem); -/** - * __ide_add_setting - add an ide setting option - * @drive: drive to use - * @name: setting name - * @rw: true if the function is read write - * @data_type: type of data - * @min: range minimum - * @max: range maximum - * @mul_factor: multiplication scale - * @div_factor: divison scale - * @data: private data field - * @set: setting - * @auto_remove: setting auto removal flag - * - * Removes the setting named from the device if it is present. - * The function takes the settings_lock to protect against - * parallel changes. This function must not be called from IRQ - * context. Returns 0 on success or -1 on failure. - * - * BUGS: This code is seriously over-engineered. There is also - * magic about how the driver specific features are setup. If - * a driver is attached we assume the driver settings are auto - * remove. 
- */ - -static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set, int auto_remove) -{ - ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL; - - down(&ide_setting_sem); - while ((*p) && strcmp((*p)->name, name) < 0) - p = &((*p)->next); - if ((setting = kzalloc(sizeof(*setting), GFP_KERNEL)) == NULL) - goto abort; - if ((setting->name = kmalloc(strlen(name) + 1, GFP_KERNEL)) == NULL) - goto abort; - strcpy(setting->name, name); - setting->rw = rw; - setting->data_type = data_type; - setting->min = min; - setting->max = max; - setting->mul_factor = mul_factor; - setting->div_factor = div_factor; - setting->data = data; - setting->set = set; - - setting->next = *p; - if (auto_remove) - setting->auto_remove = 1; - *p = setting; - up(&ide_setting_sem); - return 0; -abort: - up(&ide_setting_sem); - kfree(setting); - return -1; -} - -int ide_add_setting(ide_drive_t *drive, const char *name, int rw, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set) -{ - return __ide_add_setting(drive, name, rw, data_type, min, max, mul_factor, div_factor, data, set, 1); -} - -EXPORT_SYMBOL(ide_add_setting); - -/** - * __ide_remove_setting - remove an ide setting option - * @drive: drive to use - * @name: setting name - * - * Removes the setting named from the device if it is present. - * The caller must hold the setting semaphore. - */ - -static void __ide_remove_setting (ide_drive_t *drive, char *name) -{ - ide_settings_t **p, *setting; - - p = (ide_settings_t **) &drive->settings; - - while ((*p) && strcmp((*p)->name, name)) - p = &((*p)->next); - if ((setting = (*p)) == NULL) - return; - - (*p) = setting->next; - - kfree(setting->name); - kfree(setting); -} - -/** - * ide_find_setting_by_name - find a drive specific setting - * @drive: drive to scan - * @name: setting name - * - * Scan's the device setting table for a matching entry and returns - * this or NULL if no entry is found. The caller must hold the - * setting semaphore - */ - -ide_settings_t *ide_find_setting_by_name (ide_drive_t *drive, char *name) -{ - ide_settings_t *setting = drive->settings; - - while (setting) { - if (strcmp(setting->name, name) == 0) - break; - setting = setting->next; - } - return setting; -} - -/** - * auto_remove_settings - remove driver specific settings - * @drive: drive - * - * Automatically remove all the driver specific settings for this - * drive. This function may not be called from IRQ context. The - * caller must hold ide_setting_sem. - */ - -static void auto_remove_settings (ide_drive_t *drive) -{ - ide_settings_t *setting; -repeat: - setting = drive->settings; - while (setting) { - if (setting->auto_remove) { - __ide_remove_setting(drive, setting->name); - goto repeat; - } - setting = setting->next; - } -} - -/** - * ide_read_setting - read an IDE setting - * @drive: drive to read from - * @setting: drive setting - * - * Read a drive setting and return the value. The caller - * must hold the ide_setting_sem when making this call. 
- * - * BUGS: the data return and error are the same return value - * so an error -EINVAL and true return of the same value cannot - * be told apart - */ - -int ide_read_setting (ide_drive_t *drive, ide_settings_t *setting) -{ - int val = -EINVAL; - unsigned long flags; - - if ((setting->rw & SETTING_READ)) { - spin_lock_irqsave(&ide_lock, flags); - switch(setting->data_type) { - case TYPE_BYTE: - val = *((u8 *) setting->data); - break; - case TYPE_SHORT: - val = *((u16 *) setting->data); - break; - case TYPE_INT: - val = *((u32 *) setting->data); - break; - } - spin_unlock_irqrestore(&ide_lock, flags); - } - return val; -} - /** * ide_spin_wait_hwgroup - wait for group * @drive: drive in the group @@ -1030,52 +858,7 @@ int ide_spin_wait_hwgroup (ide_drive_t *drive) EXPORT_SYMBOL(ide_spin_wait_hwgroup); -/** - * ide_write_setting - read an IDE setting - * @drive: drive to read from - * @setting: drive setting - * @val: value - * - * Write a drive setting if it is possible. The caller - * must hold the ide_setting_sem when making this call. - * - * BUGS: the data return and error are the same return value - * so an error -EINVAL and true return of the same value cannot - * be told apart - * - * FIXME: This should be changed to enqueue a special request - * to the driver to change settings, and then wait on a sema for completion. - * The current scheme of polling is kludgy, though safe enough. - */ - -int ide_write_setting (ide_drive_t *drive, ide_settings_t *setting, int val) -{ - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - if (setting->set) - return setting->set(drive, val); - if (!(setting->rw & SETTING_WRITE)) - return -EPERM; - if (val < setting->min || val > setting->max) - return -EINVAL; - if (ide_spin_wait_hwgroup(drive)) - return -EBUSY; - switch (setting->data_type) { - case TYPE_BYTE: - *((u8 *) setting->data) = val; - break; - case TYPE_SHORT: - *((u16 *) setting->data) = val; - break; - case TYPE_INT: - *((u32 *) setting->data) = val; - break; - } - spin_unlock_irq(&ide_lock); - return 0; -} - -static int set_io_32bit(ide_drive_t *drive, int arg) +int set_io_32bit(ide_drive_t *drive, int arg) { if (drive->no_io_32bit) return -EPERM; @@ -1104,7 +887,7 @@ static int set_ksettings(ide_drive_t *drive, int arg) return 0; } -static int set_using_dma (ide_drive_t *drive, int arg) +int set_using_dma(ide_drive_t *drive, int arg) { #ifdef CONFIG_BLK_DEV_IDEDMA ide_hwif_t *hwif = drive->hwif; @@ -1152,7 +935,7 @@ out: #endif } -static int set_pio_mode (ide_drive_t *drive, int arg) +int set_pio_mode(ide_drive_t *drive, int arg) { struct request rq; @@ -1186,48 +969,6 @@ static int set_unmaskirq(ide_drive_t *drive, int arg) return 0; } -static int set_xfer_rate (ide_drive_t *drive, int arg) -{ - int err; - - if (arg < 0 || arg > 70) - return -EINVAL; - - err = ide_wait_cmd(drive, - WIN_SETFEATURES, (u8) arg, - SETFEATURES_XFER, 0, NULL); - - if (!err && arg) { - ide_set_xfer_rate(drive, (u8) arg); - ide_driveid_update(drive); - } - return err; -} - -/** - * ide_add_generic_settings - generic ide settings - * @drive: drive being configured - * - * Add the generic parts of the system settings to the /proc files. - * The caller must not be holding the ide_setting_sem. - */ - -void ide_add_generic_settings (ide_drive_t *drive) -{ -/* - * drive setting name read/write access data type min max mul_factor div_factor data pointer set function - */ - __ide_add_setting(drive, "io_32bit", drive->no_io_32bit ? 
SETTING_READ : SETTING_RW, TYPE_BYTE, 0, 1 + (SUPPORT_VLB_SYNC << 1), 1, 1, &drive->io_32bit, set_io_32bit, 0); - __ide_add_setting(drive, "keepsettings", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->keep_settings, NULL, 0); - __ide_add_setting(drive, "nice1", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nice1, NULL, 0); - __ide_add_setting(drive, "pio_mode", SETTING_WRITE, TYPE_BYTE, 0, 255, 1, 1, NULL, set_pio_mode, 0); - __ide_add_setting(drive, "unmaskirq", drive->no_unmask ? SETTING_READ : SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->unmask, NULL, 0); - __ide_add_setting(drive, "using_dma", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->using_dma, set_using_dma, 0); - __ide_add_setting(drive, "init_speed", SETTING_RW, TYPE_BYTE, 0, 70, 1, 1, &drive->init_speed, NULL, 0); - __ide_add_setting(drive, "current_speed", SETTING_RW, TYPE_BYTE, 0, 70, 1, 1, &drive->current_speed, set_xfer_rate, 0); - __ide_add_setting(drive, "number", SETTING_RW, TYPE_BYTE, 0, 3, 1, 1, &drive->dn, NULL, 0); -} - /** * system_bus_clock - clock guess * @@ -1922,54 +1663,6 @@ static void __init probe_for_hwifs (void) #endif } -void ide_register_subdriver(ide_drive_t *drive, ide_driver_t *driver) -{ -#ifdef CONFIG_IDE_PROC_FS - ide_add_proc_entries(drive->proc, driver->proc, drive); -#endif -} - -EXPORT_SYMBOL(ide_register_subdriver); - -/** - * ide_unregister_subdriver - disconnect drive from driver - * @drive: drive to unplug - * @driver: driver - * - * Disconnect a drive from the driver it was attached to and then - * clean up the various proc files and other objects attached to it. - * - * Takes ide_setting_sem and ide_lock. - * Caller must hold none of the locks. - */ - -void ide_unregister_subdriver(ide_drive_t *drive, ide_driver_t *driver) -{ - unsigned long flags; - -#ifdef CONFIG_IDE_PROC_FS - ide_remove_proc_entries(drive->proc, driver->proc); -#endif - down(&ide_setting_sem); - spin_lock_irqsave(&ide_lock, flags); - /* - * ide_setting_sem protects the settings list - * ide_lock protects the use of settings - * - * so we need to hold both, ide_settings_sem because we want to - * modify the settings list, and ide_lock because we cannot take - * a setting out that is being used. - * - * OTOH both ide_{read,write}_setting are only ever used under - * ide_setting_sem. - */ - auto_remove_settings(drive); - spin_unlock_irqrestore(&ide_lock, flags); - up(&ide_setting_sem); -} - -EXPORT_SYMBOL(ide_unregister_subdriver); - /* * Probe module */ diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index 4388b8ab69a1..8263f752809d 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c @@ -721,6 +721,7 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r return ide_stopped; } +#ifdef CONFIG_IDE_PROC_FS static void idescsi_add_settings(ide_drive_t *drive) { idescsi_scsi_t *scsi = drive_to_idescsi(drive); @@ -734,6 +735,9 @@ static void idescsi_add_settings(ide_drive_t *drive) ide_add_setting(drive, "transform", SETTING_RW, TYPE_INT, 0, 3, 1, 1, &scsi->transform, NULL); ide_add_setting(drive, "log", SETTING_RW, TYPE_INT, 0, 1, 1, 1, &scsi->log, NULL); } +#else +static inline void idescsi_add_settings(ide_drive_t *drive) { ; } +#endif /* * Driver initialization. 
@@ -756,7 +760,7 @@ static void ide_scsi_remove(ide_drive_t *drive) struct ide_scsi_obj *scsi = scsihost_to_idescsi(scsihost); struct gendisk *g = scsi->disk; - ide_unregister_subdriver(drive, scsi->driver); + ide_proc_unregister_driver(drive, scsi->driver); ide_unregister_region(g); @@ -775,8 +779,6 @@ static ide_proc_entry_t idescsi_proc[] = { { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL }, { NULL, 0, NULL, NULL } }; -#else -# define idescsi_proc NULL #endif static ide_driver_t idescsi_driver = { @@ -790,11 +792,13 @@ static ide_driver_t idescsi_driver = { .version = IDESCSI_VERSION, .media = ide_scsi, .supports_dsc_overlap = 0, - .proc = idescsi_proc, .do_request = idescsi_do_request, .end_request = idescsi_end_request, .error = idescsi_atapi_error, .abort = idescsi_atapi_abort, +#ifdef CONFIG_IDE_PROC_FS + .proc = idescsi_proc, +#endif }; static int idescsi_ide_open(struct inode *inode, struct file *filp) @@ -1153,7 +1157,7 @@ static int ide_scsi_probe(ide_drive_t *drive) idescsi->host = host; idescsi->disk = g; g->private_data = &idescsi->driver; - ide_register_subdriver(drive, &idescsi_driver); + ide_proc_register_driver(drive, &idescsi_driver); err = 0; idescsi_setup(drive, idescsi); g->fops = &idescsi_ops; @@ -1165,7 +1169,7 @@ static int ide_scsi_probe(ide_drive_t *drive) } /* fall through on error */ ide_unregister_region(g); - ide_unregister_subdriver(drive, &idescsi_driver); + ide_proc_unregister_driver(drive, &idescsi_driver); put_disk(g); out_host_put: diff --git a/include/linux/ide.h b/include/linux/ide.h index 591a0b55e31c..477b8c6be727 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -559,9 +559,10 @@ typedef struct ide_drive_s { struct ide_drive_s *next; /* circular list of hwgroup drives */ void *driver_data; /* extra driver data */ struct hd_driveid *id; /* drive model identification info */ +#ifdef CONFIG_IDE_PROC_FS struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ struct ide_settings_s *settings;/* /proc/ide/ drive settings */ - +#endif struct hwif_s *hwif; /* actually (ide_hwif_t *) */ unsigned long sleep; /* sleep until this time */ @@ -858,8 +859,15 @@ typedef struct hwgroup_s { unsigned char cmd_buf[4]; } ide_hwgroup_t; -/* structure attached to the request for IDE_TASK_CMDS */ +typedef struct ide_driver_s ide_driver_t; + +extern struct semaphore ide_setting_sem; +int set_io_32bit(ide_drive_t *, int); +int set_pio_mode(ide_drive_t *, int); +int set_using_dma(ide_drive_t *, int); + +#ifdef CONFIG_IDE_PROC_FS /* * configurable drive settings */ @@ -887,12 +895,7 @@ typedef struct ide_settings_s { struct ide_settings_s *next; } ide_settings_t; -extern struct semaphore ide_setting_sem; int ide_add_setting(ide_drive_t *, const char *, int, int, int, int, int, int, void *, ide_procset_t *set); -extern ide_settings_t *ide_find_setting_by_name(ide_drive_t *drive, char *name); -extern int ide_read_setting(ide_drive_t *t, ide_settings_t *setting); -extern int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int val); -extern void ide_add_generic_settings(ide_drive_t *drive); /* * /proc/ide interface @@ -904,15 +907,17 @@ typedef struct { write_proc_t *write_proc; } ide_proc_entry_t; -#ifdef CONFIG_IDE_PROC_FS extern struct proc_dir_entry *proc_ide_root; void proc_ide_create(void); void proc_ide_destroy(void); void create_proc_ide_interfaces(void); void destroy_proc_ide_interface(ide_hwif_t *); -void ide_add_proc_entries(struct proc_dir_entry *, ide_proc_entry_t *, void *); -void ide_remove_proc_entries(struct 
proc_dir_entry *, ide_proc_entry_t *); +void ide_proc_register_driver(ide_drive_t *, ide_driver_t *); +void ide_proc_unregister_driver(ide_drive_t *, ide_driver_t *); + +void ide_add_generic_settings(ide_drive_t *); + read_proc_t proc_ide_read_capacity; read_proc_t proc_ide_read_geometry; @@ -940,6 +945,9 @@ static inline void proc_ide_create(void) { ; } static inline void proc_ide_destroy(void) { ; } static inline void create_proc_ide_interfaces(void) { ; } static inline void destroy_proc_ide_interface(ide_hwif_t *hwif) { ; } +static inline void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } +static inline void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } +static inline void ide_add_generic_settings(ide_drive_t *drive) { ; } #define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) return 0; #endif @@ -982,7 +990,7 @@ enum { * The gendriver.owner field should be set to the module owner of this driver. * The gendriver.name field should be set to the name of this driver */ -typedef struct ide_driver_s { +struct ide_driver_s { const char *version; u8 media; unsigned supports_dsc_overlap : 1; @@ -990,12 +998,14 @@ typedef struct ide_driver_s { int (*end_request)(ide_drive_t *, int, int); ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8); ide_startstop_t (*abort)(ide_drive_t *, struct request *rq); - ide_proc_entry_t *proc; struct device_driver gen_driver; int (*probe)(ide_drive_t *); void (*remove)(ide_drive_t *); void (*shutdown)(ide_drive_t *); -} ide_driver_t; +#ifdef CONFIG_IDE_PROC_FS + ide_proc_entry_t *proc; +#endif +}; #define to_ide_driver(drv) container_of(drv, ide_driver_t, gen_driver) @@ -1205,9 +1215,6 @@ extern void default_hwif_iops(ide_hwif_t *); extern void default_hwif_mmiops(ide_hwif_t *); extern void default_hwif_transport(ide_hwif_t *); -void ide_register_subdriver(ide_drive_t *, ide_driver_t *); -void ide_unregister_subdriver(ide_drive_t *, ide_driver_t *); - #define ON_BOARD 1 #define NEVER_BOARD 0 -- cgit v1.2.3 From 7f8f48af0861c38c28d4abd550102643e0ea9e6a Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 10 May 2007 00:01:10 +0200 Subject: ide: cable detection fixes (take 2) Tejun's recent eighty_ninty_three() fix has inspired me to do more thorough review of the cable detection code... 
* print user-friendly warning about limiting the maximum transfer speed to UDMA33 (and the reason behind it) when 80-wire cable is not detected, also while at it cleanup eighty_ninty_three() a bit * use eighty_ninty_three() in ide_ata66_check(), this actually fixes 3 bugs: - bit 14 (word 93 validity check) == 1 && bit 13 (80-wire cable test) == 1 were used as 80-wire cable present test for CONFIG_IDEDMA_IVB=n case (please see FIXME comment in eighty_ninty_three() for more details) - CONFIG_IDEDMA_IVB=y/n cases were interchanged - check for SATA devices was missing * remove private cable warnings from pdc_202xx{old,new} drivers now that core code provides this functionality (plus, in pdc202xx_new case the test could give false warnings for ATAPI devices because pdc202xx_new driver doesn't even support ATAPI DMA) Cc: Tejun Heo Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-iops.c | 53 +++++++++++++++++++++++------------------- drivers/ide/ide-lib.c | 11 +++++++-- drivers/ide/pci/pdc202xx_new.c | 16 ------------- drivers/ide/pci/pdc202xx_old.c | 20 +--------------- include/linux/ide.h | 1 + 5 files changed, 40 insertions(+), 61 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index ed6128f6cd98..f0be5f665a0e 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -571,25 +571,40 @@ EXPORT_SYMBOL(ide_wait_stat); */ u8 eighty_ninty_three (ide_drive_t *drive) { - if(HWIF(drive)->udma_four == 0) - return 0; + ide_hwif_t *hwif = drive->hwif; + struct hd_driveid *id = drive->id; + + if (hwif->udma_four == 0) + goto no_80w; /* Check for SATA but only if we are ATA5 or higher */ - if (drive->id->hw_config == 0 && (drive->id->major_rev_num & 0x7FE0)) + if (id->hw_config == 0 && (id->major_rev_num & 0x7FE0)) return 1; - if (!(drive->id->hw_config & 0x6000)) - return 0; -#ifndef CONFIG_IDEDMA_IVB - if(!(drive->id->hw_config & 0x4000)) - return 0; -#endif /* CONFIG_IDEDMA_IVB */ + /* * FIXME: * - change master/slave IDENTIFY order * - force bit13 (80c cable present) check * (unless the slave device is pre-ATA3) */ - return 1; +#ifndef CONFIG_IDEDMA_IVB + if (id->hw_config & 0x4000) +#else + if (id->hw_config & 0x6000) +#endif + return 1; + +no_80w: + if (drive->udma33_warned == 1) + return 0; + + printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, " + "limiting max speed to UDMA33\n", + drive->name, hwif->udma_four ? 
"drive" : "host"); + + drive->udma33_warned = 1; + + return 0; } int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) @@ -597,23 +612,13 @@ int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) && (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) && (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) { -#ifndef CONFIG_IDEDMA_IVB - if ((drive->id->hw_config & 0x6000) == 0) { -#else /* !CONFIG_IDEDMA_IVB */ - if (((drive->id->hw_config & 0x2000) == 0) || - ((drive->id->hw_config & 0x4000) == 0)) { -#endif /* CONFIG_IDEDMA_IVB */ - printk("%s: Speed warnings UDMA 3/4/5 is not " - "functional.\n", drive->name); - return 1; - } - if (!HWIF(drive)->udma_four) { - printk("%s: Speed warnings UDMA 3/4/5 is not " - "functional.\n", - HWIF(drive)->name); + if (eighty_ninty_three(drive) == 0) { + printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot " + "be set\n", drive->name); return 1; } } + return 0; } diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index 4557fc5a3ea3..3be3c69383f2 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c @@ -88,8 +88,15 @@ u8 ide_rate_filter(ide_drive_t *drive, u8 speed) if (hwif->udma_filter) mask = hwif->udma_filter(drive); - if ((mask & 0x78) && (eighty_ninty_three(drive) == 0)) - mask &= 0x07; + /* + * TODO: speed > XFER_UDMA_2 extra check is needed to avoid false + * cable warning from eighty_ninty_three(), moving ide_rate_filter() + * calls from ->speedproc to core code will make this hack go away + */ + if (speed > XFER_UDMA_2) { + if ((mask & 0x78) && (eighty_ninty_three(drive) == 0)) + mask &= 0x07; + } if (mask) mode = fls(mask) - 1 + XFER_UDMA_0; diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c index 772ca4007de4..65b1e124edf7 100644 --- a/drivers/ide/pci/pdc202xx_new.c +++ b/drivers/ide/pci/pdc202xx_new.c @@ -37,8 +37,6 @@ #include #endif -#define PDC202_DEBUG_CABLE 0 - #undef DEBUG #ifdef DEBUG @@ -234,17 +232,8 @@ static int config_chipset_for_dma(ide_drive_t *drive) { struct hd_driveid *id = drive->id; ide_hwif_t *hwif = HWIF(drive); - u8 ultra_66 = (id->dma_ultra & 0x0078) ? 1 : 0; - u8 cable = pdcnew_cable_detect(hwif); u8 speed; - if (ultra_66 && cable) { - printk(KERN_WARNING "Warning: %s channel " - "requires an 80-pin cable for operation.\n", - hwif->channel ? "Secondary" : "Primary"); - printk(KERN_WARNING "%s reduced to Ultra33 mode.\n", drive->name); - } - if (id->capability & 4) { /* * Set IORDY_EN & PREFETCH_EN (this seems to have @@ -547,11 +536,6 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif) if (!noautodma) hwif->autodma = 1; hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma; - -#if PDC202_DEBUG_CABLE - printk(KERN_DEBUG "%s: %s-pin cable\n", - hwif->name, hwif->udma_four ? 
"80" : "40"); -#endif /* PDC202_DEBUG_CABLE */ } static int __devinit init_setup_pdcnew(struct pci_dev *dev, ide_pci_device_t *d) diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c index 207a6191fac7..7146fe3f6ba7 100644 --- a/drivers/ide/pci/pdc202xx_old.c +++ b/drivers/ide/pci/pdc202xx_old.c @@ -46,7 +46,6 @@ #include #include -#define PDC202_DEBUG_CABLE 0 #define PDC202XX_DEBUG_DRIVE_INFO 0 static const char *pdc_quirk_drives[] = { @@ -238,20 +237,7 @@ static int config_chipset_for_dma (ide_drive_t *drive) u32 drive_conf = 0; u8 drive_pci = 0x60 + (drive->dn << 2); u8 test1 = 0, test2 = 0, speed = -1; - u8 AP = 0, cable = 0; - - u8 ultra_66 = ((id->dma_ultra & 0x0010) || - (id->dma_ultra & 0x0008)) ? 1 : 0; - - if (dev->device != PCI_DEVICE_ID_PROMISE_20246) - cable = pdc202xx_old_cable_detect(hwif); - else - ultra_66 = 0; - - if (ultra_66 && cable) { - printk(KERN_WARNING "Warning: %s channel requires an 80-pin cable for operation.\n", hwif->channel ? "Secondary":"Primary"); - printk(KERN_WARNING "%s reduced to Ultra33 mode.\n", drive->name); - } + u8 AP = 0; if (dev->device != PCI_DEVICE_ID_PROMISE_20246) pdc_old_disable_66MHz_clock(drive->hwif); @@ -477,10 +463,6 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif) if (!noautodma) hwif->autodma = 1; hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma; -#if PDC202_DEBUG_CABLE - printk(KERN_DEBUG "%s: %s-pin cable\n", - hwif->name, hwif->udma_four ? "80" : "40"); -#endif /* PDC202_DEBUG_CABLE */ } static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase) diff --git a/include/linux/ide.h b/include/linux/ide.h index 477b8c6be727..ca924b295c2e 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -605,6 +605,7 @@ typedef struct ide_drive_s { unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */ unsigned sleeping : 1; /* 1=sleeping & sleep field valid */ unsigned post_reset : 1; + unsigned udma33_warned : 1; u8 addressing; /* 0=28-bit, 1=48-bit, 2=48-bit doing 28-bit */ u8 quirk_list; /* considered quirky, set for a specific host */ -- cgit v1.2.3 From 869c56ee9de1b72cd3f8ab9cdfbd3601e55c61f2 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 10 May 2007 00:01:10 +0200 Subject: ide: add "initializing" argument to ide_register_hw() Add "initializing" argument to ide_register_hw() and use it instead of ide.c wide variable of the same name. Update all users of ide_register_hw() accordingly. 
Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/arm/bast-ide.c | 2 +- drivers/ide/arm/ide_arm.c | 2 +- drivers/ide/cris/ide-cris.c | 2 +- drivers/ide/h8300/ide-h8300.c | 2 +- drivers/ide/ide-pnp.c | 2 +- drivers/ide/ide.c | 16 +++++++--------- drivers/ide/legacy/buddha.c | 2 +- drivers/ide/legacy/falconide.c | 2 +- drivers/ide/legacy/gayle.c | 2 +- drivers/ide/legacy/ide-cs.c | 2 +- drivers/ide/legacy/macide.c | 6 +++--- drivers/ide/legacy/q40ide.c | 2 +- drivers/ide/pci/delkin_cb.c | 2 +- drivers/macintosh/mediabay.c | 2 +- include/linux/ide.h | 5 +++-- 15 files changed, 25 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c index 9d474e5fd8dc..f7449d04114a 100644 --- a/drivers/ide/arm/bast-ide.c +++ b/drivers/ide/arm/bast-ide.c @@ -45,7 +45,7 @@ bastide_register(unsigned int base, unsigned int aux, int irq, hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20); hw.irq = irq; - ide_register_hw(&hw, hwif); + ide_register_hw(&hw, 0, hwif); return 0; } diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c index 23488c4d1fcd..a3d6744e870a 100644 --- a/drivers/ide/arm/ide_arm.c +++ b/drivers/ide/arm/ide_arm.c @@ -38,6 +38,6 @@ void __init ide_arm_init(void) memset(&hw, 0, sizeof(hw)); ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206); hw.irq = IDE_ARM_IRQ; - ide_register_hw(&hw, NULL); + ide_register_hw(&hw, 1, NULL); } } diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c index 9691d089fce8..c04cb25a01ff 100644 --- a/drivers/ide/cris/ide-cris.c +++ b/drivers/ide/cris/ide-cris.c @@ -796,7 +796,7 @@ init_e100_ide (void) ide_offsets, 0, 0, cris_ide_ack_intr, ide_default_irq(0)); - ide_register_hw(&hw, &hwif); + ide_register_hw(&hw, 1, &hwif); hwif->mmio = 1; hwif->chipset = ide_etrax100; hwif->tuneproc = &tune_cris_ide; diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c index 88750a300337..6d26ad7360d5 100644 --- a/drivers/ide/h8300/ide-h8300.c +++ b/drivers/ide/h8300/ide-h8300.c @@ -101,7 +101,7 @@ void __init h8300_ide_init(void) hw_setup(&hw); /* register if */ - idx = ide_register_hw(&hw, &hwif); + idx = ide_register_hw(&hw, 1, &hwif); if (idx == -1) { printk(KERN_ERR "ide-h8300: IDE I/F register failed\n"); return; diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index 98410ca044cf..2b8009c50e91 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c @@ -42,7 +42,7 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id hw.irq = pnp_irq(dev, 0); hw.dma = NO_DMA; - index = ide_register_hw(&hw, &hwif); + index = ide_register_hw(&hw, 1, &hwif); if (index != -1) { printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index); diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 614c5fd43cd2..0fc532850bbe 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -168,7 +168,6 @@ static const u8 ide_hwif_to_major[] = { IDE0_MAJOR, IDE1_MAJOR, static int idebus_parameter; /* holds the "idebus=" parameter */ static int system_bus_speed; /* holds what we think is VESA/PCI bus speed */ -static int initializing; /* set while initializing built-in drivers */ DECLARE_MUTEX(ide_cfg_sem); __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock); @@ -302,9 +301,7 @@ static void __init init_ide_data (void) #endif } #ifdef CONFIG_IDE_ARM - initializing = 1; ide_arm_init(); - initializing = 0; #endif } @@ -749,6 +746,7 @@ void ide_setup_ports ( hw_regs_t *hw, /** * ide_register_hw_with_fixup - register IDE interface * @hw: 
hardware registers + * @initializing: set while initializing built-in drivers * @hwifp: pointer to returned hwif * @fixup: fixup function * @@ -758,7 +756,9 @@ void ide_setup_ports ( hw_regs_t *hw, * Returns -1 on error. */ -int ide_register_hw_with_fixup(hw_regs_t *hw, ide_hwif_t **hwifp, void(*fixup)(ide_hwif_t *hwif)) +int ide_register_hw_with_fixup(hw_regs_t *hw, int initializing, + ide_hwif_t **hwifp, + void(*fixup)(ide_hwif_t *hwif)) { int index, retry = 1; ide_hwif_t *hwif; @@ -810,9 +810,9 @@ found: EXPORT_SYMBOL(ide_register_hw_with_fixup); -int ide_register_hw(hw_regs_t *hw, ide_hwif_t **hwifp) +int ide_register_hw(hw_regs_t *hw, int initializing, ide_hwif_t **hwifp) { - return ide_register_hw_with_fixup(hw, hwifp, NULL); + return ide_register_hw_with_fixup(hw, initializing, hwifp, NULL); } EXPORT_SYMBOL(ide_register_hw); @@ -1108,7 +1108,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device ide_init_hwif_ports(&hw, (unsigned long) args[0], (unsigned long) args[1], NULL); hw.irq = args[2]; - if (ide_register_hw(&hw, NULL) == -1) + if (ide_register_hw(&hw, 0, NULL) == -1) return -EIO; return 0; } @@ -1819,10 +1819,8 @@ static int __init ide_init(void) (void)qd65xx_init(); #endif - initializing = 1; /* Probe for special PCI and other "known" interface chipsets. */ probe_for_hwifs(); - initializing = 0; proc_ide_create(); diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c index 1ed224a01f79..101aee1711c4 100644 --- a/drivers/ide/legacy/buddha.c +++ b/drivers/ide/legacy/buddha.c @@ -213,7 +213,7 @@ fail_base2: IRQ_AMIGA_PORTS); } - index = ide_register_hw(&hw, &hwif); + index = ide_register_hw(&hw, 1, &hwif); if (index != -1) { hwif->mmio = 1; printk("ide%d: ", index); diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c index a9f2cd5bb81e..e1e9d9d6893f 100644 --- a/drivers/ide/legacy/falconide.c +++ b/drivers/ide/legacy/falconide.c @@ -70,7 +70,7 @@ void __init falconide_init(void) 0, 0, NULL, // falconide_iops, IRQ_MFP_IDE); - index = ide_register_hw(&hw, NULL); + index = ide_register_hw(&hw, 1, NULL); if (index != -1) printk("ide%d: Falcon IDE interface\n", index); diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c index dcfadbbf55d8..0830a021bbb6 100644 --- a/drivers/ide/legacy/gayle.c +++ b/drivers/ide/legacy/gayle.c @@ -165,7 +165,7 @@ found: // &gayle_iops, IRQ_AMIGA_PORTS); - index = ide_register_hw(&hw, &hwif); + index = ide_register_hw(&hw, 1, &hwif); if (index != -1) { hwif->mmio = 1; switch (i) { diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c index c6522a64d7ec..2f3977f195b7 100644 --- a/drivers/ide/legacy/ide-cs.c +++ b/drivers/ide/legacy/ide-cs.c @@ -153,7 +153,7 @@ static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq hw.irq = irq; hw.chipset = ide_pci; hw.dev = &handle->dev; - return ide_register_hw_with_fixup(&hw, NULL, ide_undecoded_slave); + return ide_register_hw_with_fixup(&hw, 0, NULL, ide_undecoded_slave); } /*====================================================================== diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c index 4c0079ad52ac..c211fc78345d 100644 --- a/drivers/ide/legacy/macide.c +++ b/drivers/ide/legacy/macide.c @@ -102,21 +102,21 @@ void macide_init(void) 0, 0, macide_ack_intr, // quadra_ide_iops, IRQ_NUBUS_F); - index = ide_register_hw(&hw, &hwif); + index = ide_register_hw(&hw, 1, &hwif); break; case MAC_IDE_PB: ide_setup_ports(&hw, IDE_BASE, macide_offsets, 0, 0, 
macide_ack_intr, // macide_pb_iops, IRQ_NUBUS_C); - index = ide_register_hw(&hw, &hwif); + index = ide_register_hw(&hw, 1, &hwif); break; case MAC_IDE_BABOON: ide_setup_ports(&hw, BABOON_BASE, macide_offsets, 0, 0, NULL, // macide_baboon_iops, IRQ_BABOON_1); - index = ide_register_hw(&hw, &hwif); + index = ide_register_hw(&hw, 1, &hwif); if (index == -1) break; if (macintosh_config->ident == MAC_MODEL_PB190) { diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c index 74f08124eabb..e628a983ce33 100644 --- a/drivers/ide/legacy/q40ide.c +++ b/drivers/ide/legacy/q40ide.c @@ -142,7 +142,7 @@ void q40ide_init(void) 0, NULL, // m68kide_iops, q40ide_default_irq(pcide_bases[i])); - index = ide_register_hw(&hw, &hwif); + index = ide_register_hw(&hw, 1, &hwif); // **FIXME** if (index != -1) hwif->mmio = 1; diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c index dd7ec37fdeab..46f4a888c037 100644 --- a/drivers/ide/pci/delkin_cb.c +++ b/drivers/ide/pci/delkin_cb.c @@ -80,7 +80,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) hw.irq = dev->irq; hw.chipset = ide_pci; /* this enables IRQ sharing */ - rc = ide_register_hw_with_fixup(&hw, &hwif, ide_undecoded_slave); + rc = ide_register_hw_with_fixup(&hw, 0, &hwif, ide_undecoded_slave); if (rc < 0) { printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc); pci_disable_device(dev); diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c index 0acf2f7fd9d7..c803d2bba65d 100644 --- a/drivers/macintosh/mediabay.c +++ b/drivers/macintosh/mediabay.c @@ -563,7 +563,7 @@ static void media_bay_step(int i) ide_init_hwif_ports(&hw, (unsigned long) bay->cd_base, (unsigned long) 0, NULL); hw.irq = bay->cd_irq; hw.chipset = ide_pmac; - bay->cd_index = ide_register_hw(&hw, NULL); + bay->cd_index = ide_register_hw(&hw, 0, NULL); pmu_resume(); } if (bay->cd_index == -1) { diff --git a/include/linux/ide.h b/include/linux/ide.h index ca924b295c2e..bdb97655ef61 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -223,8 +223,9 @@ typedef struct hw_regs_s { /* * Register new hardware with ide */ -int ide_register_hw(hw_regs_t *hw, struct hwif_s **hwifp); -int ide_register_hw_with_fixup(hw_regs_t *, struct hwif_s **, void (*)(struct hwif_s *)); +int ide_register_hw(hw_regs_t *, int, struct hwif_s **); +int ide_register_hw_with_fixup(hw_regs_t *, int, struct hwif_s **, + void (*)(struct hwif_s *)); /* * Set up hw_regs_t structure before calling ide_register_hw (optional) -- cgit v1.2.3 From 5cbf79cdb37be2aa2a1b4fa94144526b14557060 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 10 May 2007 00:01:11 +0200 Subject: ide: add ide_proc_register_port() * create_proc_ide_interfaces() tries to add /proc entries for every probed and initialized IDE port, replace it by ide_proc_register_port() which does it only for the given port (also rename destroy_proc_ide_interface() to ide_proc_unregister_port() for consistency) * convert {create,destroy}_proc_ide_interface[s]() users to use new functions * pmac driver depended on proc_ide_create() to add /proc port entries, fix it * au1xxx-ide, swarm and cs5520 drivers depended indirectly on ide-generic driver (CONFIG_IDE_GENERIC=y) to add port /proc entries, fix them * there is now no need to add /proc entries for IDE ports in proc_ide_create() so don't do it * proc_ide_create() needs now to be called before drivers are probed - fix it, while at it make proc_ide_create() create /proc "ide" directory Signed-off-by: Bartlomiej 
Zolnierkiewicz --- drivers/ide/arm/icside.c | 7 +++++-- drivers/ide/arm/rapide.c | 2 +- drivers/ide/ide-generic.c | 2 -- drivers/ide/ide-probe.c | 3 +++ drivers/ide/ide-proc.c | 34 +++++++++++++++++----------------- drivers/ide/ide.c | 14 +++----------- drivers/ide/legacy/ali14xx.c | 3 ++- drivers/ide/legacy/dtc2278.c | 3 ++- drivers/ide/legacy/ht6560b.c | 3 ++- drivers/ide/legacy/qd65xx.c | 7 ++++--- drivers/ide/legacy/umc8672.c | 3 ++- drivers/ide/mips/au1xxx-ide.c | 3 +++ drivers/ide/mips/swarm.c | 3 +++ drivers/ide/pci/cs5520.c | 20 ++++++++++++++++---- drivers/ide/pci/sgiioc4.c | 2 +- drivers/ide/ppc/pmac.c | 2 ++ drivers/ide/setup-pci.c | 25 +++++++++++++++++++++---- include/linux/ide.h | 10 ++++------ 18 files changed, 91 insertions(+), 55 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c index f383ace2a087..1fe0457243db 100644 --- a/drivers/ide/arm/icside.c +++ b/drivers/ide/arm/icside.c @@ -591,7 +591,8 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) state->hwif[0] = hwif; probe_hwif_init(hwif); - create_proc_ide_interfaces(); + + ide_proc_register_port(hwif); return 0; } @@ -679,7 +680,9 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) probe_hwif_init(hwif); probe_hwif_init(mate); - create_proc_ide_interfaces(); + + ide_proc_register_port(hwif); + ide_proc_register_port(mate); return 0; diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c index 9c6c49fdd2b1..890ea3fac3c6 100644 --- a/drivers/ide/arm/rapide.c +++ b/drivers/ide/arm/rapide.c @@ -76,7 +76,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) hwif->gendev.parent = &ec->dev; hwif->noprobe = 0; probe_hwif_init(hwif); - create_proc_ide_interfaces(); + ide_proc_register_port(hwif); ecard_set_drvdata(ec, hwif); goto out; } diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c index 99fd56151131..0f72b98d727f 100644 --- a/drivers/ide/ide-generic.c +++ b/drivers/ide/ide-generic.c @@ -22,8 +22,6 @@ static int __init ide_generic_init(void) if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET]) ide_release_lock(); /* for atari only */ - create_proc_ide_interfaces(); - return 0; } diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 8f15c23aa70d..3cebed77f55d 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1427,6 +1427,9 @@ int ideprobe_init (void) } } } + for (index = 0; index < MAX_HWIFS; ++index) + if (probe[index]) + ide_proc_register_port(&ide_hwifs[index]); return 0; } diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 949a6f609d84..d50bd996ff22 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c @@ -39,6 +39,8 @@ #include +static struct proc_dir_entry *proc_ide_root; + static int proc_ide_read_imodel (char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -783,26 +785,24 @@ static ide_proc_entry_t hwif_entries[] = { { NULL, 0, NULL, NULL } }; -void create_proc_ide_interfaces(void) +void ide_proc_register_port(ide_hwif_t *hwif) { - int h; + if (!hwif->present) + return; - for (h = 0; h < MAX_HWIFS; h++) { - ide_hwif_t *hwif = &ide_hwifs[h]; + if (!hwif->proc) { + hwif->proc = proc_mkdir(hwif->name, proc_ide_root); - if (!hwif->present) - continue; - if (!hwif->proc) { - hwif->proc = proc_mkdir(hwif->name, proc_ide_root); - if (!hwif->proc) - return; - ide_add_proc_entries(hwif->proc, hwif_entries, hwif); - } - create_proc_ide_drives(hwif); + if (!hwif->proc) + return; + + 
ide_add_proc_entries(hwif->proc, hwif_entries, hwif); } + + create_proc_ide_drives(hwif); } -EXPORT_SYMBOL(create_proc_ide_interfaces); +EXPORT_SYMBOL_GPL(ide_proc_register_port); #ifdef CONFIG_BLK_DEV_IDEPCI void ide_pci_create_host_proc(const char *name, get_info_t *get_info) @@ -813,7 +813,7 @@ void ide_pci_create_host_proc(const char *name, get_info_t *get_info) EXPORT_SYMBOL_GPL(ide_pci_create_host_proc); #endif -void destroy_proc_ide_interface(ide_hwif_t *hwif) +void ide_proc_unregister_port(ide_hwif_t *hwif) { if (hwif->proc) { destroy_proc_ide_drives(hwif); @@ -860,11 +860,11 @@ void proc_ide_create(void) { struct proc_dir_entry *entry; + proc_ide_root = proc_mkdir("ide", NULL); + if (!proc_ide_root) return; - create_proc_ide_interfaces(); - entry = create_proc_entry("drivers", 0, proc_ide_root); if (entry) entry->proc_fops = &ide_drivers_operations; diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 0fc532850bbe..038f2610e734 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -347,10 +347,6 @@ static int ide_system_bus_speed(void) return system_bus_speed; } -#ifdef CONFIG_IDE_PROC_FS -struct proc_dir_entry *proc_ide_root; -#endif - static struct resource* hwif_request_region(ide_hwif_t *hwif, unsigned long addr, int num) { @@ -594,7 +590,7 @@ void ide_unregister(unsigned int index) spin_unlock_irq(&ide_lock); - destroy_proc_ide_interface(hwif); + ide_proc_unregister_port(hwif); hwgroup = hwif->hwgroup; /* @@ -799,7 +795,7 @@ found: if (!initializing) { probe_hwif_init_with_fixup(hwif, fixup); - create_proc_ide_interfaces(); + ide_proc_register_port(hwif); } if (hwifp) @@ -1794,9 +1790,7 @@ static int __init ide_init(void) init_ide_data(); -#ifdef CONFIG_IDE_PROC_FS - proc_ide_root = proc_mkdir("ide", NULL); -#endif + proc_ide_create(); #ifdef CONFIG_BLK_DEV_ALI14XX if (probe_ali14xx) @@ -1822,8 +1816,6 @@ static int __init ide_init(void) /* Probe for special PCI and other "known" interface chipsets. 
*/ probe_for_hwifs(); - proc_ide_create(); - return 0; } diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c index 91961aa03047..df17ed68c0bc 100644 --- a/drivers/ide/legacy/ali14xx.c +++ b/drivers/ide/legacy/ali14xx.c @@ -223,7 +223,8 @@ static int __init ali14xx_probe(void) probe_hwif_init(hwif); probe_hwif_init(mate); - create_proc_ide_interfaces(); + ide_proc_register_port(hwif); + ide_proc_register_port(mate); return 0; } diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c index 0219ffa64db6..36a3f0ac6162 100644 --- a/drivers/ide/legacy/dtc2278.c +++ b/drivers/ide/legacy/dtc2278.c @@ -138,7 +138,8 @@ static int __init dtc2278_probe(void) probe_hwif_init(hwif); probe_hwif_init(mate); - create_proc_ide_interfaces(); + ide_proc_register_port(hwif); + ide_proc_register_port(mate); return 0; } diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c index a2832643c522..c8f353b1296f 100644 --- a/drivers/ide/legacy/ht6560b.c +++ b/drivers/ide/legacy/ht6560b.c @@ -357,7 +357,8 @@ int __init ht6560b_init(void) probe_hwif_init(hwif); probe_hwif_init(mate); - create_proc_ide_interfaces(); + ide_proc_register_port(hwif); + ide_proc_register_port(mate); return 0; diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c index 2fb8f50f1293..d1414a75b523 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/legacy/qd65xx.c @@ -427,7 +427,7 @@ static int __init qd_probe(int base) qd_setup(hwif, base, config, QD6500_DEF_DATA, QD6500_DEF_DATA, &qd6500_tune_drive); - create_proc_ide_interfaces(); + ide_proc_register_port(hwif); return 1; } @@ -459,7 +459,7 @@ static int __init qd_probe(int base) &qd6580_tune_drive); qd_write_reg(QD_DEF_CONTR,QD_CONTROL_PORT); - create_proc_ide_interfaces(); + ide_proc_register_port(hwif); return 1; } else { @@ -479,7 +479,8 @@ static int __init qd_probe(int base) &qd6580_tune_drive); qd_write_reg(QD_DEF_CONTR,QD_CONTROL_PORT); - create_proc_ide_interfaces(); + ide_proc_register_port(hwif); + ide_proc_register_port(mate); return 0; /* no other qd65xx possible */ } diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c index ca7974455578..ddc403a0bd82 100644 --- a/drivers/ide/legacy/umc8672.c +++ b/drivers/ide/legacy/umc8672.c @@ -160,7 +160,8 @@ static int __init umc8672_probe(void) probe_hwif_init(hwif); probe_hwif_init(mate); - create_proc_ide_interfaces(); + ide_proc_register_port(hwif); + ide_proc_register_port(mate); return 0; } diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c index d54d9fe92a7d..ca95e990862e 100644 --- a/drivers/ide/mips/au1xxx-ide.c +++ b/drivers/ide/mips/au1xxx-ide.c @@ -760,6 +760,9 @@ static int au_ide_probe(struct device *dev) #endif probe_hwif_init(hwif); + + ide_proc_register_port(hwif); + dev_set_drvdata(dev, hwif); printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c index 81fa06851b27..6e935d7c63fd 100644 --- a/drivers/ide/mips/swarm.c +++ b/drivers/ide/mips/swarm.c @@ -129,6 +129,9 @@ static int __devinit swarm_ide_probe(struct device *dev) hwif->irq = hwif->hw.irq; probe_hwif_init(hwif); + + ide_proc_register_port(hwif); + dev_set_drvdata(dev, hwif); return 0; diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c index 400859a839f7..3b88a3a56116 100644 --- a/drivers/ide/pci/cs5520.c +++ b/drivers/ide/pci/cs5520.c @@ -213,6 +213,7 @@ static ide_pci_device_t cyrix_chipsets[] __devinitdata = { static int __devinit 
cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) { + ide_hwif_t *hwif = NULL, *mate = NULL; ata_index_t index; ide_pci_device_t *d = &cyrix_chipsets[id->driver_data]; @@ -239,10 +240,21 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic ide_pci_setup_ports(dev, d, 14, &index); - if((index.b.low & 0xf0) != 0xf0) - probe_hwif_init(&ide_hwifs[index.b.low]); - if((index.b.high & 0xf0) != 0xf0) - probe_hwif_init(&ide_hwifs[index.b.high]); + if ((index.b.low & 0xf0) != 0xf0) + hwif = &ide_hwifs[index.b.low]; + if ((index.b.high & 0xf0) != 0xf0) + mate = &ide_hwifs[index.b.high]; + + if (hwif) + probe_hwif_init(hwif); + if (mate) + probe_hwif_init(mate); + + if (hwif) + ide_proc_register_port(hwif); + if (mate) + ide_proc_register_port(mate); + return 0; } diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c index fd09b295a69d..d3185e29a38e 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/pci/sgiioc4.c @@ -692,7 +692,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d) return -EIO; /* Create /proc/ide entries */ - create_proc_ide_interfaces(); + ide_proc_register_port(hwif); return 0; } diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c index a49ebe44babd..45fc36f0f219 100644 --- a/drivers/ide/ppc/pmac.c +++ b/drivers/ide/ppc/pmac.c @@ -1276,6 +1276,8 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) /* We probe the hwif now */ probe_hwif_init(hwif); + ide_proc_register_port(hwif); + return 0; } diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index 118fb3205ca8..892cda755782 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c @@ -702,6 +702,7 @@ out: int ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t *d) { + ide_hwif_t *hwif = NULL, *mate = NULL; ata_index_t index_list; int ret; @@ -710,11 +711,19 @@ int ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t *d) goto out; if ((index_list.b.low & 0xf0) != 0xf0) - probe_hwif_init_with_fixup(&ide_hwifs[index_list.b.low], d->fixup); + hwif = &ide_hwifs[index_list.b.low]; if ((index_list.b.high & 0xf0) != 0xf0) - probe_hwif_init_with_fixup(&ide_hwifs[index_list.b.high], d->fixup); + mate = &ide_hwifs[index_list.b.high]; - create_proc_ide_interfaces(); + if (hwif) + probe_hwif_init_with_fixup(hwif, d->fixup); + if (mate) + probe_hwif_init_with_fixup(mate, d->fixup); + + if (hwif) + ide_proc_register_port(hwif); + if (mate) + ide_proc_register_port(mate); out: return ret; } @@ -748,7 +757,15 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2, } } - create_proc_ide_interfaces(); + for (i = 0; i < 2; i++) { + u8 idx[2] = { index_list[i].b.low, index_list[i].b.high }; + int j; + + for (j = 0; j < 2; j++) { + if ((idx[j] & 0xf0) != 0xf0) + ide_proc_register_port(ide_hwifs + idx[j]); + } + } out: return ret; } diff --git a/include/linux/ide.h b/include/linux/ide.h index bdb97655ef61..52d482a16dd9 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -909,12 +909,10 @@ typedef struct { write_proc_t *write_proc; } ide_proc_entry_t; -extern struct proc_dir_entry *proc_ide_root; - void proc_ide_create(void); void proc_ide_destroy(void); -void create_proc_ide_interfaces(void); -void destroy_proc_ide_interface(ide_hwif_t *); +void ide_proc_register_port(ide_hwif_t *); +void ide_proc_unregister_port(ide_hwif_t *); void ide_proc_register_driver(ide_drive_t *, ide_driver_t *); void ide_proc_unregister_driver(ide_drive_t *, ide_driver_t *); @@ -945,8 +943,8 @@ void 
ide_pci_create_host_proc(const char *, get_info_t *); #else static inline void proc_ide_create(void) { ; } static inline void proc_ide_destroy(void) { ; } -static inline void create_proc_ide_interfaces(void) { ; } -static inline void destroy_proc_ide_interface(ide_hwif_t *hwif) { ; } +static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; } +static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; } static inline void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } static inline void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } static inline void ide_add_generic_settings(ide_drive_t *drive) { ; } -- cgit v1.2.3 From 6d208b39c45edee5def6c201fcd51561c5a39828 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 10 May 2007 00:01:11 +0200 Subject: ide: legacy PCI bus order probing fixes IDE PCI host drivers should register themselves with IDE core only when IDE driver is built-in, otherwise (IDE driver is modular and thus IDE PCI host drivers are also modular) the code has no effect and just complicates the probing. Fix it by adding new config option CONFIG_IDEPCI_PCIBUS (defined only when needed and invisible to the user) and covering by #ifdef/#endif the code in question. It turned out that "ide=reverse" was silently accepted but did nothing in case when IDE driver was modular, this is fixed now. Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/Kconfig | 3 +++ drivers/ide/ide.c | 10 +++++----- drivers/ide/setup-pci.c | 2 ++ include/linux/ide.h | 5 +++++ 4 files changed, 15 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index a678bbecb489..1d06b415ede9 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -371,6 +371,9 @@ config IDEPCI_SHARE_IRQ It is safe to say Y to this question, in most cases. If unsure, say N. 
+config IDEPCI_PCIBUS_ORDER + def_bool PCI && BLK_DEV_IDE=y && BLK_DEV_IDEPCI + config BLK_DEV_OFFBOARD bool "Boot off-board chipsets first support" depends on PCI && BLK_DEV_IDEPCI diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 038f2610e734..f2b547ff7722 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -172,7 +172,7 @@ static int system_bus_speed; /* holds what we think is VESA/PCI bus speed */ DECLARE_MUTEX(ide_cfg_sem); __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock); -#ifdef CONFIG_BLK_DEV_IDEPCI +#ifdef CONFIG_IDEPCI_PCIBUS_ORDER static int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */ #endif @@ -1333,13 +1333,13 @@ static int __init ide_setup(char *s) return 1; } -#ifdef CONFIG_BLK_DEV_IDEPCI +#ifdef CONFIG_IDEPCI_PCIBUS_ORDER if (!strcmp(s, "ide=reverse")) { ide_scan_direction = 1; printk(" : Enabled support for IDE inverse scan order.\n"); return 1; } -#endif /* CONFIG_BLK_DEV_IDEPCI */ +#endif #ifdef CONFIG_BLK_DEV_IDEACPI if (!strcmp(s, "ide=noacpi")) { @@ -1599,9 +1599,9 @@ extern void __init h8300_ide_init(void); */ static void __init probe_for_hwifs (void) { -#ifdef CONFIG_BLK_DEV_IDEPCI +#ifdef CONFIG_IDEPCI_PCIBUS_ORDER ide_scan_pcibus(ide_scan_direction); -#endif /* CONFIG_BLK_DEV_IDEPCI */ +#endif #ifdef CONFIG_ETRAX_IDE { diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index 892cda755782..67035ba4bf5e 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c @@ -772,6 +772,7 @@ out: EXPORT_SYMBOL_GPL(ide_setup_pci_devices); +#ifdef CONFIG_IDEPCI_PCIBUS_ORDER /* * Module interfaces */ @@ -878,3 +879,4 @@ void __init ide_scan_pcibus (int scan_direction) __pci_register_driver(d, d->driver.owner, d->driver.mod_name); } } +#endif diff --git a/include/linux/ide.h b/include/linux/ide.h index 52d482a16dd9..df4e6a510310 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1205,9 +1205,14 @@ void ide_init_disk(struct gendisk *, ide_drive_t *); extern int ideprobe_init(void); +#ifdef CONFIG_IDEPCI_PCIBUS_ORDER extern void ide_scan_pcibus(int scan_direction) __init; extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name); #define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME) +#else +#define ide_pci_register_driver(d) pci_register_driver(d) +#endif + void ide_pci_setup_ports(struct pci_dev *, struct ide_pci_device_s *, int, ata_index_t *); extern void ide_setup_pci_noise (struct pci_dev *dev, struct ide_pci_device_s *d); -- cgit v1.2.3 From 44ce6294d07555c3d313757105fd44b78208407f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 9 May 2007 18:51:36 -0700 Subject: Revert "md: improve partition detection in md array" This reverts commit 5b479c91da90eef605f851508744bfe8269591a0. Quoth Neil Brown: "It causes an oops when auto-detecting raid arrays, and it doesn't seem easy to fix. The array may not be 'open' when do_md_run is called, so bdev->bd_disk might be NULL, so bd_set_size can oops. This whole approach of opening an md device before it has been assembled just seems to get more and more painful. I think I'm going to have to come up with something clever to provide both backward comparability with usage expectation, and sane integration into the rest of the kernel." 
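For illustration, a minimal sketch of the pattern this revert falls back to; the foo_* names below are hypothetical stand-ins invented for this note (they are not part of the patch), and the real hunks follow in md.c. Instead of resizing and re-reading partitions on a block_device that may not even be open, the driver only flags that its size changed and lets the block layer rescan the partition table on the next open through the media_changed/revalidate_disk operations.

#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/module.h>

struct foo_dev {			/* hypothetical stand-in for mddev_t */
	struct gendisk *disk;
	int changed;			/* set whenever the array size changes */
};

/* check_disk_change() asks this on open; non-zero triggers revalidation */
static int foo_media_changed(struct gendisk *disk)
{
	struct foo_dev *fd = disk->private_data;

	return fd->changed;
}

/* called after a reported change; the block layer then rereads partitions */
static int foo_revalidate_disk(struct gendisk *disk)
{
	struct foo_dev *fd = disk->private_data;

	fd->changed = 0;
	return 0;
}

static struct block_device_operations foo_fops = {
	.owner		 = THIS_MODULE,
	.media_changed	 = foo_media_changed,
	.revalidate_disk = foo_revalidate_disk,
};

The design point is that partition re-reading now happens in the open path, where the gendisk and block_device are known to be valid, rather than from do_md_run().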
Signed-off-by: Linus Torvalds --- drivers/md/md.c | 26 ++++++++++++++++++-------- drivers/md/raid1.c | 1 + drivers/md/raid5.c | 2 ++ include/linux/raid/md_k.h | 1 + 4 files changed, 22 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/md.c b/drivers/md/md.c index 2901d0c0ee9e..65814b0340cb 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3104,7 +3104,6 @@ static int do_md_run(mddev_t * mddev) struct gendisk *disk; struct mdk_personality *pers; char b[BDEVNAME_SIZE]; - struct block_device *bdev; if (list_empty(&mddev->disks)) /* cannot run an array with no devices.. */ @@ -3332,13 +3331,7 @@ static int do_md_run(mddev_t * mddev) md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ - bdev = bdget_disk(mddev->gendisk, 0); - if (bdev) { - bd_set_size(bdev, mddev->array_size << 1); - blkdev_ioctl(bdev->bd_inode, NULL, BLKRRPART, 0); - bdput(bdev); - } - + mddev->changed = 1; md_new_event(mddev); kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE); return 0; @@ -3460,6 +3453,7 @@ static int do_md_stop(mddev_t * mddev, int mode) mddev->pers = NULL; set_capacity(disk, 0); + mddev->changed = 1; if (mddev->ro) mddev->ro = 0; @@ -4599,6 +4593,20 @@ static int md_release(struct inode *inode, struct file * file) return 0; } +static int md_media_changed(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + return mddev->changed; +} + +static int md_revalidate(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + mddev->changed = 0; + return 0; +} static struct block_device_operations md_fops = { .owner = THIS_MODULE, @@ -4606,6 +4614,8 @@ static struct block_device_operations md_fops = .release = md_release, .ioctl = md_ioctl, .getgeo = md_getgeo, + .media_changed = md_media_changed, + .revalidate_disk= md_revalidate, }; static int md_thread(void * arg) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1b7130cad21f..97ee870b265d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2063,6 +2063,7 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors) */ mddev->array_size = sectors>>1; set_capacity(mddev->gendisk, mddev->array_size << 1); + mddev->changed = 1; if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) { mddev->recovery_cp = mddev->size << 1; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a72e70ad0975..061375ee6592 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3864,6 +3864,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) sectors &= ~((sector_t)mddev->chunk_size/512 - 1); mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; set_capacity(mddev->gendisk, mddev->array_size << 1); + mddev->changed = 1; if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { mddev->recovery_cp = mddev->size << 1; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); @@ -3998,6 +3999,7 @@ static void end_reshape(raid5_conf_t *conf) conf->mddev->array_size = conf->mddev->size * (conf->raid_disks - conf->max_degraded); set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); + conf->mddev->changed = 1; bdev = bdget_disk(conf->mddev->gendisk, 0); if (bdev) { diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index a121f36f4437..de72c49747c8 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -201,6 +201,7 @@ struct mddev_s struct mutex reconfig_mutex; atomic_t active; + int changed; /* true if we might need 
to reread partition info */ int degraded; /* whether md should consider * adding a spare */ -- cgit v1.2.3 From 00eb43a1897a8845d0edb198cec69ac5f1f299dd Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Fri, 4 May 2007 14:16:19 +0200 Subject: acpi,msi-laptop: Fall back to EC polling mode for MSI laptop specific EC commands The ACPI EC that is used in MSI laptops knows some non-standard commands for changing the screen brightness and a few other things, which are used by the msi-laptop.c driver. Unfortunately, for these commands no GPE events for IBF and OBF are triggered. Since nowadays the EC code uses the ec_intr=1 mode by default, this causes these operations to time out, although they don't fail. As a result, all operations that you can do with the msi-laptop.c driver take more or less 1s to complete, which is awfully slow. In one of the more recent kernels (2.6.20?) the EC subsystem has been revamped. With that change the EC timeout has been increased. Before that increase the MSI EC accesses were slow -- but not *that* slow, hence I took notice of this limitation of the MSI EC hardware only very recently. The standard EC operations on the MSI EC as defined in the ACPI spec support GPE events properly. The following patch adds a new argument "force_poll" to the ec_transaction() function (and friends). If set to 1, the function will poll for IBF/OBF even if ec_intr=1 is enabled. If set to 0 the current behaviour is used. The msi-laptop driver is modified to make use of this new flag, so that OBF/IBF is polled for the special MSI EC transactions -- but only for them. Signed-off-by: Lennart Poettering Acked-by: Alexey Starikovskiy Signed-off-by: Len Brown --- drivers/acpi/ec.c | 39 +++++++++++++++++++++++---------------- drivers/misc/msi-laptop.c | 12 ++++++------ include/linux/acpi.h | 3 ++- 3 files changed, 31 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index e08cf98f504f..82f496c07675 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -147,9 +147,10 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event, return 0; } -static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count) +static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, + unsigned count, int force_poll) { - if (acpi_ec_mode == EC_POLL) { + if (unlikely(force_poll) || acpi_ec_mode == EC_POLL) { unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY); while (time_before(jiffies, delay)) { if (acpi_ec_check_status(ec, event, 0)) @@ -173,14 +174,15 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count) static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, const u8 * wdata, unsigned wdata_len, - u8 * rdata, unsigned rdata_len) + u8 * rdata, unsigned rdata_len, + int force_poll) { int result = 0; unsigned count = atomic_read(&ec->event_count); acpi_ec_write_cmd(ec, command); for (; wdata_len > 0; --wdata_len) { - result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count); + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll); if (result) { printk(KERN_ERR PREFIX "write_cmd timeout, command = %d\n", command); @@ -191,7 +193,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, } if (!rdata_len) { - result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count); + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll); if (result) { printk(KERN_ERR PREFIX "finish-write timeout, command = %d\n", command); @@ 
-202,7 +204,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, } for (; rdata_len > 0; --rdata_len) { - result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count); + result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count, force_poll); if (result) { printk(KERN_ERR PREFIX "read timeout, command = %d\n", command); @@ -217,7 +219,8 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, static int acpi_ec_transaction(struct acpi_ec *ec, u8 command, const u8 * wdata, unsigned wdata_len, - u8 * rdata, unsigned rdata_len) + u8 * rdata, unsigned rdata_len, + int force_poll) { int status; u32 glk; @@ -240,7 +243,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command, /* Make sure GPE is enabled before doing transaction */ acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); - status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0); + status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0, 0); if (status) { printk(KERN_DEBUG PREFIX "input buffer is not empty, aborting transaction\n"); @@ -249,7 +252,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command, status = acpi_ec_transaction_unlocked(ec, command, wdata, wdata_len, - rdata, rdata_len); + rdata, rdata_len, + force_poll); end: @@ -267,12 +271,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command, int acpi_ec_burst_enable(struct acpi_ec *ec) { u8 d; - return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1); + return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1, 0); } int acpi_ec_burst_disable(struct acpi_ec *ec) { - return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0); + return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0, 0); } static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data) @@ -281,7 +285,7 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data) u8 d; result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ, - &address, 1, &d, 1); + &address, 1, &d, 1, 0); *data = d; return result; } @@ -290,7 +294,7 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) { u8 wdata[2] = { address, data }; return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE, - wdata, 2, NULL, 0); + wdata, 2, NULL, 0, 0); } /* @@ -349,13 +353,15 @@ EXPORT_SYMBOL(ec_write); int ec_transaction(u8 command, const u8 * wdata, unsigned wdata_len, - u8 * rdata, unsigned rdata_len) + u8 * rdata, unsigned rdata_len, + int force_poll) { if (!first_ec) return -ENODEV; return acpi_ec_transaction(first_ec, command, wdata, - wdata_len, rdata, rdata_len); + wdata_len, rdata, rdata_len, + force_poll); } EXPORT_SYMBOL(ec_transaction); @@ -374,7 +380,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data) * bit to be cleared (and thus clearing the interrupt source). 
*/ - result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1); + result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1, 0); if (result) return result; @@ -410,6 +416,7 @@ static u32 acpi_ec_gpe_handler(void *data) acpi_status status = AE_OK; u8 value; struct acpi_ec *ec = data; + atomic_inc(&ec->event_count); if (acpi_ec_mode == EC_INTR) { diff --git a/drivers/misc/msi-laptop.c b/drivers/misc/msi-laptop.c index 68c4b58525ba..41e901f53e7c 100644 --- a/drivers/misc/msi-laptop.c +++ b/drivers/misc/msi-laptop.c @@ -85,7 +85,7 @@ static int set_lcd_level(int level) buf[0] = 0x80; buf[1] = (u8) (level*31); - return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0); + return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0, 1); } static int get_lcd_level(void) @@ -93,7 +93,7 @@ static int get_lcd_level(void) u8 wdata = 0, rdata; int result; - result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1); + result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1); if (result < 0) return result; @@ -105,7 +105,7 @@ static int get_auto_brightness(void) u8 wdata = 4, rdata; int result; - result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1); + result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1); if (result < 0) return result; @@ -119,14 +119,14 @@ static int set_auto_brightness(int enable) wdata[0] = 4; - result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1); + result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1, 1); if (result < 0) return result; wdata[0] = 0x84; wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0); - return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0); + return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0, 1); } static int get_wireless_state(int *wlan, int *bluetooth) @@ -134,7 +134,7 @@ static int get_wireless_state(int *wlan, int *bluetooth) u8 wdata = 0, rdata; int result; - result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1); + result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1); if (result < 0) return -1; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 8bcfaa4c66ae..fccd8b548d93 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -182,7 +182,8 @@ extern int ec_read(u8 addr, u8 *val); extern int ec_write(u8 addr, u8 val); extern int ec_transaction(u8 command, const u8 *wdata, unsigned wdata_len, - u8 *rdata, unsigned rdata_len); + u8 *rdata, unsigned rdata_len, + int force_poll); #endif /*CONFIG_ACPI_EC*/ -- cgit v1.2.3 From 49d687b636c1f7e9a14faab7bd654f9fb88ecd11 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 10 May 2007 02:19:51 +1000 Subject: [POWERPC] pmu_sys_suspended is only defined for PPC32 thus we get a link error on ppc64 with CONFIG_PM=y. This fixes it. 
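To make the failure mode concrete, here is a hedged, hypothetical sketch (the foo_pm_prepare() name is invented for this note and does not appear in the tree): with the old guard, any CONFIG_PM=y code on ppc64 that merely tested the flag compiled fine but left an unresolved reference at link time, because the symbol is only ever defined by the PPC32 PMU code. With the guard tightened to CONFIG_PM && CONFIG_PPC32, such a ppc64 build takes the header's #else branch instead, so no external symbol is referenced.

#include <linux/errno.h>
#include <linux/pmu.h>		/* declares pmu_sys_suspended, or stubs it out */

/*
 * Hypothetical CONFIG_PM=y caller.  On ppc64 this used to compile but then
 * fail to link, since nothing built on ppc64 defines pmu_sys_suspended.
 */
static int foo_pm_prepare(void)
{
	if (pmu_sys_suspended)
		return -EBUSY;	/* PMU is already handling a suspend */
	return 0;
}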
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/linux/pmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/pmu.h b/include/linux/pmu.h index 37ca57392add..5ad913ff02b2 100644 --- a/include/linux/pmu.h +++ b/include/linux/pmu.h @@ -226,7 +226,7 @@ extern unsigned int pmu_power_flags; extern void pmu_backlight_init(void); /* some code needs to know if the PMU was suspended for hibernation */ -#ifdef CONFIG_PM +#if defined(CONFIG_PM) && defined(CONFIG_PPC32) extern int pmu_sys_suspended; #else /* if power management is not configured it can't be suspended */ -- cgit v1.2.3 From 3e7cbae7c6dda18d427335b3ad98f1a0d40ef30c Mon Sep 17 00:00:00 2001 From: Ivo van Doorn Date: Mon, 12 Jun 2006 16:17:04 +0200 Subject: CRC ITU-T V.41 This will add the CRC calculation according to the CRC ITU-T V.41 to the kernel lib/ folder. This code has been derived from the rt2x00 driver, currently found only in the wireless-dev tree, but this library is generic and could be used by more drivers who currently use their own implementation. Signed-off-by: Ivo van Doorn Also useful for the new firewire stack. Signed-off-by: Kristian Hoegsberg Signed-off-by: Stefan Richter --- include/linux/crc-itu-t.h | 28 +++++++++++++++++++ lib/Kconfig | 8 ++++++ lib/Makefile | 1 + lib/crc-itu-t.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 106 insertions(+) create mode 100644 include/linux/crc-itu-t.h create mode 100644 lib/crc-itu-t.c (limited to 'include/linux') diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h new file mode 100644 index 000000000000..84920f3cc83e --- /dev/null +++ b/include/linux/crc-itu-t.h @@ -0,0 +1,28 @@ +/* + * crc-itu-t.h - CRC ITU-T V.41 routine + * + * Implements the standard CRC ITU-T V.41: + * Width 16 + * Poly 0x0x1021 (x^16 + x^12 + x^15 + 1) + * Init 0 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#ifndef CRC_ITU_T_H +#define CRC_ITU_T_H + +#include + +extern u16 const crc_itu_t_table[256]; + +extern u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len); + +static inline u16 crc_itu_t_byte(u16 crc, const u8 data) +{ + return (crc << 8) ^ crc_itu_t_table[((crc >> 8) ^ data) & 0xff]; +} + +#endif /* CRC_ITU_T_H */ + diff --git a/lib/Kconfig b/lib/Kconfig index 384249915047..91477b9dc924 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -23,6 +23,14 @@ config CRC16 the kernel tree does. Such modules that use library CRC16 functions require M here. +config CRC_ITU_T + tristate "CRC ITU-T V.41 functions" + help + This option is provided for the case where no in-kernel-tree + modules require CRC ITU-T V.41 functions, but a module built outside + the kernel tree does. Such modules that use library CRC ITU-T V.41 + functions require M here. 
+ config CRC32 tristate "CRC32 functions" default y diff --git a/lib/Makefile b/lib/Makefile index 992a39ef9ffd..2a9e47cc7f22 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -40,6 +40,7 @@ endif obj-$(CONFIG_BITREVERSE) += bitrev.o obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o obj-$(CONFIG_CRC16) += crc16.o +obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o obj-$(CONFIG_CRC32) += crc32.o obj-$(CONFIG_LIBCRC32C) += libcrc32c.o obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c new file mode 100644 index 000000000000..a63472b82416 --- /dev/null +++ b/lib/crc-itu-t.c @@ -0,0 +1,69 @@ +/* + * crc-itu-t.c + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include +#include +#include + +/** CRC table for the CRC ITU-T V.41 0x0x1021 (x^16 + x^12 + x^15 + 1) */ +const u16 crc_itu_t_table[256] = { + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 +}; + +EXPORT_SYMBOL(crc_itu_t_table); + +/** + * crc_itu_t - Compute the CRC-ITU-T for the data buffer + * + * @crc: previous CRC value + * @buffer: data pointer + * @len: number of bytes in the buffer + * + * Returns the updated CRC value + */ +u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len) +{ + while (len--) + crc = crc_itu_t_byte(crc, *buffer++); + return crc; +} +EXPORT_SYMBOL(crc_itu_t); + +MODULE_DESCRIPTION("CRC ITU-T V.41 calculations"); +MODULE_LICENSE("GPL"); + -- cgit v1.2.3 From 
04dfb8dbd200c574b2f292146ef817d9745c8936 Mon Sep 17 00:00:00 2001 From: Kristian Høgsberg Date: Mon, 7 May 2007 20:33:36 -0400 Subject: firewire: Use linux/*.h instead of asm/*.h header files. Signed-off-by: Kristian Hoegsberg Signed-off-by: Stefan Richter --- include/linux/firewire-cdev.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 026c768e3bf1..3cb83a1fa9ba 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -22,8 +22,8 @@ #ifndef __fw_cdev_h #define __fw_cdev_h -#include -#include +#include +#include #define TCODE_WRITE_QUADLET_REQUEST 0 #define TCODE_WRITE_BLOCK_REQUEST 1 -- cgit v1.2.3 From 4c5a443e80e6d85e5c84a56bf30b61fe84c1f292 Mon Sep 17 00:00:00 2001 From: Kristian Høgsberg Date: Mon, 7 May 2007 20:33:37 -0400 Subject: firewire: Break out shared IEEE1394 constant to separate header file. Signed-off-by: Kristian Hoegsberg Signed-off-by: Stefan Richter --- drivers/firewire/fw-transaction.h | 61 +--------------------------------- include/linux/firewire-cdev.h | 51 ++++------------------------- include/linux/firewire-constants.h | 67 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 105 deletions(-) create mode 100644 include/linux/firewire-constants.h (limited to 'include/linux') diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index a1507cf8d764..acdc3be38c61 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -25,18 +25,7 @@ #include #include #include - -#define TCODE_WRITE_QUADLET_REQUEST 0 -#define TCODE_WRITE_BLOCK_REQUEST 1 -#define TCODE_WRITE_RESPONSE 2 -#define TCODE_READ_QUADLET_REQUEST 4 -#define TCODE_READ_BLOCK_REQUEST 5 -#define TCODE_READ_QUADLET_RESPONSE 6 -#define TCODE_READ_BLOCK_RESPONSE 7 -#define TCODE_CYCLE_START 8 -#define TCODE_LOCK_REQUEST 9 -#define TCODE_STREAM_DATA 10 -#define TCODE_LOCK_RESPONSE 11 +#include #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) @@ -45,54 +34,6 @@ #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) #define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0) -/* Juju specific tcodes */ -#define TCODE_DEALLOCATE 0x10 -#define TCODE_LOCK_MASK_SWAP 0x11 -#define TCODE_LOCK_COMPARE_SWAP 0x12 -#define TCODE_LOCK_FETCH_ADD 0x13 -#define TCODE_LOCK_LITTLE_ADD 0x14 -#define TCODE_LOCK_BOUNDED_ADD 0x15 -#define TCODE_LOCK_WRAP_ADD 0x16 -#define TCODE_LOCK_VENDOR_SPECIFIC 0x17 - -#define SCODE_100 0x0 -#define SCODE_200 0x1 -#define SCODE_400 0x2 -#define SCODE_BETA 0x3 - -#define EXTCODE_MASK_SWAP 0x1 -#define EXTCODE_COMPARE_SWAP 0x2 -#define EXTCODE_FETCH_ADD 0x3 -#define EXTCODE_LITTLE_ADD 0x4 -#define EXTCODE_BOUNDED_ADD 0x5 -#define EXTCODE_WRAP_ADD 0x6 - -#define ACK_COMPLETE 0x1 -#define ACK_PENDING 0x2 -#define ACK_BUSY_X 0x4 -#define ACK_BUSY_A 0x5 -#define ACK_BUSY_B 0x6 -#define ACK_DATA_ERROR 0xd -#define ACK_TYPE_ERROR 0xe - -#define RCODE_COMPLETE 0x0 -#define RCODE_CONFLICT_ERROR 0x4 -#define RCODE_DATA_ERROR 0x5 -#define RCODE_TYPE_ERROR 0x6 -#define RCODE_ADDRESS_ERROR 0x7 - -/* Juju specific rcodes */ -#define RCODE_SEND_ERROR 0x10 -#define RCODE_CANCELLED 0x11 -#define RCODE_BUSY 0x12 -#define RCODE_GENERATION 0x13 -#define RCODE_NO_ACK 0x14 - -#define RETRY_1 0x00 -#define RETRY_X 0x01 -#define RETRY_A 0x02 -#define RETRY_B 0x03 - #define LOCAL_BUS 0xffc0 #define SELFID_PORT_CHILD 0x3 diff --git 
a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 3cb83a1fa9ba..d4455eb2ae35 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -1,6 +1,5 @@ -/* -*- c-basic-offset: 8 -*- - * - * fw-device-cdev.h -- Char device interface. +/* + * Char device interface. * * Copyright (C) 2005-2006 Kristian Hoegsberg * @@ -19,50 +18,12 @@ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#ifndef __fw_cdev_h -#define __fw_cdev_h +#ifndef _LINUX_FIREWIRE_CDEV_H +#define _LINUX_FIREWIRE_CDEV_H #include #include - -#define TCODE_WRITE_QUADLET_REQUEST 0 -#define TCODE_WRITE_BLOCK_REQUEST 1 -#define TCODE_WRITE_RESPONSE 2 -#define TCODE_READ_QUADLET_REQUEST 4 -#define TCODE_READ_BLOCK_REQUEST 5 -#define TCODE_READ_QUADLET_RESPONSE 6 -#define TCODE_READ_BLOCK_RESPONSE 7 -#define TCODE_CYCLE_START 8 -#define TCODE_LOCK_REQUEST 9 -#define TCODE_STREAM_DATA 10 -#define TCODE_LOCK_RESPONSE 11 - -#define TCODE_LOCK_MASK_SWAP 0x11 -#define TCODE_LOCK_COMPARE_SWAP 0x12 -#define TCODE_LOCK_FETCH_ADD 0x13 -#define TCODE_LOCK_LITTLE_ADD 0x14 -#define TCODE_LOCK_BOUNDED_ADD 0x15 -#define TCODE_LOCK_WRAP_ADD 0x16 -#define TCODE_LOCK_VENDOR_DEPENDENT 0x17 - -#define RCODE_COMPLETE 0x0 -#define RCODE_CONFLICT_ERROR 0x4 -#define RCODE_DATA_ERROR 0x5 -#define RCODE_TYPE_ERROR 0x6 -#define RCODE_ADDRESS_ERROR 0x7 - -#define RCODE_SEND_ERROR 0x10 -#define RCODE_CANCELLED 0x11 -#define RCODE_BUSY 0x12 -#define RCODE_GENERATION 0x13 -#define RCODE_NO_ACK 0x14 - -#define SCODE_100 0x0 -#define SCODE_200 0x1 -#define SCODE_400 0x2 -#define SCODE_800 0x3 -#define SCODE_1600 0x4 -#define SCODE_3200 0x5 +#include #define FW_CDEV_EVENT_BUS_RESET 0x00 #define FW_CDEV_EVENT_RESPONSE 0x01 @@ -265,4 +226,4 @@ struct fw_cdev_stop_iso { __u32 handle; }; -#endif /* __fw_cdev_h */ +#endif /* _LINUX_FIREWIRE_CDEV_H */ diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h new file mode 100644 index 000000000000..b316770a43fd --- /dev/null +++ b/include/linux/firewire-constants.h @@ -0,0 +1,67 @@ +#ifndef _LINUX_FIREWIRE_CONSTANTS_H +#define _LINUX_FIREWIRE_CONSTANTS_H + +#define TCODE_WRITE_QUADLET_REQUEST 0x0 +#define TCODE_WRITE_BLOCK_REQUEST 0x1 +#define TCODE_WRITE_RESPONSE 0x2 +#define TCODE_READ_QUADLET_REQUEST 0x4 +#define TCODE_READ_BLOCK_REQUEST 0x5 +#define TCODE_READ_QUADLET_RESPONSE 0x6 +#define TCODE_READ_BLOCK_RESPONSE 0x7 +#define TCODE_CYCLE_START 0x8 +#define TCODE_LOCK_REQUEST 0x9 +#define TCODE_STREAM_DATA 0xa +#define TCODE_LOCK_RESPONSE 0xb + +#define EXTCODE_MASK_SWAP 0x1 +#define EXTCODE_COMPARE_SWAP 0x2 +#define EXTCODE_FETCH_ADD 0x3 +#define EXTCODE_LITTLE_ADD 0x4 +#define EXTCODE_BOUNDED_ADD 0x5 +#define EXTCODE_WRAP_ADD 0x6 +#define EXTCODE_VENDOR_DEPENDENT 0x7 + +/* Juju specific tcodes */ +#define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP) +#define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP) +#define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD) +#define TCODE_LOCK_LITTLE_ADD (0x10 | EXTCODE_LITTLE_ADD) +#define TCODE_LOCK_BOUNDED_ADD (0x10 | EXTCODE_BOUNDED_ADD) +#define TCODE_LOCK_WRAP_ADD (0x10 | EXTCODE_WRAP_ADD) +#define TCODE_LOCK_VENDOR_DEPENDENT (0x10 | EXTCODE_VENDOR_DEPENDENT) + +#define RCODE_COMPLETE 0x0 +#define RCODE_CONFLICT_ERROR 0x4 +#define RCODE_DATA_ERROR 0x5 +#define RCODE_TYPE_ERROR 0x6 +#define RCODE_ADDRESS_ERROR 0x7 + +/* Juju specific rcodes */ +#define RCODE_SEND_ERROR 0x10 +#define RCODE_CANCELLED 0x11 +#define RCODE_BUSY 0x12 +#define RCODE_GENERATION 0x13 +#define RCODE_NO_ACK 
0x14 + +#define SCODE_100 0x0 +#define SCODE_200 0x1 +#define SCODE_400 0x2 +#define SCODE_800 0x3 +#define SCODE_1600 0x4 +#define SCODE_3200 0x5 +#define SCODE_BETA 0x3 + +#define ACK_COMPLETE 0x1 +#define ACK_PENDING 0x2 +#define ACK_BUSY_X 0x4 +#define ACK_BUSY_A 0x5 +#define ACK_BUSY_B 0x6 +#define ACK_DATA_ERROR 0xd +#define ACK_TYPE_ERROR 0xe + +#define RETRY_1 0x00 +#define RETRY_X 0x01 +#define RETRY_A 0x02 +#define RETRY_B 0x03 + +#endif /* _LINUX_FIREWIRE_CONSTANTS_H */ -- cgit v1.2.3 From 894b8788d7f265eb7c6f75a9a77cedeb48f51586 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Thu, 10 May 2007 03:15:16 -0700 Subject: slub: support concurrent local and remote frees and allocs on a slab Avoid atomic overhead in slab_alloc and slab_free SLUB needs to use the slab_lock for the per cpu slabs to synchronize with potential kfree operations. This patch avoids that need by moving all free objects onto a lockless_freelist. The regular freelist continues to exist and will be used to free objects. So while we consume the lockless_freelist the regular freelist may build up objects. If we are out of objects on the lockless_freelist then we may check the regular freelist. If it has objects then we move those over to the lockless_freelist and do this again. There is a significant savings in terms of atomic operations that have to be performed. We can even free directly to the lockless_freelist if we know that we are running on the same processor. So this speeds up short lived objects. They may be allocated and freed without taking the slab_lock. This is particular good for netperf. In order to maximize the effect of the new faster hotpath we extract the hottest performance pieces into inlined functions. These are then inlined into kmem_cache_alloc and kmem_cache_free. So hotpath allocation and freeing no longer requires a subroutine call within SLUB. [I am not sure that it is worth doing this because it changes the easy to read structure of slub just to reduce atomic ops. However, there is someone out there with a benchmark on 4 way and 8 way processor systems that seems to show a 5% regression vs. Slab. Seems that the regression is due to increased atomic operations use vs. SLAB in SLUB). I wonder if this is applicable or discernable at all in a real workload?] Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 7 ++- mm/slub.c | 154 ++++++++++++++++++++++++++++++++++++----------- 2 files changed, 123 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index e30687bad075..d5bb1796e12b 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -50,13 +50,16 @@ struct page { spinlock_t ptl; #endif struct { /* SLUB uses */ - struct page *first_page; /* Compound pages */ + void **lockless_freelist; struct kmem_cache *slab; /* Pointer to slab */ }; + struct { + struct page *first_page; /* Compound pages */ + }; }; union { pgoff_t index; /* Our offset within mapping. */ - void *freelist; /* SLUB: pointer to free object */ + void *freelist; /* SLUB: freelist req. slab lock */ }; struct list_head lru; /* Pageout list, eg. active_list * protected by zone->lru_lock ! diff --git a/mm/slub.c b/mm/slub.c index bd2efae02bcd..b07a1cab4f28 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -81,10 +81,14 @@ * PageActive The slab is used as a cpu cache. Allocations * may be performed from the slab. 
The slab is not * on any slab list and cannot be moved onto one. + * The cpu slab may be equipped with an additioanl + * lockless_freelist that allows lockless access to + * free objects in addition to the regular freelist + * that requires the slab lock. * * PageError Slab requires special handling due to debug * options set. This moves slab handling out of - * the fast path. + * the fast path and disables lockless freelists. */ static inline int SlabDebug(struct page *page) @@ -1014,6 +1018,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) set_freepointer(s, last, NULL); page->freelist = start; + page->lockless_freelist = NULL; page->inuse = 0; out: if (flags & __GFP_WAIT) @@ -1276,6 +1281,23 @@ static void putback_slab(struct kmem_cache *s, struct page *page) */ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu) { + /* + * Merge cpu freelist into freelist. Typically we get here + * because both freelists are empty. So this is unlikely + * to occur. + */ + while (unlikely(page->lockless_freelist)) { + void **object; + + /* Retrieve object from cpu_freelist */ + object = page->lockless_freelist; + page->lockless_freelist = page->lockless_freelist[page->offset]; + + /* And put onto the regular freelist */ + object[page->offset] = page->freelist; + page->freelist = object; + page->inuse--; + } s->cpu_slab[cpu] = NULL; ClearPageActive(page); @@ -1322,47 +1344,46 @@ static void flush_all(struct kmem_cache *s) } /* - * slab_alloc is optimized to only modify two cachelines on the fast path - * (aside from the stack): + * Slow path. The lockless freelist is empty or we need to perform + * debugging duties. + * + * Interrupts are disabled. * - * 1. The page struct - * 2. The first cacheline of the object to be allocated. + * Processing is still very fast if new objects have been freed to the + * regular freelist. In that case we simply take over the regular freelist + * as the lockless freelist and zap the regular freelist. * - * The only other cache lines that are read (apart from code) is the - * per cpu array in the kmem_cache struct. + * If that is not working then we fall back to the partial lists. We take the + * first element of the freelist as the object to allocate now and move the + * rest of the freelist to the lockless freelist. * - * Fastpath is not possible if we need to get a new slab or have - * debugging enabled (which means all slabs are marked with SlabDebug) + * And if we were unable to get a new slab from the partial slab lists then + * we need to allocate a new slab. This is slowest path since we may sleep. 
*/ -static void *slab_alloc(struct kmem_cache *s, - gfp_t gfpflags, int node, void *addr) +static void *__slab_alloc(struct kmem_cache *s, + gfp_t gfpflags, int node, void *addr, struct page *page) { - struct page *page; void **object; - unsigned long flags; - int cpu; + int cpu = smp_processor_id(); - local_irq_save(flags); - cpu = smp_processor_id(); - page = s->cpu_slab[cpu]; if (!page) goto new_slab; slab_lock(page); if (unlikely(node != -1 && page_to_nid(page) != node)) goto another_slab; -redo: +load_freelist: object = page->freelist; if (unlikely(!object)) goto another_slab; if (unlikely(SlabDebug(page))) goto debug; -have_object: - page->inuse++; - page->freelist = object[page->offset]; + object = page->freelist; + page->lockless_freelist = object[page->offset]; + page->inuse = s->objects; + page->freelist = NULL; slab_unlock(page); - local_irq_restore(flags); return object; another_slab: @@ -1370,11 +1391,11 @@ another_slab: new_slab: page = get_partial(s, gfpflags, node); - if (likely(page)) { + if (page) { have_slab: s->cpu_slab[cpu] = page; SetPageActive(page); - goto redo; + goto load_freelist; } page = new_slab(s, gfpflags, node); @@ -1397,7 +1418,7 @@ have_slab: discard_slab(s, page); page = s->cpu_slab[cpu]; slab_lock(page); - goto redo; + goto load_freelist; } /* New slab does not fit our expectations */ flush_slab(s, s->cpu_slab[cpu], cpu); @@ -1405,16 +1426,52 @@ have_slab: slab_lock(page); goto have_slab; } - local_irq_restore(flags); return NULL; debug: + object = page->freelist; if (!alloc_object_checks(s, page, object)) goto another_slab; if (s->flags & SLAB_STORE_USER) set_track(s, object, TRACK_ALLOC, addr); trace(s, page, object, 1); init_object(s, object, 1); - goto have_object; + + page->inuse++; + page->freelist = object[page->offset]; + slab_unlock(page); + return object; +} + +/* + * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) + * have the fastpath folded into their functions. So no function call + * overhead for requests that can be satisfied on the fastpath. + * + * The fastpath works by first checking if the lockless freelist can be used. + * If not then __slab_alloc is called for slow processing. + * + * Otherwise we can simply pick the next object from the lockless free list. + */ +static void __always_inline *slab_alloc(struct kmem_cache *s, + gfp_t gfpflags, int node, void *addr) +{ + struct page *page; + void **object; + unsigned long flags; + + local_irq_save(flags); + page = s->cpu_slab[smp_processor_id()]; + if (unlikely(!page || !page->lockless_freelist || + (node != -1 && page_to_nid(page) != node))) + + object = __slab_alloc(s, gfpflags, node, addr, page); + + else { + object = page->lockless_freelist; + page->lockless_freelist = object[page->offset]; + } + local_irq_restore(flags); + return object; } void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) @@ -1432,20 +1489,19 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); #endif /* - * The fastpath only writes the cacheline of the page struct and the first - * cacheline of the object. + * Slow patch handling. This may still be called frequently since objects + * have a longer lifetime than the cpu slabs in most processing loads. * - * We read the cpu_slab cacheline to check if the slab is the per cpu - * slab for this processor. + * So we still attempt to reduce cache line usage. Just take the slab + * lock and free the item. If there is no additional partial page + * handling required then we can return immediately. 
*/ -static void slab_free(struct kmem_cache *s, struct page *page, +static void __slab_free(struct kmem_cache *s, struct page *page, void *x, void *addr) { void *prior; void **object = (void *)x; - unsigned long flags; - local_irq_save(flags); slab_lock(page); if (unlikely(SlabDebug(page))) @@ -1475,7 +1531,6 @@ checks_ok: out_unlock: slab_unlock(page); - local_irq_restore(flags); return; slab_empty: @@ -1487,7 +1542,6 @@ slab_empty: slab_unlock(page); discard_slab(s, page); - local_irq_restore(flags); return; debug: @@ -1502,6 +1556,34 @@ debug: goto checks_ok; } +/* + * Fastpath with forced inlining to produce a kfree and kmem_cache_free that + * can perform fastpath freeing without additional function calls. + * + * The fastpath is only possible if we are freeing to the current cpu slab + * of this processor. This typically the case if we have just allocated + * the item before. + * + * If fastpath is not possible then fall back to __slab_free where we deal + * with all sorts of special processing. + */ +static void __always_inline slab_free(struct kmem_cache *s, + struct page *page, void *x, void *addr) +{ + void **object = (void *)x; + unsigned long flags; + + local_irq_save(flags); + if (likely(page == s->cpu_slab[smp_processor_id()] && + !SlabDebug(page))) { + object[page->offset] = page->lockless_freelist; + page->lockless_freelist = object; + } else + __slab_free(s, page, x, addr); + + local_irq_restore(flags); +} + void kmem_cache_free(struct kmem_cache *s, void *x) { struct page *page; -- cgit v1.2.3 From 218e180e7ea5334e1f94121940ba82cd1f0f4e58 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 10 May 2007 03:15:18 -0700 Subject: add upper-32-bits macro We keep on getting "right shift count >= width of type" warnings when doing things like sector_t s; x = s >> 56; because with CONFIG_LBD=n, s is only 32-bit. Similar problems can occur with dma_addr_t's. So add a simple wrapper function which code can use to avoid this warning. The above example would become x = upper_32_bits(s) >> 24; The first user is in fact AFS. Cc: James Bottomley Cc: "Cameron, Steve" Cc: "Miller, Mike (OS Dev)" Cc: Hisashi Hifumi Cc: Alan Cox Cc: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 144b615f3a89..8645181fca0f 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -41,6 +41,16 @@ extern const char linux_proc_banner[]; #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +/** + * upper_32_bits - return bits 32-63 of a number + * @n: the number we're accessing + * + * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress + * the "right shift count >= width of type" warning when that quantity is + * 32-bits. + */ +#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) + #define KERN_EMERG "<0>" /* system is unusable */ #define KERN_ALERT "<1>" /* action must be taken immediately */ #define KERN_CRIT "<2>" /* critical conditions */ -- cgit v1.2.3 From 572a103ded0ad880f75ce83e99f0512fbb80b5b0 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 8 May 2007 18:34:17 -0700 Subject: [NET] link_watch: Move link watch list into net_device These days the link watch mechanism is an integral part of the network subsystem as it manages the carrier status. 
So it now makes sense to allocate some memory for it in net_device rather than allocating it on demand. In fact, this is necessary because we can't tolerate a memory allocation failure since that means we'd have to potentially throw a link up event away. It also simplifies the code greatly. In doing so I discovered a subtle race condition in the use of singleevent. This race condition still exists (and is somewhat magnified) without singleevent but it's now plugged thanks to an smp_mb__before_clear_bit. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- include/linux/netdevice.h | 2 ++ net/core/link_watch.c | 50 ++++++++++++++--------------------------------- 2 files changed, 17 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 30446222b396..f671cd2f133f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -467,6 +467,8 @@ struct net_device /* device index hash chain */ struct hlist_node index_hlist; + struct net_device *link_watch_next; + /* register/unregister state machine */ enum { NETREG_UNINITIALIZED=0, NETREG_REGISTERED, /* completed register_netdevice */ diff --git a/net/core/link_watch.c b/net/core/link_watch.c index e3c26a9ccad6..71a35da275d4 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -28,7 +27,6 @@ enum lw_bits { LW_RUNNING = 0, - LW_SE_USED }; static unsigned long linkwatch_flags; @@ -37,17 +35,9 @@ static unsigned long linkwatch_nextevent; static void linkwatch_event(struct work_struct *dummy); static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); -static LIST_HEAD(lweventlist); +static struct net_device *lweventlist; static DEFINE_SPINLOCK(lweventlist_lock); -struct lw_event { - struct list_head list; - struct net_device *dev; -}; - -/* Avoid kmalloc() for most systems */ -static struct lw_event singleevent; - static unsigned char default_operstate(const struct net_device *dev) { if (!netif_carrier_ok(dev)) @@ -90,21 +80,23 @@ static void rfc2863_policy(struct net_device *dev) /* Must be called with the rtnl semaphore held */ void linkwatch_run_queue(void) { - struct list_head head, *n, *next; + struct net_device *next; spin_lock_irq(&lweventlist_lock); - list_replace_init(&lweventlist, &head); + next = lweventlist; + lweventlist = NULL; spin_unlock_irq(&lweventlist_lock); - list_for_each_safe(n, next, &head) { - struct lw_event *event = list_entry(n, struct lw_event, list); - struct net_device *dev = event->dev; + while (next) { + struct net_device *dev = next; - if (event == &singleevent) { - clear_bit(LW_SE_USED, &linkwatch_flags); - } else { - kfree(event); - } + next = dev->link_watch_next; + + /* + * Make sure the above read is complete since it can be + * rewritten as soon as we clear the bit below. 
+ */ + smp_mb__before_clear_bit(); /* We are about to handle this device, * so new events can be accepted @@ -147,24 +139,12 @@ void linkwatch_fire_event(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { unsigned long flags; - struct lw_event *event; - - if (test_and_set_bit(LW_SE_USED, &linkwatch_flags)) { - event = kmalloc(sizeof(struct lw_event), GFP_ATOMIC); - - if (unlikely(event == NULL)) { - clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); - return; - } - } else { - event = &singleevent; - } dev_hold(dev); - event->dev = dev; spin_lock_irqsave(&lweventlist_lock, flags); - list_add_tail(&event->list, &lweventlist); + dev->link_watch_next = lweventlist; + lweventlist = dev; spin_unlock_irqrestore(&lweventlist_lock, flags); if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) { -- cgit v1.2.3 From 3c2ad469c317147fc1de19579f8173ddb68a9e91 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Thu, 10 May 2007 14:14:16 -0700 Subject: [NETFILTER]: Clean up table initialization - move arp_tables initial table structure definitions to arp_tables.h similar to ip_tables and ip6_tables - use C99 initializers - use initializer macros where possible Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/netfilter/x_tables.h | 8 ++ include/linux/netfilter_arp/arp_tables.h | 41 +++++++++ include/linux/netfilter_ipv4/ip_tables.h | 22 +++++ include/linux/netfilter_ipv6/ip6_tables.h | 22 +++++ net/ipv4/netfilter/arptable_filter.c | 140 +++++------------------------- net/ipv4/netfilter/iptable_filter.c | 70 +++++---------- net/ipv4/netfilter/iptable_mangle.c | 96 +++++++------------- net/ipv4/netfilter/iptable_raw.c | 58 ++----------- net/ipv4/netfilter/nf_nat_rule.c | 73 ++-------------- net/ipv6/netfilter/ip6table_filter.c | 70 +++++---------- net/ipv6/netfilter/ip6table_mangle.c | 96 +++++++------------- net/ipv6/netfilter/ip6table_raw.c | 52 +---------- 12 files changed, 238 insertions(+), 510 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 022edfa97ed9..7e733a6ba4f6 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -54,6 +54,14 @@ struct xt_entry_target unsigned char data[0]; }; +#define XT_TARGET_INIT(__name, __size) \ +{ \ + .target.u.user = { \ + .target_size = XT_ALIGN(__size), \ + .name = __name, \ + }, \ +} + struct xt_standard_target { struct xt_entry_target target; diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index 24c8786d12e9..584cd1b18f12 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -238,6 +238,47 @@ static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e */ #ifdef __KERNEL__ +/* Standard entry. 
*/ +struct arpt_standard +{ + struct arpt_entry entry; + struct arpt_standard_target target; +}; + +struct arpt_error_target +{ + struct arpt_entry_target target; + char errorname[ARPT_FUNCTION_MAXNAMELEN]; +}; + +struct arpt_error +{ + struct arpt_entry entry; + struct arpt_error_target target; +}; + +#define ARPT_ENTRY_INIT(__size) \ +{ \ + .target_offset = sizeof(struct arpt_entry), \ + .next_offset = (__size), \ +} + +#define ARPT_STANDARD_INIT(__verdict) \ +{ \ + .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \ + .target = XT_TARGET_INIT(ARPT_STANDARD_TARGET, \ + sizeof(struct arpt_standard_target)), \ + .target.verdict = -(__verdict) - 1, \ +} + +#define ARPT_ERROR_INIT \ +{ \ + .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \ + .target = XT_TARGET_INIT(ARPT_ERROR_TARGET, \ + sizeof(struct arpt_error_target)), \ + .target.errorname = "ERROR", \ +} + #define arpt_register_target(tgt) \ ({ (tgt)->family = NF_ARP; \ xt_register_target(tgt); }) diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 9527296595cd..2f46dd728ee1 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -295,6 +295,28 @@ struct ipt_error struct ipt_error_target target; }; +#define IPT_ENTRY_INIT(__size) \ +{ \ + .target_offset = sizeof(struct ipt_entry), \ + .next_offset = (__size), \ +} + +#define IPT_STANDARD_INIT(__verdict) \ +{ \ + .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \ + .target = XT_TARGET_INIT(IPT_STANDARD_TARGET, \ + sizeof(struct xt_standard_target)), \ + .target.verdict = -(__verdict) - 1, \ +} + +#define IPT_ERROR_INIT \ +{ \ + .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \ + .target = XT_TARGET_INIT(IPT_ERROR_TARGET, \ + sizeof(struct ipt_error_target)), \ + .target.errorname = "ERROR", \ +} + extern unsigned int ipt_do_table(struct sk_buff **pskb, unsigned int hook, const struct net_device *in, diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 61aa10412fc8..4686f8342cbd 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -123,6 +123,28 @@ struct ip6t_error struct ip6t_error_target target; }; +#define IP6T_ENTRY_INIT(__size) \ +{ \ + .target_offset = sizeof(struct ip6t_entry), \ + .next_offset = (__size), \ +} + +#define IP6T_STANDARD_INIT(__verdict) \ +{ \ + .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \ + .target = XT_TARGET_INIT(IP6T_STANDARD_TARGET, \ + sizeof(struct ip6t_standard_target)), \ + .target.verdict = -(__verdict) - 1, \ +} + +#define IP6T_ERROR_INIT \ +{ \ + .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \ + .target = XT_TARGET_INIT(IP6T_ERROR_TARGET, \ + sizeof(struct ip6t_error_target)), \ + .target.errorname = "ERROR", \ +} + /* * New IP firewall options for [gs]etsockopt at the RAW IP level. * Unlike BSD Linux inherits IP options so you don't have to use diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c index 7edea2a1696c..75c023062533 100644 --- a/net/ipv4/netfilter/arptable_filter.c +++ b/net/ipv4/netfilter/arptable_filter.c @@ -15,128 +15,34 @@ MODULE_DESCRIPTION("arptables filter table"); #define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \ (1 << NF_ARP_FORWARD)) -/* Standard entry. 
*/ -struct arpt_standard -{ - struct arpt_entry entry; - struct arpt_standard_target target; -}; - -struct arpt_error_target -{ - struct arpt_entry_target target; - char errorname[ARPT_FUNCTION_MAXNAMELEN]; -}; - -struct arpt_error -{ - struct arpt_entry entry; - struct arpt_error_target target; -}; - static struct { struct arpt_replace repl; struct arpt_standard entries[3]; struct arpt_error term; -} initial_table __initdata -= { { "filter", FILTER_VALID_HOOKS, 4, - sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error), - { [NF_ARP_IN] = 0, - [NF_ARP_OUT] = sizeof(struct arpt_standard), - [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard), }, - { [NF_ARP_IN] = 0, - [NF_ARP_OUT] = sizeof(struct arpt_standard), - [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard), }, - 0, NULL, { } }, - { - /* ARP_IN */ - { - { - { - { 0 }, { 0 }, { 0 }, { 0 }, - 0, 0, - { { 0, }, { 0, } }, - { { 0, }, { 0, } }, - 0, 0, - 0, 0, - 0, 0, - "", "", { 0 }, { 0 }, - 0, 0 - }, - sizeof(struct arpt_entry), - sizeof(struct arpt_standard), - 0, - { 0, 0 }, { } }, - { { { { ARPT_ALIGN(sizeof(struct arpt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } - }, - /* ARP_OUT */ - { - { - { - { 0 }, { 0 }, { 0 }, { 0 }, - 0, 0, - { { 0, }, { 0, } }, - { { 0, }, { 0, } }, - 0, 0, - 0, 0, - 0, 0, - "", "", { 0 }, { 0 }, - 0, 0 - }, - sizeof(struct arpt_entry), - sizeof(struct arpt_standard), - 0, - { 0, 0 }, { } }, - { { { { ARPT_ALIGN(sizeof(struct arpt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } - }, - /* ARP_FORWARD */ - { - { - { - { 0 }, { 0 }, { 0 }, { 0 }, - 0, 0, - { { 0, }, { 0, } }, - { { 0, }, { 0, } }, - 0, 0, - 0, 0, - 0, 0, - "", "", { 0 }, { 0 }, - 0, 0 - }, - sizeof(struct arpt_entry), - sizeof(struct arpt_standard), - 0, - { 0, 0 }, { } }, - { { { { ARPT_ALIGN(sizeof(struct arpt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } - } - }, - /* ERROR */ - { - { - { - { 0 }, { 0 }, { 0 }, { 0 }, - 0, 0, - { { 0, }, { 0, } }, - { { 0, }, { 0, } }, - 0, 0, - 0, 0, - 0, 0, - "", "", { 0 }, { 0 }, - 0, 0 - }, - sizeof(struct arpt_entry), - sizeof(struct arpt_error), - 0, - { 0, 0 }, { } }, - { { { { ARPT_ALIGN(sizeof(struct arpt_error_target)), ARPT_ERROR_TARGET } }, - { } }, - "ERROR" - } - } +} initial_table __initdata = { + .repl = { + .name = "filter", + .valid_hooks = FILTER_VALID_HOOKS, + .num_entries = 4, + .size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error), + .hook_entry = { + [NF_ARP_IN] = 0, + [NF_ARP_OUT] = sizeof(struct arpt_standard), + [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard), + }, + .underflow = { + [NF_ARP_IN] = 0, + [NF_ARP_OUT] = sizeof(struct arpt_standard), + [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard), + }, + }, + .entries = { + ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */ + ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */ + ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */ + }, + .term = ARPT_ERROR_INIT, }; static struct arpt_table packet_filter = { diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index 42728909eba0..ea14979d8a82 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c @@ -26,53 +26,29 @@ static struct struct ipt_replace repl; struct ipt_standard entries[3]; struct ipt_error term; -} initial_table __initdata -= { { "filter", FILTER_VALID_HOOKS, 4, - sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), - { [NF_IP_LOCAL_IN] = 0, - [NF_IP_FORWARD] = sizeof(struct ipt_standard), - [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 }, - { 
[NF_IP_LOCAL_IN] = 0, - [NF_IP_FORWARD] = sizeof(struct ipt_standard), - [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 }, - 0, NULL, { } }, - { - /* LOCAL_IN */ - { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ipt_entry), - sizeof(struct ipt_standard), - 0, { 0, 0 }, { } }, - { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* FORWARD */ - { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ipt_entry), - sizeof(struct ipt_standard), - 0, { 0, 0 }, { } }, - { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* LOCAL_OUT */ - { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ipt_entry), - sizeof(struct ipt_standard), - 0, { 0, 0 }, { } }, - { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } } - }, - /* ERROR */ - { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ipt_entry), - sizeof(struct ipt_error), - 0, { 0, 0 }, { } }, - { { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } }, - { } }, - "ERROR" - } - } +} initial_table __initdata = { + .repl = { + .name = "filter", + .valid_hooks = FILTER_VALID_HOOKS, + .num_entries = 4, + .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), + .hook_entry = { + [NF_IP_LOCAL_IN] = 0, + [NF_IP_FORWARD] = sizeof(struct ipt_standard), + [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2, + }, + .underflow = { + [NF_IP_LOCAL_IN] = 0, + [NF_IP_FORWARD] = sizeof(struct ipt_standard), + [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2, + }, + }, + .entries = { + IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ + IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ + IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ + }, + .term = IPT_ERROR_INIT, /* ERROR */ }; static struct xt_table packet_filter = { diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index 9278802f2742..c3827bae3b66 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c @@ -33,73 +33,35 @@ static struct struct ipt_replace repl; struct ipt_standard entries[5]; struct ipt_error term; -} initial_table __initdata -= { { "mangle", MANGLE_VALID_HOOKS, 6, - sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error), - { [NF_IP_PRE_ROUTING] = 0, - [NF_IP_LOCAL_IN] = sizeof(struct ipt_standard), - [NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2, - [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3, - [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4 }, - { [NF_IP_PRE_ROUTING] = 0, - [NF_IP_LOCAL_IN] = sizeof(struct ipt_standard), - [NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2, - [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3, - [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4 }, - 0, NULL, { } }, - { - /* PRE_ROUTING */ - { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ipt_entry), - sizeof(struct ipt_standard), - 0, { 0, 0 }, { } }, - { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* LOCAL_IN */ - { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ipt_entry), - sizeof(struct ipt_standard), - 0, { 0, 0 }, { } }, - { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* FORWARD */ - { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 
0, - sizeof(struct ipt_entry), - sizeof(struct ipt_standard), - 0, { 0, 0 }, { } }, - { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* LOCAL_OUT */ - { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ipt_entry), - sizeof(struct ipt_standard), - 0, { 0, 0 }, { } }, - { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* POST_ROUTING */ - { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ipt_entry), - sizeof(struct ipt_standard), - 0, { 0, 0 }, { } }, - { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - }, - /* ERROR */ - { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ipt_entry), - sizeof(struct ipt_error), - 0, { 0, 0 }, { } }, - { { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } }, - { } }, - "ERROR" - } - } +} initial_table __initdata = { + .repl = { + .name = "mangle", + .valid_hooks = MANGLE_VALID_HOOKS, + .num_entries = 6, + .size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error), + .hook_entry = { + [NF_IP_PRE_ROUTING] = 0, + [NF_IP_LOCAL_IN] = sizeof(struct ipt_standard), + [NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2, + [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3, + [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4, + }, + .underflow = { + [NF_IP_PRE_ROUTING] = 0, + [NF_IP_LOCAL_IN] = sizeof(struct ipt_standard), + [NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2, + [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3, + [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4, + }, + }, + .entries = { + IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */ + IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ + IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ + IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ + IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */ + }, + .term = IPT_ERROR_INIT, /* ERROR */ }; static struct xt_table packet_mangler = { diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index 18c3d4c9ff51..f7d28fd748e2 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c @@ -21,62 +21,18 @@ static struct .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error), .hook_entry = { [NF_IP_PRE_ROUTING] = 0, - [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) }, + [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) + }, .underflow = { [NF_IP_PRE_ROUTING] = 0, - [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) }, + [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) + }, }, .entries = { - /* PRE_ROUTING */ - { - .entry = { - .target_offset = sizeof(struct ipt_entry), - .next_offset = sizeof(struct ipt_standard), - }, - .target = { - .target = { - .u = { - .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), - }, - }, - .verdict = -NF_ACCEPT - 1, - }, - }, - - /* LOCAL_OUT */ - { - .entry = { - .target_offset = sizeof(struct ipt_entry), - .next_offset = sizeof(struct ipt_standard), - }, - .target = { - .target = { - .u = { - .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), - }, - }, - .verdict = -NF_ACCEPT - 1, - }, - }, + IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */ + IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ }, - /* ERROR */ - .term = { - .entry = { - .target_offset = sizeof(struct ipt_entry), - .next_offset = sizeof(struct ipt_error), - }, - .target = { - .target = { - .u = { - .user = { - .target_size = 
IPT_ALIGN(sizeof(struct ipt_error_target)), - .name = IPT_ERROR_TARGET, - }, - }, - }, - .errorname = "ERROR", - }, - } + .term = IPT_ERROR_INIT, /* ERROR */ }; static struct xt_table packet_raw = { diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index 2534f718ab92..07e99e309402 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c @@ -46,77 +46,20 @@ static struct .hook_entry = { [NF_IP_PRE_ROUTING] = 0, [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard), - [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 }, + [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 + }, .underflow = { [NF_IP_PRE_ROUTING] = 0, [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard), - [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 }, + [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 + }, }, .entries = { - /* PRE_ROUTING */ - { - .entry = { - .target_offset = sizeof(struct ipt_entry), - .next_offset = sizeof(struct ipt_standard), - }, - .target = { - .target = { - .u = { - .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), - }, - }, - .verdict = -NF_ACCEPT - 1, - }, - }, - /* POST_ROUTING */ - { - .entry = { - .target_offset = sizeof(struct ipt_entry), - .next_offset = sizeof(struct ipt_standard), - }, - .target = { - .target = { - .u = { - .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), - }, - }, - .verdict = -NF_ACCEPT - 1, - }, - }, - /* LOCAL_OUT */ - { - .entry = { - .target_offset = sizeof(struct ipt_entry), - .next_offset = sizeof(struct ipt_standard), - }, - .target = { - .target = { - .u = { - .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), - }, - }, - .verdict = -NF_ACCEPT - 1, - }, - }, + IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */ + IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */ + IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ }, - /* ERROR */ - .term = { - .entry = { - .target_offset = sizeof(struct ipt_entry), - .next_offset = sizeof(struct ipt_error), - }, - .target = { - .target = { - .u = { - .user = { - .target_size = IPT_ALIGN(sizeof(struct ipt_error_target)), - .name = IPT_ERROR_TARGET, - }, - }, - }, - .errorname = "ERROR", - }, - } + .term = IPT_ERROR_INIT, /* ERROR */ }; static struct xt_table nat_table = { diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index 76f0cf66f95c..7e32e2aaf7f7 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c @@ -24,53 +24,29 @@ static struct struct ip6t_replace repl; struct ip6t_standard entries[3]; struct ip6t_error term; -} initial_table __initdata -= { { "filter", FILTER_VALID_HOOKS, 4, - sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error), - { [NF_IP6_LOCAL_IN] = 0, - [NF_IP6_FORWARD] = sizeof(struct ip6t_standard), - [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2 }, - { [NF_IP6_LOCAL_IN] = 0, - [NF_IP6_FORWARD] = sizeof(struct ip6t_standard), - [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2 }, - 0, NULL, { } }, - { - /* LOCAL_IN */ - { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ip6t_entry), - sizeof(struct ip6t_standard), - 0, { 0, 0 }, { } }, - { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* FORWARD */ - { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ip6t_entry), - sizeof(struct ip6t_standard), - 0, { 0, 0 }, { } }, - { { { { 
IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* LOCAL_OUT */ - { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ip6t_entry), - sizeof(struct ip6t_standard), - 0, { 0, 0 }, { } }, - { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } } - }, - /* ERROR */ - { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ip6t_entry), - sizeof(struct ip6t_error), - 0, { 0, 0 }, { } }, - { { { { IP6T_ALIGN(sizeof(struct ip6t_error_target)), IP6T_ERROR_TARGET } }, - { } }, - "ERROR" - } - } +} initial_table __initdata = { + .repl = { + .name = "filter", + .valid_hooks = FILTER_VALID_HOOKS, + .num_entries = 4, + .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error), + .hook_entry = { + [NF_IP6_LOCAL_IN] = 0, + [NF_IP6_FORWARD] = sizeof(struct ip6t_standard), + [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2 + }, + .underflow = { + [NF_IP6_LOCAL_IN] = 0, + [NF_IP6_FORWARD] = sizeof(struct ip6t_standard), + [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2 + }, + }, + .entries = { + IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ + IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ + IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ + }, + .term = IP6T_ERROR_INIT, /* ERROR */ }; static struct xt_table packet_filter = { diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index a9f10e32c163..f2d26495f413 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -32,73 +32,35 @@ static struct struct ip6t_replace repl; struct ip6t_standard entries[5]; struct ip6t_error term; -} initial_table __initdata -= { { "mangle", MANGLE_VALID_HOOKS, 6, - sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error), - { [NF_IP6_PRE_ROUTING] = 0, - [NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard), - [NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2, - [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3, - [NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4}, - { [NF_IP6_PRE_ROUTING] = 0, - [NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard), - [NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2, - [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3, - [NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4}, - 0, NULL, { } }, - { - /* PRE_ROUTING */ - { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ip6t_entry), - sizeof(struct ip6t_standard), - 0, { 0, 0 }, { } }, - { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* LOCAL_IN */ - { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ip6t_entry), - sizeof(struct ip6t_standard), - 0, { 0, 0 }, { } }, - { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* FORWARD */ - { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ip6t_entry), - sizeof(struct ip6t_standard), - 0, { 0, 0 }, { } }, - { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* LOCAL_OUT */ - { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ip6t_entry), - sizeof(struct ip6t_standard), - 0, { 0, 0 
}, { } }, - { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } }, - /* POST_ROUTING */ - { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ip6t_entry), - sizeof(struct ip6t_standard), - 0, { 0, 0 }, { } }, - { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } }, - -NF_ACCEPT - 1 } } - }, - /* ERROR */ - { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, - 0, - sizeof(struct ip6t_entry), - sizeof(struct ip6t_error), - 0, { 0, 0 }, { } }, - { { { { IP6T_ALIGN(sizeof(struct ip6t_error_target)), IP6T_ERROR_TARGET } }, - { } }, - "ERROR" - } - } +} initial_table __initdata = { + .repl = { + .name = "mangle", + .valid_hooks = MANGLE_VALID_HOOKS, + .num_entries = 6, + .size = sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error), + .hook_entry = { + [NF_IP6_PRE_ROUTING] = 0, + [NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard), + [NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2, + [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3, + [NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4, + }, + .underflow = { + [NF_IP6_PRE_ROUTING] = 0, + [NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard), + [NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2, + [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3, + [NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4, + }, + }, + .entries = { + IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */ + IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ + IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ + IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ + IP6T_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */ + }, + .term = IP6T_ERROR_INIT, /* ERROR */ }; static struct xt_table packet_mangler = { diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c index a3eb5b8ce18d..0acda45d455d 100644 --- a/net/ipv6/netfilter/ip6table_raw.c +++ b/net/ipv6/netfilter/ip6table_raw.c @@ -35,56 +35,10 @@ static struct }, }, .entries = { - /* PRE_ROUTING */ - { - .entry = { - .target_offset = sizeof(struct ip6t_entry), - .next_offset = sizeof(struct ip6t_standard), - }, - .target = { - .target = { - .u = { - .target_size = IP6T_ALIGN(sizeof(struct ip6t_standard_target)), - }, - }, - .verdict = -NF_ACCEPT - 1, - }, - }, - - /* LOCAL_OUT */ - { - .entry = { - .target_offset = sizeof(struct ip6t_entry), - .next_offset = sizeof(struct ip6t_standard), - }, - .target = { - .target = { - .u = { - .target_size = IP6T_ALIGN(sizeof(struct ip6t_standard_target)), - }, - }, - .verdict = -NF_ACCEPT - 1, - }, - }, + IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */ + IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ }, - /* ERROR */ - .term = { - .entry = { - .target_offset = sizeof(struct ip6t_entry), - .next_offset = sizeof(struct ip6t_error), - }, - .target = { - .target = { - .u = { - .user = { - .target_size = IP6T_ALIGN(sizeof(struct ip6t_error_target)), - .name = IP6T_ERROR_TARGET, - }, - }, - }, - .errorname = "ERROR", - }, - } + .term = IP6T_ERROR_INIT, /* ERROR */ }; static struct xt_table packet_raw = { -- cgit v1.2.3 From a5cb013da773a67ee48d1c19e96436c22a73a7eb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 20 Mar 2007 13:58:35 -0400 Subject: [PATCH] auditing ptrace Signed-off-by: Al Viro --- include/linux/audit.h | 10 ++++++++++ kernel/auditsc.c | 29 +++++++++++++++++++++++++++++ kernel/ptrace.c | 3 +++ 3 files changed, 42 insertions(+) (limited to 'include/linux') diff --git 
a/include/linux/audit.h b/include/linux/audit.h index 773e30df11ee..f93ce78cecbb 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -91,6 +91,7 @@ #define AUDIT_MQ_GETSETATTR 1315 /* POSIX MQ get/set attribute record type */ #define AUDIT_KERNEL_OTHER 1316 /* For use by 3rd party modules */ #define AUDIT_FD_PAIR 1317 /* audit record for pipe/socketpair */ +#define AUDIT_OBJ_PID 1318 /* ptrace target */ #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ @@ -352,6 +353,8 @@ extern void __audit_inode(const char *name, const struct inode *inode); extern void __audit_inode_child(const char *dname, const struct inode *inode, const struct inode *parent); extern void __audit_inode_update(const struct inode *inode); +extern void __audit_ptrace(struct task_struct *t); + static inline int audit_dummy_context(void) { void *p = current->audit_context; @@ -377,6 +380,12 @@ static inline void audit_inode_update(const struct inode *inode) { __audit_inode_update(inode); } +static inline void audit_ptrace(struct task_struct *t) +{ + if (unlikely(!audit_dummy_context())) + __audit_ptrace(t); +} + /* Private API (for audit.c only) */ extern unsigned int audit_serial(void); extern void auditsc_get_stamp(struct audit_context *ctx, @@ -477,6 +486,7 @@ extern int audit_n_rules; #define audit_mq_timedreceive(d,l,p,t) ({ 0; }) #define audit_mq_notify(d,n) ({ 0; }) #define audit_mq_getsetattr(d,s) ({ 0; }) +#define audit_ptrace(t) ((void)0) #define audit_n_rules 0 #endif diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 628c7ac590a0..2243c559bc03 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -209,6 +209,9 @@ struct audit_context { unsigned long personality; int arch; + pid_t target_pid; + u32 target_sid; + #if AUDIT_DEBUG int put_count; int ino_count; @@ -973,6 +976,23 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts audit_log_end(ab); } + if (context->target_pid) { + ab =audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID); + if (ab) { + char *s = NULL, *t; + u32 len; + if (selinux_sid_to_string(context->target_sid, + &s, &len)) + t = "(none)"; + else + t = s; + audit_log_format(ab, "opid=%d obj=%s", + context->target_pid, t); + audit_log_end(ab); + kfree(s); + } + } + if (context->pwd && context->pwdmnt) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); if (ab) { @@ -1193,6 +1213,7 @@ void audit_syscall_exit(int valid, long return_code) } else { audit_free_names(context); audit_free_aux(context); + context->target_pid = 0; kfree(context->filterkey); context->filterkey = NULL; tsk->audit_context = context; @@ -1880,6 +1901,14 @@ int audit_sockaddr(int len, void *a) return 0; } +void __audit_ptrace(struct task_struct *t) +{ + struct audit_context *context = current->audit_context; + + context->target_pid = t->pid; + selinux_get_task_sid(t, &context->target_sid); +} + /** * audit_avc_path - record the granting or denial of permissions * @dentry: dentry to record diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 4d50e06fd745..ad7949a589dd 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -161,6 +162,8 @@ int ptrace_attach(struct task_struct *task) { int retval; + audit_ptrace(task); + retval = -EPERM; if (task->pid <= 1) goto out; -- cgit v1.2.3 From 7f13da40e36c84d0d046b7adbd060af7d3717250 Mon Sep 17 00:00:00 2001 From: Amy Griffis Date: Thu, 29 Mar 2007 18:00:37 -0400 Subject: [PATCH] add SIGNAL 
syscall class (v3) Add a syscall class for sending signals. Signed-off-by: Amy Griffis Signed-off-by: Al Viro --- arch/ia64/ia32/audit.c | 5 +++++ arch/ia64/kernel/audit.c | 8 ++++++++ arch/powerpc/kernel/audit.c | 8 ++++++++ arch/powerpc/kernel/compat_audit.c | 5 +++++ arch/s390/kernel/audit.c | 8 ++++++++ arch/s390/kernel/compat_audit.c | 5 +++++ arch/sparc64/kernel/audit.c | 8 ++++++++ arch/sparc64/kernel/compat_audit.c | 5 +++++ arch/x86_64/ia32/audit.c | 5 +++++ arch/x86_64/kernel/audit.c | 8 ++++++++ include/asm-generic/audit_signal.h | 3 +++ include/linux/audit.h | 2 ++ lib/audit.c | 6 ++++++ 13 files changed, 76 insertions(+) create mode 100644 include/asm-generic/audit_signal.h (limited to 'include/linux') diff --git a/arch/ia64/ia32/audit.c b/arch/ia64/ia32/audit.c index 92d7d0c8d93f..8850fe40ea34 100644 --- a/arch/ia64/ia32/audit.c +++ b/arch/ia64/ia32/audit.c @@ -20,6 +20,11 @@ unsigned ia32_read_class[] = { ~0U }; +unsigned ia32_signal_class[] = { +#include +~0U +}; + int ia32_classify_syscall(unsigned syscall) { switch(syscall) { diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c index 04682555a28c..538312adc53b 100644 --- a/arch/ia64/kernel/audit.c +++ b/arch/ia64/kernel/audit.c @@ -23,6 +23,11 @@ static unsigned chattr_class[] = { ~0U }; +static unsigned signal_class[] = { +#include +~0U +}; + int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_IA32_SUPPORT @@ -49,15 +54,18 @@ static int __init audit_classes_init(void) extern __u32 ia32_write_class[]; extern __u32 ia32_read_class[]; extern __u32 ia32_chattr_class[]; + extern __u32 ia32_signal_class[]; audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class); audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class); audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class); #endif audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } diff --git a/arch/powerpc/kernel/audit.c b/arch/powerpc/kernel/audit.c index 7fe5e6300e9a..66d54bada0bf 100644 --- a/arch/powerpc/kernel/audit.c +++ b/arch/powerpc/kernel/audit.c @@ -23,6 +23,11 @@ static unsigned chattr_class[] = { ~0U }; +static unsigned signal_class[] = { +#include +~0U +}; + int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_PPC64 @@ -51,15 +56,18 @@ static int __init audit_classes_init(void) extern __u32 ppc32_write_class[]; extern __u32 ppc32_read_class[]; extern __u32 ppc32_chattr_class[]; + extern __u32 ppc32_signal_class[]; audit_register_class(AUDIT_CLASS_WRITE_32, ppc32_write_class); audit_register_class(AUDIT_CLASS_READ_32, ppc32_read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ppc32_dir_class); audit_register_class(AUDIT_CLASS_CHATTR_32, ppc32_chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL_32, ppc32_signal_class); #endif audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } diff --git a/arch/powerpc/kernel/compat_audit.c b/arch/powerpc/kernel/compat_audit.c index 640d4bb29321..108ff14e2122 100644 
--- a/arch/powerpc/kernel/compat_audit.c +++ b/arch/powerpc/kernel/compat_audit.c @@ -21,6 +21,11 @@ unsigned ppc32_read_class[] = { ~0U }; +unsigned ppc32_signal_class[] = { +#include +~0U +}; + int ppc32_classify_syscall(unsigned syscall) { switch(syscall) { diff --git a/arch/s390/kernel/audit.c b/arch/s390/kernel/audit.c index 0741d9193390..7affafe626d2 100644 --- a/arch/s390/kernel/audit.c +++ b/arch/s390/kernel/audit.c @@ -23,6 +23,11 @@ static unsigned chattr_class[] = { ~0U }; +static unsigned signal_class[] = { +#include +~0U +}; + int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_COMPAT @@ -51,15 +56,18 @@ static int __init audit_classes_init(void) extern __u32 s390_write_class[]; extern __u32 s390_read_class[]; extern __u32 s390_chattr_class[]; + extern __u32 s390_signal_class[]; audit_register_class(AUDIT_CLASS_WRITE_32, s390_write_class); audit_register_class(AUDIT_CLASS_READ_32, s390_read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE_32, s390_dir_class); audit_register_class(AUDIT_CLASS_CHATTR_32, s390_chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL_32, s390_signal_class); #endif audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } diff --git a/arch/s390/kernel/compat_audit.c b/arch/s390/kernel/compat_audit.c index 16d9436bfa91..0569f5126e49 100644 --- a/arch/s390/kernel/compat_audit.c +++ b/arch/s390/kernel/compat_audit.c @@ -21,6 +21,11 @@ unsigned s390_read_class[] = { ~0U }; +unsigned s390_signal_class[] = { +#include +~0U +}; + int s390_classify_syscall(unsigned syscall) { switch(syscall) { diff --git a/arch/sparc64/kernel/audit.c b/arch/sparc64/kernel/audit.c index aef19cc27072..d57a9dad0ab7 100644 --- a/arch/sparc64/kernel/audit.c +++ b/arch/sparc64/kernel/audit.c @@ -23,6 +23,11 @@ static unsigned chattr_class[] = { ~0U }; +static unsigned signal_class[] = { +#include +~0U +}; + int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_SPARC32_COMPAT @@ -51,15 +56,18 @@ static int __init audit_classes_init(void) extern __u32 sparc32_write_class[]; extern __u32 sparc32_read_class[]; extern __u32 sparc32_chattr_class[]; + extern __u32 sparc32_signal_class[]; audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class); audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class); audit_register_class(AUDIT_CLASS_CHATTR_32, sparc32_chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL_32, sparc32_signal_class); #endif audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } diff --git a/arch/sparc64/kernel/compat_audit.c b/arch/sparc64/kernel/compat_audit.c index cca96c91b780..c1979482aa92 100644 --- a/arch/sparc64/kernel/compat_audit.c +++ b/arch/sparc64/kernel/compat_audit.c @@ -20,6 +20,11 @@ unsigned sparc32_read_class[] = { ~0U }; +unsigned sparc32_signal_class[] = { +#include +~0U +}; + int sparc32_classify_syscall(unsigned syscall) { switch(syscall) { diff --git a/arch/x86_64/ia32/audit.c b/arch/x86_64/ia32/audit.c index 92d7d0c8d93f..8850fe40ea34 100644 --- a/arch/x86_64/ia32/audit.c +++ 
b/arch/x86_64/ia32/audit.c @@ -20,6 +20,11 @@ unsigned ia32_read_class[] = { ~0U }; +unsigned ia32_signal_class[] = { +#include +~0U +}; + int ia32_classify_syscall(unsigned syscall) { switch(syscall) { diff --git a/arch/x86_64/kernel/audit.c b/arch/x86_64/kernel/audit.c index 21f33387bef3..b970de66ee59 100644 --- a/arch/x86_64/kernel/audit.c +++ b/arch/x86_64/kernel/audit.c @@ -23,6 +23,11 @@ static unsigned chattr_class[] = { ~0U }; +static unsigned signal_class[] = { +#include +~0U +}; + int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_IA32_EMULATION @@ -49,15 +54,18 @@ static int __init audit_classes_init(void) extern __u32 ia32_write_class[]; extern __u32 ia32_read_class[]; extern __u32 ia32_chattr_class[]; + extern __u32 ia32_signal_class[]; audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class); audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class); audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class); #endif audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } diff --git a/include/asm-generic/audit_signal.h b/include/asm-generic/audit_signal.h new file mode 100644 index 000000000000..6feab7f18a53 --- /dev/null +++ b/include/asm-generic/audit_signal.h @@ -0,0 +1,3 @@ +__NR_kill, +__NR_tgkill, +__NR_tkill, diff --git a/include/linux/audit.h b/include/linux/audit.h index f93ce78cecbb..f165308254dc 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -149,6 +149,8 @@ #define AUDIT_CLASS_READ_32 5 #define AUDIT_CLASS_WRITE 6 #define AUDIT_CLASS_WRITE_32 7 +#define AUDIT_CLASS_SIGNAL 8 +#define AUDIT_CLASS_SIGNAL_32 9 /* This bitmask is used to validate user input. It represents all bits that * are currently used in an audit field constant understood by the kernel. diff --git a/lib/audit.c b/lib/audit.c index 3b1289fadf06..50e915258515 100644 --- a/lib/audit.c +++ b/lib/audit.c @@ -23,6 +23,11 @@ static unsigned chattr_class[] = { ~0U }; +static unsigned signal_class[] = { +#include +~0U +}; + int audit_classify_syscall(int abi, unsigned syscall) { switch(syscall) { @@ -49,6 +54,7 @@ static int __init audit_classes_init(void) audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } -- cgit v1.2.3 From e54dc2431d740a79a6bd013babade99d71b1714f Mon Sep 17 00:00:00 2001 From: Amy Griffis Date: Thu, 29 Mar 2007 18:01:04 -0400 Subject: [PATCH] audit signal recipients When auditing syscalls that send signals, log the pid and security context for each target process. Optimize the data collection by adding a counter for signal-related rules, and avoiding allocating an aux struct unless we have more than one target process. For process groups, collect pid/context data in blocks of 16. Move the audit_signal_info() hook up in check_kill_permission() so we audit attempts where permission is denied. 
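The two-level collection scheme described above can be sketched in a few lines of ordinary user-space C. This is only an illustration of the pattern (first recipient stored inline, later recipients spilled into heap-allocated blocks of 16); the names record_ctx, target_block, record_target and TARGETS_PER_BLOCK are invented for the example and are not the kernel's own:

#include <stdio.h>
#include <stdlib.h>

#define TARGETS_PER_BLOCK 16

struct target_block {
	struct target_block *next;
	int pids[TARGETS_PER_BLOCK];
	int count;
};

struct record_ctx {
	int first_pid;			/* common case: one recipient, no allocation */
	struct target_block *blocks;	/* overflow recipients, 16 per block */
};

static int record_target(struct record_ctx *ctx, int pid)
{
	struct target_block *b = ctx->blocks;

	if (!ctx->first_pid) {		/* fast path for the first recipient */
		ctx->first_pid = pid;
		return 0;
	}
	if (!b || b->count == TARGETS_PER_BLOCK) {
		b = calloc(1, sizeof(*b));
		if (!b)
			return -1;
		b->next = ctx->blocks;
		ctx->blocks = b;
	}
	b->pids[b->count++] = pid;
	return 0;
}

int main(void)
{
	struct record_ctx ctx = { 0, NULL };
	struct target_block *b;
	int i, nblocks = 0;

	for (i = 100; i < 120; i++)	/* e.g. a signal delivered to a process group */
		record_target(&ctx, i);

	for (b = ctx.blocks; b; b = b->next)
		nblocks++;
	printf("first recipient %d, %d overflow block(s)\n", ctx.first_pid, nblocks);

	while (ctx.blocks) {		/* tidy up */
		b = ctx.blocks;
		ctx.blocks = b->next;
		free(b);
	}
	return 0;
}

The point of the split is that the common case, a signal with exactly one recipient, records its data without allocating anything in the delivery path; only group signals pay for the extra blocks.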
Signed-off-by: Amy Griffis Signed-off-by: Al Viro --- arch/ia64/kernel/audit.c | 9 ++++ arch/powerpc/kernel/audit.c | 9 ++++ arch/s390/kernel/audit.c | 9 ++++ arch/sparc64/kernel/audit.c | 9 ++++ arch/x86_64/kernel/audit.c | 9 ++++ include/linux/audit.h | 3 ++ kernel/audit.h | 13 ++++-- kernel/auditfilter.c | 48 ++++++++++++++++++- kernel/auditsc.c | 111 ++++++++++++++++++++++++++++++++++++-------- kernel/signal.c | 10 ++-- lib/audit.c | 5 ++ 11 files changed, 206 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c index 538312adc53b..f3802ae89b10 100644 --- a/arch/ia64/kernel/audit.c +++ b/arch/ia64/kernel/audit.c @@ -28,6 +28,15 @@ static unsigned signal_class[] = { ~0U }; +int audit_classify_arch(int arch) +{ +#ifdef CONFIG_IA32_SUPPORT + if (arch == AUDIT_ARCH_I386) + return 1; +#endif + return 0; +} + int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_IA32_SUPPORT diff --git a/arch/powerpc/kernel/audit.c b/arch/powerpc/kernel/audit.c index 66d54bada0bf..a4dab7cab348 100644 --- a/arch/powerpc/kernel/audit.c +++ b/arch/powerpc/kernel/audit.c @@ -28,6 +28,15 @@ static unsigned signal_class[] = { ~0U }; +int audit_classify_arch(int arch) +{ +#ifdef CONFIG_PPC64 + if (arch == AUDIT_ARCH_PPC) + return 1; +#endif + return 0; +} + int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_PPC64 diff --git a/arch/s390/kernel/audit.c b/arch/s390/kernel/audit.c index 7affafe626d2..d1c76fe10f29 100644 --- a/arch/s390/kernel/audit.c +++ b/arch/s390/kernel/audit.c @@ -28,6 +28,15 @@ static unsigned signal_class[] = { ~0U }; +int audit_classify_arch(int arch) +{ +#ifdef CONFIG_COMPAT + if (arch == AUDIT_ARCH_S390) + return 1; +#endif + return 0; +} + int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_COMPAT diff --git a/arch/sparc64/kernel/audit.c b/arch/sparc64/kernel/audit.c index d57a9dad0ab7..24d7f4b4178a 100644 --- a/arch/sparc64/kernel/audit.c +++ b/arch/sparc64/kernel/audit.c @@ -28,6 +28,15 @@ static unsigned signal_class[] = { ~0U }; +int audit_classify_arch(int arch) +{ +#ifdef CONFIG_SPARC32_COMPAT + if (arch == AUDIT_ARCH_SPARC) + return 1; +#endif + return 0; +} + int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_SPARC32_COMPAT diff --git a/arch/x86_64/kernel/audit.c b/arch/x86_64/kernel/audit.c index b970de66ee59..06d3e5a14d9d 100644 --- a/arch/x86_64/kernel/audit.c +++ b/arch/x86_64/kernel/audit.c @@ -28,6 +28,15 @@ static unsigned signal_class[] = { ~0U }; +int audit_classify_arch(int arch) +{ +#ifdef CONFIG_IA32_EMULATION + if (arch == AUDIT_ARCH_I386) + return 1; +#endif + return 0; +} + int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_IA32_EMULATION diff --git a/include/linux/audit.h b/include/linux/audit.h index f165308254dc..fde0f1420cd2 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -340,6 +340,7 @@ struct mqstat; #define AUDITSC_RESULT(x) ( ((long)(x))<0?AUDITSC_FAILURE:AUDITSC_SUCCESS ) extern int __init audit_register_class(int class, unsigned *list); extern int audit_classify_syscall(int abi, unsigned syscall); +extern int audit_classify_arch(int arch); #ifdef CONFIG_AUDITSYSCALL /* These are defined in auditsc.c */ /* Public API */ @@ -458,6 +459,7 @@ static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) return 0; } extern int audit_n_rules; +extern int audit_signals; #else #define audit_alloc(t) ({ 0; }) #define audit_free(t) do { ; } while (0) @@ -490,6 +492,7 @@ extern 
int audit_n_rules; #define audit_mq_getsetattr(d,s) ({ 0; }) #define audit_ptrace(t) ((void)0) #define audit_n_rules 0 +#define audit_signals 0 #endif #ifdef CONFIG_AUDIT diff --git a/kernel/audit.h b/kernel/audit.h index a3370232a390..815d6f5c04ee 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -83,6 +83,7 @@ struct audit_krule { u32 field_count; char *filterkey; /* ties events to rules */ struct audit_field *fields; + struct audit_field *arch_f; /* quick access to arch field */ struct audit_field *inode_f; /* quick access to an inode field */ struct audit_watch *watch; /* associated watch */ struct list_head rlist; /* entry in audit_watch.rules list */ @@ -131,17 +132,19 @@ extern void audit_handle_ievent(struct inotify_watch *, u32, u32, u32, extern int selinux_audit_rule_update(void); #ifdef CONFIG_AUDITSYSCALL -extern void __audit_signal_info(int sig, struct task_struct *t); -static inline void audit_signal_info(int sig, struct task_struct *t) +extern int __audit_signal_info(int sig, struct task_struct *t); +static inline int audit_signal_info(int sig, struct task_struct *t) { - if (unlikely(audit_pid && t->tgid == audit_pid)) - __audit_signal_info(sig, t); + if (unlikely((audit_pid && t->tgid == audit_pid) || + (audit_signals && !audit_dummy_context()))) + return __audit_signal_info(sig, t); + return 0; } extern enum audit_state audit_filter_inodes(struct task_struct *, struct audit_context *); extern void audit_set_auditable(struct audit_context *); #else -#define audit_signal_info(s,t) +#define audit_signal_info(s,t) AUDIT_DISABLED #define audit_filter_inodes(t,c) AUDIT_DISABLED #define audit_set_auditable(c) #endif diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 3749193aed8c..6c61263ff96d 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -311,6 +311,43 @@ int audit_match_class(int class, unsigned syscall) return classes[class][AUDIT_WORD(syscall)] & AUDIT_BIT(syscall); } +static inline int audit_match_class_bits(int class, u32 *mask) +{ + int i; + + if (classes[class]) { + for (i = 0; i < AUDIT_BITMASK_SIZE; i++) + if (mask[i] & classes[class][i]) + return 0; + } + return 1; +} + +static int audit_match_signal(struct audit_entry *entry) +{ + struct audit_field *arch = entry->rule.arch_f; + + if (!arch) { + /* When arch is unspecified, we must check both masks on biarch + * as syscall number alone is ambiguous. */ + return (audit_match_class_bits(AUDIT_CLASS_SIGNAL, + entry->rule.mask) && + audit_match_class_bits(AUDIT_CLASS_SIGNAL_32, + entry->rule.mask)); + } + + switch(audit_classify_arch(arch->val)) { + case 0: /* native */ + return (audit_match_class_bits(AUDIT_CLASS_SIGNAL, + entry->rule.mask)); + case 1: /* 32bit on biarch */ + return (audit_match_class_bits(AUDIT_CLASS_SIGNAL_32, + entry->rule.mask)); + default: + return 1; + } +} + /* Common user-space to kernel rule translation. 
*/ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule) { @@ -429,6 +466,7 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule) err = -EINVAL; goto exit_free; } + entry->rule.arch_f = f; break; case AUDIT_PERM: if (f->val & ~15) @@ -519,7 +557,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, case AUDIT_FSGID: case AUDIT_LOGINUID: case AUDIT_PERS: - case AUDIT_ARCH: case AUDIT_MSGTYPE: case AUDIT_PPID: case AUDIT_DEVMAJOR: @@ -531,6 +568,9 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, case AUDIT_ARG2: case AUDIT_ARG3: break; + case AUDIT_ARCH: + entry->rule.arch_f = f; + break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: @@ -1221,6 +1261,9 @@ static inline int audit_add_rule(struct audit_entry *entry, #ifdef CONFIG_AUDITSYSCALL if (!dont_count) audit_n_rules++; + + if (!audit_match_signal(entry)) + audit_signals++; #endif mutex_unlock(&audit_filter_mutex); @@ -1294,6 +1337,9 @@ static inline int audit_del_rule(struct audit_entry *entry, #ifdef CONFIG_AUDITSYSCALL if (!dont_count) audit_n_rules--; + + if (!audit_match_signal(entry)) + audit_signals--; #endif mutex_unlock(&audit_filter_mutex); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 2243c559bc03..6aff0df75568 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -89,6 +89,9 @@ extern int audit_enabled; /* number of audit rules */ int audit_n_rules; +/* determines whether we collect data for signals sent */ +int audit_signals; + /* When fs/namei.c:getname() is called, we store the pointer in name and * we don't let putname() free it (instead we free all of the saved * pointers at syscall exit time). @@ -114,6 +117,9 @@ struct audit_aux_data { #define AUDIT_AUX_IPCPERM 0 +/* Number of target pids per aux struct. */ +#define AUDIT_AUX_PIDS 16 + struct audit_aux_data_mq_open { struct audit_aux_data d; int oflag; @@ -181,6 +187,13 @@ struct audit_aux_data_path { struct vfsmount *mnt; }; +struct audit_aux_data_pids { + struct audit_aux_data d; + pid_t target_pid[AUDIT_AUX_PIDS]; + u32 target_sid[AUDIT_AUX_PIDS]; + int pid_count; +}; + /* The per-task audit context. 
*/ struct audit_context { int dummy; /* must be the first element */ @@ -201,6 +214,7 @@ struct audit_context { struct vfsmount * pwdmnt; struct audit_context *previous; /* For nested syscalls */ struct audit_aux_data *aux; + struct audit_aux_data *aux_pids; /* Save things to print about task_struct */ pid_t pid, ppid; @@ -657,6 +671,10 @@ static inline void audit_free_aux(struct audit_context *context) context->aux = aux->next; kfree(aux); } + while ((aux = context->aux_pids)) { + context->aux_pids = aux->next; + kfree(aux); + } } static inline void audit_zero_context(struct audit_context *context, @@ -798,6 +816,29 @@ static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk audit_log_task_context(ab); } +static int audit_log_pid_context(struct audit_context *context, pid_t pid, + u32 sid) +{ + struct audit_buffer *ab; + char *s = NULL; + u32 len; + int rc = 0; + + ab = audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID); + if (!ab) + return 1; + + if (selinux_sid_to_string(sid, &s, &len)) { + audit_log_format(ab, "opid=%d obj=(none)", pid); + rc = 1; + } else + audit_log_format(ab, "opid=%d obj=%s", pid, s); + audit_log_end(ab); + kfree(s); + + return rc; +} + static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) { int i, call_panic = 0; @@ -976,23 +1017,21 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts audit_log_end(ab); } - if (context->target_pid) { - ab =audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID); - if (ab) { - char *s = NULL, *t; - u32 len; - if (selinux_sid_to_string(context->target_sid, - &s, &len)) - t = "(none)"; - else - t = s; - audit_log_format(ab, "opid=%d obj=%s", - context->target_pid, t); - audit_log_end(ab); - kfree(s); - } + for (aux = context->aux_pids; aux; aux = aux->next) { + struct audit_aux_data_pids *axs = (void *)aux; + int i; + + for (i = 0; i < axs->pid_count; i++) + if (audit_log_pid_context(context, axs->target_pid[i], + axs->target_sid[i])) + call_panic = 1; } + if (context->target_pid && + audit_log_pid_context(context, context->target_pid, + context->target_sid)) + call_panic = 1; + if (context->pwd && context->pwdmnt) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); if (ab) { @@ -1213,7 +1252,10 @@ void audit_syscall_exit(int valid, long return_code) } else { audit_free_names(context); audit_free_aux(context); + context->aux = NULL; + context->aux_pids = NULL; context->target_pid = 0; + context->target_sid = 0; kfree(context->filterkey); context->filterkey = NULL; tsk->audit_context = context; @@ -1947,15 +1989,17 @@ int audit_avc_path(struct dentry *dentry, struct vfsmount *mnt) * If the audit subsystem is being terminated, record the task (pid) * and uid that is doing that. 
*/ -void __audit_signal_info(int sig, struct task_struct *t) +int __audit_signal_info(int sig, struct task_struct *t) { + struct audit_aux_data_pids *axp; + struct task_struct *tsk = current; + struct audit_context *ctx = tsk->audit_context; extern pid_t audit_sig_pid; extern uid_t audit_sig_uid; extern u32 audit_sig_sid; - if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) { - struct task_struct *tsk = current; - struct audit_context *ctx = tsk->audit_context; + if (audit_pid && t->tgid == audit_pid && + (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1)) { audit_sig_pid = tsk->pid; if (ctx) audit_sig_uid = ctx->loginuid; @@ -1963,4 +2007,33 @@ void __audit_signal_info(int sig, struct task_struct *t) audit_sig_uid = tsk->uid; selinux_get_task_sid(tsk, &audit_sig_sid); } + + if (!audit_signals) /* audit_context checked in wrapper */ + return 0; + + /* optimize the common case by putting first signal recipient directly + * in audit_context */ + if (!ctx->target_pid) { + ctx->target_pid = t->tgid; + selinux_get_task_sid(t, &ctx->target_sid); + return 0; + } + + axp = (void *)ctx->aux_pids; + if (!axp || axp->pid_count == AUDIT_AUX_PIDS) { + axp = kzalloc(sizeof(*axp), GFP_ATOMIC); + if (!axp) + return -ENOMEM; + + axp->d.type = AUDIT_OBJ_PID; + axp->d.next = ctx->aux_pids; + ctx->aux_pids = (void *)axp; + } + BUG_ON(axp->pid_count > AUDIT_AUX_PIDS); + + axp->target_pid[axp->pid_count] = t->tgid; + selinux_get_task_sid(t, &axp->target_sid[axp->pid_count]); + axp->pid_count++; + + return 0; } diff --git a/kernel/signal.c b/kernel/signal.c index 2ac3a668d9dd..c43a3f19d477 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -497,6 +497,11 @@ static int check_kill_permission(int sig, struct siginfo *info, int error = -EINVAL; if (!valid_signal(sig)) return error; + + error = audit_signal_info(sig, t); /* Let audit system see the signal */ + if (error) + return error; + error = -EPERM; if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) && ((sig != SIGCONT) || @@ -506,10 +511,7 @@ static int check_kill_permission(int sig, struct siginfo *info, && !capable(CAP_KILL)) return error; - error = security_task_kill(t, info, sig, 0); - if (!error) - audit_signal_info(sig, t); /* Let audit system see the signal */ - return error; + return security_task_kill(t, info, sig, 0); } /* forward decl */ diff --git a/lib/audit.c b/lib/audit.c index 50e915258515..8e7dc1c63aa9 100644 --- a/lib/audit.c +++ b/lib/audit.c @@ -28,6 +28,11 @@ static unsigned signal_class[] = { ~0U }; +int audit_classify_arch(int arch) +{ + return 0; +} + int audit_classify_syscall(int abi, unsigned syscall) { switch(syscall) { -- cgit v1.2.3 From 4fc03b9beb2314f3adb9e72b7935a80c577954d1 Mon Sep 17 00:00:00 2001 From: Amy Griffis Date: Tue, 13 Feb 2007 14:15:01 -0500 Subject: [PATCH] complete message queue auditing Handle the edge cases for POSIX message queue auditing. Collect inode info when opening an existing mq, and for send/receive operations. Remove audit_inode_update() as it has really evolved into the equivalent of audit_inode(). Signed-off-by: Amy Griffis Signed-off-by: Al Viro --- fs/namei.c | 2 +- include/linux/audit.h | 7 ------- ipc/mqueue.c | 4 ++++ kernel/auditsc.c | 27 --------------------------- 4 files changed, 5 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/fs/namei.c b/fs/namei.c index b3780e3fc88e..5e2d98d10c5d 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1719,7 +1719,7 @@ do_last: * It already exists. 
*/ mutex_unlock(&dir->d_inode->i_mutex); - audit_inode_update(path.dentry->d_inode); + audit_inode(pathname, path.dentry->d_inode); error = -EEXIST; if (flag & O_EXCL) diff --git a/include/linux/audit.h b/include/linux/audit.h index fde0f1420cd2..22976ddbd264 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -355,7 +355,6 @@ extern void audit_putname(const char *name); extern void __audit_inode(const char *name, const struct inode *inode); extern void __audit_inode_child(const char *dname, const struct inode *inode, const struct inode *parent); -extern void __audit_inode_update(const struct inode *inode); extern void __audit_ptrace(struct task_struct *t); static inline int audit_dummy_context(void) @@ -378,10 +377,6 @@ static inline void audit_inode_child(const char *dname, if (unlikely(!audit_dummy_context())) __audit_inode_child(dname, inode, parent); } -static inline void audit_inode_update(const struct inode *inode) { - if (unlikely(!audit_dummy_context())) - __audit_inode_update(inode); -} static inline void audit_ptrace(struct task_struct *t) { @@ -470,10 +465,8 @@ extern int audit_signals; #define audit_putname(n) do { ; } while (0) #define __audit_inode(n,i) do { ; } while (0) #define __audit_inode_child(d,i,p) do { ; } while (0) -#define __audit_inode_update(i) do { ; } while (0) #define audit_inode(n,i) do { ; } while (0) #define audit_inode_child(d,i,p) do { ; } while (0) -#define audit_inode_update(i) do { ; } while (0) #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) #define audit_get_loginuid(c) ({ -1; }) #define audit_log_task_context(b) do { ; } while (0) diff --git a/ipc/mqueue.c b/ipc/mqueue.c index d17821d3f483..fab5707cb5f7 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -681,6 +681,7 @@ asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode, if (oflag & O_CREAT) { if (dentry->d_inode) { /* entry already exists */ + audit_inode(name, dentry->d_inode); error = -EEXIST; if (oflag & O_EXCL) goto out; @@ -693,6 +694,7 @@ asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode, error = -ENOENT; if (!dentry->d_inode) goto out; + audit_inode(name, dentry->d_inode); filp = do_open(dentry, oflag); } @@ -840,6 +842,7 @@ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, if (unlikely(filp->f_op != &mqueue_file_operations)) goto out_fput; info = MQUEUE_I(inode); + audit_inode(NULL, inode); if (unlikely(!(filp->f_mode & FMODE_WRITE))) goto out_fput; @@ -923,6 +926,7 @@ asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, if (unlikely(filp->f_op != &mqueue_file_operations)) goto out_fput; info = MQUEUE_I(inode); + audit_inode(NULL, inode); if (unlikely(!(filp->f_mode & FMODE_READ))) goto out_fput; diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 41d129a78793..25d890e997f2 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1475,33 +1475,6 @@ update_context: } } -/** - * audit_inode_update - update inode info for last collected name - * @inode: inode being audited - * - * When open() is called on an existing object with the O_CREAT flag, the inode - * data audit initially collects is incorrect. This additional hook ensures - * audit has the inode data for the actual object to be opened. 
- */ -void __audit_inode_update(const struct inode *inode) -{ - struct audit_context *context = current->audit_context; - int idx; - - if (!context->in_syscall || !inode) - return; - - if (context->name_count == 0) { - context->name_count++; -#if AUDIT_DEBUG - context->ino_count++; -#endif - } - idx = context->name_count - 1; - - audit_copy_inode(&context->names[idx], inode); -} - /** * auditsc_get_stamp - get local copies of audit_context values * @ctx: audit_context for the task -- cgit v1.2.3 From 0a4ff8c2598b72f2fa9d50aae9e1809e684dbf41 Mon Sep 17 00:00:00 2001 From: Steve Grubb Date: Thu, 19 Apr 2007 10:28:21 -0400 Subject: [PATCH] Abnormal End of Processes Hi, I have been working on some code that detects abnormal events based on audit system events. One kind of event that we currently have no visibility for is when a program terminates due to segfault - which should never happen on a production machine. And if it did, you'd want to investigate it. Attached is a patch that collects these events and sends them into the audit system. Signed-off-by: Steve Grubb Signed-off-by: Al Viro --- fs/exec.c | 2 ++ include/linux/audit.h | 3 +++ kernel/auditsc.c | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+) (limited to 'include/linux') diff --git a/fs/exec.c b/fs/exec.c index 1ba85c7fc6af..7cf078ec758e 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1488,6 +1488,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) int flag = 0; int ispipe = 0; + audit_core_dumps(signr); + binfmt = current->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; diff --git a/include/linux/audit.h b/include/linux/audit.h index 22976ddbd264..fccc6e50298a 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -112,6 +112,7 @@ #define AUDIT_FIRST_KERN_ANOM_MSG 1700 #define AUDIT_LAST_KERN_ANOM_MSG 1799 #define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */ +#define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */ #define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */ @@ -377,6 +378,7 @@ static inline void audit_inode_child(const char *dname, if (unlikely(!audit_dummy_context())) __audit_inode_child(dname, inode, parent); } +void audit_core_dumps(long signr); static inline void audit_ptrace(struct task_struct *t) { @@ -467,6 +469,7 @@ extern int audit_signals; #define __audit_inode_child(d,i,p) do { ; } while (0) #define audit_inode(n,i) do { ; } while (0) #define audit_inode_child(d,i,p) do { ; } while (0) +#define audit_core_dumps(i) do { ; } while (0) #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) #define audit_get_loginuid(c) ({ -1; }) #define audit_log_task_context(b) do { ; } while (0) diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 5276b7ef05f1..e36481ed61b4 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -2037,3 +2037,42 @@ int __audit_signal_info(int sig, struct task_struct *t) return 0; } + +/** + * audit_core_dumps - record information about processes that end abnormally + * @sig: signal value + * + * If a process ends with a core dump, something fishy is going on and we + * should record the event for investigation. 
+ */ +void audit_core_dumps(long signr) +{ + struct audit_buffer *ab; + u32 sid; + + if (!audit_enabled) + return; + + if (signr == SIGQUIT) /* don't care for those */ + return; + + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND); + audit_log_format(ab, "auid=%u uid=%u gid=%u", + audit_get_loginuid(current->audit_context), + current->uid, current->gid); + selinux_get_task_sid(current, &sid); + if (sid) { + char *ctx = NULL; + u32 len; + + if (selinux_sid_to_string(sid, &ctx, &len)) + audit_log_format(ab, " ssid=%u", sid); + else + audit_log_format(ab, " subj=%s", ctx); + kfree(ctx); + } + audit_log_format(ab, " pid=%d comm=", current->pid); + audit_log_untrustedstring(ab, current->comm); + audit_log_format(ab, " sig=%ld", signr); + audit_log_end(ab); +} -- cgit v1.2.3 From d89d87965dcbe6fe4f96a2a7e8421b3a75f634d1 Mon Sep 17 00:00:00 2001 From: Neil Brown Date: Tue, 1 May 2007 09:53:42 +0200 Subject: When stacked block devices are in-use (e.g. md or dm), the recursive calls to generic_make_request can use up a lot of space, and we would rather they didn't. As generic_make_request is a void function, and as it is generally not expected that it will have any effect immediately, it is safe to delay any call to generic_make_request until there is sufficient stack space available. As ->bi_next is reserved for the driver to use, it can have no valid value when generic_make_request is called, and as __make_request implicitly assumes it will be NULL (ELEVATOR_BACK_MERGE fork of switch) we can be certain that all callers set it to NULL. We can therefore safely use bi_next to link pending requests together, providing we clear it before making the real call. So, we choose to allow each thread to only be active in one generic_make_request at a time. If a subsequent (recursive) call is made, the bio is linked into a per-thread list, and is handled when the active call completes. As the list of pending bios is per-thread, there are no locking issues to worry about. I say above that it is "safe to delay any call...". There are, however, some behaviours of a make_request_fn which would make it unsafe. These include any behaviour that assumes anything will have changed after a recursive call to generic_make_request. These could include: - waiting for that call to finish and call its bi_end_io function. md used to sometimes do this (marking the superblock dirty before completing a write) but doesn't any more - inspecting the bio for fields that generic_make_request might change, such as bi_sector or bi_bdev. It is hard to see a good reason for this, and I don't think anyone actually does it. - inspecting the queue to see if, e.g. it is 'full' yet. Again, I think this is very unlikely to be useful, or to be done. Signed-off-by: Neil Brown Cc: Jens Axboe Cc: Alasdair G Kergon said: I can see nothing wrong with this in principle. For device-mapper at the moment though it's essential that, while the bio mappings may now get delayed, they still get processed in exactly the same order as they were passed to generic_make_request(). My main concern is whether the timing changes implicit in this patch will make the rare data-corrupting races in the existing snapshot code more likely. (I'm working on a fix for these races, but the unfinished patch is already several hundred lines long.) It would be helpful if some people on this mailing list would test this patch in various scenarios and report back. 
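The per-thread queueing trick described above can be demonstrated outside the block layer with a small stand-alone sketch. Everything in it (node, submit, process, pending_head, pending_tail) is an invented stand-in for this illustration; it mimics only the queue-instead-of-recurse control flow, not the real bio handling:

#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *next;
	int depth;
};

/* Per-thread state; in the real patch the equivalent fields live in task_struct. */
static struct node *pending_head;
static struct node **pending_tail;	/* NULL means no submit() is active */

static void process(struct node *n);

static void submit(struct node *n)
{
	if (pending_tail) {		/* nested call: just queue and return */
		n->next = NULL;
		*pending_tail = n;
		pending_tail = &n->next;
		return;
	}
	/* Outermost call: handle this node, then drain whatever got queued. */
	do {
		pending_head = n->next;
		if (n->next == NULL)
			pending_tail = &pending_head;
		else
			n->next = NULL;
		process(n);		/* may call submit() again; that only queues */
		n = pending_head;
	} while (n);
	pending_tail = NULL;		/* deactivate */
}

static void process(struct node *n)
{
	static struct node child[4];	/* storage for resubmitted nodes in this demo */

	printf("handling node at depth %d\n", n->depth);
	if (n->depth < 3) {		/* pretend a stacked device remaps and resubmits */
		child[n->depth].depth = n->depth + 1;
		submit(&child[n->depth]);
	}
}

int main(void)
{
	struct node first = { NULL, 0 };

	submit(&first);			/* handles depths 0..3 without deep recursion */
	return 0;
}

Only the outermost submit() ever runs process(); a nested call merely appends to the per-thread list and returns, so the stack depth stays bounded no matter how many layers of stacked devices resubmit.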
Signed-off-by: Andrew Morton Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++- include/linux/sched.h | 4 ++++ 2 files changed, 56 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 17e188973428..74a567afb830 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3116,7 +3116,7 @@ static inline int should_fail_request(struct bio *bio) * bi_sector for remaps as it sees fit. So the values of these fields * should NOT be depended on after the call to generic_make_request. */ -void generic_make_request(struct bio *bio) +static inline void __generic_make_request(struct bio *bio) { request_queue_t *q; sector_t maxsector; @@ -3215,6 +3215,57 @@ end_io: } while (ret); } +/* + * We only want one ->make_request_fn to be active at a time, + * else stack usage with stacked devices could be a problem. + * So use current->bio_{list,tail} to keep a list of requests + * submited by a make_request_fn function. + * current->bio_tail is also used as a flag to say if + * generic_make_request is currently active in this task or not. + * If it is NULL, then no make_request is active. If it is non-NULL, + * then a make_request is active, and new requests should be added + * at the tail + */ +void generic_make_request(struct bio *bio) +{ + if (current->bio_tail) { + /* make_request is active */ + *(current->bio_tail) = bio; + bio->bi_next = NULL; + current->bio_tail = &bio->bi_next; + return; + } + /* following loop may be a bit non-obvious, and so deserves some + * explanation. + * Before entering the loop, bio->bi_next is NULL (as all callers + * ensure that) so we have a list with a single bio. + * We pretend that we have just taken it off a longer list, so + * we assign bio_list to the next (which is NULL) and bio_tail + * to &bio_list, thus initialising the bio_list of new bios to be + * added. __generic_make_request may indeed add some more bios + * through a recursive call to generic_make_request. If it + * did, we find a non-NULL value in bio_list and re-enter the loop + * from the top. In this case we really did just take the bio + * of the top of the list (no pretending) and so fixup bio_list and + * bio_tail or bi_next, and call into __generic_make_request again. + * + * The loop was structured like this to make only one call to + * __generic_make_request (which is important as it is large and + * inlined) and to keep the structure simple. 
+ */ + BUG_ON(bio->bi_next); + do { + current->bio_list = bio->bi_next; + if (bio->bi_next == NULL) + current->bio_tail = ¤t->bio_list; + else + bio->bi_next = NULL; + __generic_make_request(bio); + bio = current->bio_list; + } while (bio); + current->bio_tail = NULL; /* deactivate */ +} + EXPORT_SYMBOL(generic_make_request); /** diff --git a/include/linux/sched.h b/include/linux/sched.h index 17b72d88c4cb..e38c436ee12b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -88,6 +88,7 @@ struct sched_param { struct exec_domain; struct futex_pi_state; +struct bio; /* * List of flags we want to share for kernel threads, @@ -1014,6 +1015,9 @@ struct task_struct { /* journalling filesystem info */ void *journal_info; +/* stacked block device info */ + struct bio *bio_list, **bio_tail; + /* VM state */ struct reclaim_state *reclaim_state; -- cgit v1.2.3 From 87c1efbfeac49849b981a7eac8cba42d4a49b2e9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 11 May 2007 13:29:54 +0200 Subject: Fix compile/link of init/do_mounts.c with !CONFIG_BLOCK We need a stub function for when CONFIG_BLOCK isn't set. Signed-off-by: Jens Axboe --- include/linux/genhd.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/genhd.h b/include/linux/genhd.h index f589559cf070..4c03ee353e78 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -434,6 +434,10 @@ static inline struct block_device *bdget_disk(struct gendisk *disk, int index) #endif -#endif +#else /* CONFIG_BLOCK */ + +static inline void printk_all_partitions(void) { } + +#endif /* CONFIG_BLOCK */ #endif -- cgit v1.2.3 From 6eaeeaba39e5fa3d52a0bb8de15e995516ae251a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 10 May 2007 22:22:37 -0700 Subject: getrusage(): fill ru_inblock and ru_oublock fields if possible MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If CONFIG_TASK_IO_ACCOUNTING is defined, we update io accounting counters for each task. This patch permits reporting of values using the well known getrusage() syscall, filling ru_inblock and ru_oublock instead of null values. As TASK_IO_ACCOUNTING currently counts bytes counts, we approximate blocks count doing : nr_blocks = nr_bytes / 512 Example of use : ---------------------- After patch is applied, /usr/bin/time command can now give a good approximation of IO that the process had to do. 
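The same counters are also reachable from plain getrusage() once the patch is in; a minimal user-space sketch, independent of /usr/bin/time (whose transcripts follow), might look like:

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rusage ru;

            /* ... perform some file reads/writes here ... */
            if (getrusage(RUSAGE_SELF, &ru) == 0)
                    printf("inblock=%ld oublock=%ld\n",
                           ru.ru_inblock, ru.ru_oublock);
            return 0;
    }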
$ /usr/bin/time grep tototo /usr/include/* Command exited with non-zero status 1 0.00user 0.02system 0:02.11elapsed 1%CPU (0avgtext+0avgdata 0maxresident)k 24288inputs+0outputs (0major+259minor)pagefaults 0swaps $ /usr/bin/time dd if=/dev/zero of=/tmp/testfile count=1000 1000+0 enregistrements lus 1000+0 enregistrements écrits 512000 octets (512 kB) copiés, 0,00326601 seconde, 157 MB/s 0.00user 0.00system 0:00.00elapsed 80%CPU (0avgtext+0avgdata 0maxresident)k 0inputs+3000outputs (0major+299minor)pagefaults 0swaps Signed-off-by: Eric Dumazet Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 1 + include/linux/task_io_accounting_ops.h | 28 ++++++++++++++++++++++++++++ kernel/exit.c | 9 +++++++++ kernel/fork.c | 1 + kernel/sys.c | 7 +++++++ 5 files changed, 46 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 17b72d88c4cb..75f44379b7e9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -469,6 +469,7 @@ struct signal_struct { cputime_t utime, stime, cutime, cstime; unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; + unsigned long inblock, oublock, cinblock, coublock; /* * Cumulative ns of scheduled CPU time for dead threads in the diff --git a/include/linux/task_io_accounting_ops.h b/include/linux/task_io_accounting_ops.h index df2a319106b2..1218733ec6b5 100644 --- a/include/linux/task_io_accounting_ops.h +++ b/include/linux/task_io_accounting_ops.h @@ -10,11 +10,29 @@ static inline void task_io_account_read(size_t bytes) current->ioac.read_bytes += bytes; } +/* + * We approximate number of blocks, because we account bytes only. + * A 'block' is 512 bytes + */ +static inline unsigned long task_io_get_inblock(const struct task_struct *p) +{ + return p->ioac.read_bytes >> 9; +} + static inline void task_io_account_write(size_t bytes) { current->ioac.write_bytes += bytes; } +/* + * We approximate number of blocks, because we account bytes only. + * A 'block' is 512 bytes + */ +static inline unsigned long task_io_get_oublock(const struct task_struct *p) +{ + return p->ioac.write_bytes >> 9; +} + static inline void task_io_account_cancelled_write(size_t bytes) { current->ioac.cancelled_write_bytes += bytes; @@ -31,10 +49,20 @@ static inline void task_io_account_read(size_t bytes) { } +static inline unsigned long task_io_get_inblock(const struct task_struct *p) +{ + return 0; +} + static inline void task_io_account_write(size_t bytes) { } +static inline unsigned long task_io_get_oublock(const struct task_struct *p) +{ + return 0; +} + static inline void task_io_account_cancelled_write(size_t bytes) { } diff --git a/kernel/exit.c b/kernel/exit.c index b0c6f0c3a2df..7a5fd77f8fb0 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -42,6 +42,7 @@ #include /* for audit_free() */ #include #include +#include #include #include @@ -113,6 +114,8 @@ static void __exit_signal(struct task_struct *tsk) sig->nvcsw += tsk->nvcsw; sig->nivcsw += tsk->nivcsw; sig->sched_time += tsk->sched_time; + sig->inblock += task_io_get_inblock(tsk); + sig->oublock += task_io_get_oublock(tsk); sig = NULL; /* Marker for below. 
*/ } @@ -1193,6 +1196,12 @@ static int wait_task_zombie(struct task_struct *p, int noreap, p->nvcsw + sig->nvcsw + sig->cnvcsw; psig->cnivcsw += p->nivcsw + sig->nivcsw + sig->cnivcsw; + psig->cinblock += + task_io_get_inblock(p) + + sig->inblock + sig->cinblock; + psig->coublock += + task_io_get_oublock(p) + + sig->oublock + sig->coublock; spin_unlock_irq(&p->parent->sighand->siglock); } diff --git a/kernel/fork.c b/kernel/fork.c index 5dd3979747f5..da92e01aba6b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -875,6 +875,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; + sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; sig->sched_time = 0; INIT_LIST_HEAD(&sig->cpu_timers[0]); INIT_LIST_HEAD(&sig->cpu_timers[1]); diff --git a/kernel/sys.c b/kernel/sys.c index cdb7e9457ba6..ec319bbb0bd4 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -2082,6 +2083,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) r->ru_nivcsw = p->signal->cnivcsw; r->ru_minflt = p->signal->cmin_flt; r->ru_majflt = p->signal->cmaj_flt; + r->ru_inblock = p->signal->cinblock; + r->ru_oublock = p->signal->coublock; if (who == RUSAGE_CHILDREN) break; @@ -2093,6 +2096,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) r->ru_nivcsw += p->signal->nivcsw; r->ru_minflt += p->signal->min_flt; r->ru_majflt += p->signal->maj_flt; + r->ru_inblock += p->signal->inblock; + r->ru_oublock += p->signal->oublock; t = p; do { utime = cputime_add(utime, t->utime); @@ -2101,6 +2106,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) r->ru_nivcsw += t->nivcsw; r->ru_minflt += t->min_flt; r->ru_majflt += t->maj_flt; + r->ru_inblock += task_io_get_inblock(t); + r->ru_oublock += task_io_get_oublock(t); t = next_thread(t); } while (t != p); break; -- cgit v1.2.3 From 99eaf3c45fe806c4a7f39b9be4a1bd0dfc617699 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 10 May 2007 22:22:39 -0700 Subject: lib/hexdump Based on ace_dump_mem() from Grant Likely for the Xilinx SystemACE CompactFlash interface. Add print_hex_dump() & hex_dumper() to lib/hexdump.c and linux/kernel.h. This patch adds the functions print_hex_dump() & hex_dumper(). print_hex_dump() can be used to perform a hex + ASCII dump of data to syslog, in an easily viewable format, thus providing a common text hex dump format. hex_dumper() provides a dump-to-memory function. It converts one "line" of output (16 bytes of input) at a time. Example usages: print_hex_dump(KERN_DEBUG, DUMP_PREFIX_ADDRESS, frame->data, frame->len); hex_dumper(frame->data, frame->len, linebuf, sizeof(linebuf)); Example output using %DUMP_PREFIX_OFFSET: 0009ab42: 40414243 44454647 48494a4b 4c4d4e4f-@ABCDEFG HIJKLMNO Example output using %DUMP_PREFIX_ADDRESS: ffffffff88089af0: 70717273 74757677 78797a7b 7c7d7e7f-pqrstuvw xyz{|}~. 
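As a caller-side sketch against the interface added below (the pkt buffer and its length are hypothetical driver data), a debug path could dump to the log or render one line into a local buffer:

    char linebuf[100];                      /* room for one 16-byte line + NUL */

    print_hex_dump(KERN_DEBUG, DUMP_PREFIX_OFFSET, pkt, pkt_len);
    hex_dump_to_buffer(pkt, pkt_len, linebuf, sizeof(linebuf)); /* converts at most 16 bytes */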
[akpm@linux-foundation.org: cleanups, add export] Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 11 ++++++ lib/Makefile | 2 +- lib/hexdump.c | 104 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 116 insertions(+), 1 deletion(-) create mode 100644 lib/hexdump.c (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 8645181fca0f..eec0d13169a6 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -213,6 +213,17 @@ extern enum system_states { extern void dump_stack(void); +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +extern void hex_dump_to_buffer(const void *buf, size_t len, char *linebuf, + size_t linebuflen); +extern void print_hex_dump(const char *level, int prefix_type, + void *buf, size_t len); +#define hex_asc(x) "0123456789abcdef"[x] + #ifdef DEBUG /* If you are writing a driver, please use dev_dbg instead */ #define pr_debug(fmt,arg...) \ diff --git a/lib/Makefile b/lib/Makefile index 1f65b4613e09..c8c8e20784ce 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -13,7 +13,7 @@ lib-$(CONFIG_SMP) += cpumask.o lib-y += kobject.o kref.o kobject_uevent.o klist.o obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ - bust_spinlocks.o + bust_spinlocks.o hexdump.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/hexdump.c b/lib/hexdump.c new file mode 100644 index 000000000000..e6da5b7fc29a --- /dev/null +++ b/lib/hexdump.c @@ -0,0 +1,104 @@ +/* + * lib/hexdump.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. See README and COPYING for + * more details. + */ + +#include +#include +#include +#include + +/** + * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory + * @buf: data blob to dump + * @len: number of bytes in the @buf + * @linebuf: where to put the converted data + * @linebuflen: total size of @linebuf, including space for terminating NUL + * + * hex_dump_to_buffer() works on one "line" of output at a time, i.e., + * 16 bytes of input data converted to hex + ASCII output. + * + * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data + * to a hex + ASCII dump at the supplied memory location. + * The converted output is always NUL-terminated. + * + * E.g.: + * hex_dump_to_buffer(frame->data, frame->len, linebuf, sizeof(linebuf)); + * + * example output buffer: + * 40414243 44454647 48494a4b 4c4d4e4f @ABCDEFGHIJKLMNO + */ +void hex_dump_to_buffer(const void *buf, size_t len, char *linebuf, + size_t linebuflen) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + + for (j = 0; (j < 16) && (j < len) && (lx + 3) < linebuflen; j++) { + if (j && !(j % 4)) + linebuf[lx++] = ' '; + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + } + if ((lx + 2) < linebuflen) { + linebuf[lx++] = ' '; + linebuf[lx++] = ' '; + } + for (j = 0; (j < 16) && (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = isprint(ptr[j]) ? ptr[j] : '.'; + linebuf[lx++] = '\0'; +} +EXPORT_SYMBOL(hex_dump_to_buffer); + +/** + * print_hex_dump - print a text hex dump to syslog for a binary blob of data + * @level: kernel log level (e.g. 
KERN_DEBUG) + * @prefix_type: controls whether prefix of an offset, address, or none + * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) + * @buf: data blob to dump + * @len: number of bytes in the @buf + * + * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump + * to the kernel log at the specified kernel log level, with an optional + * leading prefix. + * + * E.g.: + * print_hex_dump(KERN_DEBUG, DUMP_PREFIX_ADDRESS, frame->data, frame->len); + * + * Example output using %DUMP_PREFIX_OFFSET: + * 0009ab42: 40414243 44454647 48494a4b 4c4d4e4f @ABCDEFGHIJKLMNO + * Example output using %DUMP_PREFIX_ADDRESS: + * ffffffff88089af0: 70717273 74757677 78797a7b 7c7d7e7f pqrstuvwxyz{|}~. + */ +void print_hex_dump(const char *level, int prefix_type, void *buf, size_t len) +{ + u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[100]; + + for (i = 0; i < len; i += 16) { + linelen = min(remaining, 16); + remaining -= 16; + hex_dump_to_buffer(ptr + i, linelen, linebuf, sizeof(linebuf)); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%*p: %s\n", level, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%.8x: %s\n", level, i, linebuf); + break; + default: + printk("%s%s\n", level, linebuf); + break; + } + } +} +EXPORT_SYMBOL(print_hex_dump); -- cgit v1.2.3 From 2acdb1694494eb6f17b44b2b3065879af32d0d46 Mon Sep 17 00:00:00 2001 From: Paul Fulghum Date: Thu, 10 May 2007 22:22:43 -0700 Subject: synclink_gt: add compat_ioctl Add support for 32 bit ioctl on 64 bit systems for synclink_gt Cc: Arnd Bergmann Signed-off-by: Paul Fulghum Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/synclink_gt.c | 107 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/Kbuild | 2 +- include/linux/synclink.h | 24 ++++++++++ 3 files changed, 132 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index 2a7736b5f2f7..02b49bc00028 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c @@ -1170,6 +1170,112 @@ static int ioctl(struct tty_struct *tty, struct file *file, return 0; } +/* + * support for 32 bit ioctl calls on 64 bit systems + */ +#ifdef CONFIG_COMPAT +static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *user_params) +{ + struct MGSL_PARAMS32 tmp_params; + + DBGINFO(("%s get_params32\n", info->device_name)); + tmp_params.mode = (compat_ulong_t)info->params.mode; + tmp_params.loopback = info->params.loopback; + tmp_params.flags = info->params.flags; + tmp_params.encoding = info->params.encoding; + tmp_params.clock_speed = (compat_ulong_t)info->params.clock_speed; + tmp_params.addr_filter = info->params.addr_filter; + tmp_params.crc_type = info->params.crc_type; + tmp_params.preamble_length = info->params.preamble_length; + tmp_params.preamble = info->params.preamble; + tmp_params.data_rate = (compat_ulong_t)info->params.data_rate; + tmp_params.data_bits = info->params.data_bits; + tmp_params.stop_bits = info->params.stop_bits; + tmp_params.parity = info->params.parity; + if (copy_to_user(user_params, &tmp_params, sizeof(struct MGSL_PARAMS32))) + return -EFAULT; + return 0; +} + +static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *new_params) +{ + struct MGSL_PARAMS32 tmp_params; + + DBGINFO(("%s set_params32\n", info->device_name)); + if (copy_from_user(&tmp_params, new_params, sizeof(struct MGSL_PARAMS32))) + return 
-EFAULT; + + spin_lock(&info->lock); + info->params.mode = tmp_params.mode; + info->params.loopback = tmp_params.loopback; + info->params.flags = tmp_params.flags; + info->params.encoding = tmp_params.encoding; + info->params.clock_speed = tmp_params.clock_speed; + info->params.addr_filter = tmp_params.addr_filter; + info->params.crc_type = tmp_params.crc_type; + info->params.preamble_length = tmp_params.preamble_length; + info->params.preamble = tmp_params.preamble; + info->params.data_rate = tmp_params.data_rate; + info->params.data_bits = tmp_params.data_bits; + info->params.stop_bits = tmp_params.stop_bits; + info->params.parity = tmp_params.parity; + spin_unlock(&info->lock); + + change_params(info); + + return 0; +} + +static long slgt_compat_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct slgt_info *info = tty->driver_data; + int rc = -ENOIOCTLCMD; + + if (sanity_check(info, tty->name, "compat_ioctl")) + return -ENODEV; + DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd)); + + switch (cmd) { + + case MGSL_IOCSPARAMS32: + rc = set_params32(info, compat_ptr(arg)); + break; + + case MGSL_IOCGPARAMS32: + rc = get_params32(info, compat_ptr(arg)); + break; + + case MGSL_IOCGPARAMS: + case MGSL_IOCSPARAMS: + case MGSL_IOCGTXIDLE: + case MGSL_IOCGSTATS: + case MGSL_IOCWAITEVENT: + case MGSL_IOCGIF: + case MGSL_IOCSGPIO: + case MGSL_IOCGGPIO: + case MGSL_IOCWAITGPIO: + case TIOCGICOUNT: + rc = ioctl(tty, file, cmd, (unsigned long)(compat_ptr(arg))); + break; + + case MGSL_IOCSTXIDLE: + case MGSL_IOCTXENABLE: + case MGSL_IOCRXENABLE: + case MGSL_IOCTXABORT: + case TIOCMIWAIT: + case MGSL_IOCSIF: + rc = ioctl(tty, file, cmd, arg); + break; + } + + DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc)); + return rc; +} +#else +#define slgt_compat_ioctl NULL +#endif /* ifdef CONFIG_COMPAT */ + /* * proc fs support */ @@ -3446,6 +3552,7 @@ static const struct tty_operations ops = { .chars_in_buffer = chars_in_buffer, .flush_buffer = flush_buffer, .ioctl = ioctl, + .compat_ioctl = slgt_compat_ioctl, .throttle = throttle, .unthrottle = unthrottle, .send_xchar = send_xchar, diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 94cc04a143f2..645b3b528150 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -140,7 +140,6 @@ header-y += snmp.h header-y += sockios.h header-y += som.h header-y += sound.h -header-y += synclink.h header-y += taskstats.h header-y += telephony.h header-y += termios.h @@ -320,6 +319,7 @@ unifdef-y += sonypi.h unifdef-y += soundcard.h unifdef-y += stat.h unifdef-y += stddef.h +unifdef-y += synclink.h unifdef-y += sysctl.h unifdef-y += tcp.h unifdef-y += time.h diff --git a/include/linux/synclink.h b/include/linux/synclink.h index c8b042667af1..5562fbf72095 100644 --- a/include/linux/synclink.h +++ b/include/linux/synclink.h @@ -291,4 +291,28 @@ struct gpio_desc { #define MGSL_IOCGGPIO _IOR(MGSL_MAGIC_IOC,17,struct gpio_desc) #define MGSL_IOCWAITGPIO _IOWR(MGSL_MAGIC_IOC,18,struct gpio_desc) +#ifdef __KERNEL__ +/* provide 32 bit ioctl compatibility on 64 bit systems */ +#ifdef CONFIG_COMPAT +#include +struct MGSL_PARAMS32 { + compat_ulong_t mode; + unsigned char loopback; + unsigned short flags; + unsigned char encoding; + compat_ulong_t clock_speed; + unsigned char addr_filter; + unsigned short crc_type; + unsigned char preamble_length; + unsigned char preamble; + compat_ulong_t data_rate; + unsigned char data_bits; + unsigned char stop_bits; + unsigned char parity; +}; 
+#define MGSL_IOCSPARAMS32 _IOW(MGSL_MAGIC_IOC,0,struct MGSL_PARAMS32) +#define MGSL_IOCGPARAMS32 _IOR(MGSL_MAGIC_IOC,1,struct MGSL_PARAMS32) +#endif +#endif + #endif /* _SYNCLINK_H_ */ -- cgit v1.2.3 From 2d3fbbb391e280724f7f7804ab00ff61cf1b6a4c Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 10 May 2007 22:22:46 -0700 Subject: Add hard_irq_disable() Some architectures, like powerpc, implement lazy disabling of interrupts. That means that on those, local_irq_disable() doesn't actually disable interrupts on the CPU, but only sets some per CPU flag which cause them to be disabled only if an interrupt actually occurs. However, in some cases, such as stop_machine, we really want interrupts to be fully disabled. For example, I have code using stop machine to do ECC error injection, used to verify operations of the ECC hardware, that sort of thing. It really needs to make sure that nothing is actually writing to memory while the injection happens. Similar examples can be found in other low level bits and pieces. This patch implements a generic hard_irq_disable() function which is meant to be called -after- local_irq_disable() and ensures that interrupts are fully disabled on that CPU. The default implementation is a nop, though powerpc does already provide an appropriate one. Signed-off-by: Benjamin Herrenschmidt Cc: Rusty Russell Cc: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/interrupt.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f7b01b9a35b3..5323f6275854 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -241,6 +241,16 @@ static inline void __deprecated save_and_cli(unsigned long *x) #define save_and_cli(x) save_and_cli(&x) #endif /* CONFIG_SMP */ +/* Some architectures might implement lazy enabling/disabling of + * interrupts. In some cases, such as stop_machine, we might want + * to ensure that after a local_irq_disable(), interrupts have + * really been disabled in hardware. Such architectures need to + * implement the following hook. + */ +#ifndef hard_irq_disable +#define hard_irq_disable() do { } while(0) +#endif + /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high frequency threaded job scheduling. For almost all the purposes tasklets are more than enough. F.e. all serial device BHs et -- cgit v1.2.3 From 108f39a1b3b1e4b28ca8cc156f06171445499d21 Mon Sep 17 00:00:00 2001 From: Rene Herman Date: Thu, 10 May 2007 22:22:50 -0700 Subject: module_author: don't advise putting in an email address module_author: don't advise putting in an email address It's information that's easily outdated and easily mistaken for a driver contact which is a problem especially for modules with multiple current and non-current authors as well as for modules with a maintainer who may not even be a module author. 
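A hypothetical example of the recommended form, names only and no addresses (the identifiers are invented):

    MODULE_AUTHOR("Jane Doe, John Smith and Alex Example");
    MODULE_DESCRIPTION("Example widget driver");
    MODULE_LICENSE("GPL");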
Signed-off-by: Rene Herman Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/module.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index 792d483c9af7..e6e0f86ef5fc 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -124,7 +124,7 @@ extern struct module __this_module; */ #define MODULE_LICENSE(_license) MODULE_INFO(license, _license) -/* Author, ideally of form NAME [, NAME ]*[ and NAME ] */ +/* Author, ideally of form NAME[, NAME]*[ and NAME] */ #define MODULE_AUTHOR(_author) MODULE_INFO(author, _author) /* What your module does. */ -- cgit v1.2.3 From e10cc1df1d2014f68a4bdcf73f6dd122c4561f94 Mon Sep 17 00:00:00 2001 From: Paul Fulghum Date: Thu, 10 May 2007 22:22:50 -0700 Subject: tty: add compat_ioctl Add compat_ioctl method for tty code to allow processing of 32 bit ioctl calls on 64 bit systems by tty core, tty drivers, and line disciplines. Based on patch by Arnd Bergmann: http://www.uwsg.iu.edu/hypermail/linux/kernel/0511.0/1732.html [akpm@linux-foundation.org: make things static] Signed-off-by: Paul Fulghum Acked-by: Arnd Bergmann Cc: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/n_tty.c | 29 +++++++++++++---------------- drivers/char/tty_io.c | 43 ++++++++++++++++++++++++++++++++++++++++--- include/linux/tty_driver.h | 9 +++++++++ include/linux/tty_ldisc.h | 7 +++++++ 4 files changed, 69 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index 6ac3ca4c723c..b3d4ccc33a47 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c @@ -1544,21 +1544,18 @@ static unsigned int normal_poll(struct tty_struct * tty, struct file * file, pol } struct tty_ldisc tty_ldisc_N_TTY = { - TTY_LDISC_MAGIC, /* magic */ - "n_tty", /* name */ - 0, /* num */ - 0, /* flags */ - n_tty_open, /* open */ - n_tty_close, /* close */ - n_tty_flush_buffer, /* flush_buffer */ - n_tty_chars_in_buffer, /* chars_in_buffer */ - read_chan, /* read */ - write_chan, /* write */ - n_tty_ioctl, /* ioctl */ - n_tty_set_termios, /* set_termios */ - normal_poll, /* poll */ - NULL, /* hangup */ - n_tty_receive_buf, /* receive_buf */ - n_tty_write_wakeup /* write_wakeup */ + .magic = TTY_LDISC_MAGIC, + .name = "n_tty", + .open = n_tty_open, + .close = n_tty_close, + .flush_buffer = n_tty_flush_buffer, + .chars_in_buffer = n_tty_chars_in_buffer, + .read = read_chan, + .write = write_chan, + .ioctl = n_tty_ioctl, + .set_termios = n_tty_set_termios, + .poll = normal_poll, + .receive_buf = n_tty_receive_buf, + .write_wakeup = n_tty_write_wakeup }; diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index fc662e4ce58a..fe62c2170d01 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -151,6 +151,12 @@ static int tty_open(struct inode *, struct file *); static int tty_release(struct inode *, struct file *); int tty_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg); +#ifdef CONFIG_COMPAT +static long tty_compat_ioctl(struct file * file, unsigned int cmd, + unsigned long arg); +#else +#define tty_compat_ioctl NULL +#endif static int tty_fasync(int fd, struct file * filp, int on); static void release_tty(struct tty_struct *tty, int idx); static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); @@ -1143,8 +1149,8 @@ static unsigned int hung_up_tty_poll(struct file * filp, poll_table * wait) return 
POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM; } -static int hung_up_tty_ioctl(struct inode * inode, struct file * file, - unsigned int cmd, unsigned long arg) +static long hung_up_tty_ioctl(struct file * file, + unsigned int cmd, unsigned long arg) { return cmd == TIOCSPGRP ? -ENOTTY : -EIO; } @@ -1155,6 +1161,7 @@ static const struct file_operations tty_fops = { .write = tty_write, .poll = tty_poll, .ioctl = tty_ioctl, + .compat_ioctl = tty_compat_ioctl, .open = tty_open, .release = tty_release, .fasync = tty_fasync, @@ -1167,6 +1174,7 @@ static const struct file_operations ptmx_fops = { .write = tty_write, .poll = tty_poll, .ioctl = tty_ioctl, + .compat_ioctl = tty_compat_ioctl, .open = ptmx_open, .release = tty_release, .fasync = tty_fasync, @@ -1179,6 +1187,7 @@ static const struct file_operations console_fops = { .write = redirected_tty_write, .poll = tty_poll, .ioctl = tty_ioctl, + .compat_ioctl = tty_compat_ioctl, .open = tty_open, .release = tty_release, .fasync = tty_fasync, @@ -1189,7 +1198,8 @@ static const struct file_operations hung_up_tty_fops = { .read = hung_up_tty_read, .write = hung_up_tty_write, .poll = hung_up_tty_poll, - .ioctl = hung_up_tty_ioctl, + .unlocked_ioctl = hung_up_tty_ioctl, + .compat_ioctl = hung_up_tty_ioctl, .release = tty_release, }; @@ -3357,6 +3367,32 @@ int tty_ioctl(struct inode * inode, struct file * file, return retval; } +#ifdef CONFIG_COMPAT +static long tty_compat_ioctl(struct file * file, unsigned int cmd, + unsigned long arg) +{ + struct inode *inode = file->f_dentry->d_inode; + struct tty_struct *tty = file->private_data; + struct tty_ldisc *ld; + int retval = -ENOIOCTLCMD; + + if (tty_paranoia_check(tty, inode, "tty_ioctl")) + return -EINVAL; + + if (tty->driver->compat_ioctl) { + retval = (tty->driver->compat_ioctl)(tty, file, cmd, arg); + if (retval != -ENOIOCTLCMD) + return retval; + } + + ld = tty_ldisc_ref_wait(tty); + if (ld->compat_ioctl) + retval = ld->compat_ioctl(tty, file, cmd, arg); + tty_ldisc_deref(ld); + + return retval; +} +#endif /* * This implements the "Secure Attention Key" --- the idea is to @@ -3689,6 +3725,7 @@ void tty_set_operations(struct tty_driver *driver, driver->write_room = op->write_room; driver->chars_in_buffer = op->chars_in_buffer; driver->ioctl = op->ioctl; + driver->compat_ioctl = op->compat_ioctl; driver->set_termios = op->set_termios; driver->throttle = op->throttle; driver->unthrottle = op->unthrottle; diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 659487e3ebeb..85c95cd39bc3 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -52,6 +52,11 @@ * This routine allows the tty driver to implement * device-specific ioctl's. If the ioctl number passed in cmd * is not recognized by the driver, it should return ENOIOCTLCMD. 
+ * + * long (*compat_ioctl)(struct tty_struct *tty, struct file * file, + * unsigned int cmd, unsigned long arg); + * + * implement ioctl processing for 32 bit process on 64 bit system * * void (*set_termios)(struct tty_struct *tty, struct ktermios * old); * @@ -132,6 +137,8 @@ struct tty_operations { int (*chars_in_buffer)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg); + long (*compat_ioctl)(struct tty_struct *tty, struct file * file, + unsigned int cmd, unsigned long arg); void (*set_termios)(struct tty_struct *tty, struct ktermios * old); void (*throttle)(struct tty_struct * tty); void (*unthrottle)(struct tty_struct * tty); @@ -193,6 +200,8 @@ struct tty_driver { int (*chars_in_buffer)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg); + long (*compat_ioctl)(struct tty_struct *tty, struct file * file, + unsigned int cmd, unsigned long arg); void (*set_termios)(struct tty_struct *tty, struct ktermios * old); void (*throttle)(struct tty_struct * tty); void (*unthrottle)(struct tty_struct * tty); diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index d75932e27710..6226504d9108 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -59,6 +59,11 @@ * low-level driver can "grab" an ioctl request before the line * discpline has a chance to see it. * + * long (*compat_ioctl)(struct tty_struct * tty, struct file * file, + * unsigned int cmd, unsigned long arg); + * + * Process ioctl calls from 32-bit process on 64-bit system + * * void (*set_termios)(struct tty_struct *tty, struct ktermios * old); * * This function notifies the line discpline that a change has @@ -118,6 +123,8 @@ struct tty_ldisc { const unsigned char * buf, size_t nr); int (*ioctl)(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg); + long (*compat_ioctl)(struct tty_struct * tty, struct file * file, + unsigned int cmd, unsigned long arg); void (*set_termios)(struct tty_struct *tty, struct ktermios * old); unsigned int (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); -- cgit v1.2.3 From 0ea971801625184a91a6d80ea85e53875caa0bf5 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 10 May 2007 22:22:51 -0700 Subject: consolidate generic_writepages and mpage_writepages Clean up massive code duplication between mpage_writepages() and generic_writepages(). The new generic function, write_cache_pages() takes a function pointer argument, which will be called for each page to be written. Maybe cifs_writepages() too can use this infrastructure, but I'm not touching that with a ten-foot pole. The upcoming page writeback support in fuse will also want this. Signed-off-by: Miklos Szeredi Acked-by: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/mpage.c | 174 ++++++++++++---------------------------------- include/linux/mpage.h | 1 - include/linux/writeback.h | 10 ++- mm/page-writeback.c | 59 +++++++++++----- 4 files changed, 93 insertions(+), 151 deletions(-) (limited to 'include/linux') diff --git a/fs/mpage.c b/fs/mpage.c index 0fb914fc2ee0..c1698f2291aa 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -454,11 +454,18 @@ EXPORT_SYMBOL(mpage_readpage); * written, so it can intelligently allocate a suitably-sized BIO. For now, * just allocate full-size (16-page) BIOs. 
*/ -static struct bio * -__mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, - sector_t *last_block_in_bio, int *ret, struct writeback_control *wbc, - writepage_t writepage_fn) +struct mpage_data { + struct bio *bio; + sector_t last_block_in_bio; + get_block_t *get_block; + unsigned use_writepage; +}; + +static int __mpage_writepage(struct page *page, struct writeback_control *wbc, + void *data) { + struct mpage_data *mpd = data; + struct bio *bio = mpd->bio; struct address_space *mapping = page->mapping; struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; @@ -476,6 +483,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, int length; struct buffer_head map_bh; loff_t i_size = i_size_read(inode); + int ret = 0; if (page_has_buffers(page)) { struct buffer_head *head = page_buffers(page); @@ -538,7 +546,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, map_bh.b_state = 0; map_bh.b_size = 1 << blkbits; - if (get_block(inode, block_in_file, &map_bh, 1)) + if (mpd->get_block(inode, block_in_file, &map_bh, 1)) goto confused; if (buffer_new(&map_bh)) unmap_underlying_metadata(map_bh.b_bdev, @@ -584,7 +592,7 @@ page_is_mapped: /* * This page will go to BIO. Do we need to send this BIO off first? */ - if (bio && *last_block_in_bio != blocks[0] - 1) + if (bio && mpd->last_block_in_bio != blocks[0] - 1) bio = mpage_bio_submit(WRITE, bio); alloc_new: @@ -641,7 +649,7 @@ alloc_new: boundary_block, 1 << blkbits); } } else { - *last_block_in_bio = blocks[blocks_per_page - 1]; + mpd->last_block_in_bio = blocks[blocks_per_page - 1]; } goto out; @@ -649,18 +657,19 @@ confused: if (bio) bio = mpage_bio_submit(WRITE, bio); - if (writepage_fn) { - *ret = (*writepage_fn)(page, wbc); + if (mpd->use_writepage) { + ret = mapping->a_ops->writepage(page, wbc); } else { - *ret = -EAGAIN; + ret = -EAGAIN; goto out; } /* * The caller has a ref on the inode, so *mapping is stable */ - mapping_set_error(mapping, *ret); + mapping_set_error(mapping, ret); out: - return bio; + mpd->bio = bio; + return ret; } /** @@ -683,120 +692,27 @@ out: * the call was made get new I/O started against them. If wbc->sync_mode is * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. - * - * If you fix this you should check generic_writepages() also! 
*/ int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block) { - struct backing_dev_info *bdi = mapping->backing_dev_info; - struct bio *bio = NULL; - sector_t last_block_in_bio = 0; - int ret = 0; - int done = 0; - int (*writepage)(struct page *page, struct writeback_control *wbc); - struct pagevec pvec; - int nr_pages; - pgoff_t index; - pgoff_t end; /* Inclusive */ - int scanned = 0; - int range_whole = 0; - - if (wbc->nonblocking && bdi_write_congested(bdi)) { - wbc->encountered_congestion = 1; - return 0; - } - - writepage = NULL; - if (get_block == NULL) - writepage = mapping->a_ops->writepage; - - pagevec_init(&pvec, 0); - if (wbc->range_cyclic) { - index = mapping->writeback_index; /* Start from prev offset */ - end = -1; - } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; - if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) - range_whole = 1; - scanned = 1; + int ret; + + if (!get_block) + ret = generic_writepages(mapping, wbc); + else { + struct mpage_data mpd = { + .bio = NULL, + .last_block_in_bio = 0, + .get_block = get_block, + .use_writepage = 1, + }; + + ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); + if (mpd.bio) + mpage_bio_submit(WRITE, mpd.bio); } -retry: - while (!done && (index <= end) && - (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, - PAGECACHE_TAG_DIRTY, - min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { - unsigned i; - - scanned = 1; - for (i = 0; i < nr_pages; i++) { - struct page *page = pvec.pages[i]; - - /* - * At this point we hold neither mapping->tree_lock nor - * lock on the page itself: the page may be truncated or - * invalidated (changing page->mapping to NULL), or even - * swizzled back from swapper_space to tmpfs file - * mapping - */ - - lock_page(page); - - if (unlikely(page->mapping != mapping)) { - unlock_page(page); - continue; - } - - if (!wbc->range_cyclic && page->index > end) { - done = 1; - unlock_page(page); - continue; - } - - if (wbc->sync_mode != WB_SYNC_NONE) - wait_on_page_writeback(page); - - if (PageWriteback(page) || - !clear_page_dirty_for_io(page)) { - unlock_page(page); - continue; - } - - if (writepage) { - ret = (*writepage)(page, wbc); - mapping_set_error(mapping, ret); - } else { - bio = __mpage_writepage(bio, page, get_block, - &last_block_in_bio, &ret, wbc, - page->mapping->a_ops->writepage); - } - if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) - unlock_page(page); - if (ret || (--(wbc->nr_to_write) <= 0)) - done = 1; - if (wbc->nonblocking && bdi_write_congested(bdi)) { - wbc->encountered_congestion = 1; - done = 1; - } - } - pagevec_release(&pvec); - cond_resched(); - } - if (!scanned && !done) { - /* - * We hit the last page and there is more work to be done: wrap - * back to the start of the file - */ - scanned = 1; - index = 0; - goto retry; - } - if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) - mapping->writeback_index = index; - if (bio) - mpage_bio_submit(WRITE, bio); return ret; } EXPORT_SYMBOL(mpage_writepages); @@ -804,15 +720,15 @@ EXPORT_SYMBOL(mpage_writepages); int mpage_writepage(struct page *page, get_block_t get_block, struct writeback_control *wbc) { - int ret = 0; - struct bio *bio; - sector_t last_block_in_bio = 0; - - bio = __mpage_writepage(NULL, page, get_block, - &last_block_in_bio, &ret, wbc, NULL); - if (bio) - mpage_bio_submit(WRITE, bio); - + struct mpage_data mpd = { + .bio = NULL, + .last_block_in_bio = 0, + .get_block = get_block, + 
.use_writepage = 0, + }; + int ret = __mpage_writepage(page, wbc, &mpd); + if (mpd.bio) + mpage_bio_submit(WRITE, mpd.bio); return ret; } EXPORT_SYMBOL(mpage_writepage); diff --git a/include/linux/mpage.h b/include/linux/mpage.h index cc5fb75af78a..068a0c9946af 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h @@ -12,7 +12,6 @@ #ifdef CONFIG_BLOCK struct writeback_control; -typedef int (writepage_t)(struct page *page, struct writeback_control *wbc); int mpage_readpages(struct address_space *mapping, struct list_head *pages, unsigned nr_pages, get_block_t get_block); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index daa6c125f66e..050915b59576 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -111,9 +111,15 @@ balance_dirty_pages_ratelimited(struct address_space *mapping) balance_dirty_pages_ratelimited_nr(mapping, 1); } +typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, + void *data); + int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0); -extern int generic_writepages(struct address_space *mapping, - struct writeback_control *wbc); +int generic_writepages(struct address_space *mapping, + struct writeback_control *wbc); +int write_cache_pages(struct address_space *mapping, + struct writeback_control *wbc, writepage_t writepage, + void *data); int do_writepages(struct address_space *mapping, struct writeback_control *wbc); int sync_page_range(struct inode *inode, struct address_space *mapping, loff_t pos, loff_t count); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 63cd88840eb2..eec1481ba44f 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -588,31 +588,27 @@ void __init page_writeback_init(void) } /** - * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. + * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. * @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write + * @writepage: function called for each page + * @data: data passed to writepage function * - * This is a library function, which implements the writepages() - * address_space_operation. - * - * If a page is already under I/O, generic_writepages() skips it, even + * If a page is already under I/O, write_cache_pages() skips it, even * if it's dirty. This is desirable behaviour for memory-cleaning writeback, * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() * and msync() need to guarantee that all the data which was dirty at the time * the call was made get new I/O started against them. If wbc->sync_mode is * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. - * - * Derived from mpage_writepages() - if you fix this you should check that - * also! 
*/ -int generic_writepages(struct address_space *mapping, - struct writeback_control *wbc) +int write_cache_pages(struct address_space *mapping, + struct writeback_control *wbc, writepage_t writepage, + void *data) { struct backing_dev_info *bdi = mapping->backing_dev_info; int ret = 0; int done = 0; - int (*writepage)(struct page *page, struct writeback_control *wbc); struct pagevec pvec; int nr_pages; pgoff_t index; @@ -625,12 +621,6 @@ int generic_writepages(struct address_space *mapping, return 0; } - writepage = mapping->a_ops->writepage; - - /* deal with chardevs and other special file */ - if (!writepage) - return 0; - pagevec_init(&pvec, 0); if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ @@ -682,8 +672,7 @@ retry: continue; } - ret = (*writepage)(page, wbc); - mapping_set_error(mapping, ret); + ret = (*writepage)(page, wbc, data); if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) unlock_page(page); @@ -710,6 +699,38 @@ retry: mapping->writeback_index = index; return ret; } +EXPORT_SYMBOL(write_cache_pages); + +/* + * Function used by generic_writepages to call the real writepage + * function and set the mapping flags on error + */ +static int __writepage(struct page *page, struct writeback_control *wbc, + void *data) +{ + struct address_space *mapping = data; + int ret = mapping->a_ops->writepage(page, wbc); + mapping_set_error(mapping, ret); + return ret; +} + +/** + * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. + * @mapping: address space structure to write + * @wbc: subtract the number of written pages from *@wbc->nr_to_write + * + * This is a library function, which implements the writepages() + * address_space_operation. + */ +int generic_writepages(struct address_space *mapping, + struct writeback_control *wbc) +{ + /* deal with chardevs and other special file */ + if (!mapping->a_ops->writepage) + return 0; + + return write_cache_pages(mapping, wbc, __writepage, mapping); +} EXPORT_SYMBOL(generic_writepages); -- cgit v1.2.3 From e713d0dab21a68500720e222fa02567fc7dfb14b Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Thu, 10 May 2007 22:22:58 -0700 Subject: attach_pid() with struct pid parameter attach_pid() currently takes a pid_t and then uses find_pid() to find the corresponding struct pid. Sometimes we already have the struct pid. We can then skip find_pid() if attach_pid() were to take a struct pid parameter. 
Signed-off-by: Sukadev Bhattiprolu Cc: Cedric Le Goater Cc: Dave Hansen Cc: Serge Hallyn Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/exec.c | 2 +- include/linux/pid.h | 3 +-- kernel/exit.c | 4 ++-- kernel/fork.c | 11 +++++++---- kernel/pid.c | 9 ++++++--- kernel/sys.c | 2 +- 6 files changed, 18 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/exec.c b/fs/exec.c index 1ba85c7fc6af..2255dc72deef 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -702,7 +702,7 @@ static int de_thread(struct task_struct *tsk) */ detach_pid(tsk, PIDTYPE_PID); tsk->pid = leader->pid; - attach_pid(tsk, PIDTYPE_PID, tsk->pid); + attach_pid(tsk, PIDTYPE_PID, find_pid(tsk->pid)); transfer_pid(leader, tsk, PIDTYPE_PGID); transfer_pid(leader, tsk, PIDTYPE_SID); list_replace_rcu(&leader->tasks, &tsk->tasks); diff --git a/include/linux/pid.h b/include/linux/pid.h index 2ac27f9997dd..33d343880d89 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -76,8 +76,7 @@ extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); * write-held. */ extern int FASTCALL(attach_pid(struct task_struct *task, - enum pid_type type, int nr)); - + enum pid_type type, struct pid *pid)); extern void FASTCALL(detach_pid(struct task_struct *task, enum pid_type)); extern void FASTCALL(transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type)); diff --git a/kernel/exit.c b/kernel/exit.c index 7a5fd77f8fb0..e93691e9b325 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -302,12 +302,12 @@ void __set_special_pids(pid_t session, pid_t pgrp) if (process_session(curr) != session) { detach_pid(curr, PIDTYPE_SID); set_signal_session(curr->signal, session); - attach_pid(curr, PIDTYPE_SID, session); + attach_pid(curr, PIDTYPE_SID, find_pid(session)); } if (process_group(curr) != pgrp) { detach_pid(curr, PIDTYPE_PGID); curr->signal->pgrp = pgrp; - attach_pid(curr, PIDTYPE_PGID, pgrp); + attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp)); } } diff --git a/kernel/fork.c b/kernel/fork.c index da92e01aba6b..6031800c94cf 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1249,16 +1249,19 @@ static struct task_struct *copy_process(unsigned long clone_flags, __ptrace_link(p, current->parent); if (thread_group_leader(p)) { + pid_t pgid = process_group(current); + pid_t sid = process_session(current); + p->signal->tty = current->signal->tty; - p->signal->pgrp = process_group(current); + p->signal->pgrp = pgid; set_signal_session(p->signal, process_session(current)); - attach_pid(p, PIDTYPE_PGID, process_group(p)); - attach_pid(p, PIDTYPE_SID, process_session(p)); + attach_pid(p, PIDTYPE_PGID, find_pid(pgid)); + attach_pid(p, PIDTYPE_SID, find_pid(sid)); list_add_tail_rcu(&p->tasks, &init_task.tasks); __get_cpu_var(process_counts)++; } - attach_pid(p, PIDTYPE_PID, p->pid); + attach_pid(p, PIDTYPE_PID, find_pid(p->pid)); nr_threads++; } diff --git a/kernel/pid.c b/kernel/pid.c index d3ad724afa83..d76f59326bd4 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -247,13 +247,16 @@ struct pid * fastcall find_pid(int nr) } EXPORT_SYMBOL_GPL(find_pid); -int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr) +/* + * attach_pid() must be called with the tasklist_lock write-held. 
+ */ +int fastcall attach_pid(struct task_struct *task, enum pid_type type, + struct pid *pid) { struct pid_link *link; - struct pid *pid; link = &task->pids[type]; - link->pid = pid = find_pid(nr); + link->pid = pid; hlist_add_head_rcu(&link->node, &pid->tasks[type]); return 0; diff --git a/kernel/sys.c b/kernel/sys.c index df4c3a8f5df9..872271ccc384 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1488,7 +1488,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) if (process_group(p) != pgid) { detach_pid(p, PIDTYPE_PGID); p->signal->pgrp = pgid; - attach_pid(p, PIDTYPE_PGID, pgid); + attach_pid(p, PIDTYPE_PGID, find_pid(pgid)); } err = 0; -- cgit v1.2.3 From 820e45db2380eb1545fa2bc5d34b8b2f2933faeb Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Thu, 10 May 2007 22:23:00 -0700 Subject: statically initialize struct pid for swapper Statically initialize a struct pid for the swapper process (pid_t == 0) and attach it to init_task. This is needed so task_pid(), task_pgrp() and task_session() interfaces work on the swapper process also. Signed-off-by: Sukadev Bhattiprolu Cc: Cedric Le Goater Cc: Dave Hansen Cc: Serge Hallyn Cc: Eric Biederman Cc: Herbert Poetzl Cc: Acked-by: Eric W. Biederman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/init_task.h | 27 +++++++++++++++++++++++++++ include/linux/pid.h | 2 ++ kernel/pid.c | 2 ++ 3 files changed, 31 insertions(+) (limited to 'include/linux') diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 45170b2fa253..a68835fc8c04 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -88,6 +88,28 @@ extern struct nsproxy init_nsproxy; extern struct group_info init_groups; +#define INIT_STRUCT_PID { \ + .count = ATOMIC_INIT(1), \ + .nr = 0, \ + /* Don't put this struct pid in pid_hash */ \ + .pid_chain = { .next = NULL, .pprev = NULL }, \ + .tasks = { \ + { .first = &init_task.pids[PIDTYPE_PID].node }, \ + { .first = &init_task.pids[PIDTYPE_PGID].node }, \ + { .first = &init_task.pids[PIDTYPE_SID].node }, \ + }, \ + .rcu = RCU_HEAD_INIT, \ +} + +#define INIT_PID_LINK(type) \ +{ \ + .node = { \ + .next = NULL, \ + .pprev = &init_struct_pid.tasks[type].first, \ + }, \ + .pid = &init_struct_pid, \ +} + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. 
Base=0, limit=0x1fffff (=2MB) @@ -139,6 +161,11 @@ extern struct group_info init_groups; .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ + [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ + }, \ INIT_TRACE_IRQFLAGS \ INIT_LOCKDEP \ } diff --git a/include/linux/pid.h b/include/linux/pid.h index 33d343880d89..1e0e4e3423a6 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -51,6 +51,8 @@ struct pid struct rcu_head rcu; }; +extern struct pid init_struct_pid; + struct pid_link { struct hlist_node node; diff --git a/kernel/pid.c b/kernel/pid.c index d76f59326bd4..eb66bd2953ab 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -27,11 +27,13 @@ #include #include #include +#include #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift) static struct hlist_head *pid_hash; static int pidhash_shift; static struct kmem_cache *pid_cachep; +struct pid init_struct_pid = INIT_STRUCT_PID; int pid_max = PID_MAX_DEFAULT; -- cgit v1.2.3 From 325aa33da784e5997c756a151bc46e9cc9b79db2 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Thu, 10 May 2007 22:23:10 -0700 Subject: Don't init pgrp and __session in INIT_SIGNALS Remove initialization of pgrp and __session in INIT_SIGNALS, as these are later set by the call to __set_special_pids() in init/main.c by the patch: explicitly-set-pgid-and-sid-of-init-process.patch Signed-off-by: Sukadev Bhattiprolu Cc: Cedric Le Goater Cc: Dave Hansen Cc: Serge Hallyn Cc: Eric Biederman Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/init_task.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/init_task.h b/include/linux/init_task.h index a68835fc8c04..66e2f0a70814 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -65,9 +65,9 @@ .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ .rlim = INIT_RLIMITS, \ - .pgrp = 1, \ + .pgrp = 0, \ .tty_old_pgrp = NULL, \ - { .__session = 1}, \ + { .__session = 0}, \ } extern struct nsproxy init_nsproxy; -- cgit v1.2.3 From 5dc8bf8132d59c03fe2562bce165c2f03f021687 Mon Sep 17 00:00:00 2001 From: Davide Libenzi Date: Thu, 10 May 2007 22:23:11 -0700 Subject: signal/timer/event fds: anonymous inode source This patch add an anonymous inode source, to be used for files that need and inode only in order to create a file*. We do not care of having an inode for each file, and we do not even care of having different names in the associated dentries (dentry names will be same for classes of file*). This allow code reuse, and will be used by epoll, signalfd and timerfd (and whatever else there'll be). 
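A rough caller-side sketch of the new helper (its real prototype and implementation are in the diff below; the myfd names and handlers are hypothetical):

    static const struct file_operations myfd_fops = {
            .read = myfd_read,              /* hypothetical handlers */
            .poll = myfd_poll,
    };

    static int myfd_create(void *priv)
    {
            int fd, error;
            struct inode *inode;
            struct file *file;

            error = anon_inode_getfd(&fd, &inode, &file, "[myfd]",
                                     &myfd_fops, priv);
            if (error)
                    return error;
            return fd;                      /* all such files share one inode */
    }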
Signed-off-by: Davide Libenzi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/Makefile | 1 + fs/anon_inodes.c | 200 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/anon_inodes.h | 16 ++++ include/linux/magic.h | 1 + init/Kconfig | 10 +++ 5 files changed, 228 insertions(+) create mode 100644 fs/anon_inodes.c create mode 100644 include/linux/anon_inodes.h (limited to 'include/linux') diff --git a/fs/Makefile b/fs/Makefile index 9edf4112bee0..b5cd46a88cb0 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -22,6 +22,7 @@ endif obj-$(CONFIG_INOTIFY) += inotify.o obj-$(CONFIG_INOTIFY_USER) += inotify_user.o obj-$(CONFIG_EPOLL) += eventpoll.o +obj-$(CONFIG_ANON_INODES) += anon_inodes.o obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o nfsd-$(CONFIG_NFSD) := nfsctl.o diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c new file mode 100644 index 000000000000..40fe3a3222e4 --- /dev/null +++ b/fs/anon_inodes.c @@ -0,0 +1,200 @@ +/* + * fs/anon_inodes.c + * + * Copyright (C) 2007 Davide Libenzi + * + * Thanks to Arnd Bergmann for code review and suggestions. + * More changes for Thomas Gleixner suggestions. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static struct vfsmount *anon_inode_mnt __read_mostly; +static struct inode *anon_inode_inode; +static const struct file_operations anon_inode_fops; + +static int anon_inodefs_get_sb(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, + struct vfsmount *mnt) +{ + return get_sb_pseudo(fs_type, "anon_inode:", NULL, ANON_INODE_FS_MAGIC, + mnt); +} + +static int anon_inodefs_delete_dentry(struct dentry *dentry) +{ + /* + * We faked vfs to believe the dentry was hashed when we created it. + * Now we restore the flag so that dput() will work correctly. + */ + dentry->d_flags |= DCACHE_UNHASHED; + return 1; +} + +static struct file_system_type anon_inode_fs_type = { + .name = "anon_inodefs", + .get_sb = anon_inodefs_get_sb, + .kill_sb = kill_anon_super, +}; +static struct dentry_operations anon_inodefs_dentry_operations = { + .d_delete = anon_inodefs_delete_dentry, +}; + +/** + * anon_inode_getfd - creates a new file instance by hooking it up to and + * anonymous inode, and a dentry that describe the "class" + * of the file + * + * @pfd: [out] pointer to the file descriptor + * @dpinode: [out] pointer to the inode + * @pfile: [out] pointer to the file struct + * @name: [in] name of the "class" of the new file + * @fops [in] file operations for the new file + * @priv [in] private data for the new file (will be file's private_data) + * + * Creates a new file by hooking it on a single inode. This is useful for files + * that do not need to have a full-fledged inode in order to operate correctly. + * All the files created with anon_inode_getfd() will share a single inode, by + * hence saving memory and avoiding code duplication for the file/inode/dentry + * setup. 
+ */ +int anon_inode_getfd(int *pfd, struct inode **pinode, struct file **pfile, + const char *name, const struct file_operations *fops, + void *priv) +{ + struct qstr this; + struct dentry *dentry; + struct inode *inode; + struct file *file; + int error, fd; + + if (IS_ERR(anon_inode_inode)) + return -ENODEV; + file = get_empty_filp(); + if (!file) + return -ENFILE; + + inode = igrab(anon_inode_inode); + if (IS_ERR(inode)) { + error = PTR_ERR(inode); + goto err_put_filp; + } + + error = get_unused_fd(); + if (error < 0) + goto err_iput; + fd = error; + + /* + * Link the inode to a directory entry by creating a unique name + * using the inode sequence number. + */ + error = -ENOMEM; + this.name = name; + this.len = strlen(name); + this.hash = 0; + dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this); + if (!dentry) + goto err_put_unused_fd; + dentry->d_op = &anon_inodefs_dentry_operations; + /* Do not publish this dentry inside the global dentry hash table */ + dentry->d_flags &= ~DCACHE_UNHASHED; + d_instantiate(dentry, inode); + + file->f_path.mnt = mntget(anon_inode_mnt); + file->f_path.dentry = dentry; + file->f_mapping = inode->i_mapping; + + file->f_pos = 0; + file->f_flags = O_RDWR; + file->f_op = fops; + file->f_mode = FMODE_READ | FMODE_WRITE; + file->f_version = 0; + file->private_data = priv; + + fd_install(fd, file); + + *pfd = fd; + *pinode = inode; + *pfile = file; + return 0; + +err_put_unused_fd: + put_unused_fd(fd); +err_iput: + iput(inode); +err_put_filp: + put_filp(file); + return error; +} + +/* + * A single inode exist for all anon_inode files. Contrary to pipes, + * anon_inode inodes has no per-instance data associated, so we can avoid + * the allocation of multiple of them. + */ +static struct inode *anon_inode_mkinode(void) +{ + struct inode *inode = new_inode(anon_inode_mnt->mnt_sb); + + if (!inode) + return ERR_PTR(-ENOMEM); + + inode->i_fop = &anon_inode_fops; + + /* + * Mark the inode dirty from the very beginning, + * that way it will never be moved to the dirty + * list because mark_inode_dirty() will think + * that it already _is_ on the dirty list. 
+ */ + inode->i_state = I_DIRTY; + inode->i_mode = S_IRUSR | S_IWUSR; + inode->i_uid = current->fsuid; + inode->i_gid = current->fsgid; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + return inode; +} + +static int __init anon_inode_init(void) +{ + int error; + + error = register_filesystem(&anon_inode_fs_type); + if (error) + goto err_exit; + anon_inode_mnt = kern_mount(&anon_inode_fs_type); + if (IS_ERR(anon_inode_mnt)) { + error = PTR_ERR(anon_inode_mnt); + goto err_unregister_filesystem; + } + anon_inode_inode = anon_inode_mkinode(); + if (IS_ERR(anon_inode_inode)) { + error = PTR_ERR(anon_inode_inode); + goto err_mntput; + } + + return 0; + +err_mntput: + mntput(anon_inode_mnt); +err_unregister_filesystem: + unregister_filesystem(&anon_inode_fs_type); +err_exit: + panic(KERN_ERR "anon_inode_init() failed (%d)\n", error); +} + +fs_initcall(anon_inode_init); + diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h new file mode 100644 index 000000000000..b2e1ba325b9a --- /dev/null +++ b/include/linux/anon_inodes.h @@ -0,0 +1,16 @@ +/* + * include/linux/anon_inodes.h + * + * Copyright (C) 2007 Davide Libenzi + * + */ + +#ifndef _LINUX_ANON_INODES_H +#define _LINUX_ANON_INODES_H + +int anon_inode_getfd(int *pfd, struct inode **pinode, struct file **pfile, + const char *name, const struct file_operations *fops, + void *priv); + +#endif /* _LINUX_ANON_INODES_H */ + diff --git a/include/linux/magic.h b/include/linux/magic.h index a9c6567fe70c..9d713c03e3da 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h @@ -14,6 +14,7 @@ #define ISOFS_SUPER_MAGIC 0x9660 #define JFFS2_SUPER_MAGIC 0x72b6 #define KVMFS_SUPER_MAGIC 0x19700426 +#define ANON_INODE_FS_MAGIC 0x09041934 #define MINIX_SUPER_MAGIC 0x137F /* original minix fs */ #define MINIX_SUPER_MAGIC2 0x138F /* minix fs, 30 char names */ diff --git a/init/Kconfig b/init/Kconfig index 322b1f8c21b3..a80bd8326bc6 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -475,9 +475,19 @@ config FUTEX support for "fast userspace mutexes". The resulting kernel may not run glibc-based applications correctly. +config ANON_INODES + bool "Enable anonymous inode source" if EMBEDDED + default y + help + Anonymous inode source for pseudo-files like epoll, signalfd, + timerfd and eventfd. + + If unsure, say Y. + config EPOLL bool "Enable eventpoll support" if EMBEDDED default y + depends on ANON_INODES help Disabling this option will cause the kernel to be built without support for epoll family of system calls. -- cgit v1.2.3 From fba2afaaec790dc5ab4ae8827972f342211bbb86 Mon Sep 17 00:00:00 2001 From: Davide Libenzi Date: Thu, 10 May 2007 22:23:13 -0700 Subject: signal/timer/event: signalfd core This patch series implements the new signalfd() system call. I took part of the original Linus code (and you know how badly it can be broken :), and I added even more breakage ;) Signals are fetched from the same signal queue used by the process, so signalfd will compete with standard kernel delivery in dequeue_signal(). If you want to reliably fetch signals on the signalfd file, you need to block them with sigprocmask(SIG_BLOCK). This seems to be working fine on my Dual Opteron machine. I made a quick test program for it: http://www.xmailserver.org/signafd-test.c The signalfd() system call implements signal delivery into a file descriptor receiver. 
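Purely as an illustration (not part of this patch), a minimal user-space sketch of the flow above, using the API detailed below: block SIGINT with sigprocmask(SIG_BLOCK), attach a signalfd to it, and read a struct signalfd_siginfo back. It assumes the architecture has wired up __NR_signalfd and exports <linux/signalfd.h> to user space; note that the size argument must match the kernel's sigset_t (8 bytes on most architectures), not glibc's larger sigset_t.

	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/signalfd.h>

	int main(void)
	{
		sigset_t mask;
		struct signalfd_siginfo info;
		int sfd;

		sigemptyset(&mask);
		sigaddset(&mask, SIGINT);
		/* Block SIGINT so it is only fetched through the signalfd */
		sigprocmask(SIG_BLOCK, &mask, NULL);

		/* ufd == -1 creates a brand new signalfd file;
		   8 == kernel sigset_t size (assumption, arch dependent) */
		sfd = syscall(__NR_signalfd, -1, &mask, 8);
		if (sfd < 0)
			return 1;

		/* Blocks until a SIGINT is queued, then dequeues it */
		if (read(sfd, &info, sizeof(info)) == sizeof(info))
			printf("signo=%u pid=%u\n", info.signo, info.pid);
		close(sfd);
		return 0;
	}
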
The signalfd file descriptor if created with the following API: int signalfd(int ufd, const sigset_t *mask, size_t masksize); The "ufd" parameter allows to change an existing signalfd sigmask, w/out going to close/create cycle (Linus idea). Use "ufd" == -1 if you want a brand new signalfd file. The "mask" allows to specify the signal mask of signals that we are interested in. The "masksize" parameter is the size of "mask". The signalfd fd supports the poll(2) and read(2) system calls. The poll(2) will return POLLIN when signals are available to be dequeued. As a direct consequence of supporting the Linux poll subsystem, the signalfd fd can use used together with epoll(2) too. The read(2) system call will return a "struct signalfd_siginfo" structure in the userspace supplied buffer. The return value is the number of bytes copied in the supplied buffer, or -1 in case of error. The read(2) call can also return 0, in case the sighand structure to which the signalfd was attached, has been orphaned. The O_NONBLOCK flag is also supported, and read(2) will return -EAGAIN in case no signal is available. If the size of the buffer passed to read(2) is lower than sizeof(struct signalfd_siginfo), -EINVAL is returned. A read from the signalfd can also return -ERESTARTSYS in case a signal hits the process. The format of the struct signalfd_siginfo is, and the valid fields depends of the (->code & __SI_MASK) value, in the same way a struct siginfo would: struct signalfd_siginfo { __u32 signo; /* si_signo */ __s32 err; /* si_errno */ __s32 code; /* si_code */ __u32 pid; /* si_pid */ __u32 uid; /* si_uid */ __s32 fd; /* si_fd */ __u32 tid; /* si_fd */ __u32 band; /* si_band */ __u32 overrun; /* si_overrun */ __u32 trapno; /* si_trapno */ __s32 status; /* si_status */ __s32 svint; /* si_int */ __u64 svptr; /* si_ptr */ __u64 utime; /* si_utime */ __u64 stime; /* si_stime */ __u64 addr; /* si_addr */ }; [akpm@linux-foundation.org: fix signalfd_copyinfo() on i386] Signed-off-by: Davide Libenzi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/Makefile | 1 + fs/exec.c | 11 +- fs/signalfd.c | 349 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/Kbuild | 1 + include/linux/init_task.h | 1 + include/linux/sched.h | 1 + include/linux/signal.h | 1 + include/linux/signalfd.h | 97 +++++++++++++ include/linux/syscalls.h | 1 + init/Kconfig | 10 ++ kernel/exit.c | 9 ++ kernel/fork.c | 8 +- kernel/signal.c | 22 ++- kernel/sys_ni.c | 3 + 14 files changed, 508 insertions(+), 7 deletions(-) create mode 100644 fs/signalfd.c create mode 100644 include/linux/signalfd.h (limited to 'include/linux') diff --git a/fs/Makefile b/fs/Makefile index b5cd46a88cb0..cd8a57aeac04 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_INOTIFY) += inotify.o obj-$(CONFIG_INOTIFY_USER) += inotify_user.o obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_ANON_INODES) += anon_inodes.o +obj-$(CONFIG_SIGNALFD) += signalfd.o obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o nfsd-$(CONFIG_NFSD) := nfsctl.o diff --git a/fs/exec.c b/fs/exec.c index 2255dc72deef..955a8eb66d70 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include @@ -581,6 +582,13 @@ static int de_thread(struct task_struct *tsk) struct task_struct *leader = NULL; int count; + /* + * Tell all the sighand listeners that this sighand has + * been detached. The signalfd_detach() function grabs the + * sighand lock, if signal listeners are present on the sighand. 
+ */ + signalfd_detach(tsk); + /* * If we don't share sighandlers, then we aren't sharing anything * and we can just re-use it all. @@ -757,8 +765,7 @@ no_thread_group: spin_unlock(&oldsighand->siglock); write_unlock_irq(&tasklist_lock); - if (atomic_dec_and_test(&oldsighand->count)) - kmem_cache_free(sighand_cachep, oldsighand); + __cleanup_sighand(oldsighand); } BUG_ON(!thread_group_leader(tsk)); diff --git a/fs/signalfd.c b/fs/signalfd.c new file mode 100644 index 000000000000..7cfeab412b45 --- /dev/null +++ b/fs/signalfd.c @@ -0,0 +1,349 @@ +/* + * fs/signalfd.c + * + * Copyright (C) 2003 Linus Torvalds + * + * Mon Mar 5, 2007: Davide Libenzi + * Changed ->read() to return a siginfo strcture instead of signal number. + * Fixed locking in ->poll(). + * Added sighand-detach notification. + * Added fd re-use in sys_signalfd() syscall. + * Now using anonymous inode source. + * Thanks to Oleg Nesterov for useful code review and suggestions. + * More comments and suggestions from Arnd Bergmann. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct signalfd_ctx { + struct list_head lnk; + wait_queue_head_t wqh; + sigset_t sigmask; + struct task_struct *tsk; +}; + +struct signalfd_lockctx { + struct task_struct *tsk; + unsigned long flags; +}; + +/* + * Tries to acquire the sighand lock. We do not increment the sighand + * use count, and we do not even pin the task struct, so we need to + * do it inside an RCU read lock, and we must be prepared for the + * ctx->tsk going to NULL (in signalfd_deliver()), and for the sighand + * being detached. We return 0 if the sighand has been detached, or + * 1 if we were able to pin the sighand lock. + */ +static int signalfd_lock(struct signalfd_ctx *ctx, struct signalfd_lockctx *lk) +{ + struct sighand_struct *sighand = NULL; + + rcu_read_lock(); + lk->tsk = rcu_dereference(ctx->tsk); + if (likely(lk->tsk != NULL)) + sighand = lock_task_sighand(lk->tsk, &lk->flags); + rcu_read_unlock(); + + if (sighand && !ctx->tsk) { + unlock_task_sighand(lk->tsk, &lk->flags); + sighand = NULL; + } + + return sighand != NULL; +} + +static void signalfd_unlock(struct signalfd_lockctx *lk) +{ + unlock_task_sighand(lk->tsk, &lk->flags); +} + +/* + * This must be called with the sighand lock held. + */ +void signalfd_deliver(struct task_struct *tsk, int sig) +{ + struct sighand_struct *sighand = tsk->sighand; + struct signalfd_ctx *ctx, *tmp; + + BUG_ON(!sig); + list_for_each_entry_safe(ctx, tmp, &sighand->signalfd_list, lnk) { + /* + * We use a negative signal value as a way to broadcast that the + * sighand has been orphaned, so that we can notify all the + * listeners about this. Remember the ctx->sigmask is inverted, + * so if the user is interested in a signal, that corresponding + * bit will be zero. + */ + if (sig < 0) { + if (ctx->tsk == tsk) { + ctx->tsk = NULL; + list_del_init(&ctx->lnk); + wake_up(&ctx->wqh); + } + } else { + if (!sigismember(&ctx->sigmask, sig)) + wake_up(&ctx->wqh); + } + } +} + +static void signalfd_cleanup(struct signalfd_ctx *ctx) +{ + struct signalfd_lockctx lk; + + /* + * This is tricky. If the sighand is gone, we do not need to remove + * context from the list, the list itself won't be there anymore. 
+ */ + if (signalfd_lock(ctx, &lk)) { + list_del(&ctx->lnk); + signalfd_unlock(&lk); + } + kfree(ctx); +} + +static int signalfd_release(struct inode *inode, struct file *file) +{ + signalfd_cleanup(file->private_data); + return 0; +} + +static unsigned int signalfd_poll(struct file *file, poll_table *wait) +{ + struct signalfd_ctx *ctx = file->private_data; + unsigned int events = 0; + struct signalfd_lockctx lk; + + poll_wait(file, &ctx->wqh, wait); + + /* + * Let the caller get a POLLIN in this case, ala socket recv() when + * the peer disconnects. + */ + if (signalfd_lock(ctx, &lk)) { + if (next_signal(&lk.tsk->pending, &ctx->sigmask) > 0 || + next_signal(&lk.tsk->signal->shared_pending, + &ctx->sigmask) > 0) + events |= POLLIN; + signalfd_unlock(&lk); + } else + events |= POLLIN; + + return events; +} + +/* + * Copied from copy_siginfo_to_user() in kernel/signal.c + */ +static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, + siginfo_t const *kinfo) +{ + long err; + + BUILD_BUG_ON(sizeof(struct signalfd_siginfo) != 128); + + /* + * Unused memebers should be zero ... + */ + err = __clear_user(uinfo, sizeof(*uinfo)); + + /* + * If you change siginfo_t structure, please be sure + * this code is fixed accordingly. + */ + err |= __put_user(kinfo->si_signo, &uinfo->signo); + err |= __put_user(kinfo->si_errno, &uinfo->err); + err |= __put_user((short)kinfo->si_code, &uinfo->code); + switch (kinfo->si_code & __SI_MASK) { + case __SI_KILL: + err |= __put_user(kinfo->si_pid, &uinfo->pid); + err |= __put_user(kinfo->si_uid, &uinfo->uid); + break; + case __SI_TIMER: + err |= __put_user(kinfo->si_tid, &uinfo->tid); + err |= __put_user(kinfo->si_overrun, &uinfo->overrun); + err |= __put_user((long)kinfo->si_ptr, &uinfo->svptr); + break; + case __SI_POLL: + err |= __put_user(kinfo->si_band, &uinfo->band); + err |= __put_user(kinfo->si_fd, &uinfo->fd); + break; + case __SI_FAULT: + err |= __put_user((long)kinfo->si_addr, &uinfo->addr); +#ifdef __ARCH_SI_TRAPNO + err |= __put_user(kinfo->si_trapno, &uinfo->trapno); +#endif + break; + case __SI_CHLD: + err |= __put_user(kinfo->si_pid, &uinfo->pid); + err |= __put_user(kinfo->si_uid, &uinfo->uid); + err |= __put_user(kinfo->si_status, &uinfo->status); + err |= __put_user(kinfo->si_utime, &uinfo->utime); + err |= __put_user(kinfo->si_stime, &uinfo->stime); + break; + case __SI_RT: /* This is not generated by the kernel as of now. */ + case __SI_MESGQ: /* But this is */ + err |= __put_user(kinfo->si_pid, &uinfo->pid); + err |= __put_user(kinfo->si_uid, &uinfo->uid); + err |= __put_user((long)kinfo->si_ptr, &uinfo->svptr); + break; + default: /* this is just in case for now ... */ + err |= __put_user(kinfo->si_pid, &uinfo->pid); + err |= __put_user(kinfo->si_uid, &uinfo->uid); + break; + } + + return err ? -EFAULT: sizeof(*uinfo); +} + +/* + * Returns either the size of a "struct signalfd_siginfo", or zero if the + * sighand we are attached to, has been orphaned. The "count" parameter + * must be at least the size of a "struct signalfd_siginfo". 
+ */ +static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct signalfd_ctx *ctx = file->private_data; + ssize_t res = 0; + int locked, signo; + siginfo_t info; + struct signalfd_lockctx lk; + DECLARE_WAITQUEUE(wait, current); + + if (count < sizeof(struct signalfd_siginfo)) + return -EINVAL; + locked = signalfd_lock(ctx, &lk); + if (!locked) + return 0; + res = -EAGAIN; + signo = dequeue_signal(lk.tsk, &ctx->sigmask, &info); + if (signo == 0 && !(file->f_flags & O_NONBLOCK)) { + add_wait_queue(&ctx->wqh, &wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + signo = dequeue_signal(lk.tsk, &ctx->sigmask, &info); + if (signo != 0) + break; + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + signalfd_unlock(&lk); + schedule(); + locked = signalfd_lock(ctx, &lk); + if (unlikely(!locked)) { + /* + * Let the caller read zero byte, ala socket + * recv() when the peer disconnect. This test + * must be done before doing a dequeue_signal(), + * because if the sighand has been orphaned, + * the dequeue_signal() call is going to crash. + */ + res = 0; + break; + } + } + remove_wait_queue(&ctx->wqh, &wait); + __set_current_state(TASK_RUNNING); + } + if (likely(locked)) + signalfd_unlock(&lk); + if (likely(signo)) + res = signalfd_copyinfo((struct signalfd_siginfo __user *) buf, + &info); + + return res; +} + +static const struct file_operations signalfd_fops = { + .release = signalfd_release, + .poll = signalfd_poll, + .read = signalfd_read, +}; + +/* + * Create a file descriptor that is associated with our signal + * state. We can pass it around to others if we want to, but + * it will always be _our_ signal state. + */ +asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask) +{ + int error; + sigset_t sigmask; + struct signalfd_ctx *ctx; + struct sighand_struct *sighand; + struct file *file; + struct inode *inode; + struct signalfd_lockctx lk; + + if (sizemask != sizeof(sigset_t) || + copy_from_user(&sigmask, user_mask, sizeof(sigmask))) + return error = -EINVAL; + sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); + signotset(&sigmask); + + if (ufd == -1) { + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + init_waitqueue_head(&ctx->wqh); + ctx->sigmask = sigmask; + ctx->tsk = current; + + sighand = current->sighand; + /* + * Add this fd to the list of signal listeners. + */ + spin_lock_irq(&sighand->siglock); + list_add_tail(&ctx->lnk, &sighand->signalfd_list); + spin_unlock_irq(&sighand->siglock); + + /* + * When we call this, the initialization must be complete, since + * anon_inode_getfd() will install the fd. + */ + error = anon_inode_getfd(&ufd, &inode, &file, "[signalfd]", + &signalfd_fops, ctx); + if (error) + goto err_fdalloc; + } else { + file = fget(ufd); + if (!file) + return -EBADF; + ctx = file->private_data; + if (file->f_op != &signalfd_fops) { + fput(file); + return -EINVAL; + } + /* + * We need to be prepared of the fact that the sighand this fd + * is attached to, has been detched. In that case signalfd_lock() + * will return 0, and we'll just skip setting the new mask. 
+ */ + if (signalfd_lock(ctx, &lk)) { + ctx->sigmask = sigmask; + signalfd_unlock(&lk); + } + wake_up(&ctx->wqh); + fput(file); + } + + return ufd; + +err_fdalloc: + signalfd_cleanup(ctx); + return error; +} + diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 645b3b528150..bcd01f269f60 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -190,6 +190,7 @@ unifdef-y += errno.h unifdef-y += errqueue.h unifdef-y += ethtool.h unifdef-y += eventpoll.h +unifdef-y += signalfd.h unifdef-y += ext2_fs.h unifdef-y += ext3_fs.h unifdef-y += fb.h diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 66e2f0a70814..276ccaa2670c 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -84,6 +84,7 @@ extern struct nsproxy init_nsproxy; .count = ATOMIC_INIT(1), \ .action = { { { .sa_handler = NULL, } }, }, \ .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ + .signalfd_list = LIST_HEAD_INIT(sighand.signalfd_list), \ } extern struct group_info init_groups; diff --git a/include/linux/sched.h b/include/linux/sched.h index 75f44379b7e9..97c0c7da58ef 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -391,6 +391,7 @@ struct sighand_struct { atomic_t count; struct k_sigaction action[_NSIG]; spinlock_t siglock; + struct list_head signalfd_list; }; struct pacct_struct { diff --git a/include/linux/signal.h b/include/linux/signal.h index 3fa0fab4a04b..9a5eac508e5e 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -233,6 +233,7 @@ static inline int valid_signal(unsigned long sig) return sig <= _NSIG ? 1 : 0; } +extern int next_signal(struct sigpending *pending, sigset_t *mask); extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); extern long do_sigpending(void __user *, unsigned long); diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h new file mode 100644 index 000000000000..510429495690 --- /dev/null +++ b/include/linux/signalfd.h @@ -0,0 +1,97 @@ +/* + * include/linux/signalfd.h + * + * Copyright (C) 2007 Davide Libenzi + * + */ + +#ifndef _LINUX_SIGNALFD_H +#define _LINUX_SIGNALFD_H + + +struct signalfd_siginfo { + __u32 signo; + __s32 err; + __s32 code; + __u32 pid; + __u32 uid; + __s32 fd; + __u32 tid; + __u32 band; + __u32 overrun; + __u32 trapno; + __s32 status; + __s32 svint; + __u64 svptr; + __u64 utime; + __u64 stime; + __u64 addr; + + /* + * Pad strcture to 128 bytes. Remember to update the + * pad size when you add new memebers. We use a fixed + * size structure to avoid compatibility problems with + * future versions, and we leave extra space for additional + * members. We use fixed size members because this strcture + * comes out of a read(2) and we really don't want to have + * a compat on read(2). + */ + __u8 __pad[48]; +}; + + +#ifdef __KERNEL__ + +#ifdef CONFIG_SIGNALFD + +/* + * Deliver the signal to listening signalfd. This must be called + * with the sighand lock held. Same are the following that end up + * calling signalfd_deliver(). + */ +void signalfd_deliver(struct task_struct *tsk, int sig); + +/* + * No need to fall inside signalfd_deliver() if no signal listeners + * are available. + */ +static inline void signalfd_notify(struct task_struct *tsk, int sig) +{ + if (unlikely(!list_empty(&tsk->sighand->signalfd_list))) + signalfd_deliver(tsk, sig); +} + +/* + * The signal -1 is used to notify the signalfd that the sighand + * is on its way to be detached. 
+ */ +static inline void signalfd_detach_locked(struct task_struct *tsk) +{ + if (unlikely(!list_empty(&tsk->sighand->signalfd_list))) + signalfd_deliver(tsk, -1); +} + +static inline void signalfd_detach(struct task_struct *tsk) +{ + struct sighand_struct *sighand = tsk->sighand; + + if (unlikely(!list_empty(&sighand->signalfd_list))) { + spin_lock_irq(&sighand->siglock); + signalfd_deliver(tsk, -1); + spin_unlock_irq(&sighand->siglock); + } +} + +#else /* CONFIG_SIGNALFD */ + +#define signalfd_deliver(t, s) do { } while (0) +#define signalfd_notify(t, s) do { } while (0) +#define signalfd_detach_locked(t) do { } while (0) +#define signalfd_detach(t) do { } while (0) + +#endif /* CONFIG_SIGNALFD */ + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SIGNALFD_H */ + diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 3139f4412297..e049f14a75b7 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -604,6 +604,7 @@ asmlinkage long sys_get_robust_list(int pid, asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, size_t len); asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); +asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); diff --git a/init/Kconfig b/init/Kconfig index a80bd8326bc6..db707204b751 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -492,6 +492,16 @@ config EPOLL Disabling this option will cause the kernel to be built without support for epoll family of system calls. +config SIGNALFD + bool "Enable signalfd() system call" if EMBEDDED + depends on ANON_INODES + default y + help + Enable the signalfd() system call that allows to receive signals + on a file descriptor. + + If unsure, say Y. + config SHMEM bool "Use full shmem filesystem" if EMBEDDED default y diff --git a/kernel/exit.c b/kernel/exit.c index e93691e9b325..c6d14b8008dd 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -83,6 +84,14 @@ static void __exit_signal(struct task_struct *tsk) sighand = rcu_dereference(tsk->sighand); spin_lock(&sighand->siglock); + /* + * Notify that this sighand has been detached. This must + * be called with the tsk->sighand lock held. Also, this + * access tsk->sighand internally, so it must be called + * before tsk->sighand is reset. + */ + signalfd_detach_locked(tsk); + posix_cpu_timers_exit(tsk); if (atomic_dec_and_test(&sig->count)) posix_cpu_timers_exit_group(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index 083bf8953ce8..49530e40ea8b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1422,12 +1422,15 @@ long do_fork(unsigned long clone_flags, #define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif -static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags) +static void sighand_ctor(void *data, struct kmem_cache *cachep, + unsigned long flags) { struct sighand_struct *sighand = data; - if (flags & SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) { spin_lock_init(&sighand->siglock); + INIT_LIST_HEAD(&sighand->signalfd_list); + } } void __init proc_caches_init(void) @@ -1453,7 +1456,6 @@ void __init proc_caches_init(void) SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); } - /* * Check constraints on flags passed to the unshare system call and * force unsharing of additional process context as appropriate. 
diff --git a/kernel/signal.c b/kernel/signal.c index 2ac3a668d9dd..34b7d6abce8f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -113,8 +114,7 @@ void recalc_sigpending(void) /* Given the mask, find the first available signal that should be serviced. */ -static int -next_signal(struct sigpending *pending, sigset_t *mask) +int next_signal(struct sigpending *pending, sigset_t *mask) { unsigned long i, *s, *m, x; int sig = 0; @@ -629,6 +629,12 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, struct sigqueue * q = NULL; int ret = 0; + /* + * Deliver the signal to listening signalfds. This must be called + * with the sighand lock held. + */ + signalfd_notify(t, sig); + /* * fast-pathed signals for kernel-internal things like SIGSTOP * or SIGKILL. @@ -1280,6 +1286,11 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) ret = 1; goto out; } + /* + * Deliver the signal to listening signalfds. This must be called + * with the sighand lock held. + */ + signalfd_notify(p, sig); list_add_tail(&q->list, &p->pending.list); sigaddset(&p->pending.signal, sig); @@ -1323,6 +1334,11 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) q->info.si_overrun++; goto out; } + /* + * Deliver the signal to listening signalfds. This must be called + * with the sighand lock held. + */ + signalfd_notify(p, sig); /* * Put this signal on the shared-pending queue. @@ -1983,6 +1999,8 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) /* * If you change siginfo_t structure, please be sure * this code is fixed accordingly. + * Please remember to update the signalfd_copyinfo() function + * inside fs/signalfd.c too, in case siginfo_t changes. * It should never copy any pad contained in the structure * to avoid security leaks, but must copy the generic * 3 ints plus the relevant union member. diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index d7306d0f3dfc..807e9bb8fcdb 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -141,3 +141,6 @@ cond_syscall(compat_sys_migrate_pages); cond_syscall(sys_bdflush); cond_syscall(sys_ioprio_set); cond_syscall(sys_ioprio_get); + +/* New file descriptors */ +cond_syscall(sys_signalfd); -- cgit v1.2.3 From b215e283992899650c4271e7385c79e26fb9a88e Mon Sep 17 00:00:00 2001 From: Davide Libenzi Date: Thu, 10 May 2007 22:23:16 -0700 Subject: signal/timer/event: timerfd core This patch introduces a new system call for timers events delivered though file descriptors. This allows timer event to be used with standard POSIX poll(2), select(2) and read(2). As a consequence of supporting the Linux f_op->poll subsystem, they can be used with epoll(2) too. The system call is defined as: int timerfd(int ufd, int clockid, int flags, const struct itimerspec *utmr); The "ufd" parameter allows for re-use (re-programming) of an existing timerfd w/out going through the close/open cycle (same as signalfd). If "ufd" is -1, s new file descriptor will be created, otherwise the existing "ufd" will be re-programmed. The "clockid" parameter is either CLOCK_MONOTONIC or CLOCK_REALTIME. The time specified in the "utmr->it_value" parameter is the expiry time for the timer. If the TFD_TIMER_ABSTIME flag is set in "flags", this is an absolute time, otherwise it's a relative time. If the time specified in the "utmr->it_interval" is not zero (.tv_sec == 0, tv_nsec == 0), this is the period at which the following ticks should be generated. 
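(Purely illustrative and not part of this patch: a minimal sketch of the interface just described, arming a relative, periodic one-second timer and reading back the number of ticks. It assumes __NR_timerfd has been wired up for the architecture; there is no glibc wrapper yet.)

	#include <stdio.h>
	#include <stdint.h>
	#include <time.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		struct itimerspec tmr = {
			.it_value    = { .tv_sec = 1, .tv_nsec = 0 },	/* first expiry */
			.it_interval = { .tv_sec = 1, .tv_nsec = 0 },	/* then every second */
		};
		uint32_t ticks;
		int i, tfd;

		/* ufd == -1 creates a new timerfd; flags == 0 means relative time */
		tfd = syscall(__NR_timerfd, -1, CLOCK_MONOTONIC, 0, &tmr);
		if (tfd < 0)
			return 1;

		for (i = 0; i < 3; i++) {
			/* read(2) blocks until at least one tick, then returns the count */
			if (read(tfd, &ticks, sizeof(ticks)) == sizeof(ticks))
				printf("ticks since last read: %u\n", ticks);
		}
		close(tfd);
		return 0;
	}
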
The "utmr->it_interval" should be set to zero if only one tick is requested. Setting the "utmr->it_value" to zero will disable the timer, or will create a timerfd without the timer enabled. The function returns the new (or same, in case "ufd" is a valid timerfd descriptor) file, or -1 in case of error. As stated before, the timerfd file descriptor supports poll(2), select(2) and epoll(2). When a timer event happened on the timerfd, a POLLIN mask will be returned. The read(2) call can be used, and it will return a u32 variable holding the number of "ticks" that happened on the interface since the last call to read(2). The read(2) call supportes the O_NONBLOCK flag too, and EAGAIN will be returned if no ticks happened. A quick test program, shows timerfd working correctly on my amd64 box: http://www.xmailserver.org/timerfd-test.c [akpm@linux-foundation.org: add sys_timerfd to sys_ni.c] Signed-off-by: Davide Libenzi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/Makefile | 1 + fs/timerfd.c | 227 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/syscalls.h | 2 + include/linux/timerfd.h | 17 ++++ init/Kconfig | 10 +++ kernel/sys_ni.c | 1 + 6 files changed, 258 insertions(+) create mode 100644 fs/timerfd.c create mode 100644 include/linux/timerfd.h (limited to 'include/linux') diff --git a/fs/Makefile b/fs/Makefile index cd8a57aeac04..39625da9e2d6 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_INOTIFY_USER) += inotify_user.o obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_ANON_INODES) += anon_inodes.o obj-$(CONFIG_SIGNALFD) += signalfd.o +obj-$(CONFIG_TIMERFD) += timerfd.o obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o nfsd-$(CONFIG_NFSD) := nfsctl.o diff --git a/fs/timerfd.c b/fs/timerfd.c new file mode 100644 index 000000000000..e329e37f15a8 --- /dev/null +++ b/fs/timerfd.c @@ -0,0 +1,227 @@ +/* + * fs/timerfd.c + * + * Copyright (C) 2007 Davide Libenzi + * + * + * Thanks to Thomas Gleixner for code reviews and useful comments. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct timerfd_ctx { + struct hrtimer tmr; + ktime_t tintv; + spinlock_t lock; + wait_queue_head_t wqh; + int expired; +}; + +/* + * This gets called when the timer event triggers. We set the "expired" + * flag, but we do not re-arm the timer (in case it's necessary, + * tintv.tv64 != 0) until the timer is read. + */ +static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) +{ + struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, tmr); + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + ctx->expired = 1; + wake_up_locked(&ctx->wqh); + spin_unlock_irqrestore(&ctx->lock, flags); + + return HRTIMER_NORESTART; +} + +static void timerfd_setup(struct timerfd_ctx *ctx, int clockid, int flags, + const struct itimerspec *ktmr) +{ + enum hrtimer_mode htmode; + ktime_t texp; + + htmode = (flags & TFD_TIMER_ABSTIME) ? 
+ HRTIMER_MODE_ABS: HRTIMER_MODE_REL; + + texp = timespec_to_ktime(ktmr->it_value); + ctx->expired = 0; + ctx->tintv = timespec_to_ktime(ktmr->it_interval); + hrtimer_init(&ctx->tmr, clockid, htmode); + ctx->tmr.expires = texp; + ctx->tmr.function = timerfd_tmrproc; + if (texp.tv64 != 0) + hrtimer_start(&ctx->tmr, texp, htmode); +} + +static int timerfd_release(struct inode *inode, struct file *file) +{ + struct timerfd_ctx *ctx = file->private_data; + + hrtimer_cancel(&ctx->tmr); + kfree(ctx); + return 0; +} + +static unsigned int timerfd_poll(struct file *file, poll_table *wait) +{ + struct timerfd_ctx *ctx = file->private_data; + unsigned int events = 0; + unsigned long flags; + + poll_wait(file, &ctx->wqh, wait); + + spin_lock_irqsave(&ctx->lock, flags); + if (ctx->expired) + events |= POLLIN; + spin_unlock_irqrestore(&ctx->lock, flags); + + return events; +} + +static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct timerfd_ctx *ctx = file->private_data; + ssize_t res; + u32 ticks = 0; + DECLARE_WAITQUEUE(wait, current); + + if (count < sizeof(ticks)) + return -EINVAL; + spin_lock_irq(&ctx->lock); + res = -EAGAIN; + if (!ctx->expired && !(file->f_flags & O_NONBLOCK)) { + __add_wait_queue(&ctx->wqh, &wait); + for (res = 0;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (ctx->expired) { + res = 0; + break; + } + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + spin_unlock_irq(&ctx->lock); + schedule(); + spin_lock_irq(&ctx->lock); + } + __remove_wait_queue(&ctx->wqh, &wait); + __set_current_state(TASK_RUNNING); + } + if (ctx->expired) { + ctx->expired = 0; + if (ctx->tintv.tv64 != 0) { + /* + * If tintv.tv64 != 0, this is a periodic timer that + * needs to be re-armed. We avoid doing it in the timer + * callback to avoid DoS attacks specifying a very + * short timer period. + */ + ticks = (u32) + hrtimer_forward(&ctx->tmr, + hrtimer_cb_get_time(&ctx->tmr), + ctx->tintv); + hrtimer_restart(&ctx->tmr); + } else + ticks = 1; + } + spin_unlock_irq(&ctx->lock); + if (ticks) + res = put_user(ticks, buf) ? -EFAULT: sizeof(ticks); + return res; +} + +static const struct file_operations timerfd_fops = { + .release = timerfd_release, + .poll = timerfd_poll, + .read = timerfd_read, +}; + +asmlinkage long sys_timerfd(int ufd, int clockid, int flags, + const struct itimerspec __user *utmr) +{ + int error; + struct timerfd_ctx *ctx; + struct file *file; + struct inode *inode; + struct itimerspec ktmr; + + if (copy_from_user(&ktmr, utmr, sizeof(ktmr))) + return -EFAULT; + + if (clockid != CLOCK_MONOTONIC && + clockid != CLOCK_REALTIME) + return -EINVAL; + if (!timespec_valid(&ktmr.it_value) || + !timespec_valid(&ktmr.it_interval)) + return -EINVAL; + + if (ufd == -1) { + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + init_waitqueue_head(&ctx->wqh); + spin_lock_init(&ctx->lock); + + timerfd_setup(ctx, clockid, flags, &ktmr); + + /* + * When we call this, the initialization must be complete, since + * anon_inode_getfd() will install the fd. + */ + error = anon_inode_getfd(&ufd, &inode, &file, "[timerfd]", + &timerfd_fops, ctx); + if (error) + goto err_tmrcancel; + } else { + file = fget(ufd); + if (!file) + return -EBADF; + ctx = file->private_data; + if (file->f_op != &timerfd_fops) { + fput(file); + return -EINVAL; + } + /* + * We need to stop the existing timer before reprogramming + * it to the new values. 
+ */ + for (;;) { + spin_lock_irq(&ctx->lock); + if (hrtimer_try_to_cancel(&ctx->tmr) >= 0) + break; + spin_unlock_irq(&ctx->lock); + cpu_relax(); + } + /* + * Re-program the timer to the new value ... + */ + timerfd_setup(ctx, clockid, flags, &ktmr); + + spin_unlock_irq(&ctx->lock); + fput(file); + } + + return ufd; + +err_tmrcancel: + hrtimer_cancel(&ctx->tmr); + kfree(ctx); + return error; +} + diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index e049f14a75b7..fc637be1d9cf 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -605,6 +605,8 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, size_t len); asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); +asmlinkage long sys_timerfd(int ufd, int clockid, int flags, + const struct itimerspec __user *utmr); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h new file mode 100644 index 000000000000..cf2b10d75731 --- /dev/null +++ b/include/linux/timerfd.h @@ -0,0 +1,17 @@ +/* + * include/linux/timerfd.h + * + * Copyright (C) 2007 Davide Libenzi + * + */ + +#ifndef _LINUX_TIMERFD_H +#define _LINUX_TIMERFD_H + + +#define TFD_TIMER_ABSTIME (1 << 0) + + + +#endif /* _LINUX_TIMERFD_H */ + diff --git a/init/Kconfig b/init/Kconfig index db707204b751..02c167de9646 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -502,6 +502,16 @@ config SIGNALFD If unsure, say Y. +config TIMERFD + bool "Enable timerfd() system call" if EMBEDDED + depends on ANON_INODES + default y + help + Enable the timerfd() system call that allows to receive timer + events on a file descriptor. + + If unsure, say Y. + config SHMEM bool "Use full shmem filesystem" if EMBEDDED default y diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 807e9bb8fcdb..b18f62549515 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -144,3 +144,4 @@ cond_syscall(sys_ioprio_get); /* New file descriptors */ cond_syscall(sys_signalfd); +cond_syscall(sys_timerfd); -- cgit v1.2.3 From 83f5d1266926c75890f1bc4678e49d79483cb573 Mon Sep 17 00:00:00 2001 From: Davide Libenzi Date: Thu, 10 May 2007 22:23:18 -0700 Subject: signal/timer/event: timerfd compat code This patch implements the necessary compat code for the timerfd system call. 
Signed-off-by: Davide Libenzi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/compat.c | 23 +++++++++++++++++++++++ include/linux/compat.h | 5 +++++ kernel/compat.c | 8 ++++---- 3 files changed, 32 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/compat.c b/fs/compat.c index 2487b83b18df..7b21b0a82596 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -2225,3 +2225,26 @@ asmlinkage long compat_sys_signalfd(int ufd, #endif /* CONFIG_SIGNALFD */ +#ifdef CONFIG_TIMERFD + +asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags, + const struct compat_itimerspec __user *utmr) +{ + long res; + struct itimerspec t; + struct itimerspec __user *ut; + + res = -EFAULT; + if (get_compat_itimerspec(&t, utmr)) + goto err_exit; + ut = compat_alloc_user_space(sizeof(*ut)); + if (copy_to_user(ut, &t, sizeof(t)) ) + goto err_exit; + + res = sys_timerfd(ufd, clockid, flags, ut); +err_exit: + return res; +} + +#endif /* CONFIG_TIMERFD */ + diff --git a/include/linux/compat.h b/include/linux/compat.h index 70a157a130bb..636502c02734 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -225,6 +225,11 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs, return lhs->tv_nsec - rhs->tv_nsec; } +extern int get_compat_itimerspec(struct itimerspec *dst, + const struct compat_itimerspec __user *src); +extern int put_compat_itimerspec(struct compat_itimerspec __user *dst, + const struct itimerspec *src); + asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); extern int compat_printk(const char *fmt, ...); diff --git a/kernel/compat.c b/kernel/compat.c index cebb4c28c039..3bae3742c2aa 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -475,8 +475,8 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, return min_length; } -static int get_compat_itimerspec(struct itimerspec *dst, - struct compat_itimerspec __user *src) +int get_compat_itimerspec(struct itimerspec *dst, + const struct compat_itimerspec __user *src) { if (get_compat_timespec(&dst->it_interval, &src->it_interval) || get_compat_timespec(&dst->it_value, &src->it_value)) @@ -484,8 +484,8 @@ static int get_compat_itimerspec(struct itimerspec *dst, return 0; } -static int put_compat_itimerspec(struct compat_itimerspec __user *dst, - struct itimerspec *src) +int put_compat_itimerspec(struct compat_itimerspec __user *dst, + const struct itimerspec *src) { if (put_compat_timespec(&src->it_interval, &dst->it_interval) || put_compat_timespec(&src->it_value, &dst->it_value)) -- cgit v1.2.3 From e1ad7468c77ddb94b0615d5f50fa255525fde0f0 Mon Sep 17 00:00:00 2001 From: Davide Libenzi Date: Thu, 10 May 2007 22:23:19 -0700 Subject: signal/timer/event: eventfd core This is a very simple and light file descriptor, that can be used as event wait/dispatch by userspace (both wait and dispatch) and by the kernel (dispatch only). It can be used instead of pipe(2) in all cases where those would simply be used to signal events. Their kernel overhead is much lower than pipes, and they do not consume two fds. When used in the kernel, it can offer an fd-bridge to enable, for example, functionalities like KAIO or syslets/threadlets to signal to an fd the completion of certain operations. But more in general, an eventfd can be used by the kernel to signal readiness, in a POSIX poll/select way, of interfaces that would otherwise be incompatible with it. 
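Purely as an illustration (not part of this patch), a minimal sketch of the counter semantics, using the API described below: two write(2) calls add to the counter, one read(2) returns the accumulated value and resets it to zero. It assumes __NR_eventfd has been wired up for the architecture; there is no glibc wrapper yet.

	#include <stdio.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		uint64_t cnt;
		int efd = syscall(__NR_eventfd, 0);	/* initial counter value: 0 */

		if (efd < 0)
			return 1;

		cnt = 3;
		write(efd, &cnt, sizeof(cnt));		/* counter is now 3 */
		cnt = 4;
		write(efd, &cnt, sizeof(cnt));		/* counter is now 7 */

		/* Returns the current counter value (7) and resets it to zero */
		if (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))
			printf("count=%llu\n", (unsigned long long)cnt);
		close(efd);
		return 0;
	}
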
The API is: int eventfd(unsigned int count); The eventfd API accepts an initial "count" parameter, and returns an eventfd fd. It supports poll(2) (POLLIN, POLLOUT, POLLERR), read(2) and write(2). The POLLIN flag is raised when the internal counter is greater than zero. The POLLOUT flag is raised when at least a value of "1" can be written to the internal counter. The POLLERR flag is raised when an overflow in the counter value is detected. The write(2) operation can never overflow the counter, since it blocks (unless O_NONBLOCK is set, in which case -EAGAIN is returned). But the eventfd_signal() function can do it, since it's supposed to not sleep during its operation. The read(2) function reads the __u64 counter value, and reset the internal value to zero. If the value read is equal to (__u64) -1, an overflow happened on the internal counter (due to 2^64 eventfd_signal() posts that has never been retired - unlickely, but possible). The write(2) call writes an __u64 count value, and adds it to the current counter. The eventfd fd supports O_NONBLOCK also. On the kernel side, we have: struct file *eventfd_fget(int fd); int eventfd_signal(struct file *file, unsigned int n); The eventfd_fget() should be called to get a struct file* from an eventfd fd (this is an fget() + check of f_op being an eventfd fops pointer). The kernel can then call eventfd_signal() every time it wants to post an event to userspace. The eventfd_signal() function can be called from any context. An eventfd() simple test and bench is available here: http://www.xmailserver.org/eventfd-bench.c This is the eventfd-based version of pipetest-4 (pipe(2) based): http://www.xmailserver.org/pipetest-4.c Not that performance matters much in the eventfd case, but eventfd-bench shows almost as double as performance than pipetest-4. [akpm@linux-foundation.org: fix i386 build] [akpm@linux-foundation.org: add sys_eventfd to sys_ni.c] Signed-off-by: Davide Libenzi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/Makefile | 1 + fs/eventfd.c | 228 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/eventfd.h | 29 ++++++ include/linux/syscalls.h | 1 + init/Kconfig | 10 +++ kernel/sys_ni.c | 1 + 6 files changed, 270 insertions(+) create mode 100644 fs/eventfd.c create mode 100644 include/linux/eventfd.h (limited to 'include/linux') diff --git a/fs/Makefile b/fs/Makefile index 39625da9e2d6..720c29d57a62 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_ANON_INODES) += anon_inodes.o obj-$(CONFIG_SIGNALFD) += signalfd.o obj-$(CONFIG_TIMERFD) += timerfd.o +obj-$(CONFIG_EVENTFD) += eventfd.o obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o nfsd-$(CONFIG_NFSD) := nfsctl.o diff --git a/fs/eventfd.c b/fs/eventfd.c new file mode 100644 index 000000000000..480e2b3c4166 --- /dev/null +++ b/fs/eventfd.c @@ -0,0 +1,228 @@ +/* + * fs/eventfd.c + * + * Copyright (C) 2007 Davide Libenzi + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct eventfd_ctx { + spinlock_t lock; + wait_queue_head_t wqh; + /* + * Every time that a write(2) is performed on an eventfd, the + * value of the __u64 being written is added to "count" and a + * wakeup is performed on "wqh". A read(2) will return the "count" + * value to userspace, and will reset "count" to zero. The kernel + * size eventfd_signal() also, adds to the "count" counter and + * issue a wakeup. 
+ */ + __u64 count; +}; + +/* + * Adds "n" to the eventfd counter "count". Returns "n" in case of + * success, or a value lower then "n" in case of coutner overflow. + * This function is supposed to be called by the kernel in paths + * that do not allow sleeping. In this function we allow the counter + * to reach the ULLONG_MAX value, and we signal this as overflow + * condition by returining a POLLERR to poll(2). + */ +int eventfd_signal(struct file *file, int n) +{ + struct eventfd_ctx *ctx = file->private_data; + unsigned long flags; + + if (n < 0) + return -EINVAL; + spin_lock_irqsave(&ctx->lock, flags); + if (ULLONG_MAX - ctx->count < n) + n = (int) (ULLONG_MAX - ctx->count); + ctx->count += n; + if (waitqueue_active(&ctx->wqh)) + wake_up_locked(&ctx->wqh); + spin_unlock_irqrestore(&ctx->lock, flags); + + return n; +} + +static int eventfd_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static unsigned int eventfd_poll(struct file *file, poll_table *wait) +{ + struct eventfd_ctx *ctx = file->private_data; + unsigned int events = 0; + unsigned long flags; + + poll_wait(file, &ctx->wqh, wait); + + spin_lock_irqsave(&ctx->lock, flags); + if (ctx->count > 0) + events |= POLLIN; + if (ctx->count == ULLONG_MAX) + events |= POLLERR; + if (ULLONG_MAX - 1 > ctx->count) + events |= POLLOUT; + spin_unlock_irqrestore(&ctx->lock, flags); + + return events; +} + +static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct eventfd_ctx *ctx = file->private_data; + ssize_t res; + __u64 ucnt; + DECLARE_WAITQUEUE(wait, current); + + if (count < sizeof(ucnt)) + return -EINVAL; + spin_lock_irq(&ctx->lock); + res = -EAGAIN; + ucnt = ctx->count; + if (ucnt > 0) + res = sizeof(ucnt); + else if (!(file->f_flags & O_NONBLOCK)) { + __add_wait_queue(&ctx->wqh, &wait); + for (res = 0;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (ctx->count > 0) { + ucnt = ctx->count; + res = sizeof(ucnt); + break; + } + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + spin_unlock_irq(&ctx->lock); + schedule(); + spin_lock_irq(&ctx->lock); + } + __remove_wait_queue(&ctx->wqh, &wait); + __set_current_state(TASK_RUNNING); + } + if (res > 0) { + ctx->count = 0; + if (waitqueue_active(&ctx->wqh)) + wake_up_locked(&ctx->wqh); + } + spin_unlock_irq(&ctx->lock); + if (res > 0 && put_user(ucnt, (__u64 __user *) buf)) + return -EFAULT; + + return res; +} + +static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count, + loff_t *ppos) +{ + struct eventfd_ctx *ctx = file->private_data; + ssize_t res; + __u64 ucnt; + DECLARE_WAITQUEUE(wait, current); + + if (count < sizeof(ucnt)) + return -EINVAL; + if (copy_from_user(&ucnt, buf, sizeof(ucnt))) + return -EFAULT; + if (ucnt == ULLONG_MAX) + return -EINVAL; + spin_lock_irq(&ctx->lock); + res = -EAGAIN; + if (ULLONG_MAX - ctx->count > ucnt) + res = sizeof(ucnt); + else if (!(file->f_flags & O_NONBLOCK)) { + __add_wait_queue(&ctx->wqh, &wait); + for (res = 0;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (ULLONG_MAX - ctx->count > ucnt) { + res = sizeof(ucnt); + break; + } + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + spin_unlock_irq(&ctx->lock); + schedule(); + spin_lock_irq(&ctx->lock); + } + __remove_wait_queue(&ctx->wqh, &wait); + __set_current_state(TASK_RUNNING); + } + if (res > 0) { + ctx->count += ucnt; + if (waitqueue_active(&ctx->wqh)) + wake_up_locked(&ctx->wqh); + } + spin_unlock_irq(&ctx->lock); + + return res; 
+} + +static const struct file_operations eventfd_fops = { + .release = eventfd_release, + .poll = eventfd_poll, + .read = eventfd_read, + .write = eventfd_write, +}; + +struct file *eventfd_fget(int fd) +{ + struct file *file; + + file = fget(fd); + if (!file) + return ERR_PTR(-EBADF); + if (file->f_op != &eventfd_fops) { + fput(file); + return ERR_PTR(-EINVAL); + } + + return file; +} + +asmlinkage long sys_eventfd(unsigned int count) +{ + int error, fd; + struct eventfd_ctx *ctx; + struct file *file; + struct inode *inode; + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + init_waitqueue_head(&ctx->wqh); + spin_lock_init(&ctx->lock); + ctx->count = count; + + /* + * When we call this, the initialization must be complete, since + * anon_inode_getfd() will install the fd. + */ + error = anon_inode_getfd(&fd, &inode, &file, "[eventfd]", + &eventfd_fops, ctx); + if (!error) + return fd; + + kfree(ctx); + return error; +} + diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h new file mode 100644 index 000000000000..0d6ecc60b94d --- /dev/null +++ b/include/linux/eventfd.h @@ -0,0 +1,29 @@ +/* + * include/linux/eventfd.h + * + * Copyright (C) 2007 Davide Libenzi + * + */ + +#ifndef _LINUX_EVENTFD_H +#define _LINUX_EVENTFD_H + + +#ifdef __KERNEL__ + +#ifdef CONFIG_EVENTFD + +struct file *eventfd_fget(int fd); +int eventfd_signal(struct file *file, int n); + +#else /* CONFIG_EVENTFD */ + +#define eventfd_fget(fd) ERR_PTR(-ENOSYS) +#define eventfd_signal(f, n) 0 + +#endif /* CONFIG_EVENTFD */ + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_EVENTFD_H */ + diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index fc637be1d9cf..b02070eac422 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -607,6 +607,7 @@ asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct g asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); asmlinkage long sys_timerfd(int ufd, int clockid, int flags, const struct itimerspec __user *utmr); +asmlinkage long sys_eventfd(unsigned int count); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); diff --git a/init/Kconfig b/init/Kconfig index 02c167de9646..4e009fde4b69 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -512,6 +512,16 @@ config TIMERFD If unsure, say Y. +config EVENTFD + bool "Enable eventfd() system call" if EMBEDDED + depends on ANON_INODES + default y + help + Enable the eventfd() system call that allows to receive both + kernel notification (ie. KAIO) or userspace notifications. + + If unsure, say Y. + config SHMEM bool "Use full shmem filesystem" if EMBEDDED default y diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index b18f62549515..b6d77a8a1ca9 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -145,3 +145,4 @@ cond_syscall(sys_ioprio_get); /* New file descriptors */ cond_syscall(sys_signalfd); cond_syscall(sys_timerfd); +cond_syscall(sys_eventfd); -- cgit v1.2.3 From 9c3060bedd84144653a2ad7bea32389f65598d40 Mon Sep 17 00:00:00 2001 From: Davide Libenzi Date: Thu, 10 May 2007 22:23:21 -0700 Subject: signal/timer/event: KAIO eventfd support example This is an example about how to add eventfd support to the current KAIO code, in order to enable KAIO to post readiness events to a pollable fd (hence compatible with POSIX select/poll). The KAIO code simply signals the eventfd fd when events are ready, and this triggers a POLLIN in the fd. 
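Purely as an illustration (not part of this patch), a hypothetical user-space sketch of the mechanism: submit one AIO read whose completion is signalled through an eventfd, wait for it with poll(2), then collect the result with io_getevents(). The file path is arbitrary, error handling is omitted, and it assumes __NR_eventfd and the raw AIO syscalls are available; the aio_flags/aio_resfd members it uses are the ones introduced below.

	#include <fcntl.h>
	#include <poll.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/aio_abi.h>

	int main(void)
	{
		static char buf[4096];
		aio_context_t ctx = 0;
		struct iocb cb, *cbs[1] = { &cb };
		struct io_event ev;
		struct pollfd pfd;
		uint64_t done;
		int fd = open("/etc/hostname", O_RDONLY);	/* arbitrary example file */
		int efd = syscall(__NR_eventfd, 0);

		syscall(__NR_io_setup, 8, &ctx);

		memset(&cb, 0, sizeof(cb));
		cb.aio_lio_opcode = IOCB_CMD_PREAD;
		cb.aio_fildes     = fd;
		cb.aio_buf        = (uint64_t)(unsigned long)buf;
		cb.aio_nbytes     = sizeof(buf);
		cb.aio_flags      = IOCB_FLAG_RESFD;	/* route completion to the eventfd */
		cb.aio_resfd      = efd;

		syscall(__NR_io_submit, ctx, 1, cbs);

		pfd.fd = efd;
		pfd.events = POLLIN;
		poll(&pfd, 1, -1);			/* wakes up when the read completes */
		read(efd, &done, sizeof(done));		/* number of completed requests */
		syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);

		syscall(__NR_io_destroy, ctx);
		close(efd);
		close(fd);
		return 0;
	}
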
This patch uses a reserved for future use member of the struct iocb to pass an eventfd file descriptor, that KAIO will use to post events every time a request completes. At that point, an aio_getevents() will return the completed result to a struct io_event. I made a quick test program to verify the patch, and it runs fine here: http://www.xmailserver.org/eventfd-aio-test.c The test program uses poll(2), but it'd, of course, work with select and epoll too. This can allow to schedule both block I/O and other poll-able devices requests, and wait for results using select/poll/epoll. In a typical scenario, an application would submit KAIO request using aio_submit(), and will also use epoll_ctl() on the whole other class of devices (that with the addition of signals, timers and user events, now it's pretty much complete), and then would: epoll_wait(...); for_each_event { if (curr_event_is_kaiofd) { aio_getevents(); dispatch_aio_events(); } else { dispatch_epoll_event(); } } Signed-off-by: Davide Libenzi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/aio.c | 28 ++++++++++++++++++++++++++-- include/linux/aio.h | 6 ++++++ include/linux/aio_abi.h | 18 +++++++++++++++++- 3 files changed, 49 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/aio.c b/fs/aio.c index ac1c1587aa02..dbe699e9828c 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -417,6 +418,7 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx) req->private = NULL; req->ki_iovec = NULL; INIT_LIST_HEAD(&req->ki_run_list); + req->ki_eventfd = ERR_PTR(-EINVAL); /* Check if the completion queue has enough free space to * accept an event from this io. @@ -458,6 +460,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) { assert_spin_locked(&ctx->ctx_lock); + if (!IS_ERR(req->ki_eventfd)) + fput(req->ki_eventfd); if (req->ki_dtor) req->ki_dtor(req); if (req->ki_iovec != &req->ki_inline_vec) @@ -942,6 +946,14 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2) return 1; } + /* + * Check if the user asked us to deliver the result through an + * eventfd. The eventfd_signal() function is safe to be called + * from IRQ context. + */ + if (!IS_ERR(iocb->ki_eventfd)) + eventfd_signal(iocb->ki_eventfd, 1); + info = &ctx->ring_info; /* add a completion event to the ring buffer. @@ -1526,8 +1538,7 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, ssize_t ret; /* enforce forwards compatibility on users */ - if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2 || - iocb->aio_reserved3)) { + if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { pr_debug("EINVAL: io_submit: reserve field set\n"); return -EINVAL; } @@ -1551,6 +1562,19 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, fput(file); return -EAGAIN; } + if (iocb->aio_flags & IOCB_FLAG_RESFD) { + /* + * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an + * instance of the file* now. The file descriptor must be + * an eventfd() fd, and will be signaled for each completed + * event using the eventfd_signal() function. 
+ */ + req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd); + if (unlikely(IS_ERR(req->ki_eventfd))) { + ret = PTR_ERR(req->ki_eventfd); + goto out_put_req; + } + } req->ki_filp = file; ret = put_user(req->ki_key, &user_iocb->aio_key); diff --git a/include/linux/aio.h b/include/linux/aio.h index 43dc2ebfaa0e..b903fc02bdb7 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -119,6 +119,12 @@ struct kiocb { struct list_head ki_list; /* the aio core uses this * for cancellation */ + + /* + * If the aio_resfd field of the userspace iocb is not zero, + * this is the underlying file* to deliver event to. + */ + struct file *ki_eventfd; }; #define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY) diff --git a/include/linux/aio_abi.h b/include/linux/aio_abi.h index e3ca0a485cc6..9e0172931315 100644 --- a/include/linux/aio_abi.h +++ b/include/linux/aio_abi.h @@ -45,6 +45,14 @@ enum { IOCB_CMD_PWRITEV = 8, }; +/* + * Valid flags for the "aio_flags" member of the "struct iocb". + * + * IOCB_FLAG_RESFD - Set if the "aio_resfd" member of the "struct iocb" + * is valid. + */ +#define IOCB_FLAG_RESFD (1 << 0) + /* read() from /dev/aio returns these structures. */ struct io_event { __u64 data; /* the data field from the iocb */ @@ -84,7 +92,15 @@ struct iocb { /* extra parameters */ __u64 aio_reserved2; /* TODO: use this for a (struct sigevent *) */ - __u64 aio_reserved3; + + /* flags for the "struct iocb" */ + __u32 aio_flags; + + /* + * if the IOCB_FLAG_RESFD flag of "aio_flags" is set, this is an + * eventfd to signal AIO readiness to + */ + __u32 aio_resfd; }; /* 64 bytes */ #undef IFBIG -- cgit v1.2.3