From de125187dc17e3715ba983adf60faecfdc3a64c4 Mon Sep 17 00:00:00 2001 From: Jean-Christophe PLAGNIOL-VILLARD Date: Fri, 6 May 2011 07:03:49 +0200 Subject: kconfig: autogenerated config_is_xxx macro this will allow to use to use if(config_is_xxx()) if(config_is_xxx_module()) in the code instead of #ifdef CONFIG_xxx #ifdef CONFIG_xxx_MODULE and now let the compiler remove the non usefull code and not the pre-processor as done in the mach-types for arm as exmaple Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD Acked-by: Andi Kleen Signed-off-by: Michal Marek --- scripts/kconfig/confdata.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 834eecb010ba..db06af0321b3 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -778,6 +778,29 @@ out: return res; } +static void conf_write_function_autoconf(FILE *out, char* conf, char* name, + int val) +{ + char c; + char *tmp, *d; + + d = strdup(conf); + tmp = d; + while ((c = *conf++)) + *d++ = tolower(c); + + fprintf(out, "#define %sis_", tmp); + free(tmp); + + d = strdup(name); + tmp = d; + while ((c = *name++)) + *d++ = tolower(c); + fprintf(out, "%s%s() %d\n", tmp, (val > 1) ? "_module" : "", + val ? 1 : 0); + free(tmp); +} + int conf_write_autoconf(void) { struct symbol *sym; @@ -785,6 +808,7 @@ int conf_write_autoconf(void) const char *name; FILE *out, *tristate, *out_h; int i; + int fct_val; sym_clear_all_valid(); @@ -825,6 +849,7 @@ int conf_write_autoconf(void) rootmenu.prompt->text); for_all_symbols(i, sym) { + fct_val = 1; sym_calc_value(sym); if (!(sym->flags & SYMBOL_WRITE) || !sym->name) continue; @@ -838,12 +863,14 @@ int conf_write_autoconf(void) case S_TRISTATE: switch (sym_get_tristate_value(sym)) { case no: + fct_val = 0; break; case mod: fprintf(tristate, "%s%s=M\n", CONFIG_, sym->name); fprintf(out_h, "#define %s%s_MODULE 1\n", CONFIG_, sym->name); + fct_val = 2; break; case yes: if (sym->type == S_TRISTATE) @@ -870,8 +897,10 @@ int conf_write_autoconf(void) CONFIG_, sym->name, str); break; default: + fct_val = 0; break; } + conf_write_function_autoconf(out_h, CONFIG_, sym->name, fct_val); } fclose(out); fclose(tristate); -- cgit v1.2.3 From 4c54f0f846102b05efcc99114ada2b913baab161 Mon Sep 17 00:00:00 2001 From: Michal Marek Date: Tue, 17 May 2011 17:31:53 +0200 Subject: kconfig: Only generate config_is_xxx for bool and tristate options For strings and integers, the config_is_xxx macros are useless and sometimes misleading: #define CONFIG_INITRAMFS_SOURCE "" #define config_is_initramfs_source() 1 Cc: Jean-Christophe PLAGNIOL-VILLARD Signed-off-by: Michal Marek --- scripts/kconfig/confdata.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index a1a9872e4a2d..d01f962e879f 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -798,7 +798,6 @@ int conf_write_autoconf(void) const char *name; FILE *out, *tristate, *out_h; int i; - int fct_val; sym_clear_all_valid(); @@ -839,7 +838,7 @@ int conf_write_autoconf(void) rootmenu.prompt->text); for_all_symbols(i, sym) { - fct_val = 1; + int fct_val = 0; sym_calc_value(sym); if (!(sym->flags & SYMBOL_WRITE) || !sym->name) continue; @@ -853,7 +852,6 @@ int conf_write_autoconf(void) case S_TRISTATE: switch (sym_get_tristate_value(sym)) { case no: - fct_val = 0; break; case mod: fprintf(tristate, "%s%s=M\n", @@ -868,8 +866,10 @@ int conf_write_autoconf(void) CONFIG_, sym->name); 
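As a rough sketch of the idiom these two kconfig patches enable (CONFIG_EXAMPLE, config_is_example() and example_setup() below are hypothetical placeholders for a real bool option and its setup code): for a bool or tristate option that Kconfig writes out, the generated header always defines the helper as 0 or 1, so the test compiles in every configuration and the compiler, rather than the preprocessor, discards the dead branch.

static void example_setup(void)
{
        /* hypothetical setup code that used to be guarded by CONFIG_EXAMPLE */
}

static void example_init(void)
{
        /*
         * Instead of:
         *      #ifdef CONFIG_EXAMPLE
         *              example_setup();
         *      #endif
         */
        if (config_is_example())
                example_setup();
}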
fprintf(out_h, "#define %s%s 1\n", CONFIG_, sym->name); + fct_val = 1; break; } + conf_write_function_autoconf(out_h, CONFIG_, sym->name, fct_val); break; case S_STRING: conf_write_string(true, sym->name, sym_get_string_value(sym), out_h); @@ -887,10 +887,8 @@ int conf_write_autoconf(void) CONFIG_, sym->name, str); break; default: - fct_val = 0; break; } - conf_write_function_autoconf(out_h, CONFIG_, sym->name, fct_val); } fclose(out); fclose(tristate); -- cgit v1.2.3 From f21e0e81d81b649ad309cedc7226f1bed72982e0 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 24 May 2011 08:12:40 +0800 Subject: regulator: Do bulk enables of regulators in parallel In order to reduce the impact of ramp times rather than enabling the regulators for a device in series use async tasks to run the actual enables. This means that the delays which the enables implement can all run in parallel, though it does mean that the order in which the supplies come on may be unstable. For super bonus fun points if any of the regulators are shared between multiple supplies on the same device (as is rather likely) then this will test our locking. Note that in this case we only delay once for each physical regulator so the threads shouldn't block each other while delaying. It'd be even nicer if we could coalesce writes to a shared enable registers in PMICs but that's definitely future work, and it may also be useful and is certainly more achievable to optimise out the parallelism if none of the regulators implement ramp delays. Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/core.c | 32 ++++++++++++++++++++++++++------ include/linux/regulator/consumer.h | 3 +++ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index d3e38790906e..7b38af90a012 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -2264,6 +2265,13 @@ err: } EXPORT_SYMBOL_GPL(regulator_bulk_get); +static void regulator_bulk_enable_async(void *data, async_cookie_t cookie) +{ + struct regulator_bulk_data *bulk = data; + + bulk->ret = regulator_enable(bulk->consumer); +} + /** * regulator_bulk_enable - enable multiple regulator consumers * @@ -2279,21 +2287,33 @@ EXPORT_SYMBOL_GPL(regulator_bulk_get); int regulator_bulk_enable(int num_consumers, struct regulator_bulk_data *consumers) { + LIST_HEAD(async_domain); int i; - int ret; + int ret = 0; + + for (i = 0; i < num_consumers; i++) + async_schedule_domain(regulator_bulk_enable_async, + &consumers[i], &async_domain); + + async_synchronize_full_domain(&async_domain); + /* If any consumer failed we need to unwind any that succeeded */ for (i = 0; i < num_consumers; i++) { - ret = regulator_enable(consumers[i].consumer); - if (ret != 0) + if (consumers[i].ret != 0) { + ret = consumers[i].ret; goto err; + } } return 0; err: - pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret); - for (--i; i >= 0; --i) - regulator_disable(consumers[i].consumer); + for (i = 0; i < num_consumers; i++) + if (consumers[i].ret == 0) + regulator_disable(consumers[i].consumer); + else + pr_err("Failed to enable %s: %d\n", + consumers[i].supply, consumers[i].ret); return ret; } diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 9e87c1cb7270..26f6ea4444e3 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -122,6 +122,9 @@ struct regulator; struct 
regulator_bulk_data { const char *supply; struct regulator *consumer; + + /* Internal use */ + int ret; }; #if defined(CONFIG_REGULATOR) -- cgit v1.2.3 From 2ae3636b79aee1a69b2e84eff68bb123090796d3 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 24 May 2011 23:14:40 +0800 Subject: regulator: Use _cansleep() for WM8994 regulator GPIOs The WM8994 regulator driver is perfectly happy if the GPIO used to enable the regulator sleeps so call the appropriate GPIO API. Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/wm8994-regulator.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c index 35b2958d5106..1a6a690f24db 100644 --- a/drivers/regulator/wm8994-regulator.c +++ b/drivers/regulator/wm8994-regulator.c @@ -43,7 +43,7 @@ static int wm8994_ldo_enable(struct regulator_dev *rdev) if (!ldo->enable) return 0; - gpio_set_value(ldo->enable, 1); + gpio_set_value_cansleep(ldo->enable, 1); ldo->is_enabled = true; return 0; @@ -57,7 +57,7 @@ static int wm8994_ldo_disable(struct regulator_dev *rdev) if (!ldo->enable) return -EINVAL; - gpio_set_value(ldo->enable, 0); + gpio_set_value_cansleep(ldo->enable, 0); ldo->is_enabled = false; return 0; -- cgit v1.2.3 From 7736f11dbadce33d3f12bf0e8114d0f1da5e8622 Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Fri, 27 May 2011 12:25:27 -0700 Subject: regulator: twl-regulator: fix n_voltages for twl6030 variable LDOs The n_voltages initializer for the TWL6030_ADJUSTABLE_LDO macro is off by one, causing the the highest supported voltage to be unreachable. Setting the machine constraints to only allow the highest voltage causes errors: machine_constraints_voltage: VAUX3_6030: unsupportable voltage constraints twl_reg twl_reg.39: can't register VAUX3_6030, -22 twl_reg: probe of twl_reg.39 failed with error -22 This patch fixes the off by one error. Tested by setting VAUX3_6030 to 3.3V. Signed-off-by: Colin Cross Acked-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/twl-regulator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 87fe0f75a56e..503c2bc64c84 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -864,7 +864,7 @@ static struct regulator_ops twlsmps_ops = { .desc = { \ .name = #label, \ .id = TWL6030_REG_##label, \ - .n_voltages = (max_mVolts - min_mVolts)/100, \ + .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \ .ops = &twl6030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ -- cgit v1.2.3 From c3d4913cd4cd469cbf29d411293e937729e83f3a Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Tue, 31 May 2011 10:34:45 +0900 Subject: pch_dma: fix DMA issue(ch8-ch11) ISSUE: In case PCH_DMA with I2S communications with ch8~ch11, sometimes I2S data is not send correctly. CAUSE: The following patch I submitted before was not enough modification for supporting DMA ch8~ch11. The modification for status register of ch8~11 was not enough. pch_dma: Support I2S for ML7213 IOH author Tomoya MORINAGA Mon, 9 May 2011 07:09:38 +0000 (16:09 +0900) committer Vinod Koul Mon, 9 May 2011 11:42:23 +0000 (16:42 +0530) commit 194f5f2706c7472f9c6bb2d17fa788993606581f tree c9d4903ea02b18939a4f390956a48be1a3734517 parent 60092d0bde4c8741198da4a69b693d3709385bf1 This patch fixes the issue. We can confirm PCH_DMA with I2S communications with ch8~ch11 works well. 
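As a quick reference for the pch_dma patch that follows, this is the channel-to-register mapping it implements (the helper names below are illustrative, not from the driver): channels 0-7 are reported in STS0 and enabled through CTL2 bits 0-7, while channels 8-11 are reported in STS2 and enabled through CTL2 bits 16-19.

static inline bool pch_chan_uses_sts2(int chan_id)
{
        return chan_id >= 8;            /* ch8-ch11 live in the STS2 register */
}

static inline int pch_ctl2_irq_bit(int chan_id)
{
        /* ch0-ch7 use CTL2 bits 0-7, ch8-ch11 use bits 16-19 */
        return (chan_id < 8) ? chan_id : chan_id + 8;
}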
Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 69 ++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 55 insertions(+), 14 deletions(-) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index ff5b38f9d45b..65c32f893a57 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -45,7 +45,8 @@ #define DMA_STATUS_MASK_BITS 0x3 #define DMA_STATUS_SHIFT_BITS 16 #define DMA_STATUS_IRQ(x) (0x1 << (x)) -#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8)) +#define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8)) +#define DMA_STATUS2_ERR(x) (0x1 << (x)) #define DMA_DESC_WIDTH_SHIFT_BITS 12 #define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) @@ -133,6 +134,7 @@ struct pch_dma { #define PCH_DMA_CTL3 0x0C #define PCH_DMA_STS0 0x10 #define PCH_DMA_STS1 0x14 +#define PCH_DMA_STS2 0x18 #define dma_readl(pd, name) \ readl((pd)->membase + PCH_DMA_##name) @@ -183,13 +185,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable) { struct pch_dma *pd = to_pd(chan->device); u32 val; + int pos; + + if (chan->chan_id < 8) + pos = chan->chan_id; + else + pos = chan->chan_id + 8; val = dma_readl(pd, CTL2); if (enable) - val |= 0x1 << chan->chan_id; + val |= 0x1 << pos; else - val &= ~(0x1 << chan->chan_id); + val &= ~(0x1 << pos); dma_writel(pd, CTL2, val); @@ -262,7 +270,7 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode) chan->chan_id, val); } -static u32 pdc_get_status(struct pch_dma_chan *pd_chan) +static u32 pdc_get_status0(struct pch_dma_chan *pd_chan) { struct pch_dma *pd = to_pd(pd_chan->chan.device); u32 val; @@ -272,9 +280,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan) DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); } +static u32 pdc_get_status2(struct pch_dma_chan *pd_chan) +{ + struct pch_dma *pd = to_pd(pd_chan->chan.device); + u32 val; + + val = dma_readl(pd, STS2); + return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS + + DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8))); +} + static bool pdc_is_idle(struct pch_dma_chan *pd_chan) { - if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE) + u32 sts; + + if (pd_chan->chan.chan_id < 8) + sts = pdc_get_status0(pd_chan); + else + sts = pdc_get_status2(pd_chan); + + + if (sts == DMA_STATUS_IDLE) return true; else return false; @@ -693,30 +719,45 @@ static irqreturn_t pd_irq(int irq, void *devid) struct pch_dma *pd = (struct pch_dma *)devid; struct pch_dma_chan *pd_chan; u32 sts0; + u32 sts2; int i; - int ret = IRQ_NONE; + int ret0 = IRQ_NONE; + int ret2 = IRQ_NONE; sts0 = dma_readl(pd, STS0); + sts2 = dma_readl(pd, STS2); dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); for (i = 0; i < pd->dma.chancnt; i++) { pd_chan = &pd->channels[i]; - if (sts0 & DMA_STATUS_IRQ(i)) { - if (sts0 & DMA_STATUS_ERR(i)) - set_bit(0, &pd_chan->err_status); + if (i < 8) { + if (sts0 & DMA_STATUS_IRQ(i)) { + if (sts0 & DMA_STATUS0_ERR(i)) + set_bit(0, &pd_chan->err_status); - tasklet_schedule(&pd_chan->tasklet); - ret = IRQ_HANDLED; - } + tasklet_schedule(&pd_chan->tasklet); + ret0 = IRQ_HANDLED; + } + } else { + if (sts2 & DMA_STATUS_IRQ(i - 8)) { + if (sts2 & DMA_STATUS2_ERR(i)) + set_bit(0, &pd_chan->err_status); + tasklet_schedule(&pd_chan->tasklet); + ret2 = IRQ_HANDLED; + } + } } /* clear interrupt bits in status register */ - dma_writel(pd, STS0, sts0); + if (ret0) + dma_writel(pd, STS0, sts0); + if (ret2) + dma_writel(pd, STS2, sts2); - return ret; + return ret0 | ret2; } #ifdef CONFIG_PM -- cgit v1.2.3 From a2daff6803a384ce065e3681a2affea1da59c5f5 Mon Sep 17 
00:00:00 2001 From: Randy Dunlap Date: Tue, 31 May 2011 14:09:00 -0700 Subject: fuse: fix non-ANSI void function notation Fix void function parameter list sparse warning: fs/fuse/inode.c:74:44: warning: non-ANSI function declaration of function 'fuse_alloc_forget' Signed-off-by: Randy Dunlap Signed-off-by: Miklos Szeredi --- fs/fuse/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index cc6ec4b2f0ff..5354906e797c 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -71,7 +71,7 @@ struct fuse_mount_data { unsigned blksize; }; -struct fuse_forget_link *fuse_alloc_forget() +struct fuse_forget_link *fuse_alloc_forget(void) { return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL); } -- cgit v1.2.3 From badc1446891c158f065c5a9726febdae74eb5ac5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 1 Jun 2011 21:25:47 +0100 Subject: [IA64] Hook up gpiolib support Allow people to use gpiolib on ia64, mostly for build coverage as it seems more useful to standardise on availablity of the API than handle it being optional. Signed-off-by: Mark Brown Signed-off-by: Tony Luck --- arch/ia64/Kconfig | 4 ++++ arch/ia64/include/asm/gpio.h | 55 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 arch/ia64/include/asm/gpio.h diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 38280ef4a2af..578701ea03d4 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -27,6 +27,7 @@ config IA64 select GENERIC_PENDING_IRQ if SMP select IRQ_PER_CPU select GENERIC_IRQ_SHOW + select ARCH_WANT_OPTIONAL_GPIOLIB default y help The Itanium Processor Family is Intel's 64-bit successor to @@ -89,6 +90,9 @@ config GENERIC_TIME_VSYSCALL config HAVE_SETUP_PER_CPU_AREA def_bool y +config GENERIC_GPIO + def_bool y + config DMI bool default y diff --git a/arch/ia64/include/asm/gpio.h b/arch/ia64/include/asm/gpio.h new file mode 100644 index 000000000000..590a20debc4e --- /dev/null +++ b/arch/ia64/include/asm/gpio.h @@ -0,0 +1,55 @@ +/* + * Generic GPIO API implementation for IA-64. + * + * A stright copy of that for PowerPC which was: + * + * Copyright (c) 2007-2008 MontaVista Software, Inc. + * + * Author: Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _ASM_IA64_GPIO_H +#define _ASM_IA64_GPIO_H + +#include +#include + +#ifdef CONFIG_GPIOLIB + +/* + * We don't (yet) implement inlined/rapid versions for on-chip gpios. + * Just call gpiolib. + */ +static inline int gpio_get_value(unsigned int gpio) +{ + return __gpio_get_value(gpio); +} + +static inline void gpio_set_value(unsigned int gpio, int value) +{ + __gpio_set_value(gpio, value); +} + +static inline int gpio_cansleep(unsigned int gpio) +{ + return __gpio_cansleep(gpio); +} + +static inline int gpio_to_irq(unsigned int gpio) +{ + return __gpio_to_irq(gpio); +} + +static inline int irq_to_gpio(unsigned int irq) +{ + return -EINVAL; +} + +#endif /* CONFIG_GPIOLIB */ + +#endif /* _ASM_IA64_GPIO_H */ -- cgit v1.2.3 From 9f14517bd6f38cf4b48d742a0ac5db9d17edecba Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 1 Jun 2011 00:32:50 -0700 Subject: clocksource: tile: convert to use clocksource_register_hz Convert tile to use clocksource_register_hz. 
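As a sketch of the general conversion pattern applied in the tile patch below (the register_cycle_counter() wrapper and rate_hz parameter are illustrative, assuming <linux/clocksource.h>): instead of hand-picking a shift and computing the mult factor with clocksource_hz2mult(), the driver now passes the counter frequency to clocksource_register_hz() and lets the clocksource core choose mult/shift itself.

static int register_cycle_counter(struct clocksource *cs, u32 rate_hz)
{
        /*
         * Old style, with a hand-picked cs->shift (22 in this driver):
         *      cs->mult = clocksource_hz2mult(rate_hz, cs->shift);
         *      return clocksource_register(cs);
         */
        return clocksource_register_hz(cs, rate_hz);
}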
CC: Thomas Gleixner Signed-off-by: John Stultz Signed-off-by: Chris Metcalf --- arch/tile/kernel/time.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index c4be58cc5d50..f6f50f2a5e37 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -78,7 +78,6 @@ static struct clocksource cycle_counter_cs = { .rating = 300, .read = clocksource_get_cycles, .mask = CLOCKSOURCE_MASK(64), - .shift = 22, /* typical value, e.g. x86 tsc uses this */ .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -91,8 +90,6 @@ void __init setup_clock(void) cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED); sched_clock_mult = clocksource_hz2mult(cycles_per_sec, SCHED_CLOCK_SHIFT); - cycle_counter_cs.mult = - clocksource_hz2mult(cycles_per_sec, cycle_counter_cs.shift); } void __init calibrate_delay(void) @@ -107,7 +104,7 @@ void __init calibrate_delay(void) void __init time_init(void) { /* Initialize and register the clock source. */ - clocksource_register(&cycle_counter_cs); + clocksource_register_hz(&cycle_counter_cs, cycles_per_sec); /* Start up the tile-timer interrupt source on the boot cpu. */ setup_tile_timer(); -- cgit v1.2.3 From 2c007a9d8cb018a3ce2106342f2aff2abca2a3bd Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Fri, 3 Jun 2011 17:36:18 -0400 Subject: tile: use generic-y format for one-line asm-generic headers This lets us remove a lot of one-line wrapper header files. See commit d8ecc5cd8e227bc318513b5306ae88a474b8886d for context. Signed-off-by: Chris Metcalf --- arch/tile/include/asm/Kbuild | 38 +++++++++++++++++++++++++++++++ arch/tile/include/asm/bug.h | 1 - arch/tile/include/asm/bugs.h | 1 - arch/tile/include/asm/cputime.h | 1 - arch/tile/include/asm/device.h | 1 - arch/tile/include/asm/div64.h | 1 - arch/tile/include/asm/emergency-restart.h | 1 - arch/tile/include/asm/errno.h | 1 - arch/tile/include/asm/fb.h | 1 - arch/tile/include/asm/fcntl.h | 1 - arch/tile/include/asm/ioctl.h | 1 - arch/tile/include/asm/ioctls.h | 1 - arch/tile/include/asm/ipc.h | 1 - arch/tile/include/asm/ipcbuf.h | 1 - arch/tile/include/asm/irq_regs.h | 1 - arch/tile/include/asm/kdebug.h | 1 - arch/tile/include/asm/local.h | 1 - arch/tile/include/asm/module.h | 1 - arch/tile/include/asm/msgbuf.h | 1 - arch/tile/include/asm/mutex.h | 1 - arch/tile/include/asm/param.h | 1 - arch/tile/include/asm/parport.h | 1 - arch/tile/include/asm/poll.h | 1 - arch/tile/include/asm/posix_types.h | 1 - arch/tile/include/asm/resource.h | 1 - arch/tile/include/asm/scatterlist.h | 1 - arch/tile/include/asm/sembuf.h | 1 - arch/tile/include/asm/serial.h | 1 - arch/tile/include/asm/shmbuf.h | 1 - arch/tile/include/asm/shmparam.h | 1 - arch/tile/include/asm/socket.h | 1 - arch/tile/include/asm/sockios.h | 1 - arch/tile/include/asm/statfs.h | 1 - arch/tile/include/asm/termbits.h | 1 - arch/tile/include/asm/termios.h | 1 - arch/tile/include/asm/types.h | 1 - arch/tile/include/asm/ucontext.h | 1 - arch/tile/include/asm/xor.h | 1 - 38 files changed, 38 insertions(+), 37 deletions(-) delete mode 100644 arch/tile/include/asm/bug.h delete mode 100644 arch/tile/include/asm/bugs.h delete mode 100644 arch/tile/include/asm/cputime.h delete mode 100644 arch/tile/include/asm/device.h delete mode 100644 arch/tile/include/asm/div64.h delete mode 100644 arch/tile/include/asm/emergency-restart.h delete mode 100644 arch/tile/include/asm/errno.h delete mode 100644 arch/tile/include/asm/fb.h delete mode 100644 arch/tile/include/asm/fcntl.h delete mode 100644 arch/tile/include/asm/ioctl.h 
delete mode 100644 arch/tile/include/asm/ioctls.h delete mode 100644 arch/tile/include/asm/ipc.h delete mode 100644 arch/tile/include/asm/ipcbuf.h delete mode 100644 arch/tile/include/asm/irq_regs.h delete mode 100644 arch/tile/include/asm/kdebug.h delete mode 100644 arch/tile/include/asm/local.h delete mode 100644 arch/tile/include/asm/module.h delete mode 100644 arch/tile/include/asm/msgbuf.h delete mode 100644 arch/tile/include/asm/mutex.h delete mode 100644 arch/tile/include/asm/param.h delete mode 100644 arch/tile/include/asm/parport.h delete mode 100644 arch/tile/include/asm/poll.h delete mode 100644 arch/tile/include/asm/posix_types.h delete mode 100644 arch/tile/include/asm/resource.h delete mode 100644 arch/tile/include/asm/scatterlist.h delete mode 100644 arch/tile/include/asm/sembuf.h delete mode 100644 arch/tile/include/asm/serial.h delete mode 100644 arch/tile/include/asm/shmbuf.h delete mode 100644 arch/tile/include/asm/shmparam.h delete mode 100644 arch/tile/include/asm/socket.h delete mode 100644 arch/tile/include/asm/sockios.h delete mode 100644 arch/tile/include/asm/statfs.h delete mode 100644 arch/tile/include/asm/termbits.h delete mode 100644 arch/tile/include/asm/termios.h delete mode 100644 arch/tile/include/asm/types.h delete mode 100644 arch/tile/include/asm/ucontext.h delete mode 100644 arch/tile/include/asm/xor.h diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index 849ab2fa1f5c..aec60dc06007 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -2,3 +2,41 @@ include include/asm-generic/Kbuild.asm header-y += ucontext.h header-y += hardwall.h + +generic-y += bug.h +generic-y += bugs.h +generic-y += cputime.h +generic-y += device.h +generic-y += div64.h +generic-y += emergency-restart.h +generic-y += errno.h +generic-y += fb.h +generic-y += fcntl.h +generic-y += ioctl.h +generic-y += ioctls.h +generic-y += ipc.h +generic-y += ipcbuf.h +generic-y += irq_regs.h +generic-y += kdebug.h +generic-y += local.h +generic-y += module.h +generic-y += msgbuf.h +generic-y += mutex.h +generic-y += param.h +generic-y += parport.h +generic-y += poll.h +generic-y += posix_types.h +generic-y += resource.h +generic-y += scatterlist.h +generic-y += sembuf.h +generic-y += serial.h +generic-y += shmbuf.h +generic-y += shmparam.h +generic-y += socket.h +generic-y += sockios.h +generic-y += statfs.h +generic-y += termbits.h +generic-y += termios.h +generic-y += types.h +generic-y += ucontext.h +generic-y += xor.h diff --git a/arch/tile/include/asm/bug.h b/arch/tile/include/asm/bug.h deleted file mode 100644 index b12fd89e42e9..000000000000 --- a/arch/tile/include/asm/bug.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/bugs.h b/arch/tile/include/asm/bugs.h deleted file mode 100644 index 61791e1ad9f5..000000000000 --- a/arch/tile/include/asm/bugs.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/cputime.h b/arch/tile/include/asm/cputime.h deleted file mode 100644 index 6d68ad7e0ea3..000000000000 --- a/arch/tile/include/asm/cputime.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h deleted file mode 100644 index f0a4c256403b..000000000000 --- a/arch/tile/include/asm/device.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/div64.h b/arch/tile/include/asm/div64.h deleted file mode 100644 index 6cd978cefb28..000000000000 --- a/arch/tile/include/asm/div64.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git 
a/arch/tile/include/asm/emergency-restart.h b/arch/tile/include/asm/emergency-restart.h deleted file mode 100644 index 3711bd9d50bd..000000000000 --- a/arch/tile/include/asm/emergency-restart.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/errno.h b/arch/tile/include/asm/errno.h deleted file mode 100644 index 4c82b503d92f..000000000000 --- a/arch/tile/include/asm/errno.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/fb.h b/arch/tile/include/asm/fb.h deleted file mode 100644 index 3a4988e8df45..000000000000 --- a/arch/tile/include/asm/fb.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/fcntl.h b/arch/tile/include/asm/fcntl.h deleted file mode 100644 index 46ab12db5739..000000000000 --- a/arch/tile/include/asm/fcntl.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/ioctl.h b/arch/tile/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe5..000000000000 --- a/arch/tile/include/asm/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/ioctls.h b/arch/tile/include/asm/ioctls.h deleted file mode 100644 index ec34c760665e..000000000000 --- a/arch/tile/include/asm/ioctls.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/ipc.h b/arch/tile/include/asm/ipc.h deleted file mode 100644 index a46e3d9c2a3f..000000000000 --- a/arch/tile/include/asm/ipc.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/ipcbuf.h b/arch/tile/include/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d0..000000000000 --- a/arch/tile/include/asm/ipcbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/irq_regs.h b/arch/tile/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b70270..000000000000 --- a/arch/tile/include/asm/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/kdebug.h b/arch/tile/include/asm/kdebug.h deleted file mode 100644 index 6ece1b037665..000000000000 --- a/arch/tile/include/asm/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/local.h b/arch/tile/include/asm/local.h deleted file mode 100644 index c11c530f74d0..000000000000 --- a/arch/tile/include/asm/local.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h deleted file mode 100644 index 1e4b79fe8584..000000000000 --- a/arch/tile/include/asm/module.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/msgbuf.h b/arch/tile/include/asm/msgbuf.h deleted file mode 100644 index 809134c644a6..000000000000 --- a/arch/tile/include/asm/msgbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/mutex.h b/arch/tile/include/asm/mutex.h deleted file mode 100644 index ff6101aa2c71..000000000000 --- a/arch/tile/include/asm/mutex.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/param.h b/arch/tile/include/asm/param.h deleted file mode 100644 index 965d45427975..000000000000 --- a/arch/tile/include/asm/param.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/parport.h b/arch/tile/include/asm/parport.h deleted file mode 100644 index cf252af64590..000000000000 --- a/arch/tile/include/asm/parport.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/poll.h b/arch/tile/include/asm/poll.h deleted file mode 100644 index c98509d3149e..000000000000 --- a/arch/tile/include/asm/poll.h +++ /dev/null @@ -1 +0,0 @@ 
-#include diff --git a/arch/tile/include/asm/posix_types.h b/arch/tile/include/asm/posix_types.h deleted file mode 100644 index 22cae6230ceb..000000000000 --- a/arch/tile/include/asm/posix_types.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/resource.h b/arch/tile/include/asm/resource.h deleted file mode 100644 index 04bc4db8921b..000000000000 --- a/arch/tile/include/asm/resource.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/scatterlist.h b/arch/tile/include/asm/scatterlist.h deleted file mode 100644 index 35d786fe93ae..000000000000 --- a/arch/tile/include/asm/scatterlist.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/sembuf.h b/arch/tile/include/asm/sembuf.h deleted file mode 100644 index 7673b83cfef7..000000000000 --- a/arch/tile/include/asm/sembuf.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/serial.h b/arch/tile/include/asm/serial.h deleted file mode 100644 index a0cb0caff152..000000000000 --- a/arch/tile/include/asm/serial.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/shmbuf.h b/arch/tile/include/asm/shmbuf.h deleted file mode 100644 index 83c05fc2de38..000000000000 --- a/arch/tile/include/asm/shmbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/shmparam.h b/arch/tile/include/asm/shmparam.h deleted file mode 100644 index 93f30deb95d0..000000000000 --- a/arch/tile/include/asm/shmparam.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/socket.h b/arch/tile/include/asm/socket.h deleted file mode 100644 index 6b71384b9d8b..000000000000 --- a/arch/tile/include/asm/socket.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/sockios.h b/arch/tile/include/asm/sockios.h deleted file mode 100644 index def6d4746ee7..000000000000 --- a/arch/tile/include/asm/sockios.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/statfs.h b/arch/tile/include/asm/statfs.h deleted file mode 100644 index 0b91fe198c20..000000000000 --- a/arch/tile/include/asm/statfs.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/termbits.h b/arch/tile/include/asm/termbits.h deleted file mode 100644 index 3935b106de79..000000000000 --- a/arch/tile/include/asm/termbits.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/termios.h b/arch/tile/include/asm/termios.h deleted file mode 100644 index 280d78a9d966..000000000000 --- a/arch/tile/include/asm/termios.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/types.h b/arch/tile/include/asm/types.h deleted file mode 100644 index b9e79bc580dd..000000000000 --- a/arch/tile/include/asm/types.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/ucontext.h b/arch/tile/include/asm/ucontext.h deleted file mode 100644 index 9bc07b9f30fb..000000000000 --- a/arch/tile/include/asm/ucontext.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/tile/include/asm/xor.h b/arch/tile/include/asm/xor.h deleted file mode 100644 index c82eb12a5b18..000000000000 --- a/arch/tile/include/asm/xor.h +++ /dev/null @@ -1 +0,0 @@ -#include -- cgit v1.2.3 From ab7cfb5548d22604fafeaaa95950be2f97869f1e Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 1 Jun 2011 14:47:42 +0900 Subject: serial: sh-sci: Kill off bitrotted H8/300 support. 
h8300 has never been updated upstream to support the conversion to the driver model (which happened mid-2.5), and it doesn't seem likely that it ever will. Kill off the remaining bitrotted support to reduce the maintenance burden going forward. Signed-off-by: Paul Mundt --- drivers/tty/serial/Kconfig | 2 +- drivers/tty/serial/sh-sci.c | 30 +------------ drivers/tty/serial/sh-sci.h | 103 +++++--------------------------------------- 3 files changed, 14 insertions(+), 121 deletions(-) diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 636144cea932..1c0cd2d26d37 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -974,7 +974,7 @@ config SERIAL_IP22_ZILOG_CONSOLE config SERIAL_SH_SCI tristate "SuperH SCI(F) serial port support" - depends on HAVE_CLK && (SUPERH || H8300 || ARCH_SHMOBILE) + depends on HAVE_CLK && (SUPERH || ARCH_SHMOBILE) select SERIAL_CORE config SERIAL_SH_SCI_NR_UARTS diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index ebd8629c108d..280c02af0eae 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -54,10 +54,6 @@ #include #endif -#ifdef CONFIG_H8300 -#include -#endif - #include "sh-sci.h" struct sci_port { @@ -164,23 +160,7 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c) } #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ -#if defined(__H8300H__) || defined(__H8300S__) -static void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - int ch = (port->mapbase - SMR0) >> 3; - - /* set DDR regs */ - H8300_GPIO_DDR(h8300_sci_pins[ch].port, - h8300_sci_pins[ch].rx, - H8300_GPIO_INPUT); - H8300_GPIO_DDR(h8300_sci_pins[ch].port, - h8300_sci_pins[ch].tx, - H8300_GPIO_OUTPUT); - - /* tx mark output*/ - H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx; -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) +#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) { if (port->mapbase == 0xA4400000) { @@ -1863,14 +1843,8 @@ static int __devinit serial_console_setup(struct console *co, char *options) if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); - ret = uart_set_options(port, co, baud, parity, bits, flow); -#if defined(__H8300H__) || defined(__H8300S__) - /* disable rx interrupt */ - if (ret == 0) - sci_stop_rx(port); -#endif /* TODO: disable clock */ - return ret; + return uart_set_options(port, co, baud, parity, bits, flow); } static struct console serial_console = { diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index b04d937c9110..4dc249ecc59f 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -2,13 +2,6 @@ #include #include -#if defined(CONFIG_H83007) || defined(CONFIG_H83068) -#include -#endif -#if defined(CONFIG_H8S2678) -#include -#endif - #if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ defined(CONFIG_CPU_SUBTYPE_SH7707) || \ defined(CONFIG_CPU_SUBTYPE_SH7708) || \ @@ -72,10 +65,6 @@ #elif defined(CONFIG_CPU_SUBTYPE_SH4_202) # define SCSPTR2 0xffe80020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_H83007) || defined(CONFIG_H83068) -# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) -#elif defined(CONFIG_H8S2678) -# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) #elif defined(CONFIG_CPU_SUBTYPE_SH7757) # define SCSPTR0 0xfe4b0020 # define SCIF_ORER 0x0001 
@@ -223,17 +212,6 @@ } \ } -#ifdef CONFIG_H8300 -/* h8300 don't have SCIF */ -#define CPU_SCIF_FNS(name) \ - static inline unsigned int sci_##name##_in(struct uart_port *port) \ - { \ - return 0; \ - } \ - static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ - { \ - } -#else #define CPU_SCIF_FNS(name, scif_offset, scif_size) \ static inline unsigned int sci_##name##_in(struct uart_port *port) \ { \ @@ -243,7 +221,6 @@ { \ SCI_OUT(scif_size, scif_offset, value); \ } -#endif #define CPU_SCI_FNS(name, sci_offset, sci_size) \ static inline unsigned int sci_##name##_in(struct uart_port* port) \ @@ -262,8 +239,7 @@ defined(CONFIG_ARCH_SH7372) #if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) #define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ - h8_sci_offset, h8_sci_size) \ + sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size) #define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) @@ -282,19 +258,11 @@ CPU_SCIF_FNS(name, scif_offset, scif_size) #else #define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ - h8_sci_offset, h8_sci_size) \ + sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ CPU_SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh3_scif_offset, sh3_scif_size) #define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ CPU_SCIF_FNS(name, sh3_scif_offset, sh3_scif_size) #endif -#elif defined(__H8300H__) || defined(__H8300S__) -#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ - h8_sci_offset, h8_sci_size) \ - CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size) -#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIF_FNS(name) #elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ defined(CONFIG_CPU_SUBTYPE_SH7724) #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \ @@ -303,8 +271,7 @@ CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) #else #define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ - h8_sci_offset, h8_sci_size) \ + sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size) #define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) @@ -353,14 +320,14 @@ SCIF_FNS(SCFCR, 0x18, 16) SCIF_FNS(SCFDR, 0x1c, 16) SCIF_FNS(SCLSR, 0x24, 16) #else -/* reg SCI/SH3 SCI/SH4 SCIF/SH3 SCIF/SH4 SCI/H8*/ -/* name off sz off sz off sz off sz off sz*/ -SCIx_FNS(SCSMR, 0x00, 8, 0x00, 8, 0x00, 8, 0x00, 16, 0x00, 8) -SCIx_FNS(SCBRR, 0x02, 8, 0x04, 8, 0x02, 8, 0x04, 8, 0x01, 8) -SCIx_FNS(SCSCR, 0x04, 8, 0x08, 8, 0x04, 8, 0x08, 16, 0x02, 8) -SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8, 0x03, 8) -SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8) -SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8) +/* reg SCI/SH3 SCI/SH4 SCIF/SH3 SCIF/SH4 */ +/* name off sz off sz off sz off 
sz */ +SCIx_FNS(SCSMR, 0x00, 8, 0x00, 8, 0x00, 8, 0x00, 16) +SCIx_FNS(SCBRR, 0x02, 8, 0x04, 8, 0x02, 8, 0x04, 8) +SCIx_FNS(SCSCR, 0x04, 8, 0x08, 8, 0x04, 8, 0x08, 16) +SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8) +SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16) +SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8) SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ @@ -390,48 +357,6 @@ SCIF_FNS(SCLSR, 0, 0, 0x24, 16) #define sci_in(port, reg) sci_##reg##_in(port) #define sci_out(port, reg, value) sci_##reg##_out(port, value) -/* H8/300 series SCI pins assignment */ -#if defined(__H8300H__) || defined(__H8300S__) -static const struct __attribute__((packed)) { - int port; /* GPIO port no */ - unsigned short rx,tx; /* GPIO bit no */ -} h8300_sci_pins[] = { -#if defined(CONFIG_H83007) || defined(CONFIG_H83068) - { /* SCI0 */ - .port = H8300_GPIO_P9, - .rx = H8300_GPIO_B2, - .tx = H8300_GPIO_B0, - }, - { /* SCI1 */ - .port = H8300_GPIO_P9, - .rx = H8300_GPIO_B3, - .tx = H8300_GPIO_B1, - }, - { /* SCI2 */ - .port = H8300_GPIO_PB, - .rx = H8300_GPIO_B7, - .tx = H8300_GPIO_B6, - } -#elif defined(CONFIG_H8S2678) - { /* SCI0 */ - .port = H8300_GPIO_P3, - .rx = H8300_GPIO_B2, - .tx = H8300_GPIO_B0, - }, - { /* SCI1 */ - .port = H8300_GPIO_P3, - .rx = H8300_GPIO_B3, - .tx = H8300_GPIO_B1, - }, - { /* SCI2 */ - .port = H8300_GPIO_P5, - .rx = H8300_GPIO_B1, - .tx = H8300_GPIO_B0, - } -#endif -}; -#endif - #if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ defined(CONFIG_CPU_SUBTYPE_SH7707) || \ defined(CONFIG_CPU_SUBTYPE_SH7708) || \ @@ -454,12 +379,6 @@ static inline int sci_rxd_in(struct uart_port *port) return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */ return 1; } -#elif defined(__H8300H__) || defined(__H8300S__) -static inline int sci_rxd_in(struct uart_port *port) -{ - int ch = (port->mapbase - SMR0) >> 3; - return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0; -} #else /* default case for non-SCI processors */ static inline int sci_rxd_in(struct uart_port *port) { -- cgit v1.2.3 From 5fa29a17fabfe204fa9f20edd5fc81ab2364eb4b Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Sun, 29 May 2011 13:10:02 +0300 Subject: dmaengine: add ep93xx DMA support The ep93xx DMA controller has 10 independent memory to peripheral (M2P) channels, and 2 dedicated memory to memory (M2M) channels. M2M channels can also be used by SPI and IDE to perform DMA transfers to/from their memory mapped FIFOs. This driver supports both M2P and M2M channels with DMA_SLAVE, DMA_CYCLIC and DMA_MEMCPY (M2M only) capabilities. Signed-off-by: Mika Westerberg Signed-off-by: Ryan Mallon Acked-by: H Hartley Sweeten Acked-by: Vinod Koul Cc: Dan Williams Signed-off-by: Vinod Koul --- arch/arm/mach-ep93xx/include/mach/dma.h | 87 ++ drivers/dma/Kconfig | 7 + drivers/dma/Makefile | 1 + drivers/dma/ep93xx_dma.c | 1355 +++++++++++++++++++++++++++++++ 4 files changed, 1450 insertions(+) create mode 100644 drivers/dma/ep93xx_dma.c diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h index 5e31b2b25da9..6e7049a796a4 100644 --- a/arch/arm/mach-ep93xx/include/mach/dma.h +++ b/arch/arm/mach-ep93xx/include/mach/dma.h @@ -15,6 +15,8 @@ #include #include +#include +#include /** * struct ep93xx_dma_buffer - Information about a buffer to be transferred @@ -146,4 +148,89 @@ void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, */ void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p); +/* + * M2P channels. 
+ * + * Note that these values are also directly used for setting the PPALLOC + * register. + */ +#define EP93XX_DMA_I2S1 0 +#define EP93XX_DMA_I2S2 1 +#define EP93XX_DMA_AAC1 2 +#define EP93XX_DMA_AAC2 3 +#define EP93XX_DMA_AAC3 4 +#define EP93XX_DMA_I2S3 5 +#define EP93XX_DMA_UART1 6 +#define EP93XX_DMA_UART2 7 +#define EP93XX_DMA_UART3 8 +#define EP93XX_DMA_IRDA 9 +/* M2M channels */ +#define EP93XX_DMA_SSP 10 +#define EP93XX_DMA_IDE 11 + +/** + * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine + * @port: peripheral which is requesting the channel + * @direction: TX/RX channel + * @name: optional name for the channel, this is displayed in /proc/interrupts + * + * This information is passed as private channel parameter in a filter + * function. Note that this is only needed for slave/cyclic channels. For + * memcpy channels %NULL data should be passed. + */ +struct ep93xx_dma_data { + int port; + enum dma_data_direction direction; + const char *name; +}; + +/** + * struct ep93xx_dma_chan_data - platform specific data for a DMA channel + * @name: name of the channel, used for getting the right clock for the channel + * @base: mapped registers + * @irq: interrupt number used by this channel + */ +struct ep93xx_dma_chan_data { + const char *name; + void __iomem *base; + int irq; +}; + +/** + * struct ep93xx_dma_platform_data - platform data for the dmaengine driver + * @channels: array of channels which are passed to the driver + * @num_channels: number of channels in the array + * + * This structure is passed to the DMA engine driver via platform data. For + * M2P channels, contract is that even channels are for TX and odd for RX. + * There is no requirement for the M2M channels. + */ +struct ep93xx_dma_platform_data { + struct ep93xx_dma_chan_data *channels; + size_t num_channels; +}; + +static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan) +{ + return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p"); +} + +/** + * ep93xx_dma_chan_direction - returns direction the channel can be used + * @chan: channel + * + * This function can be used in filter functions to find out whether the + * channel supports given DMA direction. Only M2P channels have such + * limitation, for M2M channels the direction is configurable. + */ +static inline enum dma_data_direction +ep93xx_dma_chan_direction(struct dma_chan *chan) +{ + if (!ep93xx_dma_chan_is_m2p(chan)) + return DMA_NONE; + + /* even channels are for TX, odd for RX */ + return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; +} + #endif /* __ASM_ARCH_DMA_H */ diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 25cf327cd1cb..2e3b3d38c465 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -237,6 +237,13 @@ config MXS_DMA Support the MXS DMA engine. This engine including APBH-DMA and APBX-DMA is integrated into Freescale i.MX23/28 chips. +config EP93XX_DMA + bool "Cirrus Logic EP93xx DMA support" + depends on ARCH_EP93XX + select DMA_ENGINE + help + Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. 
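As a sketch of how a client is expected to use the ep93xx_dma_data / ep93xx_dma_chan_direction() interface documented above (the function names, the uart1-tx example and the includes are illustrative assumptions, not part of this patch): the configuration is handed to the driver through chan->private by a dmaengine filter function, which also checks that an M2P channel has the required direction.

#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool ep93xx_example_filter(struct dma_chan *chan, void *filter_param)
{
        struct ep93xx_dma_data *data = filter_param;

        /* Only M2P channels have a fixed direction; make sure it matches. */
        if (data->direction != ep93xx_dma_chan_direction(chan))
                return false;

        /* Hand the channel configuration to the driver via chan->private. */
        chan->private = data;
        return true;
}

static struct dma_chan *ep93xx_example_request_tx_chan(void)
{
        static struct ep93xx_dma_data data = {
                .port           = EP93XX_DMA_UART1,
                .direction      = DMA_TO_DEVICE,
                .name           = "uart1-tx",
        };
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, ep93xx_example_filter, &data);
}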
+ config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 836095ab3c5c..30cf3b1f0c5c 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_PL330_DMA) += pl330.o obj-$(CONFIG_PCH_DMA) += pch_dma.o obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o +obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c new file mode 100644 index 000000000000..0766c1e53b1d --- /dev/null +++ b/drivers/dma/ep93xx_dma.c @@ -0,0 +1,1355 @@ +/* + * Driver for the Cirrus Logic EP93xx DMA Controller + * + * Copyright (C) 2011 Mika Westerberg + * + * DMA M2P implementation is based on the original + * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights: + * + * Copyright (C) 2006 Lennert Buytenhek + * Copyright (C) 2006 Applied Data Systems + * Copyright (C) 2009 Ryan Mallon + * + * This driver is based on dw_dmac and amba-pl08x drivers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +#include + +/* M2P registers */ +#define M2P_CONTROL 0x0000 +#define M2P_CONTROL_STALLINT BIT(0) +#define M2P_CONTROL_NFBINT BIT(1) +#define M2P_CONTROL_CH_ERROR_INT BIT(3) +#define M2P_CONTROL_ENABLE BIT(4) +#define M2P_CONTROL_ICE BIT(6) + +#define M2P_INTERRUPT 0x0004 +#define M2P_INTERRUPT_STALL BIT(0) +#define M2P_INTERRUPT_NFB BIT(1) +#define M2P_INTERRUPT_ERROR BIT(3) + +#define M2P_PPALLOC 0x0008 +#define M2P_STATUS 0x000c + +#define M2P_MAXCNT0 0x0020 +#define M2P_BASE0 0x0024 +#define M2P_MAXCNT1 0x0030 +#define M2P_BASE1 0x0034 + +#define M2P_STATE_IDLE 0 +#define M2P_STATE_STALL 1 +#define M2P_STATE_ON 2 +#define M2P_STATE_NEXT 3 + +/* M2M registers */ +#define M2M_CONTROL 0x0000 +#define M2M_CONTROL_DONEINT BIT(2) +#define M2M_CONTROL_ENABLE BIT(3) +#define M2M_CONTROL_START BIT(4) +#define M2M_CONTROL_DAH BIT(11) +#define M2M_CONTROL_SAH BIT(12) +#define M2M_CONTROL_PW_SHIFT 9 +#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT) +#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT) +#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT) +#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT) +#define M2M_CONTROL_TM_SHIFT 13 +#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT) +#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT) +#define M2M_CONTROL_RSS_SHIFT 22 +#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT) +#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT) +#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT) +#define M2M_CONTROL_NO_HDSK BIT(24) +#define M2M_CONTROL_PWSC_SHIFT 25 + +#define M2M_INTERRUPT 0x0004 +#define M2M_INTERRUPT_DONEINT BIT(1) + +#define M2M_BCR0 0x0010 +#define M2M_BCR1 0x0014 +#define M2M_SAR_BASE0 0x0018 +#define M2M_SAR_BASE1 0x001c +#define M2M_DAR_BASE0 0x002c +#define M2M_DAR_BASE1 0x0030 + +#define DMA_MAX_CHAN_BYTES 0xffff +#define DMA_MAX_CHAN_DESCRIPTORS 32 + +struct ep93xx_dma_engine; + +/** + * struct ep93xx_dma_desc - EP93xx specific transaction descriptor + * @src_addr: source address of the transaction + * @dst_addr: destination address of the transaction + * @size: size of the transaction (in bytes) + * @complete: this descriptor is completed + * @txd: dmaengine API descriptor + * @tx_list: list of linked 
descriptors + * @node: link used for putting this into a channel queue + */ +struct ep93xx_dma_desc { + u32 src_addr; + u32 dst_addr; + size_t size; + bool complete; + struct dma_async_tx_descriptor txd; + struct list_head tx_list; + struct list_head node; +}; + +/** + * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel + * @chan: dmaengine API channel + * @edma: pointer to to the engine device + * @regs: memory mapped registers + * @irq: interrupt number of the channel + * @clk: clock used by this channel + * @tasklet: channel specific tasklet used for callbacks + * @lock: lock protecting the fields following + * @flags: flags for the channel + * @buffer: which buffer to use next (0/1) + * @last_completed: last completed cookie value + * @active: flattened chain of descriptors currently being processed + * @queue: pending descriptors which are handled next + * @free_list: list of free descriptors which can be used + * @runtime_addr: physical address currently used as dest/src (M2M only). This + * is set via %DMA_SLAVE_CONFIG before slave operation is + * prepared + * @runtime_ctrl: M2M runtime values for the control register. + * + * As EP93xx DMA controller doesn't support real chained DMA descriptors we + * will have slightly different scheme here: @active points to a head of + * flattened DMA descriptor chain. + * + * @queue holds pending transactions. These are linked through the first + * descriptor in the chain. When a descriptor is moved to the @active queue, + * the first and chained descriptors are flattened into a single list. + * + * @chan.private holds pointer to &struct ep93xx_dma_data which contains + * necessary channel configuration information. For memcpy channels this must + * be %NULL. + */ +struct ep93xx_dma_chan { + struct dma_chan chan; + const struct ep93xx_dma_engine *edma; + void __iomem *regs; + int irq; + struct clk *clk; + struct tasklet_struct tasklet; + /* protects the fields following */ + spinlock_t lock; + unsigned long flags; +/* Channel is configured for cyclic transfers */ +#define EP93XX_DMA_IS_CYCLIC 0 + + int buffer; + dma_cookie_t last_completed; + struct list_head active; + struct list_head queue; + struct list_head free_list; + u32 runtime_addr; + u32 runtime_ctrl; +}; + +/** + * struct ep93xx_dma_engine - the EP93xx DMA engine instance + * @dma_dev: holds the dmaengine device + * @m2m: is this an M2M or M2P device + * @hw_setup: method which sets the channel up for operation + * @hw_shutdown: shuts the channel down and flushes whatever is left + * @hw_submit: pushes active descriptor(s) to the hardware + * @hw_interrupt: handle the interrupt + * @num_channels: number of channels for this instance + * @channels: array of channels + * + * There is one instance of this struct for the M2P channels and one for the + * M2M channels. hw_xxx() methods are used to perform operations which are + * different on M2M and M2P channels. These methods are called with channel + * lock held and interrupts disabled so they cannot sleep. 
+ */ +struct ep93xx_dma_engine { + struct dma_device dma_dev; + bool m2m; + int (*hw_setup)(struct ep93xx_dma_chan *); + void (*hw_shutdown)(struct ep93xx_dma_chan *); + void (*hw_submit)(struct ep93xx_dma_chan *); + int (*hw_interrupt)(struct ep93xx_dma_chan *); +#define INTERRUPT_UNKNOWN 0 +#define INTERRUPT_DONE 1 +#define INTERRUPT_NEXT_BUFFER 2 + + size_t num_channels; + struct ep93xx_dma_chan channels[]; +}; + +static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac) +{ + return &edmac->chan.dev->device; +} + +static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct ep93xx_dma_chan, chan); +} + +/** + * ep93xx_dma_set_active - set new active descriptor chain + * @edmac: channel + * @desc: head of the new active descriptor chain + * + * Sets @desc to be the head of the new active descriptor chain. This is the + * chain which is processed next. The active list must be empty before calling + * this function. + * + * Called with @edmac->lock held and interrupts disabled. + */ +static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, + struct ep93xx_dma_desc *desc) +{ + BUG_ON(!list_empty(&edmac->active)); + + list_add_tail(&desc->node, &edmac->active); + + /* Flatten the @desc->tx_list chain into @edmac->active list */ + while (!list_empty(&desc->tx_list)) { + struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list, + struct ep93xx_dma_desc, node); + + /* + * We copy the callback parameters from the first descriptor + * to all the chained descriptors. This way we can call the + * callback without having to find out the first descriptor in + * the chain. Useful for cyclic transfers. + */ + d->txd.callback = desc->txd.callback; + d->txd.callback_param = desc->txd.callback_param; + + list_move_tail(&d->node, &edmac->active); + } +} + +/* Called with @edmac->lock held and interrupts disabled */ +static struct ep93xx_dma_desc * +ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) +{ + return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); +} + +/** + * ep93xx_dma_advance_active - advances to the next active descriptor + * @edmac: channel + * + * Function advances active descriptor to the next in the @edmac->active and + * returns %true if we still have descriptors in the chain to process. + * Otherwise returns %false. + * + * When the channel is in cyclic mode always returns %true. + * + * Called with @edmac->lock held and interrupts disabled. + */ +static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) +{ + list_rotate_left(&edmac->active); + + if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) + return true; + + /* + * If txd.cookie is set it means that we are back in the first + * descriptor in the chain and hence done with it. + */ + return !ep93xx_dma_get_active(edmac)->txd.cookie; +} + +/* + * M2P DMA implementation + */ + +static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control) +{ + writel(control, edmac->regs + M2P_CONTROL); + /* + * EP93xx User's Guide states that we must perform a dummy read after + * write to the control register. 
+ */ + readl(edmac->regs + M2P_CONTROL); +} + +static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_data *data = edmac->chan.private; + u32 control; + + writel(data->port & 0xf, edmac->regs + M2P_PPALLOC); + + control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE + | M2P_CONTROL_ENABLE; + m2p_set_control(edmac, control); + + return 0; +} + +static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac) +{ + return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; +} + +static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) +{ + u32 control; + + control = readl(edmac->regs + M2P_CONTROL); + control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); + m2p_set_control(edmac, control); + + while (m2p_channel_state(edmac) >= M2P_STATE_ON) + cpu_relax(); + + m2p_set_control(edmac, 0); + + while (m2p_channel_state(edmac) == M2P_STATE_STALL) + cpu_relax(); +} + +static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); + u32 bus_addr; + + if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE) + bus_addr = desc->src_addr; + else + bus_addr = desc->dst_addr; + + if (edmac->buffer == 0) { + writel(desc->size, edmac->regs + M2P_MAXCNT0); + writel(bus_addr, edmac->regs + M2P_BASE0); + } else { + writel(desc->size, edmac->regs + M2P_MAXCNT1); + writel(bus_addr, edmac->regs + M2P_BASE1); + } + + edmac->buffer ^= 1; +} + +static void m2p_hw_submit(struct ep93xx_dma_chan *edmac) +{ + u32 control = readl(edmac->regs + M2P_CONTROL); + + m2p_fill_desc(edmac); + control |= M2P_CONTROL_STALLINT; + + if (ep93xx_dma_advance_active(edmac)) { + m2p_fill_desc(edmac); + control |= M2P_CONTROL_NFBINT; + } + + m2p_set_control(edmac, control); +} + +static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac) +{ + u32 irq_status = readl(edmac->regs + M2P_INTERRUPT); + u32 control; + + if (irq_status & M2P_INTERRUPT_ERROR) { + struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); + + /* Clear the error interrupt */ + writel(1, edmac->regs + M2P_INTERRUPT); + + /* + * It seems that there is no easy way of reporting errors back + * to client so we just report the error here and continue as + * usual. + * + * Revisit this when there is a mechanism to report back the + * errors. + */ + dev_err(chan2dev(edmac), + "DMA transfer failed! Details:\n" + "\tcookie : %d\n" + "\tsrc_addr : 0x%08x\n" + "\tdst_addr : 0x%08x\n" + "\tsize : %zu\n", + desc->txd.cookie, desc->src_addr, desc->dst_addr, + desc->size); + } + + switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) { + case M2P_INTERRUPT_STALL: + /* Disable interrupts */ + control = readl(edmac->regs + M2P_CONTROL); + control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); + m2p_set_control(edmac, control); + + return INTERRUPT_DONE; + + case M2P_INTERRUPT_NFB: + if (ep93xx_dma_advance_active(edmac)) + m2p_fill_desc(edmac); + + return INTERRUPT_NEXT_BUFFER; + } + + return INTERRUPT_UNKNOWN; +} + +/* + * M2M DMA implementation + * + * For the M2M transfers we don't use NFB at all. This is because it simply + * doesn't work well with memcpy transfers. When you submit both buffers it is + * extremely unlikely that you get an NFB interrupt, but it instead reports + * DONE interrupt and both buffers are already transferred which means that we + * weren't able to update the next buffer. + * + * So for now we "simulate" NFB by just submitting buffer after buffer + * without double buffering. 
+ */ + +static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) +{ + const struct ep93xx_dma_data *data = edmac->chan.private; + u32 control = 0; + + if (!data) { + /* This is memcpy channel, nothing to configure */ + writel(control, edmac->regs + M2M_CONTROL); + return 0; + } + + switch (data->port) { + case EP93XX_DMA_SSP: + /* + * This was found via experimenting - anything less than 5 + * causes the channel to perform only a partial transfer which + * leads to problems since we don't get DONE interrupt then. + */ + control = (5 << M2M_CONTROL_PWSC_SHIFT); + control |= M2M_CONTROL_NO_HDSK; + + if (data->direction == DMA_TO_DEVICE) { + control |= M2M_CONTROL_DAH; + control |= M2M_CONTROL_TM_TX; + control |= M2M_CONTROL_RSS_SSPTX; + } else { + control |= M2M_CONTROL_SAH; + control |= M2M_CONTROL_TM_RX; + control |= M2M_CONTROL_RSS_SSPRX; + } + break; + + case EP93XX_DMA_IDE: + /* + * This IDE part is totally untested. Values below are taken + * from the EP93xx Users's Guide and might not be correct. + */ + control |= M2M_CONTROL_NO_HDSK; + control |= M2M_CONTROL_RSS_IDE; + control |= M2M_CONTROL_PW_16; + + if (data->direction == DMA_TO_DEVICE) { + /* Worst case from the UG */ + control = (3 << M2M_CONTROL_PWSC_SHIFT); + control |= M2M_CONTROL_DAH; + control |= M2M_CONTROL_TM_TX; + } else { + control = (2 << M2M_CONTROL_PWSC_SHIFT); + control |= M2M_CONTROL_SAH; + control |= M2M_CONTROL_TM_RX; + } + break; + + default: + return -EINVAL; + } + + writel(control, edmac->regs + M2M_CONTROL); + return 0; +} + +static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac) +{ + /* Just disable the channel */ + writel(0, edmac->regs + M2M_CONTROL); +} + +static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); + + if (edmac->buffer == 0) { + writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); + writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0); + writel(desc->size, edmac->regs + M2M_BCR0); + } else { + writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1); + writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1); + writel(desc->size, edmac->regs + M2M_BCR1); + } + + edmac->buffer ^= 1; +} + +static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_data *data = edmac->chan.private; + u32 control = readl(edmac->regs + M2M_CONTROL); + + /* + * Since we allow clients to configure PW (peripheral width) we always + * clear PW bits here and then set them according what is given in + * the runtime configuration. + */ + control &= ~M2M_CONTROL_PW_MASK; + control |= edmac->runtime_ctrl; + + m2m_fill_desc(edmac); + control |= M2M_CONTROL_DONEINT; + + /* + * Now we can finally enable the channel. For M2M channel this must be + * done _after_ the BCRx registers are programmed. + */ + control |= M2M_CONTROL_ENABLE; + writel(control, edmac->regs + M2M_CONTROL); + + if (!data) { + /* + * For memcpy channels the software trigger must be asserted + * in order to start the memcpy operation. 
+ */ + control |= M2M_CONTROL_START; + writel(control, edmac->regs + M2M_CONTROL); + } +} + +static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) +{ + u32 control; + + if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT)) + return INTERRUPT_UNKNOWN; + + /* Clear the DONE bit */ + writel(0, edmac->regs + M2M_INTERRUPT); + + /* Disable interrupts and the channel */ + control = readl(edmac->regs + M2M_CONTROL); + control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE); + writel(control, edmac->regs + M2M_CONTROL); + + /* + * Since we only get DONE interrupt we have to find out ourselves + * whether there still is something to process. So we try to advance + * the chain an see whether it succeeds. + */ + if (ep93xx_dma_advance_active(edmac)) { + edmac->edma->hw_submit(edmac); + return INTERRUPT_NEXT_BUFFER; + } + + return INTERRUPT_DONE; +} + +/* + * DMA engine API implementation + */ + +static struct ep93xx_dma_desc * +ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_desc *desc, *_desc; + struct ep93xx_dma_desc *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&edmac->lock, flags); + list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) { + if (async_tx_test_ack(&desc->txd)) { + list_del_init(&desc->node); + + /* Re-initialize the descriptor */ + desc->src_addr = 0; + desc->dst_addr = 0; + desc->size = 0; + desc->complete = false; + desc->txd.cookie = 0; + desc->txd.callback = NULL; + desc->txd.callback_param = NULL; + + ret = desc; + break; + } + } + spin_unlock_irqrestore(&edmac->lock, flags); + return ret; +} + +static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac, + struct ep93xx_dma_desc *desc) +{ + if (desc) { + unsigned long flags; + + spin_lock_irqsave(&edmac->lock, flags); + list_splice_init(&desc->tx_list, &edmac->free_list); + list_add(&desc->node, &edmac->free_list); + spin_unlock_irqrestore(&edmac->lock, flags); + } +} + +/** + * ep93xx_dma_advance_work - start processing the next pending transaction + * @edmac: channel + * + * If we have pending transactions queued and we are currently idling, this + * function takes the next queued transaction from the @edmac->queue and + * pushes it to the hardware for execution. 
+ */ +static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_desc *new; + unsigned long flags; + + spin_lock_irqsave(&edmac->lock, flags); + if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) { + spin_unlock_irqrestore(&edmac->lock, flags); + return; + } + + /* Take the next descriptor from the pending queue */ + new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node); + list_del_init(&new->node); + + ep93xx_dma_set_active(edmac, new); + + /* Push it to the hardware */ + edmac->edma->hw_submit(edmac); + spin_unlock_irqrestore(&edmac->lock, flags); +} + +static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) +{ + struct device *dev = desc->txd.chan->device->dev; + + if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + dma_unmap_single(dev, desc->src_addr, desc->size, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, desc->src_addr, desc->size, + DMA_TO_DEVICE); + } + if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + dma_unmap_single(dev, desc->dst_addr, desc->size, + DMA_FROM_DEVICE); + else + dma_unmap_page(dev, desc->dst_addr, desc->size, + DMA_FROM_DEVICE); + } +} + +static void ep93xx_dma_tasklet(unsigned long data) +{ + struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; + struct ep93xx_dma_desc *desc, *d; + dma_async_tx_callback callback; + void *callback_param; + LIST_HEAD(list); + + spin_lock_irq(&edmac->lock); + desc = ep93xx_dma_get_active(edmac); + if (desc->complete) { + edmac->last_completed = desc->txd.cookie; + list_splice_init(&edmac->active, &list); + } + spin_unlock_irq(&edmac->lock); + + /* Pick up the next descriptor from the queue */ + ep93xx_dma_advance_work(edmac); + + callback = desc->txd.callback; + callback_param = desc->txd.callback_param; + + /* Now we can release all the chained descriptors */ + list_for_each_entry_safe(desc, d, &list, node) { + /* + * For the memcpy channels the API requires us to unmap the + * buffers unless requested otherwise. + */ + if (!edmac->chan.private) + ep93xx_dma_unmap_buffers(desc); + + ep93xx_dma_desc_put(edmac, desc); + } + + if (callback) + callback(callback_param); +} + +static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) +{ + struct ep93xx_dma_chan *edmac = dev_id; + irqreturn_t ret = IRQ_HANDLED; + + spin_lock(&edmac->lock); + + switch (edmac->edma->hw_interrupt(edmac)) { + case INTERRUPT_DONE: + ep93xx_dma_get_active(edmac)->complete = true; + tasklet_schedule(&edmac->tasklet); + break; + + case INTERRUPT_NEXT_BUFFER: + if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) + tasklet_schedule(&edmac->tasklet); + break; + + default: + dev_warn(chan2dev(edmac), "unknown interrupt!\n"); + ret = IRQ_NONE; + break; + } + + spin_unlock(&edmac->lock); + return ret; +} + +/** + * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed + * @tx: descriptor to be executed + * + * Function will execute given descriptor on the hardware or if the hardware + * is busy, queue the descriptor to be executed later on. Returns cookie which + * can be used to poll the status of the descriptor. 
+ */ +static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan); + struct ep93xx_dma_desc *desc; + dma_cookie_t cookie; + unsigned long flags; + + spin_lock_irqsave(&edmac->lock, flags); + + cookie = edmac->chan.cookie; + + if (++cookie < 0) + cookie = 1; + + desc = container_of(tx, struct ep93xx_dma_desc, txd); + + edmac->chan.cookie = cookie; + desc->txd.cookie = cookie; + + /* + * If nothing is currently prosessed, we push this descriptor + * directly to the hardware. Otherwise we put the descriptor + * to the pending queue. + */ + if (list_empty(&edmac->active)) { + ep93xx_dma_set_active(edmac, desc); + edmac->edma->hw_submit(edmac); + } else { + list_add_tail(&desc->node, &edmac->queue); + } + + spin_unlock_irqrestore(&edmac->lock, flags); + return cookie; +} + +/** + * ep93xx_dma_alloc_chan_resources - allocate resources for the channel + * @chan: channel to allocate resources + * + * Function allocates necessary resources for the given DMA channel and + * returns number of allocated descriptors for the channel. Negative errno + * is returned in case of failure. + */ +static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_data *data = chan->private; + const char *name = dma_chan_name(chan); + int ret, i; + + /* Sanity check the channel parameters */ + if (!edmac->edma->m2m) { + if (!data) + return -EINVAL; + if (data->port < EP93XX_DMA_I2S1 || + data->port > EP93XX_DMA_IRDA) + return -EINVAL; + if (data->direction != ep93xx_dma_chan_direction(chan)) + return -EINVAL; + } else { + if (data) { + switch (data->port) { + case EP93XX_DMA_SSP: + case EP93XX_DMA_IDE: + if (data->direction != DMA_TO_DEVICE && + data->direction != DMA_FROM_DEVICE) + return -EINVAL; + break; + default: + return -EINVAL; + } + } + } + + if (data && data->name) + name = data->name; + + ret = clk_enable(edmac->clk); + if (ret) + return ret; + + ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac); + if (ret) + goto fail_clk_disable; + + spin_lock_irq(&edmac->lock); + edmac->last_completed = 1; + edmac->chan.cookie = 1; + ret = edmac->edma->hw_setup(edmac); + spin_unlock_irq(&edmac->lock); + + if (ret) + goto fail_free_irq; + + for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) { + struct ep93xx_dma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) { + dev_warn(chan2dev(edmac), "not enough descriptors\n"); + break; + } + + INIT_LIST_HEAD(&desc->tx_list); + + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.flags = DMA_CTRL_ACK; + desc->txd.tx_submit = ep93xx_dma_tx_submit; + + ep93xx_dma_desc_put(edmac, desc); + } + + return i; + +fail_free_irq: + free_irq(edmac->irq, edmac); +fail_clk_disable: + clk_disable(edmac->clk); + + return ret; +} + +/** + * ep93xx_dma_free_chan_resources - release resources for the channel + * @chan: channel + * + * Function releases all the resources allocated for the given channel. + * The channel must be idle when this is called. 
+ */ +static void ep93xx_dma_free_chan_resources(struct dma_chan *chan) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_desc *desc, *d; + unsigned long flags; + LIST_HEAD(list); + + BUG_ON(!list_empty(&edmac->active)); + BUG_ON(!list_empty(&edmac->queue)); + + spin_lock_irqsave(&edmac->lock, flags); + edmac->edma->hw_shutdown(edmac); + edmac->runtime_addr = 0; + edmac->runtime_ctrl = 0; + edmac->buffer = 0; + list_splice_init(&edmac->free_list, &list); + spin_unlock_irqrestore(&edmac->lock, flags); + + list_for_each_entry_safe(desc, d, &list, node) + kfree(desc); + + clk_disable(edmac->clk); + free_irq(edmac->irq, edmac); +} + +/** + * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation + * @chan: channel + * @dest: destination bus address + * @src: source bus address + * @len: size of the transaction + * @flags: flags for the descriptor + * + * Returns a valid DMA descriptor or %NULL in case of failure. + */ +struct dma_async_tx_descriptor * +ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_desc *desc, *first; + size_t bytes, offset; + + first = NULL; + for (offset = 0; offset < len; offset += bytes) { + desc = ep93xx_dma_desc_get(edmac); + if (!desc) { + dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + goto fail; + } + + bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES); + + desc->src_addr = src + offset; + desc->dst_addr = dest + offset; + desc->size = bytes; + + if (!first) + first = desc; + else + list_add_tail(&desc->node, &first->tx_list); + } + + first->txd.cookie = -EBUSY; + first->txd.flags = flags; + + return &first->txd; +fail: + ep93xx_dma_desc_put(edmac, first); + return NULL; +} + +/** + * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation + * @chan: channel + * @sgl: list of buffers to transfer + * @sg_len: number of entries in @sgl + * @dir: direction of tha DMA transfer + * @flags: flags for the descriptor + * + * Returns a valid DMA descriptor or %NULL in case of failure. 
+ */ +static struct dma_async_tx_descriptor * +ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction dir, + unsigned long flags) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_desc *desc, *first; + struct scatterlist *sg; + int i; + + if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { + dev_warn(chan2dev(edmac), + "channel was configured with different direction\n"); + return NULL; + } + + if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { + dev_warn(chan2dev(edmac), + "channel is already used for cyclic transfers\n"); + return NULL; + } + + first = NULL; + for_each_sg(sgl, sg, sg_len, i) { + size_t sg_len = sg_dma_len(sg); + + if (sg_len > DMA_MAX_CHAN_BYTES) { + dev_warn(chan2dev(edmac), "too big transfer size %d\n", + sg_len); + goto fail; + } + + desc = ep93xx_dma_desc_get(edmac); + if (!desc) { + dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + goto fail; + } + + if (dir == DMA_TO_DEVICE) { + desc->src_addr = sg_dma_address(sg); + desc->dst_addr = edmac->runtime_addr; + } else { + desc->src_addr = edmac->runtime_addr; + desc->dst_addr = sg_dma_address(sg); + } + desc->size = sg_len; + + if (!first) + first = desc; + else + list_add_tail(&desc->node, &first->tx_list); + } + + first->txd.cookie = -EBUSY; + first->txd.flags = flags; + + return &first->txd; + +fail: + ep93xx_dma_desc_put(edmac, first); + return NULL; +} + +/** + * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation + * @chan: channel + * @dma_addr: DMA mapped address of the buffer + * @buf_len: length of the buffer (in bytes) + * @period_len: lenght of a single period + * @dir: direction of the operation + * + * Prepares a descriptor for cyclic DMA operation. This means that once the + * descriptor is submitted, we will be submitting in a @period_len sized + * buffers and calling callback once the period has been elapsed. Transfer + * terminates only when client calls dmaengine_terminate_all() for this + * channel. + * + * Returns a valid DMA descriptor or %NULL in case of failure. 
+ */ +static struct dma_async_tx_descriptor * +ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, + size_t buf_len, size_t period_len, + enum dma_data_direction dir) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_desc *desc, *first; + size_t offset = 0; + + if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { + dev_warn(chan2dev(edmac), + "channel was configured with different direction\n"); + return NULL; + } + + if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { + dev_warn(chan2dev(edmac), + "channel is already used for cyclic transfers\n"); + return NULL; + } + + if (period_len > DMA_MAX_CHAN_BYTES) { + dev_warn(chan2dev(edmac), "too big period length %d\n", + period_len); + return NULL; + } + + /* Split the buffer into period size chunks */ + first = NULL; + for (offset = 0; offset < buf_len; offset += period_len) { + desc = ep93xx_dma_desc_get(edmac); + if (!desc) { + dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + goto fail; + } + + if (dir == DMA_TO_DEVICE) { + desc->src_addr = dma_addr + offset; + desc->dst_addr = edmac->runtime_addr; + } else { + desc->src_addr = edmac->runtime_addr; + desc->dst_addr = dma_addr + offset; + } + + desc->size = period_len; + + if (!first) + first = desc; + else + list_add_tail(&desc->node, &first->tx_list); + } + + first->txd.cookie = -EBUSY; + + return &first->txd; + +fail: + ep93xx_dma_desc_put(edmac, first); + return NULL; +} + +/** + * ep93xx_dma_terminate_all - terminate all transactions + * @edmac: channel + * + * Stops all DMA transactions. All descriptors are put back to the + * @edmac->free_list and callbacks are _not_ called. + */ +static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_desc *desc, *_d; + unsigned long flags; + LIST_HEAD(list); + + spin_lock_irqsave(&edmac->lock, flags); + /* First we disable and flush the DMA channel */ + edmac->edma->hw_shutdown(edmac); + clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags); + list_splice_init(&edmac->active, &list); + list_splice_init(&edmac->queue, &list); + /* + * We then re-enable the channel. This way we can continue submitting + * the descriptors by just calling ->hw_submit() again. + */ + edmac->edma->hw_setup(edmac); + spin_unlock_irqrestore(&edmac->lock, flags); + + list_for_each_entry_safe(desc, _d, &list, node) + ep93xx_dma_desc_put(edmac, desc); + + return 0; +} + +static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, + struct dma_slave_config *config) +{ + enum dma_slave_buswidth width; + unsigned long flags; + u32 addr, ctrl; + + if (!edmac->edma->m2m) + return -EINVAL; + + switch (config->direction) { + case DMA_FROM_DEVICE: + width = config->src_addr_width; + addr = config->src_addr; + break; + + case DMA_TO_DEVICE: + width = config->dst_addr_width; + addr = config->dst_addr; + break; + + default: + return -EINVAL; + } + + switch (width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + ctrl = 0; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + ctrl = M2M_CONTROL_PW_16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + ctrl = M2M_CONTROL_PW_32; + break; + default: + return -EINVAL; + } + + spin_lock_irqsave(&edmac->lock, flags); + edmac->runtime_addr = addr; + edmac->runtime_ctrl = ctrl; + spin_unlock_irqrestore(&edmac->lock, flags); + + return 0; +} + +/** + * ep93xx_dma_control - manipulate all pending operations on a channel + * @chan: channel + * @cmd: control command to perform + * @arg: optional argument + * + * Controls the channel. 
Function returns %0 in case of success or negative + * error in case of failure. + */ +static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct dma_slave_config *config; + + switch (cmd) { + case DMA_TERMINATE_ALL: + return ep93xx_dma_terminate_all(edmac); + + case DMA_SLAVE_CONFIG: + config = (struct dma_slave_config *)arg; + return ep93xx_dma_slave_config(edmac, config); + + default: + break; + } + + return -ENOSYS; +} + +/** + * ep93xx_dma_tx_status - check if a transaction is completed + * @chan: channel + * @cookie: transaction specific cookie + * @state: state of the transaction is stored here if given + * + * This function can be used to query state of a given transaction. + */ +static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + dma_cookie_t last_used, last_completed; + enum dma_status ret; + unsigned long flags; + + spin_lock_irqsave(&edmac->lock, flags); + last_used = chan->cookie; + last_completed = edmac->last_completed; + spin_unlock_irqrestore(&edmac->lock, flags); + + ret = dma_async_is_complete(cookie, last_completed, last_used); + dma_set_tx_state(state, last_completed, last_used, 0); + + return ret; +} + +/** + * ep93xx_dma_issue_pending - push pending transactions to the hardware + * @chan: channel + * + * When this function is called, all pending transactions are pushed to the + * hardware and executed. + */ +static void ep93xx_dma_issue_pending(struct dma_chan *chan) +{ + ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan)); +} + +static int __init ep93xx_dma_probe(struct platform_device *pdev) +{ + struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct ep93xx_dma_engine *edma; + struct dma_device *dma_dev; + size_t edma_size; + int ret, i; + + edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan); + edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL); + if (!edma) + return -ENOMEM; + + dma_dev = &edma->dma_dev; + edma->m2m = platform_get_device_id(pdev)->driver_data; + edma->num_channels = pdata->num_channels; + + INIT_LIST_HEAD(&dma_dev->channels); + for (i = 0; i < pdata->num_channels; i++) { + const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i]; + struct ep93xx_dma_chan *edmac = &edma->channels[i]; + + edmac->chan.device = dma_dev; + edmac->regs = cdata->base; + edmac->irq = cdata->irq; + edmac->edma = edma; + + edmac->clk = clk_get(NULL, cdata->name); + if (IS_ERR(edmac->clk)) { + dev_warn(&pdev->dev, "failed to get clock for %s\n", + cdata->name); + continue; + } + + spin_lock_init(&edmac->lock); + INIT_LIST_HEAD(&edmac->active); + INIT_LIST_HEAD(&edmac->queue); + INIT_LIST_HEAD(&edmac->free_list); + tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet, + (unsigned long)edmac); + + list_add_tail(&edmac->chan.device_node, + &dma_dev->channels); + } + + dma_cap_zero(dma_dev->cap_mask); + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); + + dma_dev->dev = &pdev->dev; + dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources; + dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; + dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; + dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; + dma_dev->device_control = ep93xx_dma_control; + dma_dev->device_issue_pending = ep93xx_dma_issue_pending; + 
dma_dev->device_tx_status = ep93xx_dma_tx_status; + + dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES); + + if (edma->m2m) { + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy; + + edma->hw_setup = m2m_hw_setup; + edma->hw_shutdown = m2m_hw_shutdown; + edma->hw_submit = m2m_hw_submit; + edma->hw_interrupt = m2m_hw_interrupt; + } else { + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); + + edma->hw_setup = m2p_hw_setup; + edma->hw_shutdown = m2p_hw_shutdown; + edma->hw_submit = m2p_hw_submit; + edma->hw_interrupt = m2p_hw_interrupt; + } + + ret = dma_async_device_register(dma_dev); + if (unlikely(ret)) { + for (i = 0; i < edma->num_channels; i++) { + struct ep93xx_dma_chan *edmac = &edma->channels[i]; + if (!IS_ERR_OR_NULL(edmac->clk)) + clk_put(edmac->clk); + } + kfree(edma); + } else { + dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", + edma->m2m ? "M" : "P"); + } + + return ret; +} + +static struct platform_device_id ep93xx_dma_driver_ids[] = { + { "ep93xx-dma-m2p", 0 }, + { "ep93xx-dma-m2m", 1 }, + { }, +}; + +static struct platform_driver ep93xx_dma_driver = { + .driver = { + .name = "ep93xx-dma", + }, + .id_table = ep93xx_dma_driver_ids, +}; + +static int __init ep93xx_dma_module_init(void) +{ + return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe); +} +subsys_initcall(ep93xx_dma_module_init); + +MODULE_AUTHOR("Mika Westerberg "); +MODULE_DESCRIPTION("EP93xx DMA driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From f911d026e84a137e35701a4f23732f47ce40a6b8 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Sun, 29 May 2011 13:10:03 +0300 Subject: ep93xx: add dmaengine platform code Add platform support code for the new EP93xx dmaengine driver. Signed-off-by: Mika Westerberg Signed-off-by: Ryan Mallon Acked-by: H Hartley Sweeten Signed-off-by: Vinod Koul --- arch/arm/mach-ep93xx/Makefile | 2 + arch/arm/mach-ep93xx/dma.c | 108 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+) create mode 100644 arch/arm/mach-ep93xx/dma.c diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile index 33ee2c863d18..4920f7ae8330 100644 --- a/arch/arm/mach-ep93xx/Makefile +++ b/arch/arm/mach-ep93xx/Makefile @@ -6,6 +6,8 @@ obj-m := obj-n := obj- := +obj-$(CONFIG_EP93XX_DMA) += dma.o + obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c new file mode 100644 index 000000000000..5a2570881255 --- /dev/null +++ b/arch/arm/mach-ep93xx/dma.c @@ -0,0 +1,108 @@ +/* + * arch/arm/mach-ep93xx/dma.c + * + * Platform support code for the EP93xx dmaengine driver. + * + * Copyright (C) 2011 Mika Westerberg + * + * This work is based on the original dma-m2p implementation with + * following copyrights: + * + * Copyright (C) 2006 Lennert Buytenhek + * Copyright (C) 2006 Applied Data Systems + * Copyright (C) 2009 Ryan Mallon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DMA_CHANNEL(_name, _base, _irq) \ + { .name = (_name), .base = (_base), .irq = (_irq) } + +/* + * DMA M2P channels. 
+ * + * On the EP93xx chip the following peripherals my be allocated to the 10 + * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive). + * + * I2S contains 3 Tx and 3 Rx DMA Channels + * AAC contains 3 Tx and 3 Rx DMA Channels + * UART1 contains 1 Tx and 1 Rx DMA Channels + * UART2 contains 1 Tx and 1 Rx DMA Channels + * UART3 contains 1 Tx and 1 Rx DMA Channels + * IrDA contains 1 Tx and 1 Rx DMA Channels + * + * Registers are mapped statically in ep93xx_map_io(). + */ +static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = { + DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0), + DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1), + DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2), + DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3), + DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4), + DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5), + DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6), + DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7), + DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8), + DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9), +}; + +static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = { + .channels = ep93xx_dma_m2p_channels, + .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels), +}; + +static struct platform_device ep93xx_dma_m2p_device = { + .name = "ep93xx-dma-m2p", + .id = -1, + .dev = { + .platform_data = &ep93xx_dma_m2p_data, + }, +}; + +/* + * DMA M2M channels. + * + * There are 2 M2M channels which support memcpy/memset and in addition simple + * hardware requests from/to SSP and IDE. We do not implement an external + * hardware requests. + * + * Registers are mapped statically in ep93xx_map_io(). + */ +static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = { + DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0), + DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1), +}; + +static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = { + .channels = ep93xx_dma_m2m_channels, + .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels), +}; + +static struct platform_device ep93xx_dma_m2m_device = { + .name = "ep93xx-dma-m2m", + .id = -1, + .dev = { + .platform_data = &ep93xx_dma_m2m_data, + }, +}; + +static int __init ep93xx_dma_init(void) +{ + platform_device_register(&ep93xx_dma_m2p_device); + platform_device_register(&ep93xx_dma_m2m_device); + return 0; +} +arch_initcall(ep93xx_dma_init); -- cgit v1.2.3 From a103fc67c612bfc0f6388885fea7244967afaad4 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Sun, 29 May 2011 13:10:04 +0300 Subject: ASoC: ep93xx: convert to use the DMA engine API Now that we have the EP93xx DMA engine driver in place, we convert the ASoC drivers (I2S, AC97 and PCM) to take advantage of this new API. There are no functional changes. 
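For background, the consumer side of the dmaengine API that the PCM code
switches to boils down to the pattern sketched below. The sketch is
illustrative only and not part of the patch: the helper names (my_filter,
my_start_cyclic) and the minimal error handling are made up, while the calls
themselves (dma_request_channel(), device_prep_dma_cyclic(),
dmaengine_submit()) are the ones used in the conversion that follows.

#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool my_filter(struct dma_chan *chan, void *filter_param)
{
	/* Accept only a channel whose direction matches the request */
	struct ep93xx_dma_data *data = filter_param;

	if (data->direction == ep93xx_dma_chan_direction(chan)) {
		chan->private = data;
		return true;
	}
	return false;
}

static int my_start_cyclic(struct ep93xx_dma_data *dma_data, dma_addr_t buf,
			   size_t buf_len, size_t period_len,
			   dma_async_tx_callback cb, void *cb_param)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_CYCLIC, mask);

	/* Ask the dmaengine core for a channel that passes our filter */
	chan = dma_request_channel(mask, my_filter, dma_data);
	if (!chan)
		return -ENODEV;

	/* Cyclic transfer: one callback per elapsed period until terminated */
	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    period_len,
						    dma_data->direction);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	desc->callback = cb;
	desc->callback_param = cb_param;
	dmaengine_submit(desc);
	return 0;
}

In the PCM driver the callback only advances the software pointer and calls
snd_pcm_period_elapsed(), and the transfer is stopped with
dmaengine_terminate_all() on the STOP/SUSPEND/PAUSE triggers.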
Signed-off-by: Mika Westerberg Acked-by: H Hartley Sweeten Acked-by: Liam Girdwood Acked-by: Mark Brown Signed-off-by: Vinod Koul --- sound/soc/ep93xx/ep93xx-ac97.c | 4 +- sound/soc/ep93xx/ep93xx-i2s.c | 4 +- sound/soc/ep93xx/ep93xx-pcm.c | 137 +++++++++++++++++++++++------------------ 3 files changed, 81 insertions(+), 64 deletions(-) diff --git a/sound/soc/ep93xx/ep93xx-ac97.c b/sound/soc/ep93xx/ep93xx-ac97.c index 104e95cda0ad..c7417c76552b 100644 --- a/sound/soc/ep93xx/ep93xx-ac97.c +++ b/sound/soc/ep93xx/ep93xx-ac97.c @@ -106,12 +106,12 @@ static struct ep93xx_ac97_info *ep93xx_ac97_info; static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_out = { .name = "ac97-pcm-out", - .dma_port = EP93XX_DMA_M2P_PORT_AAC1, + .dma_port = EP93XX_DMA_AAC1, }; static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_in = { .name = "ac97-pcm-in", - .dma_port = EP93XX_DMA_M2P_PORT_AAC1, + .dma_port = EP93XX_DMA_AAC1, }; static inline unsigned ep93xx_ac97_read_reg(struct ep93xx_ac97_info *info, diff --git a/sound/soc/ep93xx/ep93xx-i2s.c b/sound/soc/ep93xx/ep93xx-i2s.c index 042f4e93746f..30df42568dbb 100644 --- a/sound/soc/ep93xx/ep93xx-i2s.c +++ b/sound/soc/ep93xx/ep93xx-i2s.c @@ -70,11 +70,11 @@ struct ep93xx_i2s_info { struct ep93xx_pcm_dma_params ep93xx_i2s_dma_params[] = { [SNDRV_PCM_STREAM_PLAYBACK] = { .name = "i2s-pcm-out", - .dma_port = EP93XX_DMA_M2P_PORT_I2S1, + .dma_port = EP93XX_DMA_I2S1, }, [SNDRV_PCM_STREAM_CAPTURE] = { .name = "i2s-pcm-in", - .dma_port = EP93XX_DMA_M2P_PORT_I2S1, + .dma_port = EP93XX_DMA_I2S1, }, }; diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c index a456e491155f..a07f99c9c375 100644 --- a/sound/soc/ep93xx/ep93xx-pcm.c +++ b/sound/soc/ep93xx/ep93xx-pcm.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -53,43 +54,34 @@ static const struct snd_pcm_hardware ep93xx_pcm_hardware = { struct ep93xx_runtime_data { - struct ep93xx_dma_m2p_client cl; - struct ep93xx_pcm_dma_params *params; int pointer_bytes; - struct tasklet_struct period_tasklet; int periods; - struct ep93xx_dma_buffer buf[32]; + int period_bytes; + struct dma_chan *dma_chan; + struct ep93xx_dma_data dma_data; }; -static void ep93xx_pcm_period_elapsed(unsigned long data) +static void ep93xx_pcm_dma_callback(void *data) { - struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data; - snd_pcm_period_elapsed(substream); -} + struct snd_pcm_substream *substream = data; + struct ep93xx_runtime_data *rtd = substream->runtime->private_data; -static void ep93xx_pcm_buffer_started(void *cookie, - struct ep93xx_dma_buffer *buf) -{ + rtd->pointer_bytes += rtd->period_bytes; + rtd->pointer_bytes %= rtd->period_bytes * rtd->periods; + + snd_pcm_period_elapsed(substream); } -static void ep93xx_pcm_buffer_finished(void *cookie, - struct ep93xx_dma_buffer *buf, - int bytes, int error) +static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param) { - struct snd_pcm_substream *substream = cookie; - struct ep93xx_runtime_data *rtd = substream->runtime->private_data; - - if (buf == rtd->buf + rtd->periods - 1) - rtd->pointer_bytes = 0; - else - rtd->pointer_bytes += buf->size; + struct ep93xx_dma_data *data = filter_param; - if (!error) { - ep93xx_dma_m2p_submit_recursive(&rtd->cl, buf); - tasklet_schedule(&rtd->period_tasklet); - } else { - snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); + if (data->direction == ep93xx_dma_chan_direction(chan)) { + chan->private = data; + return true; } + + return false; } static int ep93xx_pcm_open(struct 
snd_pcm_substream *substream) @@ -98,30 +90,38 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream) struct snd_soc_dai *cpu_dai = soc_rtd->cpu_dai; struct ep93xx_pcm_dma_params *dma_params; struct ep93xx_runtime_data *rtd; + dma_cap_mask_t mask; int ret; - dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream); + ret = snd_pcm_hw_constraint_integer(substream->runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (ret < 0) + return ret; + snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware); rtd = kmalloc(sizeof(*rtd), GFP_KERNEL); if (!rtd) return -ENOMEM; - memset(&rtd->period_tasklet, 0, sizeof(rtd->period_tasklet)); - rtd->period_tasklet.func = ep93xx_pcm_period_elapsed; - rtd->period_tasklet.data = (unsigned long)substream; - - rtd->cl.name = dma_params->name; - rtd->cl.flags = dma_params->dma_port | EP93XX_DMA_M2P_IGNORE_ERROR | - ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? - EP93XX_DMA_M2P_TX : EP93XX_DMA_M2P_RX); - rtd->cl.cookie = substream; - rtd->cl.buffer_started = ep93xx_pcm_buffer_started; - rtd->cl.buffer_finished = ep93xx_pcm_buffer_finished; - ret = ep93xx_dma_m2p_client_register(&rtd->cl); - if (ret < 0) { + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_CYCLIC, mask); + + dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream); + rtd->dma_data.port = dma_params->dma_port; + rtd->dma_data.name = dma_params->name; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + rtd->dma_data.direction = DMA_TO_DEVICE; + else + rtd->dma_data.direction = DMA_FROM_DEVICE; + + rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter, + &rtd->dma_data); + if (!rtd->dma_chan) { kfree(rtd); - return ret; + return -EINVAL; } substream->runtime->private_data = rtd; @@ -132,31 +132,52 @@ static int ep93xx_pcm_close(struct snd_pcm_substream *substream) { struct ep93xx_runtime_data *rtd = substream->runtime->private_data; - ep93xx_dma_m2p_client_unregister(&rtd->cl); + dma_release_channel(rtd->dma_chan); kfree(rtd); return 0; } +static int ep93xx_pcm_dma_submit(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct ep93xx_runtime_data *rtd = runtime->private_data; + struct dma_chan *chan = rtd->dma_chan; + struct dma_device *dma_dev = chan->device; + struct dma_async_tx_descriptor *desc; + + rtd->pointer_bytes = 0; + desc = dma_dev->device_prep_dma_cyclic(chan, runtime->dma_addr, + rtd->period_bytes * rtd->periods, + rtd->period_bytes, + rtd->dma_data.direction); + if (!desc) + return -EINVAL; + + desc->callback = ep93xx_pcm_dma_callback; + desc->callback_param = substream; + + dmaengine_submit(desc); + return 0; +} + +static void ep93xx_pcm_dma_flush(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct ep93xx_runtime_data *rtd = runtime->private_data; + + dmaengine_terminate_all(rtd->dma_chan); +} + static int ep93xx_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; struct ep93xx_runtime_data *rtd = runtime->private_data; - size_t totsize = params_buffer_bytes(params); - size_t period = params_period_bytes(params); - int i; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); - runtime->dma_bytes = totsize; - - rtd->periods = (totsize + period - 1) / period; - for (i = 0; i < rtd->periods; i++) { - rtd->buf[i].bus_addr = runtime->dma_addr + (i * period); - rtd->buf[i].size = period; - if ((i + 1) * period > totsize) - rtd->buf[i].size = 
totsize - (i * period); - } + rtd->periods = params_periods(params); + rtd->period_bytes = params_period_bytes(params); return 0; } @@ -168,24 +189,20 @@ static int ep93xx_pcm_hw_free(struct snd_pcm_substream *substream) static int ep93xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { - struct ep93xx_runtime_data *rtd = substream->runtime->private_data; int ret; - int i; ret = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - rtd->pointer_bytes = 0; - for (i = 0; i < rtd->periods; i++) - ep93xx_dma_m2p_submit(&rtd->cl, rtd->buf + i); + ret = ep93xx_pcm_dma_submit(substream); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - ep93xx_dma_m2p_flush(&rtd->cl); + ep93xx_pcm_dma_flush(substream); break; default: -- cgit v1.2.3 From 8e4a93008db7780e45838fe65840b289f389ef4a Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Sun, 29 May 2011 13:10:05 +0300 Subject: ep93xx: remove the old M2P DMA code Since we have converted all existing users of the old DMA API to use the DMA engine API the old code can be dropped. Signed-off-by: Mika Westerberg Acked-by: Ryan Mallon Acked-by: H Hartley Sweeten Signed-off-by: Vinod Koul --- arch/arm/mach-ep93xx/Makefile | 2 +- arch/arm/mach-ep93xx/dma-m2p.c | 411 -------------------------------- arch/arm/mach-ep93xx/include/mach/dma.h | 143 ----------- 3 files changed, 1 insertion(+), 555 deletions(-) delete mode 100644 arch/arm/mach-ep93xx/dma-m2p.c diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile index 4920f7ae8330..21e721ab7378 100644 --- a/arch/arm/mach-ep93xx/Makefile +++ b/arch/arm/mach-ep93xx/Makefile @@ -1,7 +1,7 @@ # # Makefile for the linux kernel. # -obj-y := core.o clock.o dma-m2p.o gpio.o +obj-y := core.o clock.o gpio.o obj-m := obj-n := obj- := diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c deleted file mode 100644 index a696d354b1f8..000000000000 --- a/arch/arm/mach-ep93xx/dma-m2p.c +++ /dev/null @@ -1,411 +0,0 @@ -/* - * arch/arm/mach-ep93xx/dma-m2p.c - * M2P DMA handling for Cirrus EP93xx chips. - * - * Copyright (C) 2006 Lennert Buytenhek - * Copyright (C) 2006 Applied Data Systems - * - * Copyright (C) 2009 Ryan Mallon - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - */ - -/* - * On the EP93xx chip the following peripherals my be allocated to the 10 - * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive). - * - * I2S contains 3 Tx and 3 Rx DMA Channels - * AAC contains 3 Tx and 3 Rx DMA Channels - * UART1 contains 1 Tx and 1 Rx DMA Channels - * UART2 contains 1 Tx and 1 Rx DMA Channels - * UART3 contains 1 Tx and 1 Rx DMA Channels - * IrDA contains 1 Tx and 1 Rx DMA Channels - * - * SSP and IDE use the Memory to Memory (M2M) channels and are not covered - * with this implementation. 
- */ - -#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include - -#include -#include - -#define M2P_CONTROL 0x00 -#define M2P_CONTROL_STALL_IRQ_EN (1 << 0) -#define M2P_CONTROL_NFB_IRQ_EN (1 << 1) -#define M2P_CONTROL_ERROR_IRQ_EN (1 << 3) -#define M2P_CONTROL_ENABLE (1 << 4) -#define M2P_INTERRUPT 0x04 -#define M2P_INTERRUPT_STALL (1 << 0) -#define M2P_INTERRUPT_NFB (1 << 1) -#define M2P_INTERRUPT_ERROR (1 << 3) -#define M2P_PPALLOC 0x08 -#define M2P_STATUS 0x0c -#define M2P_REMAIN 0x14 -#define M2P_MAXCNT0 0x20 -#define M2P_BASE0 0x24 -#define M2P_MAXCNT1 0x30 -#define M2P_BASE1 0x34 - -#define STATE_IDLE 0 /* Channel is inactive. */ -#define STATE_STALL 1 /* Channel is active, no buffers pending. */ -#define STATE_ON 2 /* Channel is active, one buffer pending. */ -#define STATE_NEXT 3 /* Channel is active, two buffers pending. */ - -struct m2p_channel { - char *name; - void __iomem *base; - int irq; - - struct clk *clk; - spinlock_t lock; - - void *client; - unsigned next_slot:1; - struct ep93xx_dma_buffer *buffer_xfer; - struct ep93xx_dma_buffer *buffer_next; - struct list_head buffers_pending; -}; - -static struct m2p_channel m2p_rx[] = { - {"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1}, - {"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3}, - {"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5}, - {"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7}, - {"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9}, - {NULL}, -}; - -static struct m2p_channel m2p_tx[] = { - {"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0}, - {"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2}, - {"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4}, - {"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6}, - {"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8}, - {NULL}, -}; - -static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf) -{ - if (ch->next_slot == 0) { - writel(buf->size, ch->base + M2P_MAXCNT0); - writel(buf->bus_addr, ch->base + M2P_BASE0); - } else { - writel(buf->size, ch->base + M2P_MAXCNT1); - writel(buf->bus_addr, ch->base + M2P_BASE1); - } - ch->next_slot ^= 1; -} - -static void choose_buffer_xfer(struct m2p_channel *ch) -{ - struct ep93xx_dma_buffer *buf; - - ch->buffer_xfer = NULL; - if (!list_empty(&ch->buffers_pending)) { - buf = list_entry(ch->buffers_pending.next, - struct ep93xx_dma_buffer, list); - list_del(&buf->list); - feed_buf(ch, buf); - ch->buffer_xfer = buf; - } -} - -static void choose_buffer_next(struct m2p_channel *ch) -{ - struct ep93xx_dma_buffer *buf; - - ch->buffer_next = NULL; - if (!list_empty(&ch->buffers_pending)) { - buf = list_entry(ch->buffers_pending.next, - struct ep93xx_dma_buffer, list); - list_del(&buf->list); - feed_buf(ch, buf); - ch->buffer_next = buf; - } -} - -static inline void m2p_set_control(struct m2p_channel *ch, u32 v) -{ - /* - * The control register must be read immediately after being written so - * that the internal state machine is correctly updated. See the ep93xx - * users' guide for details. 
- */ - writel(v, ch->base + M2P_CONTROL); - readl(ch->base + M2P_CONTROL); -} - -static inline int m2p_channel_state(struct m2p_channel *ch) -{ - return (readl(ch->base + M2P_STATUS) >> 4) & 0x3; -} - -static irqreturn_t m2p_irq(int irq, void *dev_id) -{ - struct m2p_channel *ch = dev_id; - struct ep93xx_dma_m2p_client *cl; - u32 irq_status, v; - int error = 0; - - cl = ch->client; - - spin_lock(&ch->lock); - irq_status = readl(ch->base + M2P_INTERRUPT); - - if (irq_status & M2P_INTERRUPT_ERROR) { - writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT); - error = 1; - } - - if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) { - spin_unlock(&ch->lock); - return IRQ_NONE; - } - - switch (m2p_channel_state(ch)) { - case STATE_IDLE: - pr_crit("dma interrupt without a dma buffer\n"); - BUG(); - break; - - case STATE_STALL: - cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error); - if (ch->buffer_next != NULL) { - cl->buffer_finished(cl->cookie, ch->buffer_next, - 0, error); - } - choose_buffer_xfer(ch); - choose_buffer_next(ch); - if (ch->buffer_xfer != NULL) - cl->buffer_started(cl->cookie, ch->buffer_xfer); - break; - - case STATE_ON: - cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error); - ch->buffer_xfer = ch->buffer_next; - choose_buffer_next(ch); - cl->buffer_started(cl->cookie, ch->buffer_xfer); - break; - - case STATE_NEXT: - pr_crit("dma interrupt while next\n"); - BUG(); - break; - } - - v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN | - M2P_CONTROL_NFB_IRQ_EN); - if (ch->buffer_xfer != NULL) - v |= M2P_CONTROL_STALL_IRQ_EN; - if (ch->buffer_next != NULL) - v |= M2P_CONTROL_NFB_IRQ_EN; - m2p_set_control(ch, v); - - spin_unlock(&ch->lock); - return IRQ_HANDLED; -} - -static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch; - int i; - - if (cl->flags & EP93XX_DMA_M2P_RX) - ch = m2p_rx; - else - ch = m2p_tx; - - for (i = 0; ch[i].base; i++) { - struct ep93xx_dma_m2p_client *client; - - client = ch[i].client; - if (client != NULL) { - int port; - - port = cl->flags & EP93XX_DMA_M2P_PORT_MASK; - if (port == (client->flags & - EP93XX_DMA_M2P_PORT_MASK)) { - pr_warning("DMA channel already used by %s\n", - cl->name ? : "unknown client"); - return ERR_PTR(-EBUSY); - } - } - } - - for (i = 0; ch[i].base; i++) { - if (ch[i].client == NULL) - return ch + i; - } - - pr_warning("No free DMA channel for %s\n", - cl->name ? : "unknown client"); - return ERR_PTR(-ENODEV); -} - -static void channel_enable(struct m2p_channel *ch) -{ - struct ep93xx_dma_m2p_client *cl = ch->client; - u32 v; - - clk_enable(ch->clk); - - v = cl->flags & EP93XX_DMA_M2P_PORT_MASK; - writel(v, ch->base + M2P_PPALLOC); - - v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK; - v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN; - m2p_set_control(ch, v); -} - -static void channel_disable(struct m2p_channel *ch) -{ - u32 v; - - v = readl(ch->base + M2P_CONTROL); - v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN); - m2p_set_control(ch, v); - - while (m2p_channel_state(ch) >= STATE_ON) - cpu_relax(); - - m2p_set_control(ch, 0x0); - - while (m2p_channel_state(ch) == STATE_STALL) - cpu_relax(); - - clk_disable(ch->clk); -} - -int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch; - int err; - - ch = find_free_channel(cl); - if (IS_ERR(ch)) - return PTR_ERR(ch); - - err = request_irq(ch->irq, m2p_irq, 0, cl->name ? 
: "dma-m2p", ch); - if (err) - return err; - - ch->client = cl; - ch->next_slot = 0; - ch->buffer_xfer = NULL; - ch->buffer_next = NULL; - INIT_LIST_HEAD(&ch->buffers_pending); - - cl->channel = ch; - - channel_enable(ch); - - return 0; -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register); - -void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch = cl->channel; - - channel_disable(ch); - free_irq(ch->irq, ch); - ch->client = NULL; -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister); - -void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl, - struct ep93xx_dma_buffer *buf) -{ - struct m2p_channel *ch = cl->channel; - unsigned long flags; - u32 v; - - spin_lock_irqsave(&ch->lock, flags); - v = readl(ch->base + M2P_CONTROL); - if (ch->buffer_xfer == NULL) { - ch->buffer_xfer = buf; - feed_buf(ch, buf); - cl->buffer_started(cl->cookie, buf); - - v |= M2P_CONTROL_STALL_IRQ_EN; - m2p_set_control(ch, v); - - } else if (ch->buffer_next == NULL) { - ch->buffer_next = buf; - feed_buf(ch, buf); - - v |= M2P_CONTROL_NFB_IRQ_EN; - m2p_set_control(ch, v); - } else { - list_add_tail(&buf->list, &ch->buffers_pending); - } - spin_unlock_irqrestore(&ch->lock, flags); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit); - -void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl, - struct ep93xx_dma_buffer *buf) -{ - struct m2p_channel *ch = cl->channel; - - list_add_tail(&buf->list, &ch->buffers_pending); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive); - -void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch = cl->channel; - - channel_disable(ch); - ch->next_slot = 0; - ch->buffer_xfer = NULL; - ch->buffer_next = NULL; - INIT_LIST_HEAD(&ch->buffers_pending); - channel_enable(ch); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush); - -static int init_channel(struct m2p_channel *ch) -{ - ch->clk = clk_get(NULL, ch->name); - if (IS_ERR(ch->clk)) - return PTR_ERR(ch->clk); - - spin_lock_init(&ch->lock); - ch->client = NULL; - - return 0; -} - -static int __init ep93xx_dma_m2p_init(void) -{ - int i; - int ret; - - for (i = 0; m2p_rx[i].base; i++) { - ret = init_channel(m2p_rx + i); - if (ret) - return ret; - } - - for (i = 0; m2p_tx[i].base; i++) { - ret = init_channel(m2p_tx + i); - if (ret) - return ret; - } - - pr_info("M2P DMA subsystem initialized\n"); - return 0; -} -arch_initcall(ep93xx_dma_m2p_init); diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h index 6e7049a796a4..46d4d876e6fb 100644 --- a/arch/arm/mach-ep93xx/include/mach/dma.h +++ b/arch/arm/mach-ep93xx/include/mach/dma.h @@ -1,153 +1,10 @@ -/** - * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine - * - * The EP93xx DMA M2P subsystem handles DMA transfers between memory and - * peripherals. DMA M2P channels are available for audio, UARTs and IrDA. - * See chapter 10 of the EP93xx users guide for full details on the DMA M2P - * engine. - * - * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code. 
- * - */ - #ifndef __ASM_ARCH_DMA_H #define __ASM_ARCH_DMA_H -#include #include #include #include -/** - * struct ep93xx_dma_buffer - Information about a buffer to be transferred - * using the DMA M2P engine - * - * @list: Entry in DMA buffer list - * @bus_addr: Physical address of the buffer - * @size: Size of the buffer in bytes - */ -struct ep93xx_dma_buffer { - struct list_head list; - u32 bus_addr; - u16 size; -}; - -/** - * struct ep93xx_dma_m2p_client - Information about a DMA M2P client - * - * @name: Unique name for this client - * @flags: Client flags - * @cookie: User data to pass to callback functions - * @buffer_started: Non NULL function to call when a transfer is started. - * The arguments are the user data cookie and the DMA - * buffer which is starting. - * @buffer_finished: Non NULL function to call when a transfer is completed. - * The arguments are the user data cookie, the DMA buffer - * which has completed, and a boolean flag indicating if - * the transfer had an error. - */ -struct ep93xx_dma_m2p_client { - char *name; - u8 flags; - void *cookie; - void (*buffer_started)(void *cookie, - struct ep93xx_dma_buffer *buf); - void (*buffer_finished)(void *cookie, - struct ep93xx_dma_buffer *buf, - int bytes, int error); - - /* private: Internal use only */ - void *channel; -}; - -/* DMA M2P ports */ -#define EP93XX_DMA_M2P_PORT_I2S1 0x00 -#define EP93XX_DMA_M2P_PORT_I2S2 0x01 -#define EP93XX_DMA_M2P_PORT_AAC1 0x02 -#define EP93XX_DMA_M2P_PORT_AAC2 0x03 -#define EP93XX_DMA_M2P_PORT_AAC3 0x04 -#define EP93XX_DMA_M2P_PORT_I2S3 0x05 -#define EP93XX_DMA_M2P_PORT_UART1 0x06 -#define EP93XX_DMA_M2P_PORT_UART2 0x07 -#define EP93XX_DMA_M2P_PORT_UART3 0x08 -#define EP93XX_DMA_M2P_PORT_IRDA 0x09 -#define EP93XX_DMA_M2P_PORT_MASK 0x0f - -/* DMA M2P client flags */ -#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */ -#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */ - -/* - * DMA M2P client error handling flags. See the EP93xx users guide - * documentation on the DMA M2P CONTROL register for more details - */ -#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */ -#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */ -#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */ - -/** - * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P - * subsystem - * - * @m2p: Client information to register - * returns 0 on success - * - * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA - * client - */ -int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p); - -/** - * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P - * subsystem - * - * @m2p: Client to unregister - * - * Any transfers currently in progress will be completed in hardware, but - * ignored in software. - */ -void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p); - -/** - * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer - * - * @m2p: DMA Client to submit the transfer on - * @buf: DMA Buffer to submit - * - * If the current or next transfer positions are free on the M2P client then - * the transfer is started immediately. If not, the transfer is added to the - * list of pending transfers. This function must not be called from the - * buffer_finished callback for an M2P channel. 
- * - */ -void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p, - struct ep93xx_dma_buffer *buf); - -/** - * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list - * for an M2P channel - * - * @m2p: DMA Client to submit the transfer on - * @buf: DMA Buffer to submit - * - * This function must only be called from the buffer_finished callback for an - * M2P channel. It is commonly used to add the next transfer in a chained list - * of DMA transfers. - */ -void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, - struct ep93xx_dma_buffer *buf); - -/** - * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client - * - * @m2p: DMA client to flush transfers on - * - * Any transfers currently in progress will be completed in hardware, but - * ignored in software. - * - */ -void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p); - /* * M2P channels. * -- cgit v1.2.3 From d41071575b0b20b780bb0e8e7e70c62c1b07a883 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Sun, 29 May 2011 13:10:06 +0300 Subject: spi/ep93xx: add DMA support This patch adds DMA support for the EP93xx SPI driver. By default the DMA is not enabled but it can be enabled by setting ep93xx_spi_info.use_dma to true in board configuration file. Note that the SPI driver still uses PIO for small transfers (<= 8 bytes) for performance reasons. Signed-off-by: Mika Westerberg Acked-by: H Hartley Sweeten Cc: Grant Likely Acked-by: Grant Likely Signed-off-by: Vinod Koul --- Documentation/spi/ep93xx_spi | 10 + arch/arm/mach-ep93xx/core.c | 6 +- arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h | 2 + drivers/spi/ep93xx_spi.c | 303 ++++++++++++++++++++++++- 4 files changed, 308 insertions(+), 13 deletions(-) diff --git a/Documentation/spi/ep93xx_spi b/Documentation/spi/ep93xx_spi index 6325f5b48635..d8eb01c15db1 100644 --- a/Documentation/spi/ep93xx_spi +++ b/Documentation/spi/ep93xx_spi @@ -88,6 +88,16 @@ static void __init ts72xx_init_machine(void) ARRAY_SIZE(ts72xx_spi_devices)); } +The driver can use DMA for the transfers also. In this case ts72xx_spi_info +becomes: + +static struct ep93xx_spi_info ts72xx_spi_info = { + .num_chipselect = ARRAY_SIZE(ts72xx_spi_devices), + .use_dma = true; +}; + +Note that CONFIG_EP93XX_DMA should be enabled as well. + Thanks to ========= Martin Guy, H. 
Hartley Sweeten and others who helped me during development of diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index 82079545adc4..cc9f1d4b104d 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -488,11 +488,15 @@ static struct resource ep93xx_spi_resources[] = { }, }; +static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32); + static struct platform_device ep93xx_spi_device = { .name = "ep93xx-spi", .id = 0, .dev = { - .platform_data = &ep93xx_spi_master_data, + .platform_data = &ep93xx_spi_master_data, + .coherent_dma_mask = DMA_BIT_MASK(32), + .dma_mask = &ep93xx_spi_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_spi_resources), .resource = ep93xx_spi_resources, diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h index 0a37961b3453..9bb63ac13f04 100644 --- a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h +++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h @@ -7,9 +7,11 @@ struct spi_device; * struct ep93xx_spi_info - EP93xx specific SPI descriptor * @num_chipselect: number of chip selects on this board, must be * at least one + * @use_dma: use DMA for the transfers */ struct ep93xx_spi_info { int num_chipselect; + bool use_dma; }; /** diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/ep93xx_spi.c index d3570071e98f..1cf645479bfe 100644 --- a/drivers/spi/ep93xx_spi.c +++ b/drivers/spi/ep93xx_spi.c @@ -1,7 +1,7 @@ /* * Driver for Cirrus Logic EP93xx SPI controller. * - * Copyright (c) 2010 Mika Westerberg + * Copyright (C) 2010-2011 Mika Westerberg * * Explicit FIFO handling code was inspired by amba-pl022 driver. * @@ -21,13 +21,16 @@ #include #include #include +#include #include #include #include #include #include +#include #include +#include #include #define SSPCR0 0x0000 @@ -71,6 +74,7 @@ * @pdev: pointer to platform device * @clk: clock for the controller * @regs_base: pointer to ioremap()'d registers + * @sspdr_phys: physical address of the SSPDR register * @irq: IRQ number used by the driver * @min_rate: minimum clock rate (in Hz) supported by the controller * @max_rate: maximum clock rate (in Hz) supported by the controller @@ -84,6 +88,14 @@ * @rx: current byte in transfer to receive * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one * frame decreases this level and sending one frame increases it. + * @dma_rx: RX DMA channel + * @dma_tx: TX DMA channel + * @dma_rx_data: RX parameters passed to the DMA engine + * @dma_tx_data: TX parameters passed to the DMA engine + * @rx_sgt: sg table for RX transfers + * @tx_sgt: sg table for TX transfers + * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by + * the client * * This structure holds EP93xx SPI controller specific information. When * @running is %true, driver accepts transfer requests from protocol drivers. 
@@ -100,6 +112,7 @@ struct ep93xx_spi { const struct platform_device *pdev; struct clk *clk; void __iomem *regs_base; + unsigned long sspdr_phys; int irq; unsigned long min_rate; unsigned long max_rate; @@ -112,6 +125,13 @@ struct ep93xx_spi { size_t tx; size_t rx; size_t fifo_level; + struct dma_chan *dma_rx; + struct dma_chan *dma_tx; + struct ep93xx_dma_data dma_rx_data; + struct ep93xx_dma_data dma_tx_data; + struct sg_table rx_sgt; + struct sg_table tx_sgt; + void *zeropage; }; /** @@ -496,14 +516,195 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi) espi->fifo_level++; } - if (espi->rx == t->len) { - msg->actual_length += t->len; + if (espi->rx == t->len) return 0; - } return -EINPROGRESS; } +static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi) +{ + /* + * Now everything is set up for the current transfer. We prime the TX + * FIFO, enable interrupts, and wait for the transfer to complete. + */ + if (ep93xx_spi_read_write(espi)) { + ep93xx_spi_enable_interrupts(espi); + wait_for_completion(&espi->wait); + } +} + +/** + * ep93xx_spi_dma_prepare() - prepares a DMA transfer + * @espi: ep93xx SPI controller struct + * @dir: DMA transfer direction + * + * Function configures the DMA, maps the buffer and prepares the DMA + * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR + * in case of failure. + */ +static struct dma_async_tx_descriptor * +ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) +{ + struct spi_transfer *t = espi->current_msg->state; + struct dma_async_tx_descriptor *txd; + enum dma_slave_buswidth buswidth; + struct dma_slave_config conf; + struct scatterlist *sg; + struct sg_table *sgt; + struct dma_chan *chan; + const void *buf, *pbuf; + size_t len = t->len; + int i, ret, nents; + + if (bits_per_word(espi) > 8) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + + memset(&conf, 0, sizeof(conf)); + conf.direction = dir; + + if (dir == DMA_FROM_DEVICE) { + chan = espi->dma_rx; + buf = t->rx_buf; + sgt = &espi->rx_sgt; + + conf.src_addr = espi->sspdr_phys; + conf.src_addr_width = buswidth; + } else { + chan = espi->dma_tx; + buf = t->tx_buf; + sgt = &espi->tx_sgt; + + conf.dst_addr = espi->sspdr_phys; + conf.dst_addr_width = buswidth; + } + + ret = dmaengine_slave_config(chan, &conf); + if (ret) + return ERR_PTR(ret); + + /* + * We need to split the transfer into PAGE_SIZE'd chunks. This is + * because we are using @espi->zeropage to provide a zero RX buffer + * for the TX transfers and we have only allocated one page for that. + * + * For performance reasons we allocate a new sg_table only when + * needed. Otherwise we will re-use the current one. Eventually the + * last sg_table is released in ep93xx_spi_release_dma(). 
+ */ + + nents = DIV_ROUND_UP(len, PAGE_SIZE); + if (nents != sgt->nents) { + sg_free_table(sgt); + + ret = sg_alloc_table(sgt, nents, GFP_KERNEL); + if (ret) + return ERR_PTR(ret); + } + + pbuf = buf; + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes = min_t(size_t, len, PAGE_SIZE); + + if (buf) { + sg_set_page(sg, virt_to_page(pbuf), bytes, + offset_in_page(pbuf)); + } else { + sg_set_page(sg, virt_to_page(espi->zeropage), + bytes, 0); + } + + pbuf += bytes; + len -= bytes; + } + + if (WARN_ON(len)) { + dev_warn(&espi->pdev->dev, "len = %d expected 0!", len); + return ERR_PTR(-EINVAL); + } + + nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); + if (!nents) + return ERR_PTR(-ENOMEM); + + txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents, + dir, DMA_CTRL_ACK); + if (!txd) { + dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); + return ERR_PTR(-ENOMEM); + } + return txd; +} + +/** + * ep93xx_spi_dma_finish() - finishes with a DMA transfer + * @espi: ep93xx SPI controller struct + * @dir: DMA transfer direction + * + * Function finishes with the DMA transfer. After this, the DMA buffer is + * unmapped. + */ +static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi, + enum dma_data_direction dir) +{ + struct dma_chan *chan; + struct sg_table *sgt; + + if (dir == DMA_FROM_DEVICE) { + chan = espi->dma_rx; + sgt = &espi->rx_sgt; + } else { + chan = espi->dma_tx; + sgt = &espi->tx_sgt; + } + + dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); +} + +static void ep93xx_spi_dma_callback(void *callback_param) +{ + complete(callback_param); +} + +static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi) +{ + struct spi_message *msg = espi->current_msg; + struct dma_async_tx_descriptor *rxd, *txd; + + rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE); + if (IS_ERR(rxd)) { + dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); + msg->status = PTR_ERR(rxd); + return; + } + + txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE); + if (IS_ERR(txd)) { + ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); + dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); + msg->status = PTR_ERR(txd); + return; + } + + /* We are ready when RX is done */ + rxd->callback = ep93xx_spi_dma_callback; + rxd->callback_param = &espi->wait; + + /* Now submit both descriptors and wait while they finish */ + dmaengine_submit(rxd); + dmaengine_submit(txd); + + dma_async_issue_pending(espi->dma_rx); + dma_async_issue_pending(espi->dma_tx); + + wait_for_completion(&espi->wait); + + ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE); + ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); +} + /** * ep93xx_spi_process_transfer() - processes one SPI transfer * @espi: ep93xx SPI controller struct @@ -556,13 +757,14 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, espi->tx = 0; /* - * Now everything is set up for the current transfer. We prime the TX - * FIFO, enable interrupts, and wait for the transfer to complete. + * There is no point of setting up DMA for the transfers which will + * fit into the FIFO and can be transferred with a single interrupt. + * So in these cases we will be using PIO and don't bother for DMA. 
*/ - if (ep93xx_spi_read_write(espi)) { - ep93xx_spi_enable_interrupts(espi); - wait_for_completion(&espi->wait); - } + if (espi->dma_rx && t->len > SPI_FIFO_SIZE) + ep93xx_spi_dma_transfer(espi); + else + ep93xx_spi_pio_transfer(espi); /* * In case of error during transmit, we bail out from processing @@ -571,6 +773,8 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, if (msg->status) return; + msg->actual_length += t->len; + /* * After this transfer is finished, perform any possible * post-transfer actions requested by the protocol driver. @@ -752,6 +956,75 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param) +{ + if (ep93xx_dma_chan_is_m2p(chan)) + return false; + + chan->private = filter_param; + return true; +} + +static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi) +{ + dma_cap_mask_t mask; + int ret; + + espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL); + if (!espi->zeropage) + return -ENOMEM; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + espi->dma_rx_data.port = EP93XX_DMA_SSP; + espi->dma_rx_data.direction = DMA_FROM_DEVICE; + espi->dma_rx_data.name = "ep93xx-spi-rx"; + + espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter, + &espi->dma_rx_data); + if (!espi->dma_rx) { + ret = -ENODEV; + goto fail_free_page; + } + + espi->dma_tx_data.port = EP93XX_DMA_SSP; + espi->dma_tx_data.direction = DMA_TO_DEVICE; + espi->dma_tx_data.name = "ep93xx-spi-tx"; + + espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter, + &espi->dma_tx_data); + if (!espi->dma_tx) { + ret = -ENODEV; + goto fail_release_rx; + } + + return 0; + +fail_release_rx: + dma_release_channel(espi->dma_rx); + espi->dma_rx = NULL; +fail_free_page: + free_page((unsigned long)espi->zeropage); + + return ret; +} + +static void ep93xx_spi_release_dma(struct ep93xx_spi *espi) +{ + if (espi->dma_rx) { + dma_release_channel(espi->dma_rx); + sg_free_table(&espi->rx_sgt); + } + if (espi->dma_tx) { + dma_release_channel(espi->dma_tx); + sg_free_table(&espi->tx_sgt); + } + + if (espi->zeropage) + free_page((unsigned long)espi->zeropage); +} + static int __init ep93xx_spi_probe(struct platform_device *pdev) { struct spi_master *master; @@ -818,6 +1091,7 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev) goto fail_put_clock; } + espi->sspdr_phys = res->start + SSPDR; espi->regs_base = ioremap(res->start, resource_size(res)); if (!espi->regs_base) { dev_err(&pdev->dev, "failed to map resources\n"); @@ -832,10 +1106,13 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev) goto fail_unmap_regs; } + if (info->use_dma && ep93xx_spi_setup_dma(espi)) + dev_warn(&pdev->dev, "DMA setup failed. 
Falling back to PIO\n"); + espi->wq = create_singlethread_workqueue("ep93xx_spid"); if (!espi->wq) { dev_err(&pdev->dev, "unable to create workqueue\n"); - goto fail_free_irq; + goto fail_free_dma; } INIT_WORK(&espi->msg_work, ep93xx_spi_work); INIT_LIST_HEAD(&espi->msg_queue); @@ -857,7 +1134,8 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev) fail_free_queue: destroy_workqueue(espi->wq); -fail_free_irq: +fail_free_dma: + ep93xx_spi_release_dma(espi); free_irq(espi->irq, espi); fail_unmap_regs: iounmap(espi->regs_base); @@ -901,6 +1179,7 @@ static int __exit ep93xx_spi_remove(struct platform_device *pdev) } spin_unlock_irq(&espi->lock); + ep93xx_spi_release_dma(espi); free_irq(espi->irq, espi); iounmap(espi->regs_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- cgit v1.2.3 From 7dab35c0c01c5d960d7b551a607270adccfadb42 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Wed, 1 Jun 2011 15:10:30 -0700 Subject: dma: ipu_idmac.c: use resource_size in ioremap Signed-off-by: H Hartley Sweeten Cc: Dan Williams Cc: Vinod Koul Cc: Guennadi Liakhovetski Cc: Anatolij Gustschin Signed-off-by: Vinod Koul --- drivers/dma/ipu/ipu_idmac.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index c1a125e7d1df..25447a8ca282 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1705,16 +1705,14 @@ static int __init ipu_probe(struct platform_device *pdev) ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); /* Remap IPU common registers */ - ipu_data.reg_ipu = ioremap(mem_ipu->start, - mem_ipu->end - mem_ipu->start + 1); + ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); if (!ipu_data.reg_ipu) { ret = -ENOMEM; goto err_ioremap_ipu; } /* Remap Image Converter and Image DMA Controller registers */ - ipu_data.reg_ic = ioremap(mem_ic->start, - mem_ic->end - mem_ic->start + 1); + ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic)); if (!ipu_data.reg_ic) { ret = -ENOMEM; goto err_ioremap_ic; -- cgit v1.2.3 From 114df7d66efd5c23561782f38e97c48fb30d4f5d Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Wed, 1 Jun 2011 15:16:09 -0700 Subject: dma: at_hdmac.c: use resource_size Signed-off-by: H Hartley Sweeten Cc: Dan Williams Cc: Vinod Koul Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 36144f88d718..6a483eac7b3f 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev) atdma->dma_common.cap_mask = pdata->cap_mask; atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; - size = io->end - io->start + 1; + size = resource_size(io); if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { err = -EBUSY; goto err_kfree; @@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev) atdma->regs = NULL; io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(io->start, io->end - io->start + 1); + release_mem_region(io->start, resource_size(io)); kfree(atdma); -- cgit v1.2.3 From d8fc320079b46cf462897148f48d4a63f37f56ce Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Tue, 31 May 2011 12:30:26 -0400 Subject: kconfig: annotate non-trivial fall-trough Signed-off-by: Arnaud Lacombe --- scripts/kconfig/conf.c | 4 ++++ scripts/kconfig/confdata.c | 5 +++++ scripts/kconfig/gconf.c | 1 + 
scripts/kconfig/mconf.c | 1 + 4 files changed, 11 insertions(+) diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 006ad817cd5f..6d2e936f3b67 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c @@ -106,6 +106,7 @@ static int conf_askvalue(struct symbol *sym, const char *def) return 0; } check_stdin(); + /* fall through */ case oldaskconfig: fflush(stdout); xfgets(line, 128, stdin); @@ -150,6 +151,7 @@ static int conf_string(struct menu *menu) def = NULL; break; } + /* fall through */ default: line[strlen(line)-1] = 0; def = line; @@ -304,6 +306,7 @@ static int conf_choice(struct menu *menu) break; } check_stdin(); + /* fall through */ case oldaskconfig: fflush(stdout); xfgets(line, 128, stdin); @@ -369,6 +372,7 @@ static void conf(struct menu *menu) check_conf(menu); return; } + /* fall through */ case P_COMMENT: prompt = menu_get_prompt(menu); if (prompt) diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 2bafd9a7c8da..0a1ccc397b86 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -128,6 +128,7 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p) sym->flags |= def_flags; break; } + /* fall through */ case S_BOOLEAN: if (p[0] == 'y') { sym->def[def].tri = yes; @@ -148,6 +149,7 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p) sym->type = S_STRING; goto done; } + /* fall through */ case S_STRING: if (*p++ != '"') break; @@ -162,6 +164,7 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p) conf_warning("invalid string found"); return 1; } + /* fall through */ case S_INT: case S_HEX: done: @@ -237,6 +240,7 @@ load: case S_STRING: if (sym->def[def].val) free(sym->def[def].val); + /* fall through */ default: sym->def[def].val = NULL; sym->def[def].tri = no; @@ -363,6 +367,7 @@ int conf_read(const char *name) break; if (!sym_is_choice(sym)) goto sym_ok; + /* fall through */ default: if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val)) goto sym_ok; diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c index a11d5f7b9eeb..c406bde28dbe 100644 --- a/scripts/kconfig/gconf.c +++ b/scripts/kconfig/gconf.c @@ -1172,6 +1172,7 @@ static gchar **fill_row(struct menu *menu) row[COL_BTNVIS] = GINT_TO_POINTER(TRUE); if (sym_is_choice(sym)) break; + /* fall through */ case S_TRISTATE: val = sym_get_tristate_value(sym); switch (val) { diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index d433c7a24745..87001e62cefd 100644 --- a/scripts/kconfig/mconf.c +++ b/scripts/kconfig/mconf.c @@ -845,6 +845,7 @@ int main(int ac, char **av) "\n\n")); return 1; } + /* fall through */ case -1: printf(_("\n\n" "*** End of the configuration.\n" -- cgit v1.2.3 From 75f1468beaeca690e139b4e1bcd19aa20973fca9 Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Tue, 31 May 2011 12:31:57 -0400 Subject: kconfig: fix return code for invalid boolean symbol in conf_set_sym_val() Cc: Sam Ravnborg Signed-off-by: Arnaud Lacombe --- scripts/kconfig/confdata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 0a1ccc397b86..4e878dda1add 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -141,7 +141,7 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p) break; } conf_warning("symbol value '%s' invalid for %s", p, sym->name); - break; + return 1; case S_OTHER: if (*p != '"') { for (p2 = p; *p2 && !isspace(*p2); p2++) -- 
cgit v1.2.3 From 10a4b2772e7643247ddb5316c644f1fe7c4dccca Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Wed, 1 Jun 2011 16:00:46 -0400 Subject: kconfig: add missing inclusion This header is needed when using va_{start,end,copy}(3) functions family. Signed-off-by: Arnaud Lacombe --- scripts/kconfig/confdata.c | 1 + scripts/kconfig/menu.c | 1 + scripts/kconfig/util.c | 1 + 3 files changed, 3 insertions(+) diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 4e878dda1add..ca16ab4367dd 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index 5fdf10dc1d8a..d64108bc0cf9 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c @@ -3,6 +3,7 @@ * Released under the terms of the GNU GPL v2.0. */ +#include #include #include diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c index 6330cc871a47..e07d557bb3a9 100644 --- a/scripts/kconfig/util.c +++ b/scripts/kconfig/util.c @@ -5,6 +5,7 @@ * Released under the terms of the GNU GPL v2.0. */ +#include #include #include "lkc.h" -- cgit v1.2.3 From dd003306a4fae241e1f9cac5bef2c8f2afeb0446 Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Wed, 1 Jun 2011 16:06:22 -0400 Subject: kconfig: add missing inclusion This header is needed when using isspace(3) function family. Signed-off-by: Arnaud Lacombe --- scripts/kconfig/menu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index d64108bc0cf9..24547fe0fe9e 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c @@ -3,6 +3,7 @@ * Released under the terms of the GNU GPL v2.0. */ +#include #include #include #include -- cgit v1.2.3 From 02d95c96c3d29df0a1d3bb515692ad4894030729 Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Wed, 1 Jun 2011 16:08:14 -0400 Subject: kconfig: add missing inclusion This header is needed when using {m,re}alloc(3) and free(3) function family. Signed-off-by: Arnaud Lacombe --- scripts/kconfig/util.c | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c index e07d557bb3a9..d0b8b2318e48 100644 --- a/scripts/kconfig/util.c +++ b/scripts/kconfig/util.c @@ -6,6 +6,7 @@ */ #include +#include #include #include "lkc.h" -- cgit v1.2.3 From 84250386efa581fdf5578b68b9dd6b79998ac48d Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Wed, 1 Jun 2011 16:15:52 -0400 Subject: kconfig: nuke reference to SWIG SWIG is not used (yet?) to create kconfig binding, so there is no point referencing it. Signed-off-by: Arnaud Lacombe --- scripts/kconfig/expr.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h index 16bfae2d3217..80fce57080cc 100644 --- a/scripts/kconfig/expr.h +++ b/scripts/kconfig/expr.h @@ -172,8 +172,6 @@ struct menu { #define MENU_CHANGED 0x0001 #define MENU_ROOT 0x0002 -#ifndef SWIG - extern struct file *file_list; extern struct file *current_file; struct file *lookup_file(const char *name); @@ -218,7 +216,6 @@ static inline int expr_is_no(struct expr *e) { return e && (e->type == E_SYMBOL && e->left.sym == &symbol_no); } -#endif #ifdef __cplusplus } -- cgit v1.2.3 From 5a6f8d2bd9e3392569ed6f29ea4d7210652f929b Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Wed, 1 Jun 2011 16:14:47 -0400 Subject: kconfig: nuke LKC_DIRECT_LINK cruft This interface is not (and has never been ?) used by any frontend, just get rid of it. 
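For context, the P() indirection being deleted expanded each prototype in lkc_proto.h two ways: with LKC_DIRECT_LINK it became a plain extern that frontends called directly, and without it it became a function pointer that kconfig_load() filled in by dlopen()ing libkconfig.so, helped by the sed-generated lkc_defs.h. Roughly, for one symbol (sym_calc_value() is used purely as an example here):

	/* LKC_DIRECT_LINK: the only variant any frontend ever built */
	extern void sym_calc_value(struct symbol *sym);

	/* dlopen() variant being removed: call through a runtime-resolved pointer */
	extern void (*sym_calc_value_p)(struct symbol *sym);
	#define sym_calc_value (*sym_calc_value_p)	/* from generated lkc_defs.h */

Dropping the second variant is what lets the Makefile changes below stop generating lkc_defs.h, delete kconfig_load.c and drop the -ldl link for qconf and gconf.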
Signed-off-by: Arnaud Lacombe --- scripts/kconfig/Makefile | 24 ++++++++---------------- scripts/kconfig/conf.c | 1 - scripts/kconfig/confdata.c | 1 - scripts/kconfig/expr.c | 1 - scripts/kconfig/gconf.c | 4 ---- scripts/kconfig/kconfig_load.c | 35 ----------------------------------- scripts/kconfig/kxgettext.c | 1 - scripts/kconfig/lex.zconf.c_shipped | 1 - scripts/kconfig/lkc.h | 5 ----- scripts/kconfig/mconf.c | 1 - scripts/kconfig/menu.c | 1 - scripts/kconfig/nconf.c | 2 +- scripts/kconfig/qconf.cc | 4 ---- scripts/kconfig/symbol.c | 1 - scripts/kconfig/zconf.l | 1 - scripts/kconfig/zconf.tab.c_shipped | 1 - scripts/kconfig/zconf.y | 1 - 17 files changed, 9 insertions(+), 76 deletions(-) delete mode 100644 scripts/kconfig/kconfig_load.c diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index faa9a4701b6f..0460ac3b3588 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -170,8 +170,8 @@ mconf-objs := mconf.o zconf.tab.o $(lxdialog) nconf-objs := nconf.o zconf.tab.o nconf.gui.o kxgettext-objs := kxgettext.o zconf.tab.o qconf-cxxobjs := qconf.o -qconf-objs := kconfig_load.o zconf.tab.o -gconf-objs := gconf.o kconfig_load.o zconf.tab.o +qconf-objs := zconf.tab.o +gconf-objs := gconf.o zconf.tab.o hostprogs-y := conf @@ -203,7 +203,7 @@ ifeq ($(gconf-target),1) hostprogs-y += gconf endif -clean-files := lkc_defs.h qconf.moc .tmp_qtcheck .tmp_gtkcheck +clean-files := qconf.moc .tmp_qtcheck .tmp_gtkcheck clean-files += zconf.tab.c lex.zconf.c zconf.hash.c gconf.glade.h clean-files += mconf qconf gconf nconf clean-files += config.pot linux.pot @@ -223,12 +223,11 @@ HOST_EXTRACFLAGS += $(shell $(CONFIG_SHELL) $(srctree)/$(src)/check.sh $(HOSTCC) HOSTCFLAGS_lex.zconf.o := -I$(src) HOSTCFLAGS_zconf.tab.o := -I$(src) -HOSTLOADLIBES_qconf = $(KC_QT_LIBS) -ldl -HOSTCXXFLAGS_qconf.o = $(KC_QT_CFLAGS) -D LKC_DIRECT_LINK +HOSTLOADLIBES_qconf = $(KC_QT_LIBS) +HOSTCXXFLAGS_qconf.o = $(KC_QT_CFLAGS) -HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0` -ldl -HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \ - -D LKC_DIRECT_LINK +HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0` +HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` HOSTLOADLIBES_mconf = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ldflags $(HOSTCC)) @@ -318,18 +317,11 @@ endif $(obj)/zconf.tab.o: $(obj)/lex.zconf.c $(obj)/zconf.hash.c -$(obj)/kconfig_load.o: $(obj)/lkc_defs.h - -$(obj)/qconf.o: $(obj)/qconf.moc $(obj)/lkc_defs.h - -$(obj)/gconf.o: $(obj)/lkc_defs.h +$(obj)/qconf.o: $(obj)/qconf.moc $(obj)/%.moc: $(src)/%.h $(KC_QT_MOC) -i $< -o $@ -$(obj)/lkc_defs.h: $(src)/lkc_proto.h - $(Q)sed < $< > $@ 's/P(\([^,]*\),.*/#define \1 (\*\1_p)/' - # Extract gconf menu items for I18N support $(obj)/gconf.glade.h: $(obj)/gconf.glade $(Q)intltool-extract --type=gettext/glade --srcdir=$(srctree) \ diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 6d2e936f3b67..08c05bcc82c9 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c @@ -14,7 +14,6 @@ #include #include -#define LKC_DIRECT_LINK #include "lkc.h" static void conf(struct menu *menu); diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index ca16ab4367dd..c257bb0bf90a 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -14,7 +14,6 @@ #include #include -#define LKC_DIRECT_LINK #include "lkc.h" static void conf_warning(const char *fmt, ...) 
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c index 001003452f68..792c62ed9d63 100644 --- a/scripts/kconfig/expr.c +++ b/scripts/kconfig/expr.c @@ -7,7 +7,6 @@ #include #include -#define LKC_DIRECT_LINK #include "lkc.h" #define DEBUG_EXPR 0 diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c index c406bde28dbe..9258957a0913 100644 --- a/scripts/kconfig/gconf.c +++ b/scripts/kconfig/gconf.c @@ -1507,10 +1507,6 @@ int main(int ac, char *av[]) char *env; gchar *glade_file; -#ifndef LKC_DIRECT_LINK - kconfig_load(); -#endif - bindtextdomain(PACKAGE, LOCALEDIR); bind_textdomain_codeset(PACKAGE, "UTF-8"); textdomain(PACKAGE); diff --git a/scripts/kconfig/kconfig_load.c b/scripts/kconfig/kconfig_load.c deleted file mode 100644 index dbdcaad82325..000000000000 --- a/scripts/kconfig/kconfig_load.c +++ /dev/null @@ -1,35 +0,0 @@ -#include -#include -#include - -#include "lkc.h" - -#define P(name,type,arg) type (*name ## _p) arg -#include "lkc_proto.h" -#undef P - -void kconfig_load(void) -{ - void *handle; - char *error; - - handle = dlopen("./libkconfig.so", RTLD_LAZY); - if (!handle) { - handle = dlopen("./scripts/kconfig/libkconfig.so", RTLD_LAZY); - if (!handle) { - fprintf(stderr, "%s\n", dlerror()); - exit(1); - } - } - -#define P(name,type,arg) \ -{ \ - name ## _p = dlsym(handle, #name); \ - if ((error = dlerror())) { \ - fprintf(stderr, "%s\n", error); \ - exit(1); \ - } \ -} -#include "lkc_proto.h" -#undef P -} diff --git a/scripts/kconfig/kxgettext.c b/scripts/kconfig/kxgettext.c index e9d8e791bf0d..2858738b22d5 100644 --- a/scripts/kconfig/kxgettext.c +++ b/scripts/kconfig/kxgettext.c @@ -7,7 +7,6 @@ #include #include -#define LKC_DIRECT_LINK #include "lkc.h" static char *escape(const char* text, char *bf, int len) diff --git a/scripts/kconfig/lex.zconf.c_shipped b/scripts/kconfig/lex.zconf.c_shipped index d9182916f724..dcea7a744317 100644 --- a/scripts/kconfig/lex.zconf.c_shipped +++ b/scripts/kconfig/lex.zconf.c_shipped @@ -785,7 +785,6 @@ char *zconftext; #include #include -#define LKC_DIRECT_LINK #include "lkc.h" #define START_STRSIZE 16 diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h index febf0c94d558..625ec69ebeee 100644 --- a/scripts/kconfig/lkc.h +++ b/scripts/kconfig/lkc.h @@ -21,12 +21,7 @@ static inline char *bind_textdomain_codeset(const char *dn, char *c) { return c; extern "C" { #endif -#ifdef LKC_DIRECT_LINK #define P(name,type,arg) extern type name arg -#else -#include "lkc_defs.h" -#define P(name,type,arg) extern type (*name ## _p) arg -#endif #include "lkc_proto.h" #undef P diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index 87001e62cefd..820d2b6800fb 100644 --- a/scripts/kconfig/mconf.c +++ b/scripts/kconfig/mconf.c @@ -18,7 +18,6 @@ #include #include -#define LKC_DIRECT_LINK #include "lkc.h" #include "lxdialog/dialog.h" diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index 24547fe0fe9e..aab5a1fee5a8 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c @@ -8,7 +8,6 @@ #include #include -#define LKC_DIRECT_LINK #include "lkc.h" static const char nohelp_text[] = N_( diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index 488dd7410787..da9f5c49d886 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c @@ -7,7 +7,7 @@ */ #define _GNU_SOURCE #include -#define LKC_DIRECT_LINK + #include "lkc.h" #include "nconf.h" #include diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index c2796b866f8f..31e01cdbfc6e 100644 --- a/scripts/kconfig/qconf.cc +++ 
b/scripts/kconfig/qconf.cc @@ -1745,10 +1745,6 @@ int main(int ac, char** av) bindtextdomain(PACKAGE, LOCALEDIR); textdomain(PACKAGE); -#ifndef LKC_DIRECT_LINK - kconfig_load(); -#endif - progname = av[0]; configApp = new QApplication(ac, av); if (ac > 1 && av[1][0] == '-') { diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c index a796c95fe8a0..cf8edf4fc429 100644 --- a/scripts/kconfig/symbol.c +++ b/scripts/kconfig/symbol.c @@ -9,7 +9,6 @@ #include #include -#define LKC_DIRECT_LINK #include "lkc.h" struct symbol symbol_yes = { diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l index b22f884f9022..29b79f4c34c2 100644 --- a/scripts/kconfig/zconf.l +++ b/scripts/kconfig/zconf.l @@ -14,7 +14,6 @@ #include #include -#define LKC_DIRECT_LINK #include "lkc.h" #define START_STRSIZE 16 diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped index 4c5495ea205e..c1579e6e79a6 100644 --- a/scripts/kconfig/zconf.tab.c_shipped +++ b/scripts/kconfig/zconf.tab.c_shipped @@ -88,7 +88,6 @@ #include #include -#define LKC_DIRECT_LINK #include "lkc.h" #define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt) diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y index 49fb4ab664c3..29c391f51606 100644 --- a/scripts/kconfig/zconf.y +++ b/scripts/kconfig/zconf.y @@ -11,7 +11,6 @@ #include #include -#define LKC_DIRECT_LINK #include "lkc.h" #define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt) -- cgit v1.2.3 From f8aea775c1d852c09adee1d0d62a9cab8764e6ea Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Sun, 5 Jun 2011 23:32:07 -0400 Subject: kconfig/gconf: kill deadcode The only call site of renderer_toggled() has been commented out since Apr. 2003, as per Linus' Linux history repository: commit e7f67eb3c0570aa50c1cc0707b478a6d93bdc255 Author: Roman Zippel Date: Fri Apr 4 04:18:05 2003 -0800 [PATCH] gconf update A gconf update by Romain Livin - fixed bug when double-clicking for changing value. - expand row when enabling a row with a submenu. - various bug fixes As this result in a warning: scripts/kconfig/gconf.c:891:13: warning: 'renderer_toggled' defined but not used just nuke that code. 
Signed-off-by: Arnaud Lacombe --- scripts/kconfig/gconf.c | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c index 9258957a0913..9f4438027df4 100644 --- a/scripts/kconfig/gconf.c +++ b/scripts/kconfig/gconf.c @@ -285,8 +285,6 @@ void init_left_tree(void) static void renderer_edited(GtkCellRendererText * cell, const gchar * path_string, const gchar * new_text, gpointer user_data); -static void renderer_toggled(GtkCellRendererToggle * cellrenderertoggle, - gchar * arg1, gpointer user_data); void init_right_tree(void) { @@ -320,8 +318,6 @@ void init_right_tree(void) "inconsistent", COL_BTNINC, "visible", COL_BTNVIS, "radio", COL_BTNRAD, NULL); - /*g_signal_connect(G_OBJECT(renderer), "toggled", - G_CALLBACK(renderer_toggled), NULL); */ renderer = gtk_cell_renderer_text_new(); gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), renderer, FALSE); @@ -888,35 +884,6 @@ static void toggle_sym_value(struct menu *menu) display_tree_part(); //fixme: keep exp/coll } -static void renderer_toggled(GtkCellRendererToggle * cell, - gchar * path_string, gpointer user_data) -{ - GtkTreePath *path, *sel_path = NULL; - GtkTreeIter iter, sel_iter; - GtkTreeSelection *sel; - struct menu *menu; - - path = gtk_tree_path_new_from_string(path_string); - if (!gtk_tree_model_get_iter(model2, &iter, path)) - return; - - sel = gtk_tree_view_get_selection(GTK_TREE_VIEW(tree2_w)); - if (gtk_tree_selection_get_selected(sel, NULL, &sel_iter)) - sel_path = gtk_tree_model_get_path(model2, &sel_iter); - if (!sel_path) - goto out1; - if (gtk_tree_path_compare(path, sel_path)) - goto out2; - - gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); - toggle_sym_value(menu); - - out2: - gtk_tree_path_free(sel_path); - out1: - gtk_tree_path_free(path); -} - static gint column2index(GtkTreeViewColumn * column) { gint i; -- cgit v1.2.3 From 1ea3ad4e93222faf1d138ceb10291376d2da7cc6 Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Sun, 5 Jun 2011 23:36:05 -0400 Subject: kconfig/gconf: silent missing prototype warnings As the `gconf' frontend is un-maintained, go the easy way by silencing the "warning: no previous prototype for ''" warnings. Signed-off-by: Arnaud Lacombe --- scripts/kconfig/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 0460ac3b3588..84abb2fc61d1 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -227,7 +227,8 @@ HOSTLOADLIBES_qconf = $(KC_QT_LIBS) HOSTCXXFLAGS_qconf.o = $(KC_QT_CFLAGS) HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0` -HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` +HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \ + -Wno-missing-prototypes HOSTLOADLIBES_mconf = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ldflags $(HOSTCC)) -- cgit v1.2.3 From 6e6938b6d3130305a5960c86b1a9b21e58cf6144 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sun, 6 Jun 2010 10:38:15 -0600 Subject: writeback: introduce .tagged_writepages for the WB_SYNC_NONE sync stage sync(2) is performed in two stages: the WB_SYNC_NONE sync and the WB_SYNC_ALL sync. Identify the first stage with .tagged_writepages and do livelock prevention for it, too. Jan's commit f446daaea9 ("mm: implement writeback livelock avoidance using page tagging") is a partial fix in that it only fixed the WB_SYNC_ALL phase livelock. 
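The mechanism carried over here is that commit's tag-and-write scheme: the set of dirty pages is tagged up front, so the work set is frozen before writeout starts and pages dirtied afterwards cannot extend the pass. In outline, a ->writepages() implementation now picks its radix-tree tag like this (a condensed sketch of the ext4 and write_cache_pages() hunks below):

	int tag;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;	/* frozen snapshot: no livelock */
	else
		tag = PAGECACHE_TAG_DIRTY;	/* ordinary background writeback */

	if (tag == PAGECACHE_TAG_TOWRITE)
		tag_pages_for_writeback(mapping, index, end);
	/* ...the lookup loop then only visits pages carrying 'tag' */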
Although ext4 is tested to no longer livelock with commit f446daaea9, it may due to some "redirty_tail() after pages_skipped" effect which is by no means a guarantee for _all_ the file systems. Note that writeback_inodes_sb() is called by not only sync(), they are treated the same because the other callers also need livelock prevention. Impact: It changes the order in which pages/inodes are synced to disk. Now in the WB_SYNC_NONE stage, it won't proceed to write the next inode until finished with the current inode. Acked-by: Jan Kara CC: Dave Chinner Signed-off-by: Wu Fengguang --- fs/ext4/inode.c | 4 ++-- fs/fs-writeback.c | 17 +++++++++-------- include/linux/writeback.h | 1 + mm/page-writeback.c | 4 ++-- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index a5763e3505ba..8558b6c3450a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2741,7 +2741,7 @@ static int write_cache_pages_da(struct address_space *mapping, index = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; - if (wbc->sync_mode == WB_SYNC_ALL) + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; @@ -2973,7 +2973,7 @@ static int ext4_da_writepages(struct address_space *mapping, } retry: - if (wbc->sync_mode == WB_SYNC_ALL) + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, index, end); while (!ret && wbc->nr_to_write > 0) { diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 0f015a0468de..5ed2ce9a28d0 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -36,6 +36,7 @@ struct wb_writeback_work { long nr_pages; struct super_block *sb; enum writeback_sync_modes sync_mode; + unsigned int tagged_writepages:1; unsigned int for_kupdate:1; unsigned int range_cyclic:1; unsigned int for_background:1; @@ -650,6 +651,7 @@ static long wb_writeback(struct bdi_writeback *wb, { struct writeback_control wbc = { .sync_mode = work->sync_mode, + .tagged_writepages = work->tagged_writepages, .older_than_this = NULL, .for_kupdate = work->for_kupdate, .for_background = work->for_background, @@ -657,7 +659,7 @@ static long wb_writeback(struct bdi_writeback *wb, }; unsigned long oldest_jif; long wrote = 0; - long write_chunk; + long write_chunk = MAX_WRITEBACK_PAGES; struct inode *inode; if (wbc.for_kupdate) { @@ -683,9 +685,7 @@ static long wb_writeback(struct bdi_writeback *wb, * (quickly) tag currently dirty pages * (maybe slowly) sync all tagged pages */ - if (wbc.sync_mode == WB_SYNC_NONE) - write_chunk = MAX_WRITEBACK_PAGES; - else + if (wbc.sync_mode == WB_SYNC_ALL || wbc.tagged_writepages) write_chunk = LONG_MAX; wbc.wb_start = jiffies; /* livelock avoidance */ @@ -1188,10 +1188,11 @@ void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr) { DECLARE_COMPLETION_ONSTACK(done); struct wb_writeback_work work = { - .sb = sb, - .sync_mode = WB_SYNC_NONE, - .done = &done, - .nr_pages = nr, + .sb = sb, + .sync_mode = WB_SYNC_NONE, + .tagged_writepages = 1, + .done = &done, + .nr_pages = nr, }; WARN_ON(!rwsem_is_locked(&sb->s_umount)); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 17e7ccc322a5..3f6542ca6198 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -47,6 +47,7 @@ struct writeback_control { unsigned encountered_congestion:1; /* An output: a queue is full */ unsigned for_kupdate:1; /* A kupdate writeback */ unsigned for_background:1; /* A background writeback */ + unsigned 
tagged_writepages:1; /* tag-and-write to avoid livelock */ unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned range_cyclic:1; /* range_start is cyclic */ unsigned more_io:1; /* more io to be dispatched */ diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 31f698862420..955fe35d01e0 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -892,12 +892,12 @@ int write_cache_pages(struct address_space *mapping, range_whole = 1; cycled = 1; /* ignore range_cyclic tests */ } - if (wbc->sync_mode == WB_SYNC_ALL) + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; retry: - if (wbc->sync_mode == WB_SYNC_ALL) + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, index, end); done_index = index; while (!done && (index <= end)) { -- cgit v1.2.3 From 94c3dcbb0b0cdfd82cedd21705424d8044edc42c Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 27 Apr 2011 19:05:21 -0600 Subject: writeback: update dirtied_when for synced inode to prevent livelock Explicitly update .dirtied_when on synced inodes, so that they are no longer considered for writeback in the next round. It can prevent both of the following livelock schemes: - while true; do echo data >> f; done - while true; do touch f; done (in theory) The exact livelock condition is, during sync(1): (1) no new inodes are dirtied (2) an inode being actively dirtied On (2), the inode will be tagged and synced with .nr_to_write=LONG_MAX. When finished, it will be redirty_tail()ed because it's still dirty and (.nr_to_write > 0). redirty_tail() won't update its ->dirtied_when on condition (1). The sync work will then revisit it on the next queue_io() and find it eligible again because its old ->dirtied_when predates the sync work start time. We'll do more aggressive "keep writeback as long as we wrote something" logic in wb_writeback(). The "use LONG_MAX .nr_to_write" trick in commit b9543dac5bbc ("writeback: avoid livelocking WB_SYNC_ALL writeback") will no longer be enough to stop sync livelock. Reviewed-by: Jan Kara Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 5ed2ce9a28d0..fe190a8b0bc8 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -419,6 +419,15 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) spin_lock(&inode->i_lock); inode->i_state &= ~I_SYNC; if (!(inode->i_state & I_FREEING)) { + /* + * Sync livelock prevention. Each inode is tagged and synced in + * one shot. If still dirty, it will be redirty_tail()'ed below. + * Update the dirty time to prevent enqueue and sync it again. + */ + if ((inode->i_state & I_DIRTY) && + (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)) + inode->dirtied_when = jiffies; + if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { /* * We didn't write back all the pages. nfs_writepages() -- cgit v1.2.3 From cb9bd1159c5fe8995e151fa7df10fa19f8c119cc Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 21 Jul 2010 22:50:57 -0600 Subject: writeback: introduce writeback_control.inodes_written The flusher works on dirty inodes in batches, and may quit prematurely if the batch of inodes happen to be metadata-only dirtied: in this case wbc->nr_to_write won't be decreased at all, which stands for "no pages written" but also mis-interpreted as "no progress". 
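Schematically, using the wb_writeback() accounting that appears in later hunks, a metadata-only batch looks like this to the old progress test:

	wbc.nr_to_write = write_chunk;
	writeback_inodes_wb(wb, &wbc);		/* batch is metadata-only dirty */

	wrote += write_chunk - wbc.nr_to_write;	/* == 0: no pages were written */
	/* nr_to_write alone reads as "no progress", so the work can bail out
	 * even though every inode in the batch was in fact cleaned */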
So introduce writeback_control.inodes_written to count the inodes get cleaned from VFS POV. A non-zero value means there are some progress on writeback, in which case more writeback can be tried. Acked-by: Jan Kara Acked-by: Mel Gorman Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 4 ++++ include/linux/writeback.h | 1 + 2 files changed, 5 insertions(+) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index fe190a8b0bc8..e4504299f4a5 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -464,6 +464,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) * No need to add it back to the LRU. */ list_del_init(&inode->i_wb_list); + wbc->inodes_written++; } } inode_sync_complete(inode); @@ -725,6 +726,7 @@ static long wb_writeback(struct bdi_writeback *wb, wbc.more_io = 0; wbc.nr_to_write = write_chunk; wbc.pages_skipped = 0; + wbc.inodes_written = 0; trace_wbc_writeback_start(&wbc, wb->bdi); if (work->sb) @@ -741,6 +743,8 @@ static long wb_writeback(struct bdi_writeback *wb, */ if (wbc.nr_to_write <= 0) continue; + if (wbc.inodes_written) + continue; /* * Didn't write everything and we don't have more IO, bail */ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 3f6542ca6198..7df9026f7129 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -34,6 +34,7 @@ struct writeback_control { long nr_to_write; /* Write this many pages, and decrement this for each page written */ long pages_skipped; /* Pages which were not written */ + long inodes_written; /* # of inodes written (at least) */ /* * For a_ops->writepages(): is start or end are non-zero then this is -- cgit v1.2.3 From e6fb6da2e10682d477f2fdb749451d9fe5d168e8 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Thu, 22 Jul 2010 10:23:44 -0600 Subject: writeback: try more writeback as long as something was written writeback_inodes_wb()/__writeback_inodes_sb() are not aggressive in that they only populate possibly a subset of eligible inodes into b_io at entrance time. When the queued set of inodes are all synced, they just return, possibly with all queued inode pages written but still wbc.nr_to_write > 0. For kupdate and background writeback, there may be more eligible inodes sitting in b_dirty when the current set of b_io inodes are completed. So it is necessary to try another round of writeback as long as we made some progress in this round. When there are no more eligible inodes, no more inodes will be enqueued in queue_io(), hence nothing could/will be synced and we may safely bail. For example, imagine 100 inodes i0, i1, i2, ..., i90, i91, i99 At queue_io() time, i90-i99 happen to be expired and moved to s_io for IO. When finished successfully, if their total size is less than MAX_WRITEBACK_PAGES, nr_to_write will be > 0. Then wb_writeback() will quit the background work (w/o this patch) while it's still over background threshold. This will be a fairly normal/frequent case I guess. Now that we do tagged sync and update inode->dirtied_when after the sync, this change won't livelock sync(1). I actually tried to write 1 page per 1ms with this command write-and-fsync -n10000 -S 1000 -c 4096 /fs/test and do sync(1) at the same time. The sync completes quickly on ext4, xfs, btrfs. 
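Condensed, the progress test in wb_writeback() after this change reads (paraphrasing the hunk that follows):

	wrote += write_chunk - wbc.nr_to_write;

	if (wbc.nr_to_write < write_chunk)	/* wrote some pages */
		continue;
	if (wbc.inodes_written)			/* cleaned some inodes */
		continue;
	if (!wbc.more_io)			/* nothing left queued */
		break;
	/* no progress but more IO pending: wait for the inode currently
	 * under writeback, then loop again */

so the work only stops once a full pass cleans nothing and b_more_io is empty.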
Acked-by: Jan Kara Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index e4504299f4a5..271cf2150ba0 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -739,22 +739,22 @@ static long wb_writeback(struct bdi_writeback *wb, wrote += write_chunk - wbc.nr_to_write; /* - * If we consumed everything, see if we have more + * Did we write something? Try for more + * + * Dirty inodes are moved to b_io for writeback in batches. + * The completion of the current batch does not necessarily + * mean the overall work is done. So we keep looping as long + * as made some progress on cleaning pages or inodes. */ - if (wbc.nr_to_write <= 0) + if (wbc.nr_to_write < write_chunk) continue; if (wbc.inodes_written) continue; /* - * Didn't write everything and we don't have more IO, bail + * No more inodes for IO, bail */ if (!wbc.more_io) break; - /* - * Did we write something? Try for more - */ - if (wbc.nr_to_write < write_chunk) - continue; /* * Nothing written. Wait for some inode to * become available for writeback. Otherwise -- cgit v1.2.3 From ba9aa8399fda48510d80c2fed1afb8fedbe1bb41 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 21 Jul 2010 20:32:30 -0600 Subject: writeback: the kupdate expire timestamp should be a moving target Dynamically compute the dirty expire timestamp at queue_io() time. writeback_control.older_than_this used to be determined at entrance to the kupdate writeback work. This _static_ timestamp may go stale if the kupdate work runs on and on. The flusher may then stuck with some old busy inodes, never considering newly expired inodes thereafter. This has two possible problems: - It is unfair for a large dirty inode to delay (for a long time) the writeback of small dirty inodes. - As time goes by, the large and busy dirty inode may contain only _freshly_ dirtied pages. Ignoring newly expired dirty inodes risks delaying the expired dirty pages to the end of LRU lists, triggering the evil pageout(). Nevertheless this patch merely addresses part of the problem. v2: keep policy changes inside wb_writeback() and keep the wbc.older_than_this visibility as suggested by Dave. 
CC: Dave Chinner Acked-by: Jan Kara Acked-by: Mel Gorman Signed-off-by: Itaru Kitayama Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 271cf2150ba0..0adee7853b80 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -672,11 +672,6 @@ static long wb_writeback(struct bdi_writeback *wb, long write_chunk = MAX_WRITEBACK_PAGES; struct inode *inode; - if (wbc.for_kupdate) { - wbc.older_than_this = &oldest_jif; - oldest_jif = jiffies - - msecs_to_jiffies(dirty_expire_interval * 10); - } if (!wbc.range_cyclic) { wbc.range_start = 0; wbc.range_end = LLONG_MAX; @@ -723,6 +718,12 @@ static long wb_writeback(struct bdi_writeback *wb, if (work->for_background && !over_bground_thresh()) break; + if (work->for_kupdate) { + oldest_jif = jiffies - + msecs_to_jiffies(dirty_expire_interval * 10); + wbc.older_than_this = &oldest_jif; + } + wbc.more_io = 0; wbc.nr_to_write = write_chunk; wbc.pages_skipped = 0; -- cgit v1.2.3 From 424b351fe1901fc909fd0ca4f21dab58f24c1aac Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 21 Jul 2010 20:11:53 -0600 Subject: writeback: refill b_io iff empty There is no point to carry different refill policies between for_kupdate and other type of works. Use a consistent "refill b_io iff empty" policy which can guarantee fairness in an easy to understand way. A b_io refill will setup a _fixed_ work set with all currently eligible inodes and start a new round of walk through b_io. The "fixed" work set means no new inodes will be added to the work set during the walk. Only when a complete walk over b_io is done, new inodes that are eligible at the time will be enqueued and the walk be started over. This procedure provides fairness among the inodes because it guarantees each inode to be synced once and only once at each round. So all inodes will be free from starvations. This change relies on wb_writeback() to keep retrying as long as we made some progress on cleaning some pages and/or inodes. Without that ability, the old logic on background works relies on aggressively queuing all eligible inodes into b_io at every time. But that's not a guarantee. The below test script completes a slightly faster now: 2.6.39-rc3 2.6.39-rc3-dyn-expire+ ------------------------------------------------ all elapsed 256.043 252.367 stddev 24.381 12.530 tar elapsed 30.097 28.808 dd elapsed 13.214 11.782 #!/bin/zsh cp /c/linux-2.6.38.3.tar.bz2 /dev/shm/ umount /dev/sda7 mkfs.xfs -f /dev/sda7 mount /dev/sda7 /fs echo 3 > /proc/sys/vm/drop_caches tic=$(cat /proc/uptime|cut -d' ' -f2) cd /fs time tar jxf /dev/shm/linux-2.6.38.3.tar.bz2 & time dd if=/dev/zero of=/fs/zero bs=1M count=1000 & wait sync tac=$(cat /proc/uptime|cut -d' ' -f2) echo elapsed: $((tac - tic)) It maintains roughly the same small vs. large file writeout shares, and offers large files better chances to be written in nice 4M chunks. Analyzes from Dave Chinner in great details: Let's say we have lots of inodes with 100 dirty pages being created, and one large writeback going on. We expire 8 new inodes for every 1024 pages we write back. 
With the old code, we do: b_more_io (large inode) -> b_io (1l) 8 newly expired inodes -> b_io (1l, 8s) writeback large inode 1024 pages -> b_more_io b_more_io (large inode) -> b_io (8s, 1l) 8 newly expired inodes -> b_io (8s, 1l, 8s) writeback 8 small inodes 800 pages 1 large inode 224 pages -> b_more_io b_more_io (large inode) -> b_io (8s, 1l) 8 newly expired inodes -> b_io (8s, 1l, 8s) ..... Your new code: b_more_io (large inode) -> b_io (1l) 8 newly expired inodes -> b_io (1l, 8s) writeback large inode 1024 pages -> b_more_io (b_io == 8s) writeback 8 small inodes 800 pages b_io empty: (1800 pages written) b_more_io (large inode) -> b_io (1l) 14 newly expired inodes -> b_io (1l, 14s) writeback large inode 1024 pages -> b_more_io (b_io == 14s) writeback 10 small inodes 1000 pages 1 small inode 24 pages -> b_more_io (1l, 1s(24)) writeback 5 small inodes 500 pages b_io empty: (2548 pages written) b_more_io (large inode) -> b_io (1l, 1s(24)) 20 newly expired inodes -> b_io (1l, 1s(24), 20s) ...... Rough progression of pages written at b_io refill: Old code: total large file % of writeback 1024 224 21.9% (fixed) New code: total large file % of writeback 1800 1024 ~55% 2550 1024 ~40% 3050 1024 ~33% 3500 1024 ~29% 3950 1024 ~26% 4250 1024 ~24% 4500 1024 ~22.7% 4700 1024 ~21.7% 4800 1024 ~21.3% 4800 1024 ~21.3% (pretty much steady state from here) Ok, so the steady state is reached with a similar percentage of writeback to the large file as the existing code. Ok, that's good, but providing some evidence that is doesn't change the shared of writeback to the large should be in the commit message ;) The other advantage to this is that we always write 1024 page chunks to the large file, rather than smaller "whatever remains" chunks. CC: Jan Kara Acked-by: Mel Gorman Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 0adee7853b80..664acdb2e7ef 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -589,7 +589,8 @@ void writeback_inodes_wb(struct bdi_writeback *wb, if (!wbc->wb_start) wbc->wb_start = jiffies; /* livelock avoidance */ spin_lock(&inode_wb_list_lock); - if (!wbc->for_kupdate || list_empty(&wb->b_io)) + + if (list_empty(&wb->b_io)) queue_io(wb, wbc->older_than_this); while (!list_empty(&wb->b_io)) { @@ -616,7 +617,7 @@ static void __writeback_inodes_sb(struct super_block *sb, WARN_ON(!rwsem_is_locked(&sb->s_umount)); spin_lock(&inode_wb_list_lock); - if (!wbc->for_kupdate || list_empty(&wb->b_io)) + if (list_empty(&wb->b_io)) queue_io(wb, wbc->older_than_this); writeback_sb_inodes(sb, wb, wbc, true); spin_unlock(&inode_wb_list_lock); -- cgit v1.2.3 From f758eeabeb96f878c860e8f110f94ec8820822a9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 21 Apr 2011 18:19:44 -0600 Subject: writeback: split inode_wb_list_lock into bdi_writeback.list_lock Split the global inode_wb_list_lock into a per-bdi_writeback list_lock, as it's currently the most contended lock in the system for metadata heavy workloads. It won't help for single-filesystem workloads for which we'll need the I/O-less balance_dirty_pages, but at least we can dedicate a cpu to spinning on each bdi now for larger systems. Based on earlier patches from Nick Piggin and Dave Chinner. 
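One piece not reproduced in the hunks below: taking two per-bdi locks at once, as bdev_inode_switch_bdi() must when an inode moves between bdis, needs a fixed acquisition order to avoid ABBA deadlock. A sketch of the bdi_lock_two() helper this patch introduces, assuming the conventional order-by-address scheme (the actual body lives in mm/backing-dev.c):

	void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
	{
		if (wb1 < wb2) {				/* lock in address order */
			spin_lock(&wb1->list_lock);
			spin_lock_nested(&wb2->list_lock, 1);	/* distinct lockdep subclass */
		} else {
			spin_lock(&wb2->list_lock);
			spin_lock_nested(&wb1->list_lock, 1);
		}
	}

Callers such as bdev_inode_switch_bdi() additionally bail out early when source and destination are the same bdi, which is the recursive-lock fix credited to Hugh Dickins in the changelog.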
It reduces lock contentions to 1/4 in this test case: 10 HDD JBOD, 100 dd on each disk, XFS, 6GB ram lock_stat version 0.3 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- class name con-bounces contentions waittime-min waittime-max waittime-total acq-bounces acquisitions holdtime-min holdtime-max holdtime-total ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- vanilla 2.6.39-rc3: inode_wb_list_lock: 42590 44433 0.12 147.74 144127.35 252274 886792 0.08 121.34 917211.23 ------------------ inode_wb_list_lock 2 [] bdev_inode_switch_bdi+0x29/0x85 inode_wb_list_lock 34 [] inode_wb_list_del+0x22/0x49 inode_wb_list_lock 12893 [] __mark_inode_dirty+0x170/0x1d0 inode_wb_list_lock 10702 [] writeback_single_inode+0x16d/0x20a ------------------ inode_wb_list_lock 2 [] bdev_inode_switch_bdi+0x29/0x85 inode_wb_list_lock 19 [] inode_wb_list_del+0x22/0x49 inode_wb_list_lock 5550 [] __mark_inode_dirty+0x170/0x1d0 inode_wb_list_lock 8511 [] writeback_sb_inodes+0x10f/0x157 2.6.39-rc3 + patch: &(&wb->list_lock)->rlock: 11383 11657 0.14 151.69 40429.51 90825 527918 0.11 145.90 556843.37 ------------------------ &(&wb->list_lock)->rlock 10 [] inode_wb_list_del+0x5f/0x86 &(&wb->list_lock)->rlock 1493 [] writeback_inodes_wb+0x3d/0x150 &(&wb->list_lock)->rlock 3652 [] writeback_sb_inodes+0x123/0x16f &(&wb->list_lock)->rlock 1412 [] writeback_single_inode+0x17f/0x223 ------------------------ &(&wb->list_lock)->rlock 3 [] bdi_lock_two+0x46/0x4b &(&wb->list_lock)->rlock 6 [] inode_wb_list_del+0x5f/0x86 &(&wb->list_lock)->rlock 2061 [] __mark_inode_dirty+0x173/0x1cf &(&wb->list_lock)->rlock 2629 [] writeback_sb_inodes+0x123/0x16f hughd@google.com: fix recursive lock when bdi_lock_two() is called with new the same as old akpm@linux-foundation.org: cleanup bdev_inode_switch_bdi() comment Signed-off-by: Christoph Hellwig Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Wu Fengguang --- fs/block_dev.c | 16 +++++--- fs/fs-writeback.c | 97 +++++++++++++++++++++++---------------------- fs/inode.c | 5 +-- include/linux/backing-dev.h | 2 + include/linux/writeback.h | 2 - mm/backing-dev.c | 21 ++++++++-- mm/filemap.c | 6 +-- mm/rmap.c | 4 +- 8 files changed, 85 insertions(+), 68 deletions(-) diff --git a/fs/block_dev.c b/fs/block_dev.c index 1a2421f908f0..3c9a03e51b62 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -44,24 +44,28 @@ inline struct block_device *I_BDEV(struct inode *inode) { return &BDEV_I(inode)->bdev; } - EXPORT_SYMBOL(I_BDEV); /* - * move the inode from it's current bdi to the a new bdi. if the inode is dirty - * we need to move it onto the dirty list of @dst so that the inode is always - * on the right list. + * Move the inode from its current bdi to a new bdi. If the inode is dirty we + * need to move it onto the dirty list of @dst so that the inode is always on + * the right list. 
*/ static void bdev_inode_switch_bdi(struct inode *inode, struct backing_dev_info *dst) { - spin_lock(&inode_wb_list_lock); + struct backing_dev_info *old = inode->i_data.backing_dev_info; + + if (unlikely(dst == old)) /* deadlock avoidance */ + return; + bdi_lock_two(&old->wb, &dst->wb); spin_lock(&inode->i_lock); inode->i_data.backing_dev_info = dst; if (inode->i_state & I_DIRTY) list_move(&inode->i_wb_list, &dst->wb.b_dirty); spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&old->wb.list_lock); + spin_unlock(&dst->wb.list_lock); } static sector_t max_block(struct block_device *bdev) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 664acdb2e7ef..36a30917e0dc 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -181,12 +181,13 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi) */ void inode_wb_list_del(struct inode *inode) { - spin_lock(&inode_wb_list_lock); + struct backing_dev_info *bdi = inode_to_bdi(inode); + + spin_lock(&bdi->wb.list_lock); list_del_init(&inode->i_wb_list); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&bdi->wb.list_lock); } - /* * Redirty an inode: set its when-it-was dirtied timestamp and move it to the * furthest end of its superblock's dirty-inode list. @@ -196,11 +197,9 @@ void inode_wb_list_del(struct inode *inode) * the case then the inode must have been redirtied while it was being written * out and we don't reset its dirtied_when. */ -static void redirty_tail(struct inode *inode) +static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) { - struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; - - assert_spin_locked(&inode_wb_list_lock); + assert_spin_locked(&wb->list_lock); if (!list_empty(&wb->b_dirty)) { struct inode *tail; @@ -214,11 +213,9 @@ static void redirty_tail(struct inode *inode) /* * requeue inode for re-scanning after bdi->b_io list is exhausted. */ -static void requeue_io(struct inode *inode) +static void requeue_io(struct inode *inode, struct bdi_writeback *wb) { - struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; - - assert_spin_locked(&inode_wb_list_lock); + assert_spin_locked(&wb->list_lock); list_move(&inode->i_wb_list, &wb->b_more_io); } @@ -226,7 +223,7 @@ static void inode_sync_complete(struct inode *inode) { /* * Prevent speculative execution through - * spin_unlock(&inode_wb_list_lock); + * spin_unlock(&wb->list_lock); */ smp_mb(); @@ -302,7 +299,7 @@ static void move_expired_inodes(struct list_head *delaying_queue, */ static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this) { - assert_spin_locked(&inode_wb_list_lock); + assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this); } @@ -317,7 +314,8 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc) /* * Wait for writeback on an inode to complete. 
*/ -static void inode_wait_for_writeback(struct inode *inode) +static void inode_wait_for_writeback(struct inode *inode, + struct bdi_writeback *wb) { DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); wait_queue_head_t *wqh; @@ -325,15 +323,15 @@ static void inode_wait_for_writeback(struct inode *inode) wqh = bit_waitqueue(&inode->i_state, __I_SYNC); while (inode->i_state & I_SYNC) { spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE); - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); spin_lock(&inode->i_lock); } } /* - * Write out an inode's dirty pages. Called under inode_wb_list_lock and + * Write out an inode's dirty pages. Called under wb->list_lock and * inode->i_lock. Either the caller has an active reference on the inode or * the inode has I_WILL_FREE set. * @@ -344,13 +342,14 @@ static void inode_wait_for_writeback(struct inode *inode) * livelocks, etc. */ static int -writeback_single_inode(struct inode *inode, struct writeback_control *wbc) +writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, + struct writeback_control *wbc) { struct address_space *mapping = inode->i_mapping; unsigned dirty; int ret; - assert_spin_locked(&inode_wb_list_lock); + assert_spin_locked(&wb->list_lock); assert_spin_locked(&inode->i_lock); if (!atomic_read(&inode->i_count)) @@ -368,14 +367,14 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) * completed a full scan of b_io. */ if (wbc->sync_mode != WB_SYNC_ALL) { - requeue_io(inode); + requeue_io(inode, wb); return 0; } /* * It's a data-integrity sync. We must wait. */ - inode_wait_for_writeback(inode); + inode_wait_for_writeback(inode, wb); } BUG_ON(inode->i_state & I_SYNC); @@ -384,7 +383,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) inode->i_state |= I_SYNC; inode->i_state &= ~I_DIRTY_PAGES; spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); ret = do_writepages(mapping, wbc); @@ -415,7 +414,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) ret = err; } - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); spin_lock(&inode->i_lock); inode->i_state &= ~I_SYNC; if (!(inode->i_state & I_FREEING)) { @@ -438,7 +437,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) /* * slice used up: queue for next turn */ - requeue_io(inode); + requeue_io(inode, wb); } else { /* * Writeback blocked by something other than @@ -447,7 +446,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) * retrying writeback of the dirty page/inode * that cannot be performed immediately. */ - redirty_tail(inode); + redirty_tail(inode, wb); } } else if (inode->i_state & I_DIRTY) { /* @@ -456,7 +455,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) * submission or metadata updates after data IO * completion. */ - redirty_tail(inode); + redirty_tail(inode, wb); } else { /* * The inode is clean. At this point we either have @@ -521,7 +520,7 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, * superblock, move all inodes not belonging * to it back onto the dirty list. 
*/ - redirty_tail(inode); + redirty_tail(inode, wb); continue; } @@ -541,7 +540,7 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { spin_unlock(&inode->i_lock); - requeue_io(inode); + requeue_io(inode, wb); continue; } @@ -557,19 +556,19 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, __iget(inode); pages_skipped = wbc->pages_skipped; - writeback_single_inode(inode, wbc); + writeback_single_inode(inode, wb, wbc); if (wbc->pages_skipped != pages_skipped) { /* * writeback is not making progress due to locked * buffers. Skip this inode for now. */ - redirty_tail(inode); + redirty_tail(inode, wb); } spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); iput(inode); cond_resched(); - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); if (wbc->nr_to_write <= 0) { wbc->more_io = 1; return 1; @@ -588,7 +587,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb, if (!wbc->wb_start) wbc->wb_start = jiffies; /* livelock avoidance */ - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); if (list_empty(&wb->b_io)) queue_io(wb, wbc->older_than_this); @@ -598,7 +597,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb, struct super_block *sb = inode->i_sb; if (!pin_sb_for_writeback(sb)) { - requeue_io(inode); + requeue_io(inode, wb); continue; } ret = writeback_sb_inodes(sb, wb, wbc, false); @@ -607,7 +606,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb, if (ret) break; } - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); /* Leave any unwritten inodes on b_io */ } @@ -616,11 +615,11 @@ static void __writeback_inodes_sb(struct super_block *sb, { WARN_ON(!rwsem_is_locked(&sb->s_umount)); - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); if (list_empty(&wb->b_io)) queue_io(wb, wbc->older_than_this); writeback_sb_inodes(sb, wb, wbc, true); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); } /* @@ -762,15 +761,15 @@ static long wb_writeback(struct bdi_writeback *wb, * become available for writeback. Otherwise * we'll just busyloop. 
*/ - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); if (!list_empty(&wb->b_more_io)) { inode = wb_inode(wb->b_more_io.prev); trace_wbc_writeback_wait(&wbc, wb->bdi); spin_lock(&inode->i_lock); - inode_wait_for_writeback(inode); + inode_wait_for_writeback(inode, wb); spin_unlock(&inode->i_lock); } - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); } return wrote; @@ -1104,10 +1103,10 @@ void __mark_inode_dirty(struct inode *inode, int flags) } spin_unlock(&inode->i_lock); - spin_lock(&inode_wb_list_lock); + spin_lock(&bdi->wb.list_lock); inode->dirtied_when = jiffies; list_move(&inode->i_wb_list, &bdi->wb.b_dirty); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&bdi->wb.list_lock); if (wakeup_bdi) bdi_wakeup_thread_delayed(bdi); @@ -1309,6 +1308,7 @@ EXPORT_SYMBOL(sync_inodes_sb); */ int write_inode_now(struct inode *inode, int sync) { + struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; int ret; struct writeback_control wbc = { .nr_to_write = LONG_MAX, @@ -1321,11 +1321,11 @@ int write_inode_now(struct inode *inode, int sync) wbc.nr_to_write = 0; might_sleep(); - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); spin_lock(&inode->i_lock); - ret = writeback_single_inode(inode, &wbc); + ret = writeback_single_inode(inode, wb, &wbc); spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); if (sync) inode_sync_wait(inode); return ret; @@ -1345,13 +1345,14 @@ EXPORT_SYMBOL(write_inode_now); */ int sync_inode(struct inode *inode, struct writeback_control *wbc) { + struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; int ret; - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); spin_lock(&inode->i_lock); - ret = writeback_single_inode(inode, wbc); + ret = writeback_single_inode(inode, wb, wbc); spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); return ret; } EXPORT_SYMBOL(sync_inode); diff --git a/fs/inode.c b/fs/inode.c index 0f7e88a7803f..4be128cbc754 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -37,7 +37,7 @@ * inode_lru, inode->i_lru * inode_sb_list_lock protects: * sb->s_inodes, inode->i_sb_list - * inode_wb_list_lock protects: + * bdi->wb.list_lock protects: * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list * inode_hash_lock protects: * inode_hashtable, inode->i_hash @@ -48,7 +48,7 @@ * inode->i_lock * inode_lru_lock * - * inode_wb_list_lock + * bdi->wb.list_lock * inode->i_lock * * inode_hash_lock @@ -68,7 +68,6 @@ static LIST_HEAD(inode_lru); static DEFINE_SPINLOCK(inode_lru_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock); -__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock); /* * iprune_sem provides exclusion between the icache shrinking and the diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 96f4094b706d..47feb2c4706a 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -57,6 +57,7 @@ struct bdi_writeback { struct list_head b_dirty; /* dirty inodes */ struct list_head b_io; /* parked for writeback */ struct list_head b_more_io; /* parked for more writeback */ + spinlock_t list_lock; /* protects the b_* lists */ }; struct backing_dev_info { @@ -106,6 +107,7 @@ int bdi_writeback_thread(void *data); int bdi_has_dirty_io(struct backing_dev_info *bdi); void bdi_arm_supers_timer(void); void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); +void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2); extern spinlock_t bdi_lock; extern struct list_head 
bdi_list; diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 7df9026f7129..c2d957fb38d3 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -9,8 +9,6 @@ struct backing_dev_info; -extern spinlock_t inode_wb_list_lock; - /* * fs/fs-writeback.c */ diff --git a/mm/backing-dev.c b/mm/backing-dev.c index f032e6e1e09a..5f6553ef1ba7 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -45,6 +45,17 @@ static struct timer_list sync_supers_timer; static int bdi_sync_supers(void *); static void sync_supers_timer_fn(unsigned long); +void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2) +{ + if (wb1 < wb2) { + spin_lock(&wb1->list_lock); + spin_lock_nested(&wb2->list_lock, 1); + } else { + spin_lock(&wb2->list_lock); + spin_lock_nested(&wb1->list_lock, 1); + } +} + #ifdef CONFIG_DEBUG_FS #include #include @@ -67,14 +78,14 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) struct inode *inode; nr_dirty = nr_io = nr_more_io = 0; - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); list_for_each_entry(inode, &wb->b_dirty, i_wb_list) nr_dirty++; list_for_each_entry(inode, &wb->b_io, i_wb_list) nr_io++; list_for_each_entry(inode, &wb->b_more_io, i_wb_list) nr_more_io++; - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); global_dirty_limits(&background_thresh, &dirty_thresh); bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); @@ -628,6 +639,7 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi) INIT_LIST_HEAD(&wb->b_dirty); INIT_LIST_HEAD(&wb->b_io); INIT_LIST_HEAD(&wb->b_more_io); + spin_lock_init(&wb->list_lock); setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi); } @@ -676,11 +688,12 @@ void bdi_destroy(struct backing_dev_info *bdi) if (bdi_has_dirty_io(bdi)) { struct bdi_writeback *dst = &default_backing_dev_info.wb; - spin_lock(&inode_wb_list_lock); + bdi_lock_two(&bdi->wb, dst); list_splice(&bdi->wb.b_dirty, &dst->b_dirty); list_splice(&bdi->wb.b_io, &dst->b_io); list_splice(&bdi->wb.b_more_io, &dst->b_more_io); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&bdi->wb.list_lock); + spin_unlock(&dst->list_lock); } bdi_unregister(bdi); diff --git a/mm/filemap.c b/mm/filemap.c index d7b10578a64b..1e492c3dd6f8 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -81,7 +81,7 @@ * ->i_mutex * ->i_alloc_sem (various) * - * inode_wb_list_lock + * bdi->wb.list_lock * sb_lock (fs/fs-writeback.c) * ->mapping->tree_lock (__sync_single_inode) * @@ -99,9 +99,9 @@ * ->zone.lru_lock (check_pte_range->isolate_lru_page) * ->private_lock (page_remove_rmap->set_page_dirty) * ->tree_lock (page_remove_rmap->set_page_dirty) - * inode_wb_list_lock (page_remove_rmap->set_page_dirty) + * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) * ->inode->i_lock (page_remove_rmap->set_page_dirty) - * inode_wb_list_lock (zap_pte_range->set_page_dirty) + * bdi.wb->list_lock (zap_pte_range->set_page_dirty) * ->inode->i_lock (zap_pte_range->set_page_dirty) * ->private_lock (zap_pte_range->__set_page_dirty_buffers) * diff --git a/mm/rmap.c b/mm/rmap.c index 0eb463ea88dd..d04e36a7cc9f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -32,11 +32,11 @@ * mmlist_lock (in mmput, drain_mmlist and others) * mapping->private_lock (in __set_page_dirty_buffers) * inode->i_lock (in set_page_dirty's __mark_inode_dirty) - * inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty) + * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) * sb_lock (within inode_lock in fs/fs-writeback.c) * mapping->tree_lock 
(widely used, in set_page_dirty, * in arch-dependent flush_dcache_mmap_lock, - * within inode_wb_list_lock in __sync_single_inode) + * within bdi.wb->list_lock in __sync_single_inode) * * (code doesn't rely on that order so it could be switched around) * ->tasklist_lock -- cgit v1.2.3 From e8dfc30582995ae12454cda517b17d6294175b07 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Thu, 21 Apr 2011 12:06:32 -0600 Subject: writeback: elevate queue_io() into wb_writeback() Code refactor for more logical code layout. No behavior change. - remove the mis-named __writeback_inodes_sb() - wb_writeback()/writeback_inodes_wb() will decide when to queue_io() before calling __writeback_inodes_wb() Acked-by: Jan Kara Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 36a30917e0dc..565b1fd15be6 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -580,17 +580,13 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, return 1; } -void writeback_inodes_wb(struct bdi_writeback *wb, - struct writeback_control *wbc) +static void __writeback_inodes_wb(struct bdi_writeback *wb, + struct writeback_control *wbc) { int ret = 0; if (!wbc->wb_start) wbc->wb_start = jiffies; /* livelock avoidance */ - spin_lock(&wb->list_lock); - - if (list_empty(&wb->b_io)) - queue_io(wb, wbc->older_than_this); while (!list_empty(&wb->b_io)) { struct inode *inode = wb_inode(wb->b_io.prev); @@ -606,19 +602,16 @@ void writeback_inodes_wb(struct bdi_writeback *wb, if (ret) break; } - spin_unlock(&wb->list_lock); /* Leave any unwritten inodes on b_io */ } -static void __writeback_inodes_sb(struct super_block *sb, - struct bdi_writeback *wb, struct writeback_control *wbc) +void writeback_inodes_wb(struct bdi_writeback *wb, + struct writeback_control *wbc) { - WARN_ON(!rwsem_is_locked(&sb->s_umount)); - spin_lock(&wb->list_lock); if (list_empty(&wb->b_io)) queue_io(wb, wbc->older_than_this); - writeback_sb_inodes(sb, wb, wbc, true); + __writeback_inodes_wb(wb, wbc); spin_unlock(&wb->list_lock); } @@ -685,7 +678,7 @@ static long wb_writeback(struct bdi_writeback *wb, * The intended call sequence for WB_SYNC_ALL writeback is: * * wb_writeback() - * __writeback_inodes_sb() <== called only once + * writeback_sb_inodes() <== called only once * write_cache_pages() <== called once for each inode * (quickly) tag currently dirty pages * (maybe slowly) sync all tagged pages @@ -694,6 +687,7 @@ static long wb_writeback(struct bdi_writeback *wb, write_chunk = LONG_MAX; wbc.wb_start = jiffies; /* livelock avoidance */ + spin_lock(&wb->list_lock); for (;;) { /* * Stop writeback when nr_pages has been consumed @@ -730,10 +724,12 @@ static long wb_writeback(struct bdi_writeback *wb, wbc.inodes_written = 0; trace_wbc_writeback_start(&wbc, wb->bdi); + if (list_empty(&wb->b_io)) + queue_io(wb, wbc.older_than_this); if (work->sb) - __writeback_inodes_sb(work->sb, wb, &wbc); + writeback_sb_inodes(work->sb, wb, &wbc, true); else - writeback_inodes_wb(wb, &wbc); + __writeback_inodes_wb(wb, &wbc); trace_wbc_writeback_written(&wbc, wb->bdi); work->nr_pages -= write_chunk - wbc.nr_to_write; @@ -761,7 +757,6 @@ static long wb_writeback(struct bdi_writeback *wb, * become available for writeback. Otherwise * we'll just busyloop. 
*/ - spin_lock(&wb->list_lock); if (!list_empty(&wb->b_more_io)) { inode = wb_inode(wb->b_more_io.prev); trace_wbc_writeback_wait(&wbc, wb->bdi); @@ -769,8 +764,8 @@ static long wb_writeback(struct bdi_writeback *wb, inode_wait_for_writeback(inode, wb); spin_unlock(&inode->i_lock); } - spin_unlock(&wb->list_lock); } + spin_unlock(&wb->list_lock); return wrote; } -- cgit v1.2.3 From e185dda89d69cde142b48059413a03561f41f78a Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sat, 23 Apr 2011 11:26:07 -0600 Subject: writeback: avoid extra sync work at enqueue time This removes writeback_control.wb_start and does more straightforward sync livelock prevention by setting .older_than_this to prevent extra inodes from being enqueued in the first place. Acked-by: Jan Kara Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 16 +++------------- include/linux/writeback.h | 3 --- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 565b1fd15be6..d0553f33fb50 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -544,15 +544,6 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, continue; } - /* - * Was this inode dirtied after sync_sb_inodes was called? - * This keeps sync from extra jobs and livelock. - */ - if (inode_dirtied_after(inode, wbc->wb_start)) { - spin_unlock(&inode->i_lock); - return 1; - } - __iget(inode); pages_skipped = wbc->pages_skipped; @@ -585,9 +576,6 @@ static void __writeback_inodes_wb(struct bdi_writeback *wb, { int ret = 0; - if (!wbc->wb_start) - wbc->wb_start = jiffies; /* livelock avoidance */ - while (!list_empty(&wb->b_io)) { struct inode *inode = wb_inode(wb->b_io.prev); struct super_block *sb = inode->i_sb; @@ -686,7 +674,9 @@ static long wb_writeback(struct bdi_writeback *wb, if (wbc.sync_mode == WB_SYNC_ALL || wbc.tagged_writepages) write_chunk = LONG_MAX; - wbc.wb_start = jiffies; /* livelock avoidance */ + oldest_jif = jiffies; + wbc.older_than_this = &oldest_jif; + spin_lock(&wb->list_lock); for (;;) { /* diff --git a/include/linux/writeback.h b/include/linux/writeback.h index c2d957fb38d3..d8e96a480850 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -26,9 +26,6 @@ struct writeback_control { enum writeback_sync_modes sync_mode; unsigned long *older_than_this; /* If !NULL, only write back inodes older than this */ - unsigned long wb_start; /* Time writeback_inodes_wb was - called. This is needed to avoid - extra jobs and livelock */ long nr_to_write; /* Write this many pages, and decrement this for each page written */ long pages_skipped; /* Pages which were not written */ -- cgit v1.2.3 From 6f7186562771ec9b629914df328048449ccddf4a Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 2 Mar 2011 17:14:34 -0600 Subject: writeback: add bdi_dirty_limit() kernel-doc Clarify the bdi_dirty_limit() comment. Acked-by: Peter Zijlstra Acked-by: Jan Kara Signed-off-by: Wu Fengguang --- mm/page-writeback.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 955fe35d01e0..b8be62381396 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -437,10 +437,17 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) *pdirty = dirty; } -/* +/** * bdi_dirty_limit - @bdi's share of dirty throttling threshold + * @bdi: the backing_dev_info to query + * @dirty: global dirty limit in pages + * + * Returns @bdi's dirty limit in pages. 
The term "dirty" in the context of + * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages. + * And the "limit" in the name is not seriously taken as hard limit in + * balance_dirty_pages(). * - * Allocate high/low dirty limits to fast/slow devices, in order to prevent + * It allocates high/low dirty limits to fast/slow devices, in order to prevent * - starving fast devices * - piling up dirty pages (that will take long time to sync) on slow devices * -- cgit v1.2.3 From 3efaf0faba6793cd91298c76315e15de59c13ae0 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Thu, 16 Dec 2010 22:22:00 -0600 Subject: writeback: skip balance_dirty_pages() for in-memory fs This avoids unnecessary checks and dirty throttling on tmpfs/ramfs. Notes about the tmpfs/ramfs behavior changes: As for 2.6.36 and older kernels, the tmpfs writes will sleep inside balance_dirty_pages() as long as we are over the (dirty+background)/2 global throttle threshold. This is because both the dirty pages and threshold will be 0 for tmpfs/ramfs. Hence this test will always evaluate to TRUE: dirty_exceeded = (bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh) || (nr_reclaimable + nr_writeback >= dirty_thresh); For 2.6.37, someone complained that the current logic does not allow the users to set vm.dirty_ratio=0. So commit 4cbec4c8b9 changed the test to dirty_exceeded = (bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh) || (nr_reclaimable + nr_writeback > dirty_thresh); So 2.6.37 will behave differently for tmpfs/ramfs: it will never get throttled unless the global dirty threshold is exceeded (which is very unlikely to happen; once happen, will block many tasks). I'd say that the 2.6.36 behavior is very bad for tmpfs/ramfs. It means for a busy writing server, tmpfs write()s may get livelocked! The "inadvertent" throttling can hardly bring help to any workload because of its "either no throttling, or get throttled to death" property. So based on 2.6.37, this patch won't bring more noticeable changes. CC: Hugh Dickins Acked-by: Rik van Riel Acked-by: Peter Zijlstra Reviewed-by: Minchan Kim Signed-off-by: Wu Fengguang --- mm/page-writeback.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index b8be62381396..b2529f8f8be0 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -244,13 +244,8 @@ void task_dirty_inc(struct task_struct *tsk) static void bdi_writeout_fraction(struct backing_dev_info *bdi, long *numerator, long *denominator) { - if (bdi_cap_writeback_dirty(bdi)) { - prop_fraction_percpu(&vm_completions, &bdi->completions, + prop_fraction_percpu(&vm_completions, &bdi->completions, numerator, denominator); - } else { - *numerator = 0; - *denominator = 1; - } } static inline void task_dirties_fraction(struct task_struct *tsk, @@ -495,6 +490,9 @@ static void balance_dirty_pages(struct address_space *mapping, bool dirty_exceeded = false; struct backing_dev_info *bdi = mapping->backing_dev_info; + if (!bdi_cap_account_dirty(bdi)) + return; + for (;;) { struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, -- cgit v1.2.3 From b7a2441f9966fe3e1be960a876ab52e6029ea005 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 21 Jul 2010 22:19:51 -0600 Subject: writeback: remove writeback_control.more_io When wbc.more_io was first introduced, it indicates whether there are at least one superblock whose s_more_io contains more IO work. Now with the per-bdi writeback, it can be replaced with a simple b_more_io test. 
Acked-by: Jan Kara Acked-by: Mel Gorman Reviewed-by: Minchan Kim Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 9 ++------- include/linux/writeback.h | 1 - include/trace/events/ext4.h | 6 ++---- include/trace/events/writeback.h | 5 +---- 4 files changed, 5 insertions(+), 16 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index d0553f33fb50..f43c479feee9 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -560,12 +560,8 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, iput(inode); cond_resched(); spin_lock(&wb->list_lock); - if (wbc->nr_to_write <= 0) { - wbc->more_io = 1; + if (wbc->nr_to_write <= 0) return 1; - } - if (!list_empty(&wb->b_more_io)) - wbc->more_io = 1; } /* b_io is empty */ return 1; @@ -708,7 +704,6 @@ static long wb_writeback(struct bdi_writeback *wb, wbc.older_than_this = &oldest_jif; } - wbc.more_io = 0; wbc.nr_to_write = write_chunk; wbc.pages_skipped = 0; wbc.inodes_written = 0; @@ -740,7 +735,7 @@ static long wb_writeback(struct bdi_writeback *wb, /* * No more inodes for IO, bail */ - if (!wbc.more_io) + if (list_empty(&wb->b_more_io)) break; /* * Nothing written. Wait for some inode to diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d8e96a480850..8797b20dd22b 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -46,7 +46,6 @@ struct writeback_control { unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */ unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned range_cyclic:1; /* range_start is cyclic */ - unsigned more_io:1; /* more io to be dispatched */ }; /* diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index e09592d2f916..b225d0d8c87f 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -404,7 +404,6 @@ TRACE_EVENT(ext4_da_writepages_result, __field( int, pages_written ) __field( long, pages_skipped ) __field( int, sync_mode ) - __field( char, more_io ) __field( pgoff_t, writeback_index ) ), @@ -415,16 +414,15 @@ TRACE_EVENT(ext4_da_writepages_result, __entry->pages_written = pages_written; __entry->pages_skipped = wbc->pages_skipped; __entry->sync_mode = wbc->sync_mode; - __entry->more_io = wbc->more_io; __entry->writeback_index = inode->i_mapping->writeback_index; ), TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld " - " more_io %d sync_mode %d writeback_index %lu", + "sync_mode %d writeback_index %lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->ret, __entry->pages_written, __entry->pages_skipped, - __entry->more_io, __entry->sync_mode, + __entry->sync_mode, (unsigned long) __entry->writeback_index) ); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 4e249b927eaa..b2cfac5f3313 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -101,7 +101,6 @@ DECLARE_EVENT_CLASS(wbc_class, __field(int, for_background) __field(int, for_reclaim) __field(int, range_cyclic) - __field(int, more_io) __field(unsigned long, older_than_this) __field(long, range_start) __field(long, range_end) @@ -116,7 +115,6 @@ DECLARE_EVENT_CLASS(wbc_class, __entry->for_background = wbc->for_background; __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; - __entry->more_io = wbc->more_io; __entry->older_than_this = wbc->older_than_this ? 
*wbc->older_than_this : 0; __entry->range_start = (long)wbc->range_start; @@ -124,7 +122,7 @@ DECLARE_EVENT_CLASS(wbc_class, ), TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " - "bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx " + "bgrd=%d reclm=%d cyclic=%d older=0x%lx " "start=0x%lx end=0x%lx", __entry->name, __entry->nr_to_write, @@ -134,7 +132,6 @@ DECLARE_EVENT_CLASS(wbc_class, __entry->for_background, __entry->for_reclaim, __entry->range_cyclic, - __entry->more_io, __entry->older_than_this, __entry->range_start, __entry->range_end) -- cgit v1.2.3 From 846d5a091b0506b75489577cde27f39b37a192a4 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Thu, 5 May 2011 21:10:38 -0600 Subject: writeback: remove .nonblocking and .encountered_congestion Remove two unused struct writeback_control fields: .encountered_congestion (completely unused) .nonblocking (never set, checked/showed in XFS,NFS/btrfs) The .for_background check in nfs_write_inode() is also removed btw, as .for_background implies WB_SYNC_NONE. Reviewed-by: Jan Kara Proposed-by: Christoph Hellwig Signed-off-by: Wu Fengguang --- fs/nfs/write.c | 3 +-- fs/xfs/linux-2.6/xfs_aops.c | 2 +- include/linux/writeback.h | 2 -- include/trace/events/btrfs.h | 6 ++---- 4 files changed, 4 insertions(+), 9 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e268e3b23497..dd6a6cee39a7 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1564,8 +1564,7 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) int status; bool sync = true; - if (wbc->sync_mode == WB_SYNC_NONE || wbc->nonblocking || - wbc->for_background) + if (wbc->sync_mode == WB_SYNC_NONE) sync = false; status = pnfs_layoutcommit_inode(inode, sync); diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 79ce38be15a1..7559861481aa 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -970,7 +970,7 @@ xfs_vm_writepage( offset = page_offset(page); type = IO_OVERWRITE; - if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking) + if (wbc->sync_mode == WB_SYNC_NONE) nonblocking = 1; do { diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 8797b20dd22b..2f1b512bd6e0 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -39,8 +39,6 @@ struct writeback_control { loff_t range_start; loff_t range_end; - unsigned nonblocking:1; /* Don't get stuck on request queues */ - unsigned encountered_congestion:1; /* An output: a queue is full */ unsigned for_kupdate:1; /* A kupdate writeback */ unsigned for_background:1; /* A background writeback */ unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */ diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 4114129f0794..b31702ac15be 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -284,7 +284,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage, __field( long, pages_skipped ) __field( loff_t, range_start ) __field( loff_t, range_end ) - __field( char, nonblocking ) __field( char, for_kupdate ) __field( char, for_reclaim ) __field( char, range_cyclic ) @@ -299,7 +298,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage, __entry->pages_skipped = wbc->pages_skipped; __entry->range_start = wbc->range_start; __entry->range_end = wbc->range_end; - __entry->nonblocking = wbc->nonblocking; __entry->for_kupdate = wbc->for_kupdate; __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; @@ -310,13 +308,13 @@ DECLARE_EVENT_CLASS(btrfs__writepage, TP_printk("root = 
%llu(%s), ino = %lu, page_index = %lu, " "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, " - "range_end = %llu, nonblocking = %d, for_kupdate = %d, " + "range_end = %llu, for_kupdate = %d, " "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu", show_root_type(__entry->root_objectid), (unsigned long)__entry->ino, __entry->index, __entry->nr_to_write, __entry->pages_skipped, __entry->range_start, __entry->range_end, - __entry->nonblocking, __entry->for_kupdate, + __entry->for_kupdate, __entry->for_reclaim, __entry->range_cyclic, (unsigned long)__entry->writeback_index) ); -- cgit v1.2.3 From 251d6a471c831e22880b3c146bb4556ddfb1dc82 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 1 Dec 2010 17:33:37 -0600 Subject: writeback: trace event writeback_single_inode It is valuable to know how the dirty inodes are iterated and their IO size. "writeback_single_inode: bdi 8:0: ino=134246746 state=I_DIRTY_SYNC|I_SYNC age=414 index=0 to_write=1024 wrote=0" - "state" reflects inode->i_state at the end of writeback_single_inode() - "index" reflects mapping->writeback_index after the ->writepages() call - "to_write" is the wbc->nr_to_write at entrance of writeback_single_inode() - "wrote" is the number of pages actually written v2: add trace event writeback_single_inode_requeue as proposed by Dave. CC: Dave Chinner Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 4 +++ include/trace/events/writeback.h | 70 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f43c479feee9..5185fad48b62 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -346,6 +346,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, struct writeback_control *wbc) { struct address_space *mapping = inode->i_mapping; + long nr_to_write = wbc->nr_to_write; unsigned dirty; int ret; @@ -368,6 +369,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, */ if (wbc->sync_mode != WB_SYNC_ALL) { requeue_io(inode, wb); + trace_writeback_single_inode_requeue(inode, wbc, + nr_to_write); return 0; } @@ -467,6 +470,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, } } inode_sync_complete(inode); + trace_writeback_single_inode(inode, wbc, nr_to_write); return ret; } diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index b2cfac5f3313..898277bc89b4 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -8,6 +8,19 @@ #include #include +#define show_inode_state(state) \ + __print_flags(state, "|", \ + {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \ + {I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \ + {I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \ + {I_NEW, "I_NEW"}, \ + {I_WILL_FREE, "I_WILL_FREE"}, \ + {I_FREEING, "I_FREEING"}, \ + {I_CLEAR, "I_CLEAR"}, \ + {I_SYNC, "I_SYNC"}, \ + {I_REFERENCED, "I_REFERENCED"} \ + ) + struct wb_writeback_work; DECLARE_EVENT_CLASS(writeback_work_class, @@ -184,6 +197,63 @@ DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested, TP_ARGS(usec_timeout, usec_delayed) ); +DECLARE_EVENT_CLASS(writeback_single_inode_template, + + TP_PROTO(struct inode *inode, + struct writeback_control *wbc, + unsigned long nr_to_write + ), + + TP_ARGS(inode, wbc, nr_to_write), + + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned long, ino) + __field(unsigned long, state) + __field(unsigned long, age) + __field(unsigned long, writeback_index) + __field(long, nr_to_write) + __field(unsigned long, wrote) 
+ ), + + TP_fast_assign( + strncpy(__entry->name, + dev_name(inode->i_mapping->backing_dev_info->dev), 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->age = (jiffies - inode->dirtied_when) * + 1000 / HZ; + __entry->writeback_index = inode->i_mapping->writeback_index; + __entry->nr_to_write = nr_to_write; + __entry->wrote = nr_to_write - wbc->nr_to_write; + ), + + TP_printk("bdi %s: ino=%lu state=%s age=%lu " + "index=%lu to_write=%ld wrote=%lu", + __entry->name, + __entry->ino, + show_inode_state(__entry->state), + __entry->age, + __entry->writeback_index, + __entry->nr_to_write, + __entry->wrote + ) +); + +DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue, + TP_PROTO(struct inode *inode, + struct writeback_control *wbc, + unsigned long nr_to_write), + TP_ARGS(inode, wbc, nr_to_write) +); + +DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, + TP_PROTO(struct inode *inode, + struct writeback_control *wbc, + unsigned long nr_to_write), + TP_ARGS(inode, wbc, nr_to_write) +); + #endif /* _TRACE_WRITEBACK_H */ /* This part must be outside protection */ -- cgit v1.2.3 From e84d0a4f8e39a73003a6ec9a11b07702745f4c1f Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sat, 23 Apr 2011 12:27:27 -0600 Subject: writeback: trace event writeback_queue_io Note that it adds a little overheads to account the moved/enqueued inodes from b_dirty to b_io. The "moved" accounting may be later used to limit the number of inodes that can be moved in one shot, in order to keep spinlock hold time under control. Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 14 ++++++++++---- include/trace/events/writeback.h | 25 +++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 5185fad48b62..6caa98247a5b 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -248,15 +248,16 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) /* * Move expired dirty inodes from @delaying_queue to @dispatch_queue. 
*/ -static void move_expired_inodes(struct list_head *delaying_queue, +static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, - unsigned long *older_than_this) + unsigned long *older_than_this) { LIST_HEAD(tmp); struct list_head *pos, *node; struct super_block *sb = NULL; struct inode *inode; int do_sb_sort = 0; + int moved = 0; while (!list_empty(delaying_queue)) { inode = wb_inode(delaying_queue->prev); @@ -267,12 +268,13 @@ static void move_expired_inodes(struct list_head *delaying_queue, do_sb_sort = 1; sb = inode->i_sb; list_move(&inode->i_wb_list, &tmp); + moved++; } /* just one sb in list, splice to dispatch_queue and we're done */ if (!do_sb_sort) { list_splice(&tmp, dispatch_queue); - return; + goto out; } /* Move inodes from one superblock together */ @@ -284,6 +286,8 @@ static void move_expired_inodes(struct list_head *delaying_queue, list_move(&inode->i_wb_list, dispatch_queue); } } +out: + return moved; } /* @@ -299,9 +303,11 @@ static void move_expired_inodes(struct list_head *delaying_queue, */ static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this) { + int moved; assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this); + trace_writeback_queue_io(wb, older_than_this, moved); } static int write_inode(struct inode *inode, struct writeback_control *wbc) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 898277bc89b4..205d14919ef2 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -162,6 +162,31 @@ DEFINE_WBC_EVENT(wbc_balance_dirty_written); DEFINE_WBC_EVENT(wbc_balance_dirty_wait); DEFINE_WBC_EVENT(wbc_writepage); +TRACE_EVENT(writeback_queue_io, + TP_PROTO(struct bdi_writeback *wb, + unsigned long *older_than_this, + int moved), + TP_ARGS(wb, older_than_this, moved), + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned long, older) + __field(long, age) + __field(int, moved) + ), + TP_fast_assign( + strncpy(__entry->name, dev_name(wb->bdi->dev), 32); + __entry->older = older_than_this ? *older_than_this : 0; + __entry->age = older_than_this ? + (jiffies - *older_than_this) * 1000 / HZ : -1; + __entry->moved = moved; + ), + TP_printk("bdi %s: older=%lu age=%ld enqueue=%d", + __entry->name, + __entry->older, /* older_than_this in jiffies */ + __entry->age, /* older_than_this in relative milliseconds */ + __entry->moved) +); + DECLARE_EVENT_CLASS(writeback_congest_waited_template, TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), -- cgit v1.2.3 From a01cdc10689f5d252530d14474528ea785ecfde4 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 8 Jun 2011 17:06:25 +0900 Subject: serial: sh-sci: Tidy up ioread/write wrappers, kill off unused SCI helper. 
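The tidied wrappers rely on C preprocessor token pasting: the numeric size argument is glued onto ioread/iowrite so the register width selects the MMIO accessor at compile time. A stand-alone illustration of the idiom (read8/read16 and REG_IN are made-up stand-ins, not the driver's actual accessors):

	#include <stdio.h>
	#include <stdint.h>

	/* Demo of the ##size token-pasting pattern used by SCI_IN()/SCI_OUT():
	 * REG_IN(8, ...) expands to read8(...), REG_IN(16, ...) to read16(...).
	 */
	static uint8_t  read8(const void *a)  { return *(const uint8_t *)a;  }
	static uint16_t read16(const void *a) { return *(const uint16_t *)a; }

	#define REG_IN(size, base, offset) \
		read##size((const char *)(base) + (offset))

	int main(void)
	{
		uint16_t regs[2] = { 0x1234, 0xabcd };

		printf("%#x\n", REG_IN(8,  regs, 0));	/* expands to read8()  */
		printf("%#x\n", REG_IN(16, regs, 2));	/* expands to read16() */
		return 0;
	}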
Signed-off-by: Paul Mundt --- drivers/tty/serial/sh-sci.h | 35 +++++++++-------------------------- 1 file changed, 9 insertions(+), 26 deletions(-) diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index 4dc249ecc59f..923ebd908e70 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -181,32 +181,25 @@ #define SCI_MAJOR 204 #define SCI_MINOR_START 8 -#define SCI_IN(size, offset) \ - if ((size) == 8) { \ - return ioread8(port->membase + (offset)); \ - } else { \ - return ioread16(port->membase + (offset)); \ - } -#define SCI_OUT(size, offset, value) \ - if ((size) == 8) { \ - iowrite8(value, port->membase + (offset)); \ - } else if ((size) == 16) { \ - iowrite16(value, port->membase + (offset)); \ - } +#define SCI_IN(size, offset) \ + ioread##size(port->membase + (offset)) + +#define SCI_OUT(size, offset, value) \ + iowrite##size(value, port->membase + (offset)) #define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\ static inline unsigned int sci_##name##_in(struct uart_port *port) \ { \ if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \ - SCI_IN(scif_size, scif_offset) \ + return SCI_IN(scif_size, scif_offset); \ } else { /* PORT_SCI or PORT_SCIFA */ \ - SCI_IN(sci_size, sci_offset); \ + return SCI_IN(sci_size, sci_offset); \ } \ } \ static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ { \ if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \ - SCI_OUT(scif_size, scif_offset, value) \ + SCI_OUT(scif_size, scif_offset, value); \ } else { /* PORT_SCI or PORT_SCIFA */ \ SCI_OUT(sci_size, sci_offset, value); \ } \ @@ -215,23 +208,13 @@ #define CPU_SCIF_FNS(name, scif_offset, scif_size) \ static inline unsigned int sci_##name##_in(struct uart_port *port) \ { \ - SCI_IN(scif_size, scif_offset); \ + return SCI_IN(scif_size, scif_offset); \ } \ static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ { \ SCI_OUT(scif_size, scif_offset, value); \ } -#define CPU_SCI_FNS(name, sci_offset, sci_size) \ - static inline unsigned int sci_##name##_in(struct uart_port* port) \ - { \ - SCI_IN(sci_size, sci_offset); \ - } \ - static inline void sci_##name##_out(struct uart_port* port, unsigned int value) \ - { \ - SCI_OUT(sci_size, sci_offset, value); \ - } - #if defined(CONFIG_CPU_SH3) || \ defined(CONFIG_ARCH_SH73A0) || \ defined(CONFIG_ARCH_SH7367) || \ -- cgit v1.2.3 From b03034016184b7e9fd19f2a24ffb131953fdcc41 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 8 Jun 2011 17:13:20 +0900 Subject: serial: sh-sci: Kill off some more unused definitions. 
Signed-off-by: Paul Mundt --- drivers/tty/serial/sh-sci.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index 923ebd908e70..ed1c09c0454a 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -9,8 +9,6 @@ # define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */ # define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */ #elif defined(CONFIG_CPU_SUBTYPE_SH7705) -# define SCIF0 0xA4400000 -# define SCIF2 0xA4410000 # define SCPCR 0xA4000116 # define SCPDR 0xA4000136 #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ @@ -48,8 +46,6 @@ #elif defined(CONFIG_CPU_SUBTYPE_SH7343) # define SCSPTR0 0xffe00010 /* 16 bit SCIF */ #elif defined(CONFIG_CPU_SUBTYPE_SH7722) -# define PADR 0xA4050120 -# define PSDR 0xA405013e # define PWDR 0xA4050166 # define PSCR 0xA405011E # define SCIF_ORER 0x0001 /* overrun error bit */ -- cgit v1.2.3 From debf9507166eede1e676d27d3298cdfb27399cb4 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 8 Jun 2011 18:19:37 +0900 Subject: serial: sh-sci: Generalize overrun handling. This consolidates all of the broken out overrun handling and ensures that we have sensible defaults per-port type, in addition to making sure that overruns are flagged appropriately in the error mask for parts that haven't explicitly disabled support for it. Signed-off-by: Paul Mundt --- drivers/tty/serial/sh-sci.c | 51 ++++++++++++++++++++++++++++++++++++++------ drivers/tty/serial/sh-sci.h | 52 ++------------------------------------------- include/linux/serial_sci.h | 30 ++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 56 deletions(-) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 280c02af0eae..bb27885ea2e5 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -563,13 +563,19 @@ static int sci_handle_errors(struct uart_port *port) int copied = 0; unsigned short status = sci_in(port, SCxSR); struct tty_struct *tty = port->state->port.tty; + struct sci_port *s = to_sci_port(port); - if (status & SCxSR_ORER(port)) { - /* overrun error */ - if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) - copied++; + /* + * Handle overruns, if supported. + */ + if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) { + if (status & (1 << s->cfg->overrun_bit)) { + /* overrun error */ + if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) + copied++; - dev_notice(port->dev, "overrun error"); + dev_notice(port->dev, "overrun error"); + } } if (status & SCxSR_FER(port)) { @@ -617,12 +623,19 @@ static int sci_handle_errors(struct uart_port *port) static int sci_handle_fifo_overrun(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; + struct sci_port *s = to_sci_port(port); int copied = 0; + /* + * XXX: Technically not limited to non-SCIFs, it's simply the + * SCLSR check that is for the moment SCIF-specific. This + * probably wants to be revisited for SCIFA/B as well as for + * factoring in SCI overrun detection. + */ if (port->type != PORT_SCIF) return 0; - if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) { + if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) { sci_out(port, SCLSR, 0); tty_insert_flip_char(tty, 0, TTY_OVERRUN); @@ -1755,6 +1768,32 @@ static int __devinit sci_init_single(struct platform_device *dev, sci_port->break_timer.function = sci_break_timer; init_timer(&sci_port->break_timer); + /* + * Establish some sensible defaults for the error detection. + */ + if (!p->error_mask) + p->error_mask = (p->type == PORT_SCI) ? 
+ SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK; + + /* + * Establish sensible defaults for the overrun detection, unless + * the part has explicitly disabled support for it. + */ + if (p->overrun_bit != SCIx_NOT_SUPPORTED) { + if (p->type == PORT_SCI) + p->overrun_bit = 5; + else if (p->scbrr_algo_id == SCBRR_ALGO_4) + p->overrun_bit = 9; + else + p->overrun_bit = 0; + + /* + * Make the error mask inclusive of overrun detection, if + * supported. + */ + p->error_mask |= (1 << p->overrun_bit); + } + sci_port->cfg = p; port->mapbase = p->mapbase; diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index ed1c09c0454a..caab353a98b5 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -19,11 +19,9 @@ defined(CONFIG_ARCH_SH7372) # define PORT_PTCR 0xA405011EUL # define PORT_PVCR 0xA4050122UL -# define SCIF_ORER 0x0200 /* overrun error bit */ #elif defined(CONFIG_SH_RTS7751R2D) # define SCSPTR1 0xFFE0001C /* 8 bit SCIF */ # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ @@ -32,15 +30,12 @@ defined(CONFIG_CPU_SUBTYPE_SH7751R) # define SCSPTR1 0xffe0001c /* 8 bit SCI */ # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7760) # define SCSPTR0 0xfe600024 /* 16 bit SCIF */ # define SCSPTR1 0xfe610024 /* 16 bit SCIF */ # define SCSPTR2 0xfe620024 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) # define SCSPTR0 0xA4400000 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ # define PACR 0xa4050100 # define PBCR 0xa4050102 #elif defined(CONFIG_CPU_SUBTYPE_SH7343) @@ -48,35 +43,24 @@ #elif defined(CONFIG_CPU_SUBTYPE_SH7722) # define PWDR 0xA4050166 # define PSCR 0xA405011E -# define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7366) # define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */ # define SCSPTR0 SCPDR0 -# define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7723) # define SCSPTR0 0xa4050160 -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7724) -# define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH4_202) # define SCSPTR2 0xffe80020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7757) # define SCSPTR0 0xfe4b0020 -# define SCIF_ORER 0x0001 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) # define SCSPTR0 0xffe00024 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7770) # define SCSPTR0 0xff923020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7780) # define SCSPTR0 0xffe00024 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* Overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ defined(CONFIG_CPU_SUBTYPE_SH7786) # define SCSPTR0 0xffea0024 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* Overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ defined(CONFIG_CPU_SUBTYPE_SH7203) || \ defined(CONFIG_CPU_SUBTYPE_SH7206) || \ @@ -84,36 +68,12 @@ # define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ #elif defined(CONFIG_CPU_SUBTYPE_SH7619) # define SCSPTR0 0xf8400020 /* 16 bit SCIF */ -# define 
SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SHX3) # define SCSPTR0 0xffc30020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* Overrun error bit */ #else # error CPU subtype not defined #endif -/* SCxSR SCI */ -#define SCI_TDRE 0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -#define SCI_RDRF 0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -#define SCI_ORER 0x20 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -#define SCI_FER 0x10 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -#define SCI_PER 0x08 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -#define SCI_TEND 0x04 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -/* SCI_MPB 0x02 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -/* SCI_MPBT 0x01 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ - -#define SCI_ERRORS ( SCI_PER | SCI_FER | SCI_ORER) - -/* SCxSR SCIF */ -#define SCIF_ER 0x0080 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_TEND 0x0040 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_TDFE 0x0020 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_BRK 0x0010 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_FER 0x0008 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_PER 0x0004 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_RDF 0x0002 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_DR 0x0001 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ - #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) || \ @@ -121,35 +81,27 @@ defined(CONFIG_ARCH_SH7367) || \ defined(CONFIG_ARCH_SH7377) || \ defined(CONFIG_ARCH_SH7372) -# define SCIF_ORER 0x0200 -# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER) # define SCIF_RFDC_MASK 0x007f # define SCIF_TXROOM_MAX 64 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) -# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK ) # define SCIF_RFDC_MASK 0x007f # define SCIF_TXROOM_MAX 64 /* SH7763 SCIF2 support */ # define SCIF2_RFDC_MASK 0x001f # define SCIF2_TXROOM_MAX 16 #else -# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK) # define SCIF_RFDC_MASK 0x001f # define SCIF_TXROOM_MAX 16 #endif -#ifndef SCIF_ORER -#define SCIF_ORER 0x0000 -#endif - #define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) -#define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS) #define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF) #define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE) #define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER) #define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER) #define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK) -#define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? 
SCI_ORER : SCIF_ORER) + +#define SCxSR_ERRORS(port) (to_sci_port(port)->cfg->error_mask) #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ defined(CONFIG_CPU_SUBTYPE_SH7720) || \ diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index a2afc9fbe186..5fac3bccfd87 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -8,6 +8,8 @@ * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) */ +#define SCIx_NOT_SUPPORTED (-1) + enum { SCBRR_ALGO_1, /* ((clk + 16 * bps) / (16 * bps) - 1) */ SCBRR_ALGO_2, /* ((clk + 16 * bps) / (32 * bps) - 1) */ @@ -25,6 +27,28 @@ enum { #define SCSCR_CKE1 (1 << 1) #define SCSCR_CKE0 (1 << 0) +/* SCxSR SCI */ +#define SCI_TDRE 0x80 +#define SCI_RDRF 0x40 +#define SCI_ORER 0x20 +#define SCI_FER 0x10 +#define SCI_PER 0x08 +#define SCI_TEND 0x04 + +#define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER) + +/* SCxSR SCIF */ +#define SCIF_ER 0x0080 +#define SCIF_TEND 0x0040 +#define SCIF_TDFE 0x0020 +#define SCIF_BRK 0x0010 +#define SCIF_FER 0x0008 +#define SCIF_PER 0x0004 +#define SCIF_RDF 0x0002 +#define SCIF_DR 0x0001 + +#define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK) + /* Offsets into the sci_port->irqs array */ enum { SCIx_ERI_IRQ, @@ -56,6 +80,12 @@ struct plat_sci_port { unsigned int scbrr_algo_id; /* SCBRR calculation algo */ unsigned int scscr; /* SCSCR initialization */ + /* + * Platform overrides if necessary, defaults otherwise. + */ + int overrun_bit; + unsigned int error_mask; + struct device *dma_dev; unsigned int dma_slave_tx; -- cgit v1.2.3 From 514820eb982eb85677ed2ecef9710e90e24fbdab Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 8 Jun 2011 18:51:32 +0900 Subject: serial: sh-sci: Consolidate RXD pin handling. Non-SCI parts do not have the special port reg necessary for cases where the RX and SCI pins are muxed and need to be manually polled, so these like always fall back on the normal FIFO processing paths. SH7760 is in a class in and of itself with regards to mapping its SIM card interface via the SCI port class despite not having any of the RXD lines wired up and so implicitly behaving more like a SCIF in this regard. Out of the other CPUs, some support the port check via the same block while others do it through an external SuperI/O, so it's not even possible to perform the check relative to the ioremapped cookie offset, so the separate read semantics are preserved here, too. 
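As a usage sketch, a platform whose RXD line is muxed with a readable pin-data register now describes that register through platform data; the structure below is hypothetical, but its values follow the setup-sh770x.c change in this patch:

	/* Hypothetical board file: point the driver at the pin-data register
	 * it should poll for the RXD state.  Ports without such a register
	 * leave port_reg at its default of zero, and sci_rxd_in() then falls
	 * back to reporting the line as high.
	 */
	static struct plat_sci_port example_sci_platform_data = {
		.mapbase	= 0xfffffe80,
		.port_reg	= 0xa4000136,	/* SCPDR, polled by sci_rxd_in() */
		.flags		= UPF_BOOT_AUTOCONF,
		.scscr		= SCSCR_TE | SCSCR_RE,
		.scbrr_algo_id	= SCBRR_ALGO_2,
	};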
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/setup-sh770x.c | 1 + arch/sh/kernel/cpu/sh4/setup-sh7750.c | 1 + drivers/tty/serial/sh-sci.c | 13 +++++++++++++ drivers/tty/serial/sh-sci.h | 29 ----------------------------- include/linux/serial_sci.h | 2 ++ 5 files changed, 17 insertions(+), 29 deletions(-) diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 4551ad647c2c..6d549792f791 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -108,6 +108,7 @@ static struct platform_device rtc_device = { static struct plat_sci_port scif0_platform_data = { .mapbase = 0xfffffe80, + .port_reg = 0xa4000136, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_TE | SCSCR_RE, .scbrr_algo_id = SCBRR_ALGO_2, diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index e53b4b38bd11..8ea26e791187 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -38,6 +38,7 @@ static struct platform_device rtc_device = { static struct plat_sci_port sci_platform_data = { .mapbase = 0xffe00000, + .port_reg = 0xffe0001C .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_TE | SCSCR_RE, .scbrr_algo_id = SCBRR_ALGO_2, diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index bb27885ea2e5..3248ddaa889d 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -363,6 +363,19 @@ static int sci_rxfill(struct uart_port *port) return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; } +/* + * SCI helper for checking the state of the muxed port/RXD pins. + */ +static inline int sci_rxd_in(struct uart_port *port) +{ + struct sci_port *s = to_sci_port(port); + + if (s->cfg->port_reg <= 0) + return 1; + + return !!__raw_readb(s->cfg->port_reg); +} + /* ********************************************************************** * * the interrupt related routines * * ********************************************************************** */ diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index caab353a98b5..1c20f7f9ba4f 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -287,32 +287,3 @@ SCIF_FNS(SCLSR, 0, 0, 0x24, 16) #endif #define sci_in(port, reg) sci_##reg##_in(port) #define sci_out(port, reg, value) sci_##reg##_out(port, value) - -#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ - defined(CONFIG_CPU_SUBTYPE_SH7707) || \ - defined(CONFIG_CPU_SUBTYPE_SH7708) || \ - defined(CONFIG_CPU_SUBTYPE_SH7709) -static inline int sci_rxd_in(struct uart_port *port) -{ - if (port->mapbase == 0xfffffe80) - return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */ - return 1; -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ - defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ - defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ - defined(CONFIG_CPU_SUBTYPE_SH7091) -static inline int sci_rxd_in(struct uart_port *port) -{ - if (port->mapbase == 0xffe00000) - return __raw_readb(SCSPTR1)&0x01 ? 
1 : 0; /* SCI */ - return 1; -} -#else /* default case for non-SCI processors */ -static inline int sci_rxd_in(struct uart_port *port) -{ - return 1; -} -#endif diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 5fac3bccfd87..ecefec7c0b67 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -86,6 +86,8 @@ struct plat_sci_port { int overrun_bit; unsigned int error_mask; + int port_reg; + struct device *dma_dev; unsigned int dma_slave_tx; -- cgit v1.2.3 From e13198894bf6308c097e5678ee315e12b2e1b7a8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 8 Jun 2011 19:13:06 +0900 Subject: serial: sh-sci: More unused define purging. Signed-off-by: Paul Mundt --- drivers/tty/serial/sh-sci.h | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index 1c20f7f9ba4f..5834f33d20ff 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -2,15 +2,13 @@ #include #include -#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ +#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ + defined(CONFIG_CPU_SUBTYPE_SH7706) || \ defined(CONFIG_CPU_SUBTYPE_SH7707) || \ defined(CONFIG_CPU_SUBTYPE_SH7708) || \ defined(CONFIG_CPU_SUBTYPE_SH7709) # define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */ # define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7705) -# define SCPCR 0xA4000116 -# define SCPDR 0xA4000136 #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) || \ defined(CONFIG_ARCH_SH73A0) || \ @@ -19,20 +17,16 @@ defined(CONFIG_ARCH_SH7372) # define PORT_PTCR 0xA405011EUL # define PORT_PVCR 0xA4050122UL -#elif defined(CONFIG_SH_RTS7751R2D) -# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */ -# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ defined(CONFIG_CPU_SUBTYPE_SH7091) || \ defined(CONFIG_CPU_SUBTYPE_SH7751) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751R) -# define SCSPTR1 0xffe0001c /* 8 bit SCI */ + defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ + defined(CONFIG_CPU_SUBTYPE_SH4_202) # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ #elif defined(CONFIG_CPU_SUBTYPE_SH7760) # define SCSPTR0 0xfe600024 /* 16 bit SCIF */ -# define SCSPTR1 0xfe610024 /* 16 bit SCIF */ # define SCSPTR2 0xfe620024 /* 16 bit SCIF */ #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) # define SCSPTR0 0xA4400000 /* 16 bit SCIF */ @@ -48,16 +42,13 @@ # define SCSPTR0 SCPDR0 #elif defined(CONFIG_CPU_SUBTYPE_SH7723) # define SCSPTR0 0xa4050160 -#elif defined(CONFIG_CPU_SUBTYPE_SH4_202) -# define SCSPTR2 0xffe80020 /* 16 bit SCIF */ #elif defined(CONFIG_CPU_SUBTYPE_SH7757) # define SCSPTR0 0xfe4b0020 -#elif defined(CONFIG_CPU_SUBTYPE_SH7763) +#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) # define SCSPTR0 0xffe00024 /* 16 bit SCIF */ #elif defined(CONFIG_CPU_SUBTYPE_SH7770) # define SCSPTR0 0xff923020 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7780) -# define SCSPTR0 0xffe00024 /* 16 bit SCIF */ #elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ defined(CONFIG_CPU_SUBTYPE_SH7786) # define SCSPTR0 0xffea0024 /* 16 bit SCIF */ -- cgit v1.2.3 From c08957a2cf3c4a14e68d72c845d3c52cf3d826e1 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 7 Jun 2011 23:36:18 +0100 Subject: regulator: Properly register dummy regulator driver Recent changes in the driver core appear to 
mean that the data structures for the driver core are not fully initialised unless the driver is bound. Make sure the driver core knows the dummy driver is in use by binding it to a driver. Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/dummy.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c index c7410bde7b5d..f6ef6694ab98 100644 --- a/drivers/regulator/dummy.c +++ b/drivers/regulator/dummy.c @@ -36,6 +36,29 @@ static struct regulator_desc dummy_desc = { .ops = &dummy_ops, }; +static int __devinit dummy_regulator_probe(struct platform_device *pdev) +{ + int ret; + + dummy_regulator_rdev = regulator_register(&dummy_desc, NULL, + &dummy_initdata, NULL); + if (IS_ERR(dummy_regulator_rdev)) { + ret = PTR_ERR(dummy_regulator_rdev); + pr_err("Failed to register regulator: %d\n", ret); + return ret; + } + + return 0; +} + +static struct platform_driver dummy_regulator_driver = { + .probe = dummy_regulator_probe, + .driver = { + .name = "reg-dummy", + .owner = THIS_MODULE, + }, +}; + static struct platform_device *dummy_pdev; void __init regulator_dummy_init(void) @@ -55,12 +78,9 @@ void __init regulator_dummy_init(void) return; } - dummy_regulator_rdev = regulator_register(&dummy_desc, NULL, - &dummy_initdata, NULL); - if (IS_ERR(dummy_regulator_rdev)) { - ret = PTR_ERR(dummy_regulator_rdev); - pr_err("Failed to register regulator: %d\n", ret); + ret = platform_driver_register(&dummy_regulator_driver); + if (ret != 0) { + pr_err("Failed to register dummy regulator driver: %d\n", ret); platform_device_unregister(dummy_pdev); - return; } } -- cgit v1.2.3 -- cgit v1.2.3 From 2ce9738bac1b386f46e8478fd2c263460e7c2b09 Mon Sep 17 00:00:00 2001 From: "eparis@redhat" Date: Thu, 2 Jun 2011 21:20:51 +1000 Subject: cgroupfs: use init_cred when populating new cgroupfs mount We recently found that in some configurations SELinux was blocking the ability for cgroupfs to be mounted. The reason for this is because cgroupfs creates files and directories during the get_sb() call and also uses lookup_one_len() during that same get_sb() call. This is a problem since the security subsystem cannot initialize the superblock and the inodes in that filesystem until after the get_sb() call returns. Thus we leave the inodes in an unitialized state during get_sb(). For the vast majority of filesystems this is not an issue, but since cgroupfs uses lookup_on_len() it does search permission checks on the directories in the path it walks. Since the inode security state is not set up SELinux does these checks as if the inodes were 'unlabeled.' Many 'normal' userspace process do not have permission to interact with unlabeled inodes. The solution presented here is to do the permission checks of path walk and inode creation as the kernel rather than as the task that called mount. Since the kernel has permission to read/write/create unlabeled inodes the get_sb() call will complete successfully and the SELinux code will be able to initialize the superblock and those inodes created during the get_sb() call. This appears to be the same solution used by other filesystems such as devtmpfs to solve the same issue and should thus have no negative impact on other LSMs which currently work. 
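In other words, the fix boils down to the standard override_creds()/revert_creds() pairing around the directory population. A condensed sketch of that pattern, assuming <linux/cred.h> for the credential helpers (the full hunk follows below):

	#include <linux/cred.h>	/* override_creds(), revert_creds(), init_cred */

	const struct cred *cred;

	/* Run the lookups and inode creation as the kernel, not the mounting task. */
	cred = override_creds(&init_cred);
	cgroup_populate_dir(root_cgrp);
	/* Drop back to the credentials of the task that called mount(2). */
	revert_creds(cred);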
Signed-off-by: Eric Paris Acked-by: Paul Menage Signed-off-by: James Morris --- kernel/cgroup.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2731d115d725..81a867851fee 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -27,9 +27,11 @@ */ #include +#include #include #include #include +#include #include #include #include @@ -1514,6 +1516,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, struct cgroup *root_cgrp = &root->top_cgroup; struct inode *inode; struct cgroupfs_root *existing_root; + const struct cred *cred; int i; BUG_ON(sb->s_root != NULL); @@ -1593,7 +1596,9 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, BUG_ON(!list_empty(&root_cgrp->children)); BUG_ON(root->number_of_cgroups != 1); + cred = override_creds(&init_cred); cgroup_populate_dir(root_cgrp); + revert_creds(cred); mutex_unlock(&cgroup_mutex); mutex_unlock(&inode->i_mutex); } else { -- cgit v1.2.3 From 284d952968d60cca156ef0c5efa62592b72264cb Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Mon, 6 Jun 2011 17:12:49 -0700 Subject: drm/i915: Call intel_enable_plane from i9xx_crtc_mode_set (again) This change got placed in the ironlake path instead of the 9xx path during a recent code shuffle. Signed-off-by: Keith Packard --- drivers/gpu/drm/i915/intel_display.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 81a9059b6a94..aa43e7be6053 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4687,6 +4687,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(DSPCNTR(plane), dspcntr); POSTING_READ(DSPCNTR(plane)); + intel_enable_plane(dev_priv, plane, pipe); ret = intel_pipe_set_base(crtc, x, y, old_fb); @@ -5217,8 +5218,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(DSPCNTR(plane), dspcntr); POSTING_READ(DSPCNTR(plane)); - if (!HAS_PCH_SPLIT(dev)) - intel_enable_plane(dev_priv, plane, pipe); ret = intel_pipe_set_base(crtc, x, y, old_fb); -- cgit v1.2.3 From f5726ae33c382366ea1b23240d5620dcf675d81d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 9 Jun 2011 16:22:20 +0100 Subject: regulator: Increase the limit on sysfs file names With verbose filenames we can easily hit 32 characters. Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7b38af90a012..75312bd6aac4 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1033,7 +1033,7 @@ static void unset_regulator_supplies(struct regulator_dev *rdev) } } -#define REG_STR_SIZE 32 +#define REG_STR_SIZE 64 static struct regulator *create_regulator(struct regulator_dev *rdev, struct device *dev, -- cgit v1.2.3 From e0eaedefda8e14ed3f445f382c568c5d69e4223f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 9 Jun 2011 16:22:21 +0100 Subject: regulator: Include the device name in the microamps_requested_ file We may have multiple devices requesting a supply with the same name so include the device name in the generated filename for microamps_requested to avoid duplicate files. 
Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 75312bd6aac4..e3b67ee48b23 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1053,8 +1053,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, if (dev) { /* create a 'requested_microamps_name' sysfs entry */ - size = scnprintf(buf, REG_STR_SIZE, "microamps_requested_%s", - supply_name); + size = scnprintf(buf, REG_STR_SIZE, + "microamps_requested_%s-%s", + dev_name(dev), supply_name); if (size >= REG_STR_SIZE) goto overflow_err; -- cgit v1.2.3 From 3801b86aa482d26a8ae460f67fca29e016491a86 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 9 Jun 2011 16:22:22 +0100 Subject: regulator: Refactor supply implementation to work as regular consumers Currently the regulator supply implementation is somewhat complex and fragile as it doesn't look like standard consumers but is instead a parallel implementation. This causes issues with locking and reference counting. Move the implementation over to using standard consumers to address this. Rather than only notifying the supply on the first enable/disable we do so every time the regulator is enabled or disabled, simplifying locking as we don't need to hold a lock on the consumer we are about to enable. Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/core.c | 101 ++++++++++++++------------------------- include/linux/regulator/driver.h | 4 +- 2 files changed, 37 insertions(+), 68 deletions(-) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index e3b67ee48b23..f0cc3983ffee 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -82,8 +82,7 @@ struct regulator { }; static int _regulator_is_enabled(struct regulator_dev *rdev); -static int _regulator_disable(struct regulator_dev *rdev, - struct regulator_dev **supply_rdev_ptr); +static int _regulator_disable(struct regulator_dev *rdev); static int _regulator_get_voltage(struct regulator_dev *rdev); static int _regulator_get_current_limit(struct regulator_dev *rdev); static unsigned int _regulator_get_mode(struct regulator_dev *rdev); @@ -91,6 +90,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); static int _regulator_do_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV); +static struct regulator *create_regulator(struct regulator_dev *rdev, + struct device *dev, + const char *supply_name); static const char *rdev_get_name(struct regulator_dev *rdev) { @@ -930,21 +932,20 @@ out: * core if it's child is enabled. 
*/ static int set_supply(struct regulator_dev *rdev, - struct regulator_dev *supply_rdev) + struct regulator_dev *supply_rdev) { int err; - err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj, - "supply"); - if (err) { - rdev_err(rdev, "could not add device link %s err %d\n", - supply_rdev->dev.kobj.name, err); - goto out; + rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); + + rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); + if (IS_ERR(rdev->supply)) { + err = PTR_ERR(rdev->supply); + rdev->supply = NULL; + return err; } - rdev->supply = supply_rdev; - list_add(&rdev->slist, &supply_rdev->supply_list); -out: - return err; + + return 0; } /** @@ -1303,19 +1304,6 @@ static int _regulator_enable(struct regulator_dev *rdev) { int ret, delay; - if (rdev->use_count == 0) { - /* do we need to enable the supply regulator first */ - if (rdev->supply) { - mutex_lock(&rdev->supply->mutex); - ret = _regulator_enable(rdev->supply); - mutex_unlock(&rdev->supply->mutex); - if (ret < 0) { - rdev_err(rdev, "failed to enable: %d\n", ret); - return ret; - } - } - } - /* check voltage and requested load before enabling */ if (rdev->constraints && (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) @@ -1390,19 +1378,27 @@ int regulator_enable(struct regulator *regulator) struct regulator_dev *rdev = regulator->rdev; int ret = 0; + if (rdev->supply) { + ret = regulator_enable(rdev->supply); + if (ret != 0) + return ret; + } + mutex_lock(&rdev->mutex); ret = _regulator_enable(rdev); mutex_unlock(&rdev->mutex); + + if (ret != 0) + regulator_disable(rdev->supply); + return ret; } EXPORT_SYMBOL_GPL(regulator_enable); /* locks held by regulator_disable() */ -static int _regulator_disable(struct regulator_dev *rdev, - struct regulator_dev **supply_rdev_ptr) +static int _regulator_disable(struct regulator_dev *rdev) { int ret = 0; - *supply_rdev_ptr = NULL; if (WARN(rdev->use_count <= 0, "unbalanced disables for %s\n", rdev_get_name(rdev))) @@ -1429,9 +1425,6 @@ static int _regulator_disable(struct regulator_dev *rdev, NULL); } - /* decrease our supplies ref count and disable if required */ - *supply_rdev_ptr = rdev->supply; - rdev->use_count = 0; } else if (rdev->use_count > 1) { @@ -1442,6 +1435,7 @@ static int _regulator_disable(struct regulator_dev *rdev, rdev->use_count--; } + return ret; } @@ -1460,29 +1454,21 @@ static int _regulator_disable(struct regulator_dev *rdev, int regulator_disable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; - struct regulator_dev *supply_rdev = NULL; int ret = 0; mutex_lock(&rdev->mutex); - ret = _regulator_disable(rdev, &supply_rdev); + ret = _regulator_disable(rdev); mutex_unlock(&rdev->mutex); - /* decrease our supplies ref count and disable if required */ - while (supply_rdev != NULL) { - rdev = supply_rdev; - - mutex_lock(&rdev->mutex); - _regulator_disable(rdev, &supply_rdev); - mutex_unlock(&rdev->mutex); - } + if (ret == 0 && rdev->supply) + regulator_disable(rdev->supply); return ret; } EXPORT_SYMBOL_GPL(regulator_disable); /* locks held by regulator_force_disable() */ -static int _regulator_force_disable(struct regulator_dev *rdev, - struct regulator_dev **supply_rdev_ptr) +static int _regulator_force_disable(struct regulator_dev *rdev) { int ret = 0; @@ -1499,10 +1485,6 @@ static int _regulator_force_disable(struct regulator_dev *rdev, REGULATOR_EVENT_DISABLE, NULL); } - /* decrease our supplies ref count and disable if required */ - *supply_rdev_ptr = rdev->supply; - - 
rdev->use_count = 0; return ret; } @@ -1518,16 +1500,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev, int regulator_force_disable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; - struct regulator_dev *supply_rdev = NULL; int ret; mutex_lock(&rdev->mutex); regulator->uA_load = 0; - ret = _regulator_force_disable(rdev, &supply_rdev); + ret = _regulator_force_disable(regulator->rdev); mutex_unlock(&rdev->mutex); - if (supply_rdev) - regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev))); + if (rdev->supply) + while (rdev->open_count--) + regulator_disable(rdev->supply); return ret; } @@ -2138,7 +2120,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) /* get input voltage */ input_uV = 0; if (rdev->supply) - input_uV = _regulator_get_voltage(rdev->supply); + input_uV = regulator_get_voltage(rdev->supply); if (input_uV <= 0) input_uV = rdev->constraints->input_uV; if (input_uV <= 0) { @@ -2208,17 +2190,8 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier); static void _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { - struct regulator_dev *_rdev; - /* call rdev chain first */ blocking_notifier_call_chain(&rdev->notifier, event, NULL); - - /* now notify regulator we supply */ - list_for_each_entry(_rdev, &rdev->supply_list, slist) { - mutex_lock(&_rdev->mutex); - _notifier_call_chain(_rdev, event, data); - mutex_unlock(&_rdev->mutex); - } } /** @@ -2610,9 +2583,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, rdev->owner = regulator_desc->owner; rdev->desc = regulator_desc; INIT_LIST_HEAD(&rdev->consumer_list); - INIT_LIST_HEAD(&rdev->supply_list); INIT_LIST_HEAD(&rdev->list); - INIT_LIST_HEAD(&rdev->slist); BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier); /* preform any regulator specific init */ @@ -2724,7 +2695,7 @@ void regulator_unregister(struct regulator_dev *rdev) unset_regulator_supplies(rdev); list_del(&rdev->list); if (rdev->supply) - sysfs_remove_link(&rdev->dev.kobj, "supply"); + regulator_put(rdev->supply); device_unregister(&rdev->dev); kfree(rdev->constraints); mutex_unlock(®ulator_list_mutex); diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 6c433b89c80d..1a80bc77517d 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -188,18 +188,16 @@ struct regulator_dev { /* lists we belong to */ struct list_head list; /* list of all regulators */ - struct list_head slist; /* list of supplied regulators */ /* lists we own */ struct list_head consumer_list; /* consumers we supply */ - struct list_head supply_list; /* regulators we supply */ struct blocking_notifier_head notifier; struct mutex mutex; /* consumer lock */ struct module *owner; struct device dev; struct regulation_constraints *constraints; - struct regulator_dev *supply; /* for tree */ + struct regulator *supply; /* for tree */ void *reg_data; /* regulator_dev data */ -- cgit v1.2.3 From 7d51a0dbe51282f3ed13cadf6e7f13a974374be2 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 9 Jun 2011 16:06:37 +0100 Subject: regulator: Add rdev_crit() macro No actual users but provide the macro so there's less surprise when it's not there. 
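A hypothetical caller would use it exactly like the existing rdev_err()/rdev_warn() helpers, with the regulator name prefixed automatically:

	/* logs "<regulator name>: cannot recover after failed disable: -5" at KERN_CRIT */
	rdev_crit(rdev, "cannot recover after failed disable: %d\n", ret);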
Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index f0cc3983ffee..cc3dfd66f395 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -34,6 +34,8 @@ #include "dummy.h" +#define rdev_crit(rdev, fmt, ...) \ + pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_err(rdev, fmt, ...) \ pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_warn(rdev, fmt, ...) \ -- cgit v1.2.3 From a8198eea156df47e0e843ac5c7d4c8774e121c42 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 13 Apr 2011 22:04:09 +0100 Subject: drm/i915: Introduce i915_gem_object_finish_gpu() ... reincarnated from i915_gem_object_flush_gpu(). The semantic difference is that after calling finish_gpu() the object no longer resides in any GPU domain, and so will cause the GPU caches to be invalidated if it is ever used again. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 29 +++++++++++++++++++---------- drivers/gpu/drm/i915/intel_display.c | 2 +- 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f63ee162f124..4d1a8ae70a33 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1190,7 +1190,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, uint32_t read_domains, uint32_t write_domain); -int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj); +int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); void i915_gem_do_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 12d32579b951..6291dcdf5d40 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2165,23 +2165,29 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) return -EINVAL; } + ret = i915_gem_object_finish_gpu(obj); + if (ret == -ERESTARTSYS) + return ret; + /* Continue on if we fail due to EIO, the GPU is hung so we + * should be safe and we need to cleanup or else we might + * cause memory corruption through use-after-free. + */ + /* blow away mappings if mapped through GTT */ i915_gem_release_mmap(obj); /* Move the object to the CPU domain to ensure that * any possible CPU writes while it's not in the GTT - * are flushed when we go to remap it. This will - * also ensure that all pending GPU writes are finished - * before we unbind. + * are flushed when we go to remap it. */ - ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret == 0) + ret = i915_gem_object_set_to_cpu_domain(obj, 1); if (ret == -ERESTARTSYS) return ret; - /* Continue on if we fail due to EIO, the GPU is hung so we - * should be safe and we need to cleanup or else we might - * cause memory corruption through use-after-free. - */ if (ret) { + /* In the event of a disaster, abandon all caches and + * hope for the best. 
+ */ i915_gem_clflush_object(obj); obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } @@ -3045,11 +3051,11 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, } int -i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj) +i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) { int ret; - if (!obj->active) + if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) return 0; if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { @@ -3058,6 +3064,9 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj) return ret; } + /* Ensure that we invalidate the GPU's caches and TLBs. */ + obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; + return i915_gem_object_wait_rendering(obj); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index aa43e7be6053..e32fb89b3165 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1971,7 +1971,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, * This should only fail upon a hung GPU, in which case we * can safely continue. */ - ret = i915_gem_object_flush_gpu(obj); + ret = i915_gem_object_finish_gpu(obj); (void) ret; } -- cgit v1.2.3 From b5ffc9bc38a4766d586c3aca6830ed2bd6952e5b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 13 Apr 2011 22:06:03 +0100 Subject: drm/i915: Introduce i915_gem_object_finish_gtt() Like its siblings finish_gpu(), this function clears the object from the GTT domain forcing it to be trigger a domain invalidation should we ever need to use via the GTT again. Note that the most important side-effect of finishing the GTT domain (aside from clearing the tracking read/write domains) is that it imposes an memory barrier so that all accesses are complete before it returns, which is important if you intend to be modifying translation tables shortly afterwards. The second most important side-effect is that it tears down the GTT mappings forcing a page-fault and invalidation on next user access to the object. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6291dcdf5d40..e78a7ef634d9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2149,6 +2149,30 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) return 0; } +static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) +{ + u32 old_write_domain, old_read_domains; + + if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) + return; + + /* Act a barrier for all accesses through the GTT */ + mb(); + + /* Force a pagefault for domain tracking on next user access */ + i915_gem_release_mmap(obj); + + old_read_domains = obj->base.read_domains; + old_write_domain = obj->base.write_domain; + + obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; + obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; + + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); +} + /** * Unbinds an object from the GTT aperture. */ @@ -2173,8 +2197,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) * cause memory corruption through use-after-free. 
*/ - /* blow away mappings if mapped through GTT */ - i915_gem_release_mmap(obj); + i915_gem_object_finish_gtt(obj); /* Move the object to the CPU domain to ensure that * any possible CPU writes while it's not in the GTT -- cgit v1.2.3 From d5bd144959e639443f387c34989cec7c9efff091 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Apr 2011 06:48:26 +0100 Subject: drm/i915/gtt: Split out i915_gem_gtt_rebind_object() ... in preparation for changing the cache level (and thus the flags upon the PTEs) dynamically. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 42 ++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e46b645773cf..837033cf2ce5 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -29,6 +29,9 @@ #include "i915_trace.h" #include "intel_drv.h" +static void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); + /* XXX kill agp_type! */ static unsigned int cache_level_to_agp_type(struct drm_device *dev, enum i915_cache_level cache_level) @@ -59,24 +62,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { - unsigned int agp_type = - cache_level_to_agp_type(dev, obj->cache_level); - i915_gem_clflush_object(obj); - - if (dev_priv->mm.gtt->needs_dmar) { - BUG_ON(!obj->sg_list); - - intel_gtt_insert_sg_entries(obj->sg_list, - obj->num_sg, - obj->gtt_space->start >> PAGE_SHIFT, - agp_type); - } else - intel_gtt_insert_pages(obj->gtt_space->start - >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT, - obj->pages, - agp_type); + i915_gem_gtt_rebind_object(obj, obj->cache_level); } intel_gtt_chipset_flush(); @@ -110,6 +97,27 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) return 0; } +static void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); + + if (dev_priv->mm.gtt->needs_dmar) { + BUG_ON(!obj->sg_list); + + intel_gtt_insert_sg_entries(obj->sg_list, + obj->num_sg, + obj->gtt_space->start >> PAGE_SHIFT, + agp_type); + } else + intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT, + obj->pages, + agp_type); +} + void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) { intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, -- cgit v1.2.3 From e4ffd173a1c2f96b43127c2537dd99d89e759bba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 4 Apr 2011 09:44:39 +0100 Subject: drm/i915: Add an interface to dynamically change the cache level [anholt v2: Don't forget that when going from cached to uncached, we haven't been tracking the write domain from the CPU perspective, since we haven't needed it for GPU coherency.] [ickle v3: We also need to make sure we relinquish any fences on older chipsets and clear the GTT for sane domain tracking.] 
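A minimal usage sketch of the new interface (error handling added here for illustration; some in-tree callers simply ignore the return value):

	/* request LLC-cached PTEs; fails with -EBUSY if the object is pinned */
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		return ret;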
Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 5 +++ drivers/gpu/drm/i915/i915_gem.c | 60 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_gem_gtt.c | 7 ++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 6 ++-- 4 files changed, 71 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4d1a8ae70a33..e552aa6bc859 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1223,9 +1223,14 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file); uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj); +int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); + /* i915_gem_gtt.c */ void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); /* i915_gem_evict.c */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e78a7ef634d9..e6915072ba72 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3034,6 +3034,66 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) return 0; } +int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level) +{ + int ret; + + if (obj->cache_level == cache_level) + return 0; + + if (obj->pin_count) { + DRM_DEBUG("can not change the cache level of pinned objects\n"); + return -EBUSY; + } + + if (obj->gtt_space) { + ret = i915_gem_object_finish_gpu(obj); + if (ret) + return ret; + + i915_gem_object_finish_gtt(obj); + + /* Before SandyBridge, you could not use tiling or fence + * registers with snooped memory, so relinquish any fences + * currently pointing to our region in the aperture. + */ + if (INTEL_INFO(obj->base.dev)->gen < 6) { + ret = i915_gem_object_put_fence(obj); + if (ret) + return ret; + } + + i915_gem_gtt_rebind_object(obj, cache_level); + } + + if (cache_level == I915_CACHE_NONE) { + u32 old_read_domains, old_write_domain; + + /* If we're coming from LLC cached, then we haven't + * actually been tracking whether the data is in the + * CPU cache or not, since we only allow one bit set + * in obj->write_domain and have been skipping the clflushes. + * Just set it to the CPU cache for now. + */ + WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); + WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU); + + old_read_domains = obj->base.read_domains; + old_write_domain = obj->base.write_domain; + + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); + } + + obj->cache_level = cache_level; + return 0; +} + /* * Prepare buffer for display plane. Use uninterruptible for possible flush * wait, as in modesetting process we're not supposed to be interrupted. 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 837033cf2ce5..7a709cd8d543 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -29,9 +29,6 @@ #include "i915_trace.h" #include "intel_drv.h" -static void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level); - /* XXX kill agp_type! */ static unsigned int cache_level_to_agp_type(struct drm_device *dev, enum i915_cache_level cache_level) @@ -97,8 +94,8 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) return 0; } -static void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level) +void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 95c4b1429935..e9615685a39c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -236,7 +236,8 @@ init_pipe_control(struct intel_ring_buffer *ring) ret = -ENOMEM; goto err; } - obj->cache_level = I915_CACHE_LLC; + + i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); ret = i915_gem_object_pin(obj, 4096, true); if (ret) @@ -776,7 +777,8 @@ static int init_status_page(struct intel_ring_buffer *ring) ret = -ENOMEM; goto err; } - obj->cache_level = I915_CACHE_LLC; + + i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); ret = i915_gem_object_pin(obj, 4096, true); if (ret != 0) { -- cgit v1.2.3 From c411964209508e32cf36f6512ed339996751f55f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 29 Mar 2011 16:59:51 -0700 Subject: drm/i915: Mark the cursor and the overlay as being part of the display planes Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_overlay.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e32fb89b3165..f79863a9a693 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5440,7 +5440,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, goto fail_locked; } - ret = i915_gem_object_set_to_gtt_domain(obj, 0); + ret = i915_gem_object_set_to_display_plane(obj, NULL); if (ret) { DRM_ERROR("failed to move cursor bo into the GTT\n"); goto fail_unpin; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index a670c006982e..e0903c5f0ca2 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -777,7 +777,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) return ret; - ret = i915_gem_object_set_to_gtt_domain(new_bo, 0); + ret = i915_gem_object_set_to_display_plane(new_bo, NULL); if (ret != 0) goto out_unpin; -- cgit v1.2.3 From 2da3b9b940e2a18147422c54ed8b29d01e1ade88 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Apr 2011 09:41:17 +0100 Subject: drm/i915: Combine pinning with setting to the display plane We need to perform a few operations in order to move the object into the display plane (where it can be accessed coherently by the display engine) that are important for future safety to forbid whilst pinned. 
As a result, we want to need to perform some of the operations before pinning, but some are required once we have been bound into the GTT. So combine the pinning performed by all the callers with set_to_display_plane(), so this complication is contained within the single function. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 3 ++- drivers/gpu/drm/i915/i915_gem.c | 37 +++++++++++++++++++++++++----------- drivers/gpu/drm/i915/intel_display.c | 18 ++++-------------- drivers/gpu/drm/i915/intel_overlay.c | 6 +----- 4 files changed, 33 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e552aa6bc859..8a9fd9177860 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1209,7 +1209,8 @@ int __must_check i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); int __must_check -i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, +i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, + u32 alignment, struct intel_ring_buffer *pipelined); int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e6915072ba72..8a439f0c21f5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3095,40 +3095,55 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, } /* - * Prepare buffer for display plane. Use uninterruptible for possible flush - * wait, as in modesetting process we're not supposed to be interrupted. + * Prepare buffer for display plane (scanout, cursors, etc). + * Can be called from an uninterruptible phase (modesetting) and allows + * any flushes to be pipelined (for pageflips). + * + * For the display plane, we want to be in the GTT but out of any write + * domains. So in many ways this looks like set_to_gtt_domain() apart from the + * ability to pipeline the waits, pinning and any additional subtleties + * that may differentiate the display plane from ordinary buffers. */ int -i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, +i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, + u32 alignment, struct intel_ring_buffer *pipelined) { - uint32_t old_read_domains; + u32 old_read_domains, old_write_domain; int ret; - /* Not valid to be called on unbound objects. */ - if (obj->gtt_space == NULL) - return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret) return ret; - - /* Currently, we are always called from an non-interruptible context. */ if (pipelined != obj->ring) { ret = i915_gem_object_wait_rendering(obj); if (ret) return ret; } + /* As the user may map the buffer once pinned in the display plane + * (e.g. libkms for the bootup splash), we have to ensure that we + * always use map_and_fenceable for all scanout buffers. + */ + ret = i915_gem_object_pin(obj, alignment, true); + if (ret) + return ret; + i915_gem_object_flush_cpu_write_domain(obj); + old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; + + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. 
+ */ + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); obj->base.read_domains |= I915_GEM_DOMAIN_GTT; trace_i915_gem_object_change_domain(obj, old_read_domains, - obj->base.write_domain); + old_write_domain); return 0; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f79863a9a693..86a3ec1469ba 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1812,14 +1812,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, } dev_priv->mm.interruptible = false; - ret = i915_gem_object_pin(obj, alignment, true); + ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); if (ret) goto err_interruptible; - ret = i915_gem_object_set_to_display_plane(obj, pipelined); - if (ret) - goto err_unpin; - /* Install a fence for tiled scan-out. Pre-i965 always needs a * fence, whereas 965+ only requires a fence if using * framebuffer compression. For simplicity, we always install @@ -5434,21 +5430,15 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, goto fail_locked; } - ret = i915_gem_object_pin(obj, PAGE_SIZE, true); - if (ret) { - DRM_ERROR("failed to pin cursor bo\n"); - goto fail_locked; - } - - ret = i915_gem_object_set_to_display_plane(obj, NULL); + ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL); if (ret) { DRM_ERROR("failed to move cursor bo into the GTT\n"); - goto fail_unpin; + goto fail_locked; } ret = i915_gem_object_put_fence(obj); if (ret) { - DRM_ERROR("failed to move cursor bo into the GTT\n"); + DRM_ERROR("failed to release fence for cursor"); goto fail_unpin; } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index e0903c5f0ca2..fcf6fcb0b482 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -773,14 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) return ret; - ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true); + ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL); if (ret != 0) return ret; - ret = i915_gem_object_set_to_display_plane(new_bo, NULL); - if (ret != 0) - goto out_unpin; - ret = i915_gem_object_put_fence(new_bo); if (ret) goto out_unpin; -- cgit v1.2.3 From a7ef0640d984e265393a76aa08a09febd3e7ce34 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 29 Mar 2011 16:59:54 -0700 Subject: drm/i915: Use the uncached domain for the display planes The simplest and common method for ensuring scanout coherency on all chipsets is to mark the scanout buffers as uncached (and for userspace to remember to flush the render cache every so often). We can improve upon this for later generations by marking scanout objects as GFDT and only flush those cachelines when required. However, we start simple. [v2: Move the set to uncached above the clflush. Otherwise, we'd skip the clflush and try to scan out data that was still sitting in the cache.] Signed-off-by: Eric Anholt Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8a439f0c21f5..fb0538358c34 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3122,6 +3122,19 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, return ret; } + /* The display engine is not coherent with the LLC cache on gen6. 
As + * a result, we make sure that the pinning that is about to occur is + * done with uncached PTEs. This is lowest common denominator for all + * chipsets. + * + * However for gen6+, we could do better by using the GFDT bit instead + * of uncaching, which would allow us to flush all the LLC-cached data + * with that bit in the PTE to main memory with just one PIPE_CONTROL. + */ + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); + if (ret) + return ret; + /* As the user may map the buffer once pinned in the display plane * (e.g. libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. -- cgit v1.2.3 From a18711120764dd96ed2ee6a4d436c448542bad77 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 29 Mar 2011 16:59:55 -0700 Subject: drm/i915: Use the LLC mode on gen6 for everything but display. Improves full-screen openarena on my laptop 20.3% +/- 4.0% (n=3) Improves 800x600 nexuiz on my laptop 12.3% +/- 0.1% (n=3) We have more room to improve with doing LLC caching for display using GFDT, and in doing LLC+MLC caching, but this was an easy performance win and incremental improvement toward those two. Signed-off-by: Eric Anholt Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fb0538358c34..cb1f61dba5eb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3700,7 +3700,23 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, obj->base.write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = I915_GEM_DOMAIN_CPU; - obj->cache_level = I915_CACHE_NONE; + if (IS_GEN6(dev)) { + /* On Gen6, we can have the GPU use the LLC (the CPU + * cache) for about a 10% performance improvement + * compared to uncached. Graphics requests other than + * display scanout are coherent with the CPU in + * accessing this cache. This means in this mode we + * don't need to clflush on the CPU side, and on the + * GPU side we only need to flush internal caches to + * get data visible to the CPU. + * + * However, we maintain the display planes as UC, and so + * need to rebind when first used as such. 
+ */ + obj->cache_level = I915_CACHE_LLC; + } else + obj->cache_level = I915_CACHE_NONE; + obj->base.driver_private = NULL; obj->fence_reg = I915_FENCE_REG_NONE; INIT_LIST_HEAD(&obj->mm_list); -- cgit v1.2.3 From ea41b1e5440442cea5c029b192e9ebbe68e423f6 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Fri, 10 Jun 2011 10:14:03 -0400 Subject: ioctl-number.txt: add the tile hardwall ioctl range Signed-off-by: Chris Metcalf --- Documentation/ioctl/ioctl-number.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 3a46e360496d..b7dc3747b711 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -292,6 +292,7 @@ Code Seq#(hex) Include File Comments 0xA0 all linux/sdp/sdp.h Industrial Device Project +0xA2 00-0F arch/tile/include/asm/hardwall.h 0xA3 80-8F Port ACL in development: 0xA3 90-9F linux/dtlk.h -- cgit v1.2.3 From dbcb4a1a3f16702918caa4d4ab7062965050a780 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Fri, 10 Jun 2011 13:07:48 -0400 Subject: arch/tile: add hypervisor-based character driver for SPI flash ROM The first version of this patch proposed an arch/tile/drivers/ directory, but the consensus was that this was probably a poor choice for a place to group Tilera-specific drivers, and that in any case grouping by platform was discouraged, and grouping by function was preferred. This version of the patch addresses various issues raised in the community, primarily the absence of sysfs integration. The sysfs integration now handles passing information on sector size, page size, and total partition size to userspace as well. In addition, we now use a single "struct cdev" to manage all the partition minor devices, and dynamically discover the correct number of partitions from the hypervisor rather than using a module_param with a default value. This driver has no particular "peer" drivers it can be grouped with. It is sort of like an MTD driver for SPI ROM, but it doesn't group well with the other MTD devices since it relies on hypervisor virtualization to handle many of the irritating aspects of flash ROM management: sector awareness, background read for sub-sector writes, bit examination to determine whether a sector erase needs to be issued, etc. It is in fact more like an EEPROM driver, but the hypervisor virtualization does require a "flush" command if you wish to commit a sector write prior to writing to a different sector, and this is sufficiently different from generic I2C/SPI EEPROMs that as a result it doesn't group well with them either. The simple character device is already in use by a range of Tilera SPI ROM management tools, as well as by customers. In addition, using the simple character device actually simplifies the userspace tools, since they don't need to manage sector erase, background read, etc. This both simplifies the code (since we can uniformly manage plain files and the SPI ROM) as well as makes the user code portable to non-Linux platforms that don't offer the same MTD ioctls. 
Signed-off-by: Chris Metcalf Reviewed-by: Arnd Bergmann --- arch/tile/include/hv/drv_srom_intf.h | 41 +++ drivers/char/Kconfig | 11 + drivers/char/Makefile | 2 + drivers/char/tile-srom.c | 481 +++++++++++++++++++++++++++++++++++ 4 files changed, 535 insertions(+) create mode 100644 arch/tile/include/hv/drv_srom_intf.h create mode 100644 drivers/char/tile-srom.c diff --git a/arch/tile/include/hv/drv_srom_intf.h b/arch/tile/include/hv/drv_srom_intf.h new file mode 100644 index 000000000000..6395faa6d9e6 --- /dev/null +++ b/arch/tile/include/hv/drv_srom_intf.h @@ -0,0 +1,41 @@ +/* + * Copyright 2011 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + */ + +/** + * @file drv_srom_intf.h + * Interface definitions for the SPI Flash ROM driver. + */ + +#ifndef _SYS_HV_INCLUDE_DRV_SROM_INTF_H +#define _SYS_HV_INCLUDE_DRV_SROM_INTF_H + +/** Read this offset to get the total device size. */ +#define SROM_TOTAL_SIZE_OFF 0xF0000000 + +/** Read this offset to get the device sector size. */ +#define SROM_SECTOR_SIZE_OFF 0xF0000004 + +/** Read this offset to get the device page size. */ +#define SROM_PAGE_SIZE_OFF 0xF0000008 + +/** Write this offset to flush any pending writes. */ +#define SROM_FLUSH_OFF 0xF1000000 + +/** Write this offset, plus the byte offset of the start of a sector, to + * erase a sector. Any write data is ignored, but there must be at least + * one byte of write data. Only applies when the driver is in MTD mode. + */ +#define SROM_ERASE_OFF 0xF2000000 + +#endif /* _SYS_HV_INCLUDE_DRV_SROM_INTF_H */ diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 49502bc5360a..423fd56bf612 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -616,5 +616,16 @@ config MSM_SMD_PKT Enables userspace clients to read and write to some packet SMD ports via device interface for MSM chipset. +config TILE_SROM + bool "Character-device access via hypervisor to the Tilera SPI ROM" + depends on TILE + default y + ---help--- + This device provides character-level read-write access + to the SROM, typically via the "0", "1", and "2" devices + in /dev/srom/. The Tilera hypervisor makes the flash + device appear much like a simple EEPROM, and knows + how to partition a single ROM for multiple purposes. + endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 7a00672bd85d..32762ba769c2 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -63,3 +63,5 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o obj-$(CONFIG_JS_RTC) += js-rtc.o js-rtc-y = rtc.o + +obj-$(CONFIG_TILE_SROM) += tile-srom.o diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c new file mode 100644 index 000000000000..cf3ee008dca2 --- /dev/null +++ b/drivers/char/tile-srom.c @@ -0,0 +1,481 @@ +/* + * Copyright 2011 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + * + * SPI Flash ROM driver + * + * This source code is derived from code provided in "Linux Device + * Drivers, Third Edition", by Jonathan Corbet, Alessandro Rubini, and + * Greg Kroah-Hartman, published by O'Reilly Media, Inc. + */ + +#include +#include +#include +#include /* printk() */ +#include /* kmalloc() */ +#include /* everything... */ +#include /* error codes */ +#include /* size_t */ +#include +#include /* O_ACCMODE */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Size of our hypervisor I/O requests. We break up large transfers + * so that we don't spend large uninterrupted spans of time in the + * hypervisor. Erasing an SROM sector takes a significant fraction of + * a second, so if we allowed the user to, say, do one I/O to write the + * entire ROM, we'd get soft lockup timeouts, or worse. + */ +#define SROM_CHUNK_SIZE ((size_t)4096) + +/* + * When hypervisor is busy (e.g. erasing), poll the status periodically. + */ + +/* + * Interval to poll the state in msec + */ +#define SROM_WAIT_TRY_INTERVAL 20 + +/* + * Maximum times to poll the state + */ +#define SROM_MAX_WAIT_TRY_TIMES 1000 + +struct srom_dev { + int hv_devhdl; /* Handle for hypervisor device */ + u32 total_size; /* Size of this device */ + u32 sector_size; /* Size of a sector */ + u32 page_size; /* Size of a page */ + struct mutex lock; /* Allow only one accessor at a time */ +}; + +static int srom_major; /* Dynamic major by default */ +module_param(srom_major, int, 0); +MODULE_AUTHOR("Tilera Corporation"); +MODULE_LICENSE("GPL"); + +static int srom_devs; /* Number of SROM partitions */ +static struct cdev srom_cdev; +static struct class *srom_class; +static struct srom_dev *srom_devices; + +/* + * Handle calling the hypervisor and managing EAGAIN/EBUSY. + */ + +static ssize_t _srom_read(int hv_devhdl, void *buf, + loff_t off, size_t count) +{ + int retval, retries = SROM_MAX_WAIT_TRY_TIMES; + for (;;) { + retval = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)buf, + count, off); + if (retval >= 0) + return retval; + if (retval == HV_EAGAIN) + continue; + if (retval == HV_EBUSY && --retries > 0) { + msleep(SROM_WAIT_TRY_INTERVAL); + continue; + } + pr_err("_srom_read: error %d\n", retval); + return -EIO; + } +} + +static ssize_t _srom_write(int hv_devhdl, const void *buf, + loff_t off, size_t count) +{ + int retval, retries = SROM_MAX_WAIT_TRY_TIMES; + for (;;) { + retval = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)buf, + count, off); + if (retval >= 0) + return retval; + if (retval == HV_EAGAIN) + continue; + if (retval == HV_EBUSY && --retries > 0) { + msleep(SROM_WAIT_TRY_INTERVAL); + continue; + } + pr_err("_srom_write: error %d\n", retval); + return -EIO; + } +} + +/** + * srom_open() - Device open routine. + * @inode: Inode for this device. + * @filp: File for this specific open of the device. + * + * Returns zero, or an error code. + */ +static int srom_open(struct inode *inode, struct file *filp) +{ + filp->private_data = &srom_devices[iminor(inode)]; + return 0; +} + + +/** + * srom_release() - Device release routine. + * @inode: Inode for this device. + * @filp: File for this specific open of the device. + * + * Returns zero, or an error code. 
+ */ +static int srom_release(struct inode *inode, struct file *filp) +{ + struct srom_dev *srom = filp->private_data; + char dummy; + + /* Make sure we've flushed anything written to the ROM. */ + mutex_lock(&srom->lock); + if (srom->hv_devhdl >= 0) + _srom_write(srom->hv_devhdl, &dummy, SROM_FLUSH_OFF, 1); + mutex_unlock(&srom->lock); + + filp->private_data = NULL; + + return 0; +} + + +/** + * srom_read() - Read data from the device. + * @filp: File for this specific open of the device. + * @buf: User's data buffer. + * @count: Number of bytes requested. + * @f_pos: File position. + * + * Returns number of bytes read, or an error code. + */ +static ssize_t srom_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + int retval = 0; + void *kernbuf; + struct srom_dev *srom = filp->private_data; + + kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; + + if (mutex_lock_interruptible(&srom->lock)) { + retval = -ERESTARTSYS; + kfree(kernbuf); + return retval; + } + + while (count) { + int hv_retval; + int bytes_this_pass = min(count, SROM_CHUNK_SIZE); + + hv_retval = _srom_read(srom->hv_devhdl, kernbuf, + *f_pos, bytes_this_pass); + if (hv_retval > 0) { + if (copy_to_user(buf, kernbuf, hv_retval) != 0) { + retval = -EFAULT; + break; + } + } else if (hv_retval <= 0) { + if (retval == 0) + retval = hv_retval; + break; + } + + retval += hv_retval; + *f_pos += hv_retval; + buf += hv_retval; + count -= hv_retval; + } + + mutex_unlock(&srom->lock); + kfree(kernbuf); + + return retval; +} + +/** + * srom_write() - Write data to the device. + * @filp: File for this specific open of the device. + * @buf: User's data buffer. + * @count: Number of bytes requested. + * @f_pos: File position. + * + * Returns number of bytes written, or an error code. + */ +static ssize_t srom_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + int retval = 0; + void *kernbuf; + struct srom_dev *srom = filp->private_data; + + kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; + + if (mutex_lock_interruptible(&srom->lock)) { + retval = -ERESTARTSYS; + kfree(kernbuf); + return retval; + } + + while (count) { + int hv_retval; + int bytes_this_pass = min(count, SROM_CHUNK_SIZE); + + if (copy_from_user(kernbuf, buf, bytes_this_pass) != 0) { + retval = -EFAULT; + break; + } + + hv_retval = _srom_write(srom->hv_devhdl, kernbuf, + *f_pos, bytes_this_pass); + if (hv_retval <= 0) { + if (retval == 0) + retval = hv_retval; + break; + } + + retval += hv_retval; + *f_pos += hv_retval; + buf += hv_retval; + count -= hv_retval; + } + + mutex_unlock(&srom->lock); + kfree(kernbuf); + + return retval; +} + +/* Provide our own implementation so we can use srom->total_size. 
*/ +loff_t srom_llseek(struct file *filp, loff_t offset, int origin) +{ + struct srom_dev *srom = filp->private_data; + + if (mutex_lock_interruptible(&srom->lock)) + return -ERESTARTSYS; + + switch (origin) { + case SEEK_END: + offset += srom->total_size; + break; + case SEEK_CUR: + offset += filp->f_pos; + break; + } + + if (offset < 0 || offset > srom->total_size) { + offset = -EINVAL; + } else { + filp->f_pos = offset; + filp->f_version = 0; + } + + mutex_unlock(&srom->lock); + + return offset; +} + +static ssize_t total_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->total_size); +} + +static ssize_t sector_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->sector_size); +} + +static ssize_t page_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->page_size); +} + +static struct device_attribute srom_dev_attrs[] = { + __ATTR(total_size, S_IRUGO, total_show, NULL), + __ATTR(sector_size, S_IRUGO, sector_show, NULL), + __ATTR(page_size, S_IRUGO, page_show, NULL), + __ATTR_NULL +}; + +static char *srom_devnode(struct device *dev, mode_t *mode) +{ + *mode = S_IRUGO | S_IWUSR; + return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev)); +} + +/* + * The fops + */ +static const struct file_operations srom_fops = { + .owner = THIS_MODULE, + .llseek = srom_llseek, + .read = srom_read, + .write = srom_write, + .open = srom_open, + .release = srom_release, +}; + +/** + * srom_setup_minor() - Initialize per-minor information. + * @srom: Per-device SROM state. + * @index: Device to set up. + */ +static int srom_setup_minor(struct srom_dev *srom, int index) +{ + struct device *dev; + int devhdl = srom->hv_devhdl; + + mutex_init(&srom->lock); + + if (_srom_read(devhdl, &srom->total_size, + SROM_TOTAL_SIZE_OFF, sizeof(srom->total_size)) < 0) + return -EIO; + if (_srom_read(devhdl, &srom->sector_size, + SROM_SECTOR_SIZE_OFF, sizeof(srom->sector_size)) < 0) + return -EIO; + if (_srom_read(devhdl, &srom->page_size, + SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0) + return -EIO; + + dev = device_create(srom_class, &platform_bus, + MKDEV(srom_major, index), srom, "%d", index); + return IS_ERR(dev) ? PTR_ERR(dev) : 0; +} + +/** srom_init() - Initialize the driver's module. */ +static int srom_init(void) +{ + int result, i; + dev_t dev = MKDEV(srom_major, 0); + + /* + * Start with a plausible number of partitions; the krealloc() call + * below will yield about log(srom_devs) additional allocations. + */ + srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL); + + /* Discover the number of srom partitions. */ + for (i = 0; ; i++) { + int devhdl; + char buf[20]; + struct srom_dev *new_srom_devices = + krealloc(srom_devices, (i+1) * sizeof(struct srom_dev), + GFP_KERNEL | __GFP_ZERO); + if (!new_srom_devices) { + result = -ENOMEM; + goto fail_mem; + } + srom_devices = new_srom_devices; + sprintf(buf, "srom/0/%d", i); + devhdl = hv_dev_open((HV_VirtAddr)buf, 0); + if (devhdl < 0) { + if (devhdl != HV_ENODEV) + pr_notice("srom/%d: hv_dev_open failed: %d.\n", + i, devhdl); + break; + } + srom_devices[i].hv_devhdl = devhdl; + } + srom_devs = i; + + /* Bail out early if we have no partitions at all. 
*/ + if (srom_devs == 0) { + result = -ENODEV; + goto fail_mem; + } + + /* Register our major, and accept a dynamic number. */ + if (srom_major) + result = register_chrdev_region(dev, srom_devs, "srom"); + else { + result = alloc_chrdev_region(&dev, 0, srom_devs, "srom"); + srom_major = MAJOR(dev); + } + if (result < 0) + goto fail_mem; + + /* Register a character device. */ + cdev_init(&srom_cdev, &srom_fops); + srom_cdev.owner = THIS_MODULE; + srom_cdev.ops = &srom_fops; + result = cdev_add(&srom_cdev, dev, srom_devs); + if (result < 0) + goto fail_chrdev; + + /* Create a sysfs class. */ + srom_class = class_create(THIS_MODULE, "srom"); + if (IS_ERR(srom_class)) { + result = PTR_ERR(srom_class); + goto fail_cdev; + } + srom_class->dev_attrs = srom_dev_attrs; + srom_class->devnode = srom_devnode; + + /* Do per-partition initialization */ + for (i = 0; i < srom_devs; i++) { + result = srom_setup_minor(srom_devices + i, i); + if (result < 0) + goto fail_class; + } + + return 0; + +fail_class: + for (i = 0; i < srom_devs; i++) + device_destroy(srom_class, MKDEV(srom_major, i)); + class_destroy(srom_class); +fail_cdev: + cdev_del(&srom_cdev); +fail_chrdev: + unregister_chrdev_region(dev, srom_devs); +fail_mem: + kfree(srom_devices); + return result; +} + +/** srom_cleanup() - Clean up the driver's module. */ +static void srom_cleanup(void) +{ + int i; + for (i = 0; i < srom_devs; i++) + device_destroy(srom_class, MKDEV(srom_major, i)); + class_destroy(srom_class); + cdev_del(&srom_cdev); + unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs); + kfree(srom_devices); +} + +module_init(srom_init); +module_exit(srom_cleanup); -- cgit v1.2.3 From cd4f1d536c2b2221d5a80399698d39717bf40077 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 13 Jun 2011 10:26:27 -0400 Subject: ktest: Notify reason to break out of monitoring boot Different timeouts can cause the ktest monitor to break out of the loop. It becomes annoying that one does not know the reason why it exited the monitor loop. Display the cause of the reason why the loop was exited. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index cef28e6632b9..b96d3819c42e 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -841,12 +841,20 @@ sub monitor { if ($booted) { $line = wait_for_input($monitor_fp, $booted_timeout); + if (!defined($line)) { + my $s = $booted_timeout == 1 ? "" : "s"; + doprint "Successful boot found: break after $booted_timeout second$s\n"; + last; + } } else { $line = wait_for_input($monitor_fp); + if (!defined($line)) { + my $s = $timeout == 1 ? "" : "s"; + doprint "Timed out after $timeout second$s\n"; + last; + } } - last if (!defined($line)); - doprint $line; print DMESG $line; -- cgit v1.2.3 From f1a5b96219e3483ab519bed9bb04cc8fadf74816 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 13 Jun 2011 10:30:00 -0400 Subject: ktest: Add detection of triple faults When a triple fault happens in a test, no call trace nor panic is displayed. Instead, the system reboots to the good kernel. Since the good kernel may display a boot prompt that matches the success string, ktest may think that the test succeeded, when it did not. Detecting triple faults is tricky because it is hard to generalize what a reboot looks like. The best that we can come up with for now is to examine the Linux banner. 
If we detect that the Linux banner matches the test we want to test, then look to see if we hit another Linux banner with a different kernel is booted. This can be assumed to be a triple fault. We can't just check for two Linux banners because things like early printk may cause the Linux banner to be displayed twice. Checking for different kernel versions should be the safe bet. If this for some reason detects a false triple boot. A new ktest config option is also created: DETECT_TRIPLE_FAULT This can be set to 0 to disable this checking. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 20 ++++++++++++++++++++ tools/testing/ktest/sample.conf | 10 ++++++++++ 2 files changed, 30 insertions(+) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index b96d3819c42e..a8e1826e0cba 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -41,6 +41,7 @@ $default{"CLEAR_LOG"} = 0; $default{"BISECT_MANUAL"} = 0; $default{"BISECT_SKIP"} = 1; $default{"SUCCESS_LINE"} = "login:"; +$default{"DETECT_TRIPLE_FAULT"} = 1; $default{"BOOTED_TIMEOUT"} = 1; $default{"DIE_ON_FAILURE"} = 1; $default{"SSH_EXEC"} = "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND"; @@ -101,6 +102,7 @@ my $patchcheck_sleep_time; my $store_failures; my $timeout; my $booted_timeout; +my $detect_triplefault; my $console; my $success_line; my $stop_after_success; @@ -836,6 +838,7 @@ sub monitor { my $failure_start; my $monitor_start = time; my $done = 0; + my $version_found = 0; while (!$done) { @@ -904,6 +907,22 @@ sub monitor { $bug = 1; } + # Detect triple faults by testing the banner + if ($full_line =~ /\bLinux version (\S+).*\n/) { + if ($1 eq $version) { + $version_found = 1; + } elsif ($version_found && $detect_triplefault) { + # We already booted into the kernel we are testing, + # but now we booted into another kernel? + # Consider this a triple fault. + doprint "Aleady booted in Linux kernel $version, but now\n"; + doprint "we booted into Linux kernel $1.\n"; + doprint "Assuming that this is a triple fault.\n"; + doprint "To disable this: set DETECT_TRIPLE_FAULT to 0\n"; + last; + } + } + if ($line =~ /\n/) { $full_line = ""; } @@ -2159,6 +2178,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { $timeout = set_test_option("TIMEOUT", $i); $booted_timeout = set_test_option("BOOTED_TIMEOUT", $i); $console = set_test_option("CONSOLE", $i); + $detect_triplefault = set_test_option("DETECT_TRIPLE_FAULT", $i); $success_line = set_test_option("SUCCESS_LINE", $i); $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i); $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i); diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index 48cbcc80602a..c2c072e96032 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -518,6 +518,16 @@ # The variables SSH_USER and MACHINE are defined. #REBOOT = ssh $SSH_USER@$MACHINE reboot +# The way triple faults are detected is by testing the kernel +# banner. If the kernel banner for the kernel we are testing is +# found, and then later a kernel banner for another kernel version +# is found, it is considered that we encountered a triple fault, +# and there is no panic or callback, but simply a reboot. +# To disable this (because it did a false positive) set the following +# to 0. +# (default 1) +#DETECT_TRIPLE_FAULT = 0 + #### Per test run options #### # The following options are only allowed in TEST_START sections. # They are ignored in the DEFAULTS sections. 
-- cgit v1.2.3 From 30f75da5ff475f1f455c0b009f3c06767963c54f Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 13 Jun 2011 10:35:35 -0400 Subject: ktest: Add CONFIG_BISECT_GOOD option Currently the config_bisect compares the min config with the CONFIG_BISECT config. There may be another config that we know is good that we want to ignore configs on. By passing in this config it will ignore the options that are set in the good config. Note: This only ignores the config, it does not (yet) handle options that are different between the two configs. If the good config has "SLAB" set and the bad config has "SLUB" it will not find the bug if the bug had to do with changing these two options. This is something that I intend to implement in the future. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 6 ++++++ tools/testing/ktest/sample.conf | 19 ++++++++++++------- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index a8e1826e0cba..dbc02de93e59 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -88,6 +88,7 @@ my $bisect_bad = ""; my $reverse_bisect; my $bisect_manual; my $bisect_skip; +my $config_bisect_good; my $in_patchcheck = 0; my $run_test; my $redirect; @@ -1745,6 +1746,10 @@ sub config_bisect { my $tmpconfig = "$tmpdir/use_config"; + if (defined($config_bisect_good)) { + process_config_ignore $config_bisect_good; + } + # Make the file with the bad config and the min config if (defined($minconfig)) { # read the min config for things to ignore @@ -2174,6 +2179,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { $patchcheck_sleep_time = set_test_option("PATCHCHECK_SLEEP_TIME", $i); $bisect_manual = set_test_option("BISECT_MANUAL", $i); $bisect_skip = set_test_option("BISECT_SKIP", $i); + $config_bisect_good = set_test_option("CONFIG_BISECT_GOOD", $i); $store_failures = set_test_option("STORE_FAILURES", $i); $timeout = set_test_option("TIMEOUT", $i); $booted_timeout = set_test_option("BOOTED_TIMEOUT", $i); diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index c2c072e96032..be531c20643d 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -749,13 +749,18 @@ # boot - bad builds but fails to boot # test - bad boots but fails a test # -# CONFIG_BISECT is the config that failed to boot -# -# If BISECT_MANUAL is set, it will pause between iterations. -# This is useful to use just ktest.pl just for the config bisect. -# If you set it to build, it will run the bisect and you can -# control what happens in between iterations. It will ask you if -# the test succeeded or not and continue the config bisect. +# CONFIG_BISECT is the config that failed to boot +# +# If BISECT_MANUAL is set, it will pause between iterations. +# This is useful to use just ktest.pl just for the config bisect. +# If you set it to build, it will run the bisect and you can +# control what happens in between iterations. It will ask you if +# the test succeeded or not and continue the config bisect. +# +# CONFIG_BISECT_GOOD (optional) +# If you have a good config to start with, then you +# can specify it with CONFIG_BISECT_GOOD. Otherwise +# the MIN_CONFIG is the base. 
# # Example: # TEST_START -- cgit v1.2.3 From 9064af5206c26ce0d47621fef216b0c43d65d693 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 13 Jun 2011 10:38:48 -0400 Subject: ktest: Add TEST_NAME option Searching through several tests, it gets confusing which test result is for which test. By adding the TEST_NAME option, the user can tell which test result belongs to which test. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 18 ++++++++++++++++-- tools/testing/ktest/sample.conf | 6 ++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index dbc02de93e59..579569f57b06 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -101,6 +101,7 @@ my $sleep_time; my $bisect_sleep_time; my $patchcheck_sleep_time; my $store_failures; +my $test_name; my $timeout; my $booted_timeout; my $detect_triplefault; @@ -620,9 +621,15 @@ sub fail { end_monitor; } + my $name = ""; + + if (defined($test_name)) { + $name = " ($test_name)"; + } + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; - doprint "KTEST RESULT: TEST $i Failed: ", @_, "\n"; + doprint "KTEST RESULT: TEST $i$name Failed: ", @_, "\n"; doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; @@ -1130,9 +1137,15 @@ sub success { $successes++; + my $name = ""; + + if (defined($test_name)) { + $name = " ($test_name)"; + } + doprint "\n\n*******************************************\n"; doprint "*******************************************\n"; - doprint "KTEST RESULT: TEST $i SUCCESS!!!! **\n"; + doprint "KTEST RESULT: TEST $i$name SUCCESS!!!! **\n"; doprint "*******************************************\n"; doprint "*******************************************\n"; @@ -2181,6 +2194,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { $bisect_skip = set_test_option("BISECT_SKIP", $i); $config_bisect_good = set_test_option("CONFIG_BISECT_GOOD", $i); $store_failures = set_test_option("STORE_FAILURES", $i); + $test_name = set_test_option("TEST_NAME", $i); $timeout = set_test_option("TIMEOUT", $i); $booted_timeout = set_test_option("BOOTED_TIMEOUT", $i); $console = set_test_option("CONSOLE", $i); diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index be531c20643d..0e5f764ac9ee 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -545,6 +545,12 @@ # all preceding tests until a new CHECKOUT is set. # # +# TEST_NAME = name +# +# If you want the test to have a name that is displayed in +# the test result banner at the end of the test, then use this +# option. This is useful to search for the RESULT keyword and +# not have to translate a test number to a test in the config. # # For TEST_TYPE = patchcheck # -- cgit v1.2.3 From fcb3f16a4f4bf4e667ae4c68b1d5401824058efb Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 13 Jun 2011 10:40:58 -0400 Subject: ktest: Implement our own force min config Using the build KCONFIG_ALLCONFIG environment variable to force the min config may not always work properly. Since ktest is written in perl, it is trivial to read and replace the current config with the configs specified by the min config. Now the min config (and add configs) are read by perl and before a make is done, these configs in the .config file are replaced by the version in the min config. 
Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 74 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 61 insertions(+), 13 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 579569f57b06..aa442a9075db 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -119,6 +119,7 @@ my $successes = 0; my %entered_configs; my %config_help; my %variable; +my %force_config; $config_help{"MACHINE"} = << "EOF" The machine hostname that you will test. @@ -1044,21 +1045,69 @@ sub check_buildlog { return 1; } +sub apply_min_config { + my $outconfig = "$output_config.new"; + + # Read the config file and remove anything that + # is in the force_config hash (from minconfig and others) + # then add the force config back. + + doprint "Applying minimum configurations into $output_config.new\n"; + + open (OUT, ">$outconfig") or + dodie "Can't create $outconfig"; + + if (-f $output_config) { + open (IN, $output_config) or + dodie "Failed to open $output_config"; + while () { + if (/^(# )?(CONFIG_[^\s=]*)/) { + next if (defined($force_config{$2})); + } + print OUT; + } + close IN; + } + foreach my $config (keys %force_config) { + print OUT "$force_config{$config}\n"; + } + close OUT; + + run_command "mv $outconfig $output_config"; +} + sub make_oldconfig { - my ($defconfig) = @_; - if (!run_command "$defconfig $make oldnoconfig") { + apply_min_config; + + if (!run_command "$make oldnoconfig") { # Perhaps oldnoconfig doesn't exist in this version of the kernel # try a yes '' | oldconfig doprint "oldnoconfig failed, trying yes '' | make oldconfig\n"; - run_command "yes '' | $defconfig $make oldconfig" or + run_command "yes '' | $make oldconfig" or dodie "failed make config oldconfig"; } } +# read a config file and use this to force new configs. 
+sub load_force_config { + my ($config) = @_; + + open(IN, $config) or + dodie "failed to read $config"; + while () { + chomp; + if (/^(CONFIG[^\s=]*)(\s*=.*)/) { + $force_config{$1} = $_; + } elsif (/^# (CONFIG_\S*) is not set/) { + $force_config{$1} = $_; + } + } + close IN; +} + sub build { my ($type) = @_; - my $defconfig = ""; unlink $buildlog; @@ -1098,15 +1147,15 @@ sub build { close(OUT); if (defined($minconfig)) { - $defconfig = "KCONFIG_ALLCONFIG=$minconfig"; + load_force_config($minconfig); } - if ($type eq "oldnoconfig") { - make_oldconfig $defconfig; - } else { - run_command "$defconfig $make $type" or + if ($type ne "oldnoconfig") { + run_command "$make $type" or dodie "failed make config"; } + # Run old config regardless, to enforce min configurations + make_oldconfig; $redirect = "$buildlog"; if (!run_command "$make $build_options") { @@ -1587,7 +1636,7 @@ sub create_config { close(OUT); # exit; - make_oldconfig ""; + make_oldconfig; } sub compare_configs { @@ -1778,9 +1827,8 @@ sub config_bisect { dodie "failed to append $addconfig"; } - my $defconfig = ""; if (-f $tmpconfig) { - $defconfig = "KCONFIG_ALLCONFIG=$tmpconfig"; + load_force_config($tmpconfig); process_config_ignore $tmpconfig; } @@ -1801,7 +1849,7 @@ sub config_bisect { close(IN); # Now run oldconfig with the minconfig (and addconfigs) - make_oldconfig $defconfig; + make_oldconfig; # check to see what we lost (or gained) open (IN, $output_config) -- cgit v1.2.3 From ecaf8e521324d5a7f85976bb8689e248b8d3a2f6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 13 Jun 2011 10:48:10 -0400 Subject: ktest: Have wait on stdio honor bug timeout After a bug is found, the STOP_AFTER_FAILURE timeout is used to determine how much output should be printed before breaking out of the monitor loop. This is to get things like call traces and enough infromation about the bug to help determine what caused it. The STOP_AFTER_FAILURE is usually much shorter than the TIMEOUT that is used to determine when to quit after no more stdio is given. But since the stdio read uses a wait on I/O, the STOP_AFTER_FAILURE is only checked after we get something from I/O. But if the I/O does not return any more data, we wait the TIMEOUT period instead, even though we already triggered a bug report. The wait on I/O should honor the STOP_AFTER_FAILURE time if a bug has been found. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index aa442a9075db..1e1fe835df48 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -851,7 +851,16 @@ sub monitor { while (!$done) { - if ($booted) { + if ($bug && defined($stop_after_failure) && + $stop_after_failure >= 0) { + my $time = $stop_after_failure - (time - $failure_start); + $line = wait_for_input($monitor_fp, $time); + if (!defined($line)) { + doprint "bug timed out after $booted_timeout seconds\n"; + doprint "Test forced to stop after $stop_after_failure seconds after failure\n"; + last; + } + } elsif ($booted) { $line = wait_for_input($monitor_fp, $booted_timeout); if (!defined($line)) { my $s = $booted_timeout == 1 ? "" : "s"; -- cgit v1.2.3 From 23715c3c9a31dd34c8c2f27086a9562e35da423b Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 13 Jun 2011 11:03:34 -0400 Subject: ktest: Have LOG_FILE evaluate options as well The LOG_FILE variable needs to evaluate the $ options as well. 
Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 126 +++++++++++++++++++++++-------------------- 1 file changed, 68 insertions(+), 58 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 1e1fe835df48..83dcfaf0cac4 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -478,6 +478,69 @@ sub read_config { } } +sub __eval_option { + my ($option, $i) = @_; + + # Add space to evaluate the character before $ + $option = " $option"; + my $retval = ""; + + while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) { + my $start = $1; + my $var = $2; + my $end = $3; + + # Append beginning of line + $retval = "$retval$start"; + + # If the iteration option OPT[$i] exists, then use that. + # otherwise see if the default OPT (without [$i]) exists. + + my $o = "$var\[$i\]"; + + if (defined($opt{$o})) { + $o = $opt{$o}; + $retval = "$retval$o"; + } elsif (defined($opt{$var})) { + $o = $opt{$var}; + $retval = "$retval$o"; + } else { + $retval = "$retval\$\{$var\}"; + } + + $option = $end; + } + + $retval = "$retval$option"; + + $retval =~ s/^ //; + + return $retval; +} + +sub eval_option { + my ($option, $i) = @_; + + my $prev = ""; + + # Since an option can evaluate to another option, + # keep iterating until we do not evaluate any more + # options. + my $r = 0; + while ($prev ne $option) { + # Check for recursive evaluations. + # 100 deep should be more than enough. + if ($r++ > 100) { + die "Over 100 evaluations accurred with $option\n" . + "Check for recursive variables\n"; + } + $prev = $option; + $option = __eval_option($option, $i); + } + + return $option; +} + sub _logit { if (defined($opt{"LOG_FILE"})) { open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}"; @@ -2079,6 +2142,10 @@ EOF } read_config $ktest_config; +if (defined($opt{"LOG_FILE"})) { + $opt{"LOG_FILE"} = eval_option($opt{"LOG_FILE"}, -1); +} + # Append any configs entered in manually to the config file. my @new_configs = keys %entered_configs; if ($#new_configs >= 0) { @@ -2147,70 +2214,13 @@ sub __set_test_option { return undef; } -sub eval_option { - my ($option, $i) = @_; - - # Add space to evaluate the character before $ - $option = " $option"; - my $retval = ""; - - while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) { - my $start = $1; - my $var = $2; - my $end = $3; - - # Append beginning of line - $retval = "$retval$start"; - - # If the iteration option OPT[$i] exists, then use that. - # otherwise see if the default OPT (without [$i]) exists. - - my $o = "$var\[$i\]"; - - if (defined($opt{$o})) { - $o = $opt{$o}; - $retval = "$retval$o"; - } elsif (defined($opt{$var})) { - $o = $opt{$var}; - $retval = "$retval$o"; - } else { - $retval = "$retval\$\{$var\}"; - } - - $option = $end; - } - - $retval = "$retval$option"; - - $retval =~ s/^ //; - - return $retval; -} - sub set_test_option { my ($name, $i) = @_; my $option = __set_test_option($name, $i); return $option if (!defined($option)); - my $prev = ""; - - # Since an option can evaluate to another option, - # keep iterating until we do not evaluate any more - # options. - my $r = 0; - while ($prev ne $option) { - # Check for recursive evaluations. - # 100 deep should be more than enough. - if ($r++ > 100) { - die "Over 100 evaluations accurred with $name\n" . 
- "Check for recursive variables\n"; - } - $prev = $option; - $option = eval_option($option, $i); - } - - return $option; + return eval_option($option, $i); } # First we need to do is the builds -- cgit v1.2.3 From db05cfefce6e6120267974345599760b1d653439 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 13 Jun 2011 11:09:22 -0400 Subject: ktest: Allow initrd processing without modules defined When a config is set with CONFIG_MODULES=n, it does not mean that the kernel does not need an initrd to boot. For systems that depend on LVM and such, an initrd must run first. If POST_INSTALL is defined, then run the post install regardless if modules are needed or not. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 83dcfaf0cac4..fb46e12eb1d7 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -1031,6 +1031,16 @@ sub monitor { return 1; } +sub do_post_install { + + return if (!defined($post_install)); + + my $cp_post_install = $post_install; + $cp_post_install =~ s/\$KERNEL_VERSION/$version/g; + run_command "$cp_post_install" or + dodie "Failed to run post install"; +} + sub install { run_scp "$outputdir/$build_target", "$target_image" or @@ -1050,6 +1060,7 @@ sub install { close(IN); if (!$install_mods) { + do_post_install; doprint "No modules needed\n"; return; } @@ -1077,12 +1088,7 @@ sub install { run_ssh "rm -f /tmp/$modtar"; - return if (!defined($post_install)); - - my $cp_post_install = $post_install; - $cp_post_install =~ s/\$KERNEL_VERSION/$version/g; - run_command "$cp_post_install" or - dodie "Failed to run post install"; + do_post_install; } sub check_buildlog { -- cgit v1.2.3 From 61a6976bf19a6cf5dfcf37c3536665b316f22d49 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 14 Jun 2011 12:40:19 +0900 Subject: serial: sh-sci: Abstract register maps. This takes a bit of a sledgehammer to the horribly CPU subtype ifdef-ridden header and abstracts all of the different register layouts in to distinct types which in turn can be overriden on a per-port basis, or permitted to default to the map matching the port type at probe time. In the process this ultimately fixes up inumerable bugs with mismatches on various CPU types (particularly the legacy ones that were obviously broken years ago and no one noticed) and provides a more tightly coupled and consolidated platform for extending and implementing generic features. 
Signed-off-by: Paul Mundt --- arch/sh/Makefile | 1 + arch/sh/include/cpu-sh3/cpu/serial.h | 10 + arch/sh/include/cpu-sh4a/cpu/serial.h | 7 + arch/sh/kernel/cpu/sh3/Makefile | 18 +- arch/sh/kernel/cpu/sh3/serial-sh770x.c | 33 +++ arch/sh/kernel/cpu/sh3/serial-sh7710.c | 20 ++ arch/sh/kernel/cpu/sh3/serial-sh7720.c | 36 ++++ arch/sh/kernel/cpu/sh3/setup-sh7705.c | 5 + arch/sh/kernel/cpu/sh3/setup-sh770x.c | 8 + arch/sh/kernel/cpu/sh3/setup-sh7720.c | 5 + arch/sh/kernel/cpu/sh4/setup-sh7750.c | 3 +- arch/sh/kernel/cpu/sh4/setup-sh7760.c | 4 + arch/sh/kernel/cpu/sh4a/Makefile | 2 +- arch/sh/kernel/cpu/sh4a/serial-sh7722.c | 23 ++ arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 1 + arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 6 + arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 9 + arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 9 + arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 3 + arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 3 +- arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 8 +- arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 8 +- drivers/tty/serial/sh-sci.c | 364 ++++++++++++++++++++++++-------- drivers/tty/serial/sh-sci.h | 222 ------------------- include/linux/serial_sci.h | 36 ++++ 25 files changed, 516 insertions(+), 328 deletions(-) create mode 100644 arch/sh/include/cpu-sh3/cpu/serial.h create mode 100644 arch/sh/include/cpu-sh4a/cpu/serial.h create mode 100644 arch/sh/kernel/cpu/sh3/serial-sh770x.c create mode 100644 arch/sh/kernel/cpu/sh3/serial-sh7710.c create mode 100644 arch/sh/kernel/cpu/sh3/serial-sh7720.c create mode 100644 arch/sh/kernel/cpu/sh4a/serial-sh7722.c diff --git a/arch/sh/Makefile b/arch/sh/Makefile index e3d8170ad00b..99385d0b3f3b 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -173,6 +173,7 @@ core-$(CONFIG_HD6446X_SERIES) += arch/sh/cchips/hd6446x/ cpuincdir-$(CONFIG_CPU_SH2A) += cpu-sh2a cpuincdir-$(CONFIG_CPU_SH2) += cpu-sh2 cpuincdir-$(CONFIG_CPU_SH3) += cpu-sh3 +cpuincdir-$(CONFIG_CPU_SH4A) += cpu-sh4a cpuincdir-$(CONFIG_CPU_SH4) += cpu-sh4 cpuincdir-$(CONFIG_CPU_SH5) += cpu-sh5 cpuincdir-y += cpu-common # Must be last diff --git a/arch/sh/include/cpu-sh3/cpu/serial.h b/arch/sh/include/cpu-sh3/cpu/serial.h new file mode 100644 index 000000000000..7766329bc103 --- /dev/null +++ b/arch/sh/include/cpu-sh3/cpu/serial.h @@ -0,0 +1,10 @@ +#ifndef __CPU_SH3_SERIAL_H +#define __CPU_SH3_SERIAL_H + +#include + +extern struct plat_sci_port_ops sh770x_sci_port_ops; +extern struct plat_sci_port_ops sh7710_sci_port_ops; +extern struct plat_sci_port_ops sh7720_sci_port_ops; + +#endif /* __CPU_SH3_SERIAL_H */ diff --git a/arch/sh/include/cpu-sh4a/cpu/serial.h b/arch/sh/include/cpu-sh4a/cpu/serial.h new file mode 100644 index 000000000000..ff1bc275d210 --- /dev/null +++ b/arch/sh/include/cpu-sh4a/cpu/serial.h @@ -0,0 +1,7 @@ +#ifndef __CPU_SH4A_SERIAL_H +#define __CPU_SH4A_SERIAL_H + +/* arch/sh/kernel/cpu/sh4a/serial-sh7722.c */ +extern struct plat_sci_port_ops sh7722_sci_port_ops; + +#endif /* __CPU_SH4A_SERIAL_H */ diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile index ecab274141a8..6f13f33a35ff 100644 --- a/arch/sh/kernel/cpu/sh3/Makefile +++ b/arch/sh/kernel/cpu/sh3/Makefile @@ -7,15 +7,15 @@ obj-y := ex.o probe.o entry.o setup-sh3.o obj-$(CONFIG_HIBERNATION) += swsusp.o # CPU subtype setup -obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o -obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o -obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh770x.o -obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh770x.o -obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o -obj-$(CONFIG_CPU_SUBTYPE_SH7710) 
+= setup-sh7710.o -obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o -obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o -obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o +obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o serial-sh770x.o +obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o serial-sh770x.o +obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh770x.o serial-sh770x.o +obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh770x.o serial-sh770x.o +obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o serial-sh770x.o +obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o serial-sh7710.o +obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o serial-sh7710.o +obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o serial-sh7720.o +obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o serial-sh7720.o # Primary on-chip clocks (common) clock-$(CONFIG_CPU_SH3) := clock-sh3.o diff --git a/arch/sh/kernel/cpu/sh3/serial-sh770x.c b/arch/sh/kernel/cpu/sh3/serial-sh770x.c new file mode 100644 index 000000000000..4f7242c676b3 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/serial-sh770x.c @@ -0,0 +1,33 @@ +#include +#include +#include +#include + +#define SCPCR 0xA4000116 +#define SCPDR 0xA4000136 + +static void sh770x_sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + unsigned short data; + + /* We need to set SCPCR to enable RTS/CTS */ + data = __raw_readw(SCPCR); + /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/ + __raw_writew(data & 0x0fcf, SCPCR); + + if (!(cflag & CRTSCTS)) { + /* We need to set SCPCR to enable RTS/CTS */ + data = __raw_readw(SCPCR); + /* Clear out SCP7MD1,0, SCP4MD1,0, + Set SCP6MD1,0 = {01} (output) */ + __raw_writew((data & 0x0fcf) | 0x1000, SCPCR); + + data = __raw_readb(SCPDR); + /* Set /RTS2 (bit6) = 0 */ + __raw_writeb(data & 0xbf, SCPDR); + } +} + +struct plat_sci_port_ops sh770x_sci_port_ops = { + .init_pins = sh770x_sci_init_pins, +}; diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7710.c b/arch/sh/kernel/cpu/sh3/serial-sh7710.c new file mode 100644 index 000000000000..42190ef6aebf --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/serial-sh7710.c @@ -0,0 +1,20 @@ +#include +#include +#include +#include + +#define PACR 0xa4050100 +#define PBCR 0xa4050102 + +static void sh7710_sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + if (port->mapbase == 0xA4400000) { + __raw_writew(__raw_readw(PACR) & 0xffc0, PACR); + __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR); + } else if (port->mapbase == 0xA4410000) + __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR); +} + +struct plat_sci_port_ops sh7710_sci_port_ops = { + .init_pins = sh7710_sci_init_pins, +}; diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7720.c b/arch/sh/kernel/cpu/sh3/serial-sh7720.c new file mode 100644 index 000000000000..8234e1e7abd9 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/serial-sh7720.c @@ -0,0 +1,36 @@ +#include +#include +#include +#include + +static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + unsigned short data; + + if (cflag & CRTSCTS) { + /* enable RTS/CTS */ + if (port->mapbase == 0xa4430000) { /* SCIF0 */ + /* Clear PTCR bit 9-2; enable all scif pins but sck */ + data = __raw_readw(PORT_PTCR); + __raw_writew((data & 0xfc03), PORT_PTCR); + } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ + /* Clear PVCR bit 9-2 */ + data = __raw_readw(PORT_PVCR); + __raw_writew((data & 0xfc03), PORT_PVCR); + } + } else { + if (port->mapbase == 0xa4430000) { /* SCIF0 */ + /* Clear PTCR bit 5-2; enable only tx and rx */ + data = __raw_readw(PORT_PTCR); + __raw_writew((data & 0xffc3), PORT_PTCR); + } else 
if (port->mapbase == 0xa4438000) { /* SCIF1 */ + /* Clear PVCR bit 5-2 */ + data = __raw_readw(PORT_PVCR); + __raw_writew((data & 0xffc3), PORT_PVCR); + } + } +} + +struct plat_sci_port_ops sh7720_sci_port_ops = { + .init_pins = sh7720_sci_init_pins, +}; diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c index cd2e702feb7e..2309618c015d 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c @@ -15,6 +15,7 @@ #include #include #include +#include enum { UNUSED = 0, @@ -75,6 +76,8 @@ static struct plat_sci_port scif0_platform_data = { .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { 56, 56, 56 }, + .ops = &sh770x_sci_port_ops, + .regtype = SCIx_SH7705_SCIF_REGTYPE, }; static struct platform_device scif0_device = { @@ -92,6 +95,8 @@ static struct plat_sci_port scif1_platform_data = { .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { 52, 52, 52 }, + .ops = &sh770x_sci_port_ops, + .regtype = SCIx_SH7705_SCIF_REGTYPE, }; static struct platform_device scif1_device = { diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 6d549792f791..3f3d5fe5892d 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -19,6 +19,7 @@ #include #include #include +#include enum { UNUSED = 0, @@ -114,6 +115,8 @@ static struct plat_sci_port scif0_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCI, .irqs = { 23, 23, 23, 0 }, + .ops = &sh770x_sci_port_ops, + .regshift = 1, }; static struct platform_device scif0_device = { @@ -133,6 +136,8 @@ static struct plat_sci_port scif1_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 56, 56, 56, 56 }, + .ops = &sh770x_sci_port_ops, + .regtype = SCIx_SH3_SCIF_REGTYPE, }; static struct platform_device scif1_device = { @@ -147,11 +152,14 @@ static struct platform_device scif1_device = { defined(CONFIG_CPU_SUBTYPE_SH7709) static struct plat_sci_port scif2_platform_data = { .mapbase = 0xa4000140, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_TE | SCSCR_RE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_IRDA, .irqs = { 52, 52, 52, 52 }, + .ops = &sh770x_sci_port_ops, + .regshift = 1, }; static struct platform_device scif2_device = { diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c index 365b94a6fcb7..94920345c14d 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c @@ -20,6 +20,7 @@ #include #include #include +#include static struct resource rtc_resources[] = { [0] = { @@ -55,6 +56,8 @@ static struct plat_sci_port scif0_platform_data = { .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, + .ops = &sh7720_sci_port_ops, + .regtype = SCIx_SH7705_SCIF_REGTYPE, }; static struct platform_device scif0_device = { @@ -72,6 +75,8 @@ static struct plat_sci_port scif1_platform_data = { .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, + .ops = &sh7720_sci_port_ops, + .regtype = SCIx_SH7705_SCIF_REGTYPE, }; static struct platform_device scif1_device = { diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 8ea26e791187..c10db5b96e59 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -1,5 +1,5 @@ /* - * SH7750/SH7751 Setup + * SH7091/SH7750/SH7750S/SH7750R/SH7751/SH7751R Setup * * Copyright (C) 2006 Paul Mundt * Copyright 
(C) 2006 Jamie Lenehan @@ -44,6 +44,7 @@ static struct plat_sci_port sci_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCI, .irqs = { 23, 23, 23, 0 }, + .regshift = 2, }; static struct platform_device sci_device = { diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index 78bbf232e391..c0b4c774700e 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c @@ -133,6 +133,7 @@ static struct plat_sci_port scif0_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 52, 53, 55, 54 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif0_device = { @@ -150,6 +151,7 @@ static struct plat_sci_port scif1_platform_data = { .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .irqs = { 72, 73, 75, 74 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif1_device = { @@ -167,6 +169,7 @@ static struct plat_sci_port scif2_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 76, 77, 79, 78 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif2_device = { @@ -184,6 +187,7 @@ static struct plat_sci_port scif3_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCI, .irqs = { 80, 81, 82, 0 }, + .regshift = 2, }; static struct platform_device scif3_device = { diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index cc122b1d3035..c57fb287011e 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o obj-$(CONFIG_CPU_SUBTYPE_SH7786) += setup-sh7786.o intc-shx3.o obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o -obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o +obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o serial-sh7722.o obj-$(CONFIG_CPU_SUBTYPE_SH7723) += setup-sh7723.o obj-$(CONFIG_CPU_SUBTYPE_SH7724) += setup-sh7724.o obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o diff --git a/arch/sh/kernel/cpu/sh4a/serial-sh7722.c b/arch/sh/kernel/cpu/sh4a/serial-sh7722.c new file mode 100644 index 000000000000..59bc3a72702e --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/serial-sh7722.c @@ -0,0 +1,23 @@ +#include +#include +#include + +#define PSCR 0xA405011E + +static void sh7722_sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + unsigned short data; + + if (port->mapbase == 0xffe00000) { + data = __raw_readw(PSCR); + data &= ~0x03cf; + if (!(cflag & CRTSCTS)) + data |= 0x0340; + + __raw_writew(data, PSCR); + } +} + +struct plat_sci_port_ops sh7722_sci_port_ops = { + .init_pins = sh7722_sci_init_pins, +}; diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index 82616af64d62..87773869a2f3 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -20,6 +20,7 @@ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, + .port_reg = 0xa405013e, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 5813d8023619..863249dbf05b 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -185,6 +185,8 @@ static struct plat_sci_port scif0_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, 
.type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, + .ops = &sh7722_sci_port_ops, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif0_device = { @@ -202,6 +204,8 @@ static struct plat_sci_port scif1_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, + .ops = &sh7722_sci_port_ops, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif1_device = { @@ -219,6 +223,8 @@ static struct plat_sci_port scif2_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 82, 82, 82, 82 }, + .ops = &sh7722_sci_port_ops, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif2_device = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 072382280f96..3c2810d8f72e 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -23,11 +23,13 @@ /* Serial */ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, + .port_reg = 0xa4050160, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif0_device = { @@ -40,11 +42,13 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe10000, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif1_device = { @@ -57,11 +61,13 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe20000, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 82, 82, 82, 82 }, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif2_device = { @@ -75,6 +81,7 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xa4e30000, .flags = UPF_BOOT_AUTOCONF, + .port_reg = SCIx_NOT_SUPPORTED, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, @@ -91,6 +98,7 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xa4e40000, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_3, @@ -108,6 +116,7 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xa4e50000, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_3, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index 0333fe9e3881..8c892887ebd7 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -256,11 +256,13 @@ static struct platform_device dma1_device = { /* Serial */ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = 
PORT_SCIF, .irqs = { 80, 80, 80, 80 }, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif0_device = { @@ -273,11 +275,13 @@ static struct platform_device scif0_device = { static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe10000, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif1_device = { @@ -290,11 +294,13 @@ static struct platform_device scif1_device = { static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe20000, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 82, 82, 82, 82 }, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif2_device = { @@ -307,6 +313,7 @@ static struct platform_device scif2_device = { static struct plat_sci_port scif3_platform_data = { .mapbase = 0xa4e30000, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_3, @@ -324,6 +331,7 @@ static struct platform_device scif3_device = { static struct plat_sci_port scif4_platform_data = { .mapbase = 0xa4e40000, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_3, @@ -341,6 +349,7 @@ static struct platform_device scif4_device = { static struct plat_sci_port scif5_platform_data = { .mapbase = 0xa4e50000, + .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_3, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index 593eca6509b5..00113515f233 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c @@ -23,6 +23,7 @@ static struct plat_sci_port scif0_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 40, 40, 40, 40 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif0_device = { @@ -40,6 +41,7 @@ static struct plat_sci_port scif1_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 76, 76, 76, 76 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif1_device = { @@ -57,6 +59,7 @@ static struct plat_sci_port scif2_platform_data = { .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 104, 104, 104, 104 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif2_device = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index 08add7fa6849..3d4d2075c19a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c @@ -14,7 +14,6 @@ #include #include #include - #include static struct plat_sci_port scif0_platform_data = { @@ -24,6 +23,7 @@ static struct plat_sci_port scif0_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 40, 40, 40, 40 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif0_device = { @@ -41,6 +41,7 @@ static struct plat_sci_port scif1_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 76, 76, 76, 76 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif1_device = { diff 
--git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index 18d8fc136fb2..b29e6340414a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c @@ -15,9 +15,7 @@ #include #include #include - #include - #include static struct plat_sci_port scif0_platform_data = { @@ -27,6 +25,7 @@ static struct plat_sci_port scif0_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 40, 40, 40, 40 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif0_device = { @@ -44,6 +43,7 @@ static struct plat_sci_port scif1_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 44, 44, 44, 44 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif1_device = { @@ -61,6 +61,7 @@ static struct plat_sci_port scif2_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 60, 60, 60, 60 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif2_device = { @@ -78,6 +79,7 @@ static struct plat_sci_port scif3_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 61, 61, 61, 61 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif3_device = { @@ -95,6 +97,7 @@ static struct plat_sci_port scif4_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 62, 62, 62, 62 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif4_device = { @@ -112,6 +115,7 @@ static struct plat_sci_port scif5_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 63, 63, 63, 63 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif5_device = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c index beba32beb6d9..dd5e709f9821 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c @@ -1,7 +1,7 @@ /* * SH7786 Setup * - * Copyright (C) 2009 - 2010 Renesas Solutions Corp. + * Copyright (C) 2009 - 2011 Renesas Solutions Corp. 
* Kuninori Morimoto * Paul Mundt * @@ -33,6 +33,7 @@ static struct plat_sci_port scif0_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 40, 41, 43, 42 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif0_device = { @@ -53,6 +54,7 @@ static struct plat_sci_port scif1_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 44, 44, 44, 44 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif1_device = { @@ -70,6 +72,7 @@ static struct plat_sci_port scif2_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 50, 50, 50, 50 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif2_device = { @@ -87,6 +90,7 @@ static struct plat_sci_port scif3_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 51, 51, 51, 51 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif3_device = { @@ -104,6 +108,7 @@ static struct plat_sci_port scif4_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 52, 52, 52, 52 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif4_device = { @@ -121,6 +126,7 @@ static struct plat_sci_port scif5_platform_data = { .scbrr_algo_id = SCBRR_ALGO_1, .type = PORT_SCIF, .irqs = { 53, 53, 53, 53 }, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif5_device = { diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 3248ddaa889d..14e1bae50392 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -117,6 +117,255 @@ to_sci_port(struct uart_port *uart) return container_of(uart, struct sci_port, port); } +struct plat_sci_reg { + u8 offset, size; +}; + +/* Helper for invalidating specific entries of an inherited map. */ +#define sci_reg_invalid { .offset = 0, .size = 0 } + +static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { + [SCIx_PROBE_REGTYPE] = { + [0 ... SCIx_NR_REGS - 1] = sci_reg_invalid, + }, + + /* + * Common SCI definitions, dependent on the port's regshift + * value. + */ + [SCIx_SCI_REGTYPE] = { + [SCSMR] = { 0x00, 8 }, + [SCBRR] = { 0x01, 8 }, + [SCSCR] = { 0x02, 8 }, + [SCxTDR] = { 0x03, 8 }, + [SCxSR] = { 0x04, 8 }, + [SCxRDR] = { 0x05, 8 }, + [SCFCR] = sci_reg_invalid, + [SCFDR] = sci_reg_invalid, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, + + /* + * Common definitions for legacy IrDA ports, dependent on + * regshift value. + */ + [SCIx_IRDA_REGTYPE] = { + [SCSMR] = { 0x00, 8 }, + [SCBRR] = { 0x01, 8 }, + [SCSCR] = { 0x02, 8 }, + [SCxTDR] = { 0x03, 8 }, + [SCxSR] = { 0x04, 8 }, + [SCxRDR] = { 0x05, 8 }, + [SCFCR] = { 0x06, 8 }, + [SCFDR] = { 0x07, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, + + /* + * Common SCIFA definitions. + */ + [SCIx_SCIFA_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x20, 8 }, + [SCxSR] = { 0x14, 16 }, + [SCxRDR] = { 0x24, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, + + /* + * Common SCIFB definitions. 
+ */ + [SCIx_SCIFB_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x40, 8 }, + [SCxSR] = { 0x14, 16 }, + [SCxRDR] = { 0x60, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, + + /* + * Common SH-3 SCIF definitions. + */ + [SCIx_SH3_SCIF_REGTYPE] = { + [SCSMR] = { 0x00, 8 }, + [SCBRR] = { 0x02, 8 }, + [SCSCR] = { 0x04, 8 }, + [SCxTDR] = { 0x06, 8 }, + [SCxSR] = { 0x08, 16 }, + [SCxRDR] = { 0x0a, 8 }, + [SCFCR] = { 0x0c, 8 }, + [SCFDR] = { 0x0e, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, + + /* + * Common SH-4(A) SCIF(B) definitions. + */ + [SCIx_SH4_SCIF_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = { 0x20, 16 }, + [SCLSR] = { 0x24, 16 }, + }, + + /* + * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR + * register. + */ + [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = { 0x24, 16 }, + }, + + /* + * Common SH-4(A) SCIF(B) definitions for ports with FIFO data + * count registers. + */ + [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */ + [SCRFDR] = { 0x20, 16 }, + [SCSPTR] = { 0x24, 16 }, + [SCLSR] = { 0x28, 16 }, + }, + + /* + * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR + * registers. + */ + [SCIx_SH7705_SCIF_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x20, 8 }, + [SCxSR] = { 0x14, 16 }, + [SCxRDR] = { 0x24, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, +}; + +/* + * The "offset" here is rather misleading, in that it refers to an enum + * value relative to the port mapping rather than the fixed offset + * itself, which needs to be manually retrieved from the platform's + * register map for the given port. 
+ */ +static unsigned int sci_serial_in(struct uart_port *p, int offset) +{ + struct sci_port *s = to_sci_port(p); + struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + offset; + + if (reg->size == 8) + return ioread8(p->membase + (reg->offset << p->regshift)); + else if (reg->size == 16) + return ioread16(p->membase + (reg->offset << p->regshift)); + else + WARN(1, "Invalid register access\n"); + + return 0; +} + +static void sci_serial_out(struct uart_port *p, int offset, int value) +{ + struct sci_port *s = to_sci_port(p); + struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + offset; + + if (reg->size == 8) + iowrite8(value, p->membase + (reg->offset << p->regshift)); + else if (reg->size == 16) + iowrite16(value, p->membase + (reg->offset << p->regshift)); + else + WARN(1, "Invalid register access\n"); +} + +#define sci_in(up, offset) (up->serial_in(up, offset)) +#define sci_out(up, offset, value) (up->serial_out(up, offset, value)) + +static int sci_probe_regmap(struct plat_sci_port *cfg) +{ + switch (cfg->type) { + case PORT_SCI: + cfg->regtype = SCIx_SCI_REGTYPE; + break; + case PORT_IRDA: + cfg->regtype = SCIx_IRDA_REGTYPE; + break; + case PORT_SCIFA: + cfg->regtype = SCIx_SCIFA_REGTYPE; + break; + case PORT_SCIFB: + cfg->regtype = SCIx_SCIFB_REGTYPE; + break; + case PORT_SCIF: + /* + * The SH-4 is a bit of a misnomer here, although that's + * where this particular port layout originated. This + * configuration (or some slight variation thereof) + * remains the dominant model for all SCIFs. + */ + cfg->regtype = SCIx_SH4_SCIF_REGTYPE; + break; + default: + printk(KERN_ERR "Can't probe register map for given port\n"); + return -EINVAL; + } + + return 0; +} + #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) #ifdef CONFIG_CONSOLE_POLL @@ -160,103 +409,29 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c) } #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ -#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - if (port->mapbase == 0xA4400000) { - __raw_writew(__raw_readw(PACR) & 0xffc0, PACR); - __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR); - } else if (port->mapbase == 0xA4410000) - __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR); -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721) -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - unsigned short data; - - if (cflag & CRTSCTS) { - /* enable RTS/CTS */ - if (port->mapbase == 0xa4430000) { /* SCIF0 */ - /* Clear PTCR bit 9-2; enable all scif pins but sck */ - data = __raw_readw(PORT_PTCR); - __raw_writew((data & 0xfc03), PORT_PTCR); - } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ - /* Clear PVCR bit 9-2 */ - data = __raw_readw(PORT_PVCR); - __raw_writew((data & 0xfc03), PORT_PVCR); - } - } else { - if (port->mapbase == 0xa4430000) { /* SCIF0 */ - /* Clear PTCR bit 5-2; enable only tx and rx */ - data = __raw_readw(PORT_PTCR); - __raw_writew((data & 0xffc3), PORT_PTCR); - } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ - /* Clear PVCR bit 5-2 */ - data = __raw_readw(PORT_PVCR); - __raw_writew((data & 0xffc3), PORT_PVCR); - } - } -} -#elif defined(CONFIG_CPU_SH3) -/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */ -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) +static void sci_init_pins(struct uart_port *port, unsigned int cflag) { - unsigned short 
data; - - /* We need to set SCPCR to enable RTS/CTS */ - data = __raw_readw(SCPCR); - /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/ - __raw_writew(data & 0x0fcf, SCPCR); - - if (!(cflag & CRTSCTS)) { - /* We need to set SCPCR to enable RTS/CTS */ - data = __raw_readw(SCPCR); - /* Clear out SCP7MD1,0, SCP4MD1,0, - Set SCP6MD1,0 = {01} (output) */ - __raw_writew((data & 0x0fcf) | 0x1000, SCPCR); + struct sci_port *s = to_sci_port(port); + struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR; - data = __raw_readb(SCPDR); - /* Set /RTS2 (bit6) = 0 */ - __raw_writeb(data & 0xbf, SCPDR); + /* + * Use port-specific handler if provided. + */ + if (s->cfg->ops && s->cfg->ops->init_pins) { + s->cfg->ops->init_pins(port, cflag); + return; } -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7722) -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - unsigned short data; - if (port->mapbase == 0xffe00000) { - data = __raw_readw(PSCR); - data &= ~0x03cf; - if (!(cflag & CRTSCTS)) - data |= 0x0340; + /* + * For the generic path SCSPTR is necessary. Bail out if that's + * unavailable, too. + */ + if (!reg->size) + return; - __raw_writew(data, PSCR); - } -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \ - defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) || \ - defined(CONFIG_CPU_SUBTYPE_SH7786) || \ - defined(CONFIG_CPU_SUBTYPE_SHX3) -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - if (!(cflag & CRTSCTS)) - __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */ -} -#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A) -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ if (!(cflag & CRTSCTS)) - __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */ + sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */ } -#else -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - /* Nothing to do */ -} -#endif #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ @@ -1752,6 +1927,9 @@ static int __devinit sci_init_single(struct platform_device *dev, break; } + if (p->regtype == SCIx_PROBE_REGTYPE) + BUG_ON(sci_probe_regmap(p) != 0); + if (dev) { sci_port->iclk = clk_get(&dev->dev, "sci_ick"); if (IS_ERR(sci_port->iclk)) { @@ -1812,9 +1990,10 @@ static int __devinit sci_init_single(struct platform_device *dev, port->mapbase = p->mapbase; port->type = p->type; port->flags = p->flags; + port->regshift = p->regshift; /* - * The UART port needs an IRQ value, so we peg this to the TX IRQ + * The UART port needs an IRQ value, so we peg this to the RX IRQ * for the multi-IRQ ports, which is where we are primarily * concerned with the shutdown path synchronization. 
* @@ -1822,6 +2001,9 @@ static int __devinit sci_init_single(struct platform_device *dev, */ port->irq = p->irqs[SCIx_RXI_IRQ]; + port->serial_in = sci_serial_in; + port->serial_out = sci_serial_out; + if (p->dma_dev) dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n", p->dma_dev, p->dma_slave_tx, p->dma_slave_rx); diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index 5834f33d20ff..26de640a9d01 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -2,69 +2,6 @@ #include #include -#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ - defined(CONFIG_CPU_SUBTYPE_SH7706) || \ - defined(CONFIG_CPU_SUBTYPE_SH7707) || \ - defined(CONFIG_CPU_SUBTYPE_SH7708) || \ - defined(CONFIG_CPU_SUBTYPE_SH7709) -# define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */ -# define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_ARCH_SH73A0) || \ - defined(CONFIG_ARCH_SH7367) || \ - defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) -# define PORT_PTCR 0xA405011EUL -# define PORT_PVCR 0xA4050122UL -#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ - defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ - defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ - defined(CONFIG_CPU_SUBTYPE_SH7091) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ - defined(CONFIG_CPU_SUBTYPE_SH4_202) -# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7760) -# define SCSPTR0 0xfe600024 /* 16 bit SCIF */ -# define SCSPTR2 0xfe620024 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -# define SCSPTR0 0xA4400000 /* 16 bit SCIF */ -# define PACR 0xa4050100 -# define PBCR 0xa4050102 -#elif defined(CONFIG_CPU_SUBTYPE_SH7343) -# define SCSPTR0 0xffe00010 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7722) -# define PWDR 0xA4050166 -# define PSCR 0xA405011E -#elif defined(CONFIG_CPU_SUBTYPE_SH7366) -# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */ -# define SCSPTR0 SCPDR0 -#elif defined(CONFIG_CPU_SUBTYPE_SH7723) -# define SCSPTR0 0xa4050160 -#elif defined(CONFIG_CPU_SUBTYPE_SH7757) -# define SCSPTR0 0xfe4b0020 -#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) -# define SCSPTR0 0xffe00024 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7770) -# define SCSPTR0 0xff923020 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ - defined(CONFIG_CPU_SUBTYPE_SH7786) -# define SCSPTR0 0xffea0024 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ - defined(CONFIG_CPU_SUBTYPE_SH7203) || \ - defined(CONFIG_CPU_SUBTYPE_SH7206) || \ - defined(CONFIG_CPU_SUBTYPE_SH7263) -# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7619) -# define SCSPTR0 0xf8400020 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SHX3) -# define SCSPTR0 0xffc30020 /* 16 bit SCIF */ -#else -# error CPU subtype not defined -#endif - #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) || \ @@ -119,162 +56,3 @@ #define SCI_MAJOR 204 #define SCI_MINOR_START 8 - -#define SCI_IN(size, offset) \ - ioread##size(port->membase + (offset)) - -#define SCI_OUT(size, offset, value) \ - iowrite##size(value, port->membase + (offset)) - -#define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\ - static inline unsigned int sci_##name##_in(struct uart_port *port) \ - { \ - if 
(port->type == PORT_SCIF || port->type == PORT_SCIFB) { \ - return SCI_IN(scif_size, scif_offset); \ - } else { /* PORT_SCI or PORT_SCIFA */ \ - return SCI_IN(sci_size, sci_offset); \ - } \ - } \ - static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ - { \ - if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \ - SCI_OUT(scif_size, scif_offset, value); \ - } else { /* PORT_SCI or PORT_SCIFA */ \ - SCI_OUT(sci_size, sci_offset, value); \ - } \ - } - -#define CPU_SCIF_FNS(name, scif_offset, scif_size) \ - static inline unsigned int sci_##name##_in(struct uart_port *port) \ - { \ - return SCI_IN(scif_size, scif_offset); \ - } \ - static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ - { \ - SCI_OUT(scif_size, scif_offset, value); \ - } - -#if defined(CONFIG_CPU_SH3) || \ - defined(CONFIG_ARCH_SH73A0) || \ - defined(CONFIG_ARCH_SH7367) || \ - defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) -#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size) -#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) -#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_ARCH_SH7367) -#define SCIF_FNS(name, scif_offset, scif_size) \ - CPU_SCIF_FNS(name, scif_offset, scif_size) -#elif defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) || \ - defined(CONFIG_ARCH_SH73A0) -#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \ - CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) -#define SCIF_FNS(name, scif_offset, scif_size) \ - CPU_SCIF_FNS(name, scif_offset, scif_size) -#else -#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh3_scif_offset, sh3_scif_size) -#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIF_FNS(name, sh3_scif_offset, sh3_scif_size) -#endif -#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ - defined(CONFIG_CPU_SUBTYPE_SH7724) - #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) - #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) -#else -#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size) -#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) -#endif - -#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_ARCH_SH7367) - -SCIF_FNS(SCSMR, 0x00, 16) -SCIF_FNS(SCBRR, 0x04, 8) -SCIF_FNS(SCSCR, 0x08, 16) -SCIF_FNS(SCxSR, 0x14, 16) -SCIF_FNS(SCFCR, 0x18, 16) -SCIF_FNS(SCFDR, 
0x1c, 16) -SCIF_FNS(SCxTDR, 0x20, 8) -SCIF_FNS(SCxRDR, 0x24, 8) -SCIF_FNS(SCLSR, 0x00, 0) -#elif defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) || \ - defined(CONFIG_ARCH_SH73A0) -SCIF_FNS(SCSMR, 0x00, 16) -SCIF_FNS(SCBRR, 0x04, 8) -SCIF_FNS(SCSCR, 0x08, 16) -SCIF_FNS(SCTDSR, 0x0c, 16) -SCIF_FNS(SCFER, 0x10, 16) -SCIF_FNS(SCxSR, 0x14, 16) -SCIF_FNS(SCFCR, 0x18, 16) -SCIF_FNS(SCFDR, 0x1c, 16) -SCIF_FNS(SCTFDR, 0x38, 16) -SCIF_FNS(SCRFDR, 0x3c, 16) -SCIx_FNS(SCxTDR, 0x20, 8, 0x40, 8) -SCIx_FNS(SCxRDR, 0x24, 8, 0x60, 8) -SCIF_FNS(SCLSR, 0x00, 0) -#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ - defined(CONFIG_CPU_SUBTYPE_SH7724) -SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16) -SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8) -SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16) -SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8) -SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16) -SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8) -SCIx_FNS(SCSPTR, 0, 0, 0, 0) -SCIF_FNS(SCFCR, 0x18, 16) -SCIF_FNS(SCFDR, 0x1c, 16) -SCIF_FNS(SCLSR, 0x24, 16) -#else -/* reg SCI/SH3 SCI/SH4 SCIF/SH3 SCIF/SH4 */ -/* name off sz off sz off sz off sz */ -SCIx_FNS(SCSMR, 0x00, 8, 0x00, 8, 0x00, 8, 0x00, 16) -SCIx_FNS(SCBRR, 0x02, 8, 0x04, 8, 0x02, 8, 0x04, 8) -SCIx_FNS(SCSCR, 0x04, 8, 0x08, 8, 0x04, 8, 0x08, 16) -SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8) -SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16) -SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8) -SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) -#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) || \ - defined(CONFIG_CPU_SUBTYPE_SH7786) -SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) -SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) -SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) -SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) -SCIF_FNS(SCLSR, 0, 0, 0x28, 16) -#elif defined(CONFIG_CPU_SUBTYPE_SH7763) -SCIF_FNS(SCFDR, 0, 0, 0x1C, 16) -SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) -SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) -SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) -SCIF_FNS(SCLSR, 0, 0, 0x28, 16) -#else -SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) -#if defined(CONFIG_CPU_SUBTYPE_SH7722) -SCIF_FNS(SCSPTR, 0, 0, 0, 0) -#else -SCIF_FNS(SCSPTR, 0, 0, 0x20, 16) -#endif -SCIF_FNS(SCLSR, 0, 0, 0x24, 16) -#endif -#endif -#define sci_in(port, reg) sci_##reg##_in(port) -#define sci_out(port, reg, value) sci_##reg##_out(port, value) diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index ecefec7c0b67..4ca130a90ea5 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -58,6 +58,22 @@ enum { SCIx_NR_IRQS, }; +enum { + SCIx_PROBE_REGTYPE, + + SCIx_SCI_REGTYPE, + SCIx_IRDA_REGTYPE, + SCIx_SCIFA_REGTYPE, + SCIx_SCIFB_REGTYPE, + SCIx_SH3_SCIF_REGTYPE, + SCIx_SH4_SCIF_REGTYPE, + SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, + SCIx_SH4_SCIF_FIFODATA_REGTYPE, + SCIx_SH7705_SCIF_REGTYPE, + + SCIx_NR_REGTYPES, +}; + #define SCIx_IRQ_MUXED(irq) \ { \ [SCIx_ERI_IRQ] = (irq), \ @@ -66,8 +82,24 @@ enum { [SCIx_BRI_IRQ] = (irq), \ } +/* + * SCI register subset common for all port types. + * Not all registers will exist on all parts. 
+ */ +enum { + SCSMR, SCBRR, SCSCR, SCxSR, + SCFCR, SCFDR, SCxTDR, SCxRDR, + SCLSR, SCTFDR, SCRFDR, SCSPTR, + + SCIx_NR_REGS, +}; + struct device; +struct plat_sci_port_ops { + void (*init_pins)(struct uart_port *, unsigned int cflag); +}; + /* * Platform device specific platform_data struct */ @@ -87,6 +119,10 @@ struct plat_sci_port { unsigned int error_mask; int port_reg; + unsigned char regshift; + unsigned char regtype; + + struct plat_sci_port_ops *ops; struct device *dma_dev; -- cgit v1.2.3 From 4d67431f80b1b822f0286afc9123ee453eac7334 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 13 Jun 2011 22:33:52 +0100 Subject: KEYS: Don't return EAGAIN to keyctl_assume_authority() Don't return EAGAIN to keyctl_assume_authority() to indicate that a key could not be found (ENOKEY is only returned if a negative key is found). Instead return ENOKEY in both cases. Signed-off-by: David Howells Signed-off-by: James Morris --- security/keys/request_key_auth.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 6cff37529b80..60d4e3f5e4bb 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -251,6 +251,8 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id) if (IS_ERR(authkey_ref)) { authkey = ERR_CAST(authkey_ref); + if (authkey == ERR_PTR(-EAGAIN)) + authkey = ERR_PTR(-ENOKEY); goto error; } -- cgit v1.2.3 From 72b294cf76dcd6d37891387049ddbe3c25043cb8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 14 Jun 2011 17:38:19 +0900 Subject: serial: sh-sci: FIFO sizing helper consolidation. This consolidates all of the TX/RX fill/room nonsense in to a single set of fairly heavyweight definitions. The implementation goes in descending order of complexity, testing the register map for capabilities until we run out of options and do it the legacy SCI way. Masks are derived directly from the per-port FIFO size, meaning that platforms with FIFO sizes not matching the standard port types will still need to manually fix them up. This also fixes up a number of issues such as tx_empty being completely bogus for SCI and IrDA ports, some ports using masks smaller or greater than their FIFO size, and so forth. 
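As a worked illustration of the FIFO-derived masks described above (a sketch distilled from the hunks below, not additional patch content): a standard 16-deep SCIF yields a mask of (16 << 1) - 1 = 0x1f and a 64-deep SCIF yields 0x7f, matching the removed per-CPU SCIF_RFDC_MASK values of 0x001f and 0x007f. The receive-side probe order then reads roughly as follows, using only helpers this series introduces (sci_getreg, sci_in, SCxSR_RDxF):

static int sci_rxfill_sketch(struct uart_port *port)
{
	/* Prefer a dedicated receive FIFO data count register when the
	 * port's register map advertises one...
	 */
	if (sci_getreg(port, SCRFDR)->size)
		return sci_in(port, SCRFDR) & 0xff;

	/* ...fall back to the shared FIFO data register, masked by the
	 * per-port FIFO size...
	 */
	if (sci_getreg(port, SCFDR)->size)
		return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1);

	/* ...and finally to the single-character SCI status bit. */
	return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}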
Signed-off-by: Paul Mundt --- drivers/tty/serial/sh-sci.c | 126 +++++++++----------------------------------- drivers/tty/serial/sh-sci.h | 20 ------- 2 files changed, 24 insertions(+), 122 deletions(-) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 14e1bae50392..60027d51bb51 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -297,6 +297,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { }, }; +#define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset) + /* * The "offset" here is rather misleading, in that it refers to an enum * value relative to the port mapping rather than the fixed offset @@ -305,8 +307,7 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { */ static unsigned int sci_serial_in(struct uart_port *p, int offset) { - struct sci_port *s = to_sci_port(p); - struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + offset; + struct plat_sci_reg *reg = sci_getreg(p, offset); if (reg->size == 8) return ioread8(p->membase + (reg->offset << p->regshift)); @@ -320,8 +321,7 @@ static unsigned int sci_serial_in(struct uart_port *p, int offset) static void sci_serial_out(struct uart_port *p, int offset, int value) { - struct sci_port *s = to_sci_port(p); - struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + offset; + struct plat_sci_reg *reg = sci_getreg(p, offset); if (reg->size == 8) iowrite8(value, p->membase + (reg->offset << p->regshift)); @@ -433,108 +433,38 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag) sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */ } -#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) || \ - defined(CONFIG_CPU_SUBTYPE_SH7786) -static int scif_txfill(struct uart_port *port) -{ - return sci_in(port, SCTFDR) & 0xff; -} - -static int scif_txroom(struct uart_port *port) +static int sci_txfill(struct uart_port *port) { - return SCIF_TXROOM_MAX - scif_txfill(port); -} + struct plat_sci_reg *reg; -static int scif_rxfill(struct uart_port *port) -{ - return sci_in(port, SCRFDR) & 0xff; -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7763) -static int scif_txfill(struct uart_port *port) -{ - if (port->mapbase == 0xffe00000 || - port->mapbase == 0xffe08000) - /* SCIF0/1*/ + reg = sci_getreg(port, SCTFDR); + if (reg->size) return sci_in(port, SCTFDR) & 0xff; - else - /* SCIF2 */ - return sci_in(port, SCFDR) >> 8; -} -static int scif_txroom(struct uart_port *port) -{ - if (port->mapbase == 0xffe00000 || - port->mapbase == 0xffe08000) - /* SCIF0/1*/ - return SCIF_TXROOM_MAX - scif_txfill(port); - else - /* SCIF2 */ - return SCIF2_TXROOM_MAX - scif_txfill(port); -} - -static int scif_rxfill(struct uart_port *port) -{ - if ((port->mapbase == 0xffe00000) || - (port->mapbase == 0xffe08000)) { - /* SCIF0/1*/ - return sci_in(port, SCRFDR) & 0xff; - } else { - /* SCIF2 */ - return sci_in(port, SCFDR) & SCIF2_RFDC_MASK; - } -} -#elif defined(CONFIG_ARCH_SH7372) -static int scif_txfill(struct uart_port *port) -{ - if (port->type == PORT_SCIFA) + reg = sci_getreg(port, SCFDR); + if (reg->size) return sci_in(port, SCFDR) >> 8; - else - return sci_in(port, SCTFDR); -} - -static int scif_txroom(struct uart_port *port) -{ - return port->fifosize - scif_txfill(port); -} -static int scif_rxfill(struct uart_port *port) -{ - if (port->type == PORT_SCIFA) - return sci_in(port, SCFDR) & SCIF_RFDC_MASK; - else - return sci_in(port, SCRFDR); -} -#else -static int 
scif_txfill(struct uart_port *port) -{ - return sci_in(port, SCFDR) >> 8; -} - -static int scif_txroom(struct uart_port *port) -{ - return SCIF_TXROOM_MAX - scif_txfill(port); -} - -static int scif_rxfill(struct uart_port *port) -{ - return sci_in(port, SCFDR) & SCIF_RFDC_MASK; -} -#endif - -static int sci_txfill(struct uart_port *port) -{ return !(sci_in(port, SCxSR) & SCI_TDRE); } static int sci_txroom(struct uart_port *port) { - return !sci_txfill(port); + return port->fifosize - sci_txfill(port); } static int sci_rxfill(struct uart_port *port) { + struct plat_sci_reg *reg; + + reg = sci_getreg(port, SCRFDR); + if (reg->size) + return sci_in(port, SCRFDR) & 0xff; + + reg = sci_getreg(port, SCFDR); + if (reg->size) + return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1); + return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; } @@ -574,10 +504,7 @@ static void sci_transmit_chars(struct uart_port *port) return; } - if (port->type == PORT_SCI) - count = sci_txroom(port); - else - count = scif_txroom(port); + count = sci_txroom(port); do { unsigned char c; @@ -632,13 +559,8 @@ static void sci_receive_chars(struct uart_port *port) return; while (1) { - if (port->type == PORT_SCI) - count = sci_rxfill(port); - else - count = scif_rxfill(port); - /* Don't copy more bytes than there is room for in the buffer */ - count = tty_buffer_request_room(tty, count); + count = tty_buffer_request_room(tty, sci_rxfill(port)); /* If for any reason we can't copy more data, we're done! */ if (count == 0) @@ -1096,7 +1018,7 @@ static void sci_free_irq(struct sci_port *port) static unsigned int sci_tx_empty(struct uart_port *port) { unsigned short status = sci_in(port, SCxSR); - unsigned short in_tx_fifo = scif_txfill(port); + unsigned short in_tx_fifo = sci_txfill(port); return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; } diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index 26de640a9d01..e9bed038aa1f 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -2,26 +2,6 @@ #include #include -#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_ARCH_SH73A0) || \ - defined(CONFIG_ARCH_SH7367) || \ - defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) -# define SCIF_RFDC_MASK 0x007f -# define SCIF_TXROOM_MAX 64 -#elif defined(CONFIG_CPU_SUBTYPE_SH7763) -# define SCIF_RFDC_MASK 0x007f -# define SCIF_TXROOM_MAX 64 -/* SH7763 SCIF2 support */ -# define SCIF2_RFDC_MASK 0x001f -# define SCIF2_TXROOM_MAX 16 -#else -# define SCIF_RFDC_MASK 0x001f -# define SCIF_TXROOM_MAX 16 -#endif - #define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) #define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF) #define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE) -- cgit v1.2.3 From 4b8c59a3d83e9cf2b65b16999a0c704fc72de056 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 14 Jun 2011 17:53:34 +0900 Subject: serial: sh-sci: Support generic SCLSR overrun detection. For all ports with a valid SCLSR register we can use the generic FIFO overrun detection logic. Test the validity of the SCLSR register rather than depending explicitly on port type, which can be ambiguous for the SCIFA/B types. 
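Put differently, the test keys off the register map rather than port->type: a register type that has no SCLSR leaves that entry zero-sized in sci_regmap[], so the generic handler simply bails out for such ports. A minimal sketch of the check (an illustration along the lines of the hunk below, not additional patch content):

	struct plat_sci_reg *reg = sci_getreg(port, SCLSR);

	if (!reg->size)
		return 0;	/* this port type has no SCLSR, nothing to check */

	if (sci_in(port, SCLSR) & (1 << to_sci_port(port)->cfg->overrun_bit)) {
		/* overrun: report it to the tty layer and clear SCLSR */
	}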
Signed-off-by: Paul Mundt --- drivers/tty/serial/sh-sci.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 60027d51bb51..8e55e0a2733a 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -734,15 +734,11 @@ static int sci_handle_fifo_overrun(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; struct sci_port *s = to_sci_port(port); + struct plat_sci_reg *reg; int copied = 0; - /* - * XXX: Technically not limited to non-SCIFs, it's simply the - * SCLSR check that is for the moment SCIF-specific. This - * probably wants to be revisited for SCIFA/B as well as for - * factoring in SCI overrun detection. - */ - if (port->type != PORT_SCIF) + reg = sci_getreg(port, SCLSR); + if (!reg->size) return 0; if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) { -- cgit v1.2.3 From 0bd6c1a38f57127eeb9444ed74cf5b65f36f563c Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Jun 2011 20:39:31 -0400 Subject: ktest: Add POST/PRE_BUILD options There are some cases that a patch may be needed to apply to the kernel in patchcheck or bisect tests. Adding a PRE_BUILD option to apply the patch and POST_BUILD to remove it, allows for this to be done easily. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 31 ++++++++++++++++++++++++++++--- tools/testing/ktest/sample.conf | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 3 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index fb46e12eb1d7..d0e1de6e4d1f 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -63,6 +63,10 @@ my $output_config; my $test_type; my $build_type; my $build_options; +my $pre_build; +my $post_build; +my $pre_build_die; +my $post_build_die; my $reboot_type; my $reboot_script; my $power_cycle; @@ -1189,6 +1193,14 @@ sub build { unlink $buildlog; + if (defined($pre_build)) { + my $ret = run_command $pre_build; + if (!$ret && defined($pre_build_die) && + $pre_build_die) { + dodie "failed to pre_build\n"; + } + } + if ($type =~ /^useconfig:(.*)/) { run_command "cp $1 $output_config" or dodie "could not copy $1 to .config"; @@ -1236,13 +1248,22 @@ sub build { make_oldconfig; $redirect = "$buildlog"; - if (!run_command "$make $build_options") { - undef $redirect; + my $build_ret = run_command "$make $build_options"; + undef $redirect; + + if (defined($post_build)) { + my $ret = run_command $post_build; + if (!$ret && defined($post_build_die) && + $post_build_die) { + dodie "failed to post_build\n"; + } + } + + if (!$build_ret) { # bisect may need this to pass return 0 if ($in_bisect); fail "failed build" and return 0; } - undef $redirect; return 1; } @@ -2244,6 +2265,10 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { $test_type = set_test_option("TEST_TYPE", $i); $build_type = set_test_option("BUILD_TYPE", $i); $build_options = set_test_option("BUILD_OPTIONS", $i); + $pre_build = set_test_option("PRE_BUILD", $i); + $post_build = set_test_option("POST_BUILD", $i); + $pre_build_die = set_test_option("PRE_BUILD_DIE", $i); + $post_build_die = set_test_option("POST_BUILD_DIE", $i); $power_cycle = set_test_option("POWER_CYCLE", $i); $reboot = set_test_option("REBOOT", $i); $noclean = set_test_option("BUILD_NOCLEAN", $i); diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index 0e5f764ac9ee..1092e4759c1e 100644 --- a/tools/testing/ktest/sample.conf +++ 
b/tools/testing/ktest/sample.conf @@ -293,6 +293,38 @@ # or on some systems: #POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION +# If there is a script that you require to run before the build is done +# you can specify it with PRE_BUILD. +# +# One example may be if you must add a temporary patch to the build to +# fix a unrelated bug to perform a patchcheck test. This will apply the +# patch before each build that is made. Use the POST_BUILD to do a git reset --hard +# to remove the patch. +# +# (default undef) +#PRE_BUILD = cd ${BUILD_DIR} && patch -p1 < /tmp/temp.patch + +# To specify if the test should fail if the PRE_BUILD fails, +# PRE_BUILD_DIE needs to be set to 1. Otherwise the PRE_BUILD +# result is ignored. +# (default 0) +# PRE_BUILD_DIE = 1 + +# If there is a script that should run after the build is done +# you can specify it with POST_BUILD. +# +# As the example in PRE_BUILD, POST_BUILD can be used to reset modifications +# made by the PRE_BUILD. +# +# (default undef) +#POST_BUILD = cd ${BUILD_DIR} && git reset --hard + +# To specify if the test should fail if the POST_BUILD fails, +# POST_BUILD_DIE needs to be set to 1. Otherwise the POST_BUILD +# result is ignored. +# (default 0) +#POST_BUILD_DIE = 1 + # Way to reboot the box to the test kernel. # Only valid options so far are "grub" and "script" # (default grub) -- cgit v1.2.3 From 4892063043282229c1296d86a2f86989ef30a97c Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Jun 2011 20:42:19 -0400 Subject: ktest: Have the testing tmp dir include machine name As multiple tests may be executed by the same server, have the test machine name add uniqueness to the value of the temp directory. Otherwise the temp directories may overwrite each other's tests. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 2 +- tools/testing/ktest/sample.conf | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index d0e1de6e4d1f..24286cea14af 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -27,7 +27,7 @@ $default{"TEST_TYPE"} = "test"; $default{"BUILD_TYPE"} = "randconfig"; $default{"MAKE_CMD"} = "make"; $default{"TIMEOUT"} = 120; -$default{"TMP_DIR"} = "/tmp/ktest"; +$default{"TMP_DIR"} = "/tmp/ktest/\${MACHINE}"; $default{"SLEEP_TIME"} = 60; # sleep time between tests $default{"BUILD_NOCLEAN"} = 0; $default{"REBOOT_ON_ERROR"} = 0; diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index 1092e4759c1e..e2d8d8338e9a 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -392,8 +392,8 @@ #ADD_CONFIG = /home/test/config-broken # The location on the host where to write temp files -# (default /tmp/ktest) -#TMP_DIR = /tmp/ktest +# (default /tmp/ktest/${MACHINE}) +#TMP_DIR = /tmp/ktest/${MACHINE} # Optional log file to write the status (recommended) # Note, this is a DEFAULT section only option. -- cgit v1.2.3 From e7b13441895fd0f95c34a004eed364524cca71cb Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Jun 2011 20:44:36 -0400 Subject: ktest: Fix tar extracting of modules to target The tar command to create the module directory is cjf, but the extraction only had xf. This works on most versions of tar, but some versions of tar require xjf for extraction as well. 
Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 24286cea14af..5b35fa04429b 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -1087,7 +1087,7 @@ sub install { unlink "$tmpdir/$modtar"; - run_ssh "'(cd / && tar xf /tmp/$modtar)'" or + run_ssh "'(cd / && tar xjf /tmp/$modtar)'" or dodie "failed to tar modules"; run_ssh "rm -f /tmp/$modtar"; -- cgit v1.2.3 From 1990207d538885e678f374e3e79f454c2e6c7383 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Jun 2011 20:46:25 -0400 Subject: ktest: Add IGNORE_WARNINGS to ignore warnings in some patches When doing a patchcheck test, gcc may produce warnings that are acceptable, and the test should not fail on that commit. Adding an IGNORE_WARNINGS option that lists the space-delimited SHA1s to ignore lets the user avoid having the test fail on those commits. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 14 +++++++++++++- tools/testing/ktest/sample.conf | 8 +++++++- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 5b35fa04429b..5924f14ba418 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -104,6 +104,7 @@ my $monitor_cnt = 0; my $sleep_time; my $bisect_sleep_time; my $patchcheck_sleep_time; +my $ignore_warnings; my $store_failures; my $test_name; my $timeout; @@ -2074,6 +2075,13 @@ sub patchcheck { @list = reverse @list; my $save_clean = $noclean; + my %ignored_warnings; + + if (defined($ignore_warnings)) { + foreach my $sha1 (split /\s+/, $ignore_warnings) { + $ignored_warnings{$sha1} = 1; + } + } $in_patchcheck = 1; foreach my $item (@list) { @@ -2100,7 +2108,10 @@ sub patchcheck { build "oldconfig" or return 0; } - check_buildlog $sha1 or return 0; + + if (!defined($ignored_warnings{$sha1})) { + check_buildlog $sha1 or return 0; + } next if ($type eq "build"); @@ -2288,6 +2299,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { $sleep_time = set_test_option("SLEEP_TIME", $i); $bisect_sleep_time = set_test_option("BISECT_SLEEP_TIME", $i); $patchcheck_sleep_time = set_test_option("PATCHCHECK_SLEEP_TIME", $i); + $ignore_warnings = set_test_option("IGNORE_WARNINGS", $i); $bisect_manual = set_test_option("BISECT_MANUAL", $i); $bisect_skip = set_test_option("BISECT_SKIP", $i); $config_bisect_good = set_test_option("CONFIG_BISECT_GOOD", $i); diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index e2d8d8338e9a..82c966c32d61 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -604,7 +604,12 @@ # build, boot, test. # # Note, the build test will look for warnings, if a warning occurred -# in a file that a commit touches, the build will fail, unless +# IGNORE_WARNINGS is set for the given commit's sha1 +# +# IGNORE_WARNINGS can be used to disable the failure of patchcheck +# on a particular commit (SHA1). You can add more than one commit +# by adding a list of SHA1s that are space delimited. # # If BUILD_NOCLEAN is set, then make mrproper will not be run on # any of the builds, just like all other TEST_TYPE tests. 
But @@ -619,6 +624,7 @@ # PATCHCHECK_TYPE = boot # PATCHCHECK_START = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7 # PATCHCHECK_END = HEAD~2 +# IGNORE_WARNINGS = 42f9c6b69b54946ffc0515f57d01dc7f5c0e4712 0c17ca2c7187f431d8ffc79e81addc730f33d128 # # # -- cgit v1.2.3 From ddf607e5f853ae172e81e6051e1e12e24ea8a3c6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Jun 2011 20:49:13 -0400 Subject: ktest: Add helper function to avoid duplicate code Several places had the following code: get_grub_index; get_version; install; start_monitor; return monitor; Creating a function "start_monitor_and_boot()" replaces these multiple uses with a single call. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 46 +++++++++++++++++++------------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 5924f14ba418..099ceeed4144 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -1096,6 +1096,23 @@ sub install { do_post_install; } +sub get_version { + # get the release name + doprint "$make kernelrelease ... "; + $version = `$make kernelrelease | tail -1`; + chomp($version); + doprint "$version\n"; +} + +sub start_monitor_and_boot { + get_grub_index; + get_version; + install; + + start_monitor; + return monitor; +} + sub check_buildlog { my ($patch) = @_; @@ -1307,14 +1324,6 @@ sub success { } } -sub get_version { - # get the release name - doprint "$make kernelrelease ... "; - $version = `$make kernelrelease | tail -1`; - chomp($version); - doprint "$version\n"; -} - sub answer_bisect { for (;;) { doprint "Pass or fail? [p/f]"; @@ -1479,12 +1488,7 @@ sub run_bisect_test { dodie "Failed on build" if $failed; # Now boot the box - get_grub_index; - get_version; - install; - - start_monitor; - monitor or $failed = 1; + start_monitor_and_boot or $failed = 1; if ($type ne "boot") { if ($failed && $bisect_skip) { @@ -2115,14 +2119,9 @@ sub patchcheck { next if ($type eq "build"); - get_grub_index; - get_version; - install; - my $failed = 0; - start_monitor; - monitor or $failed = 1; + start_monitor_and_boot or $failed = 1; if (!$failed && $type ne "boot"){ do_run_test or $failed = 1; @@ -2393,13 +2392,8 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { } if ($test_type ne "build") { - get_grub_index; - get_version; - install; - my $failed = 0; - start_monitor; - monitor or $failed = 1;; + start_monitor_and_boot or $failed = 1; if (!$failed && $test_type ne "boot" && defined($run_test)) { do_run_test or $failed = 1; -- cgit v1.2.3 From 0df213ca31f43faf0b1d6c7108e190ff198b42d3 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Jun 2011 20:51:37 -0400 Subject: ktest: Require one TEST_START in config file There have been too many times that I put in one too many SKIP TEST_STARTs and accidentally started the test with the default randconfig, so I added this to have ktest ask the user which test they want to run if no TEST_START is specified. Now if I accidentally start the test with all TEST_STARTs skipped, ktest asks which test I want to run, and I have a chance to kill it before it does a make mrproper on my build directory. 
Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 099ceeed4144..6166f3a0f2ea 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -345,6 +345,7 @@ sub read_config { my $num_tests_set = 0; my $skip = 0; my $rest; + my $test_case = 0; while () { @@ -370,6 +371,7 @@ sub read_config { $rest = $1; $skip = 1; } else { + $test_case = 1; $skip = 0; } @@ -474,6 +476,15 @@ sub read_config { # make sure we have all mandatory configs get_ktest_configs; + # was a test specified? + if (!$test_case) { + print "No test case specified.\n"; + print "What test case would you like to run?\n"; + my $ans = ; + chomp $ans; + $default{"TEST_TYPE"} = $ans; + } + # set any defaults foreach my $default (keys %default) { -- cgit v1.2.3 From cf8e98d15361f8c594da00a3f7a500787fc1a426 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Wed, 15 Jun 2011 10:35:38 -0400 Subject: arch/tile: remove useless set_fixmap_nocache() macro TILE doesn't support PAGE_KERNEL_NOCACHE so the macro isn't useful; it's a copy-and-paste from the first version of this header in 2007. Signed-off-by: Chris Metcalf --- arch/tile/include/asm/fixmap.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h index 51537ff9265a..c66f7933beaa 100644 --- a/arch/tile/include/asm/fixmap.h +++ b/arch/tile/include/asm/fixmap.h @@ -75,12 +75,6 @@ extern void __set_fixmap(enum fixed_addresses idx, #define set_fixmap(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL) -/* - * Some hardware wants to get fixmapped without caching. - */ -#define set_fixmap_nocache(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) - #define clear_fixmap(idx) \ __set_fixmap(idx, 0, __pgprot(0)) -- cgit v1.2.3 From e2f5e5a71dfe6bf155590de0fdd6d748ac79bf76 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Fri, 10 Jun 2011 15:15:05 -0700 Subject: dma/ep93xx_dma.c: local symbols should be static The symbol 'ep93xx_dma_prep_dma_memcpy' is only used in this driver and should be marked static. Signed-off-by: H Hartley Sweeten Cc: Mika Westerberg Cc: Dan Williams Cc: Vinod Koul Acked-by: Mika Westerberg Signed-off-by: Vinod Koul --- drivers/dma/ep93xx_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 0766c1e53b1d..5d7a49bd7c26 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -902,7 +902,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan) * * Returns a valid DMA descriptor or %NULL in case of failure. */ -struct dma_async_tx_descriptor * +static struct dma_async_tx_descriptor * ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { -- cgit v1.2.3 From c4e0dd7835d12d9765a372b586a5020ac29cc706 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 16 Jun 2011 05:08:09 +0000 Subject: dmaengine: shdma: add to_sh_dev define This patch adds a "to_sh_dev" macro and cleans up the code. 
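The cleanup itself boils down to replacing the open-coded container_of() lookups with the new helper; a condensed before and after, taken from the hunks that follow:

	/* before */
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						    struct sh_dmae_device, common);

	/* after */
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);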
Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- drivers/dma/shdma.c | 13 ++++--------- drivers/dma/shdma.h | 2 ++ 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 2a638f9f09a2..d2fb16d31bb9 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -130,8 +130,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) { - struct sh_dmae_device *shdev = container_of(sh_chan->common.device, - struct sh_dmae_device, common); + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); @@ -144,8 +143,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) { - struct sh_dmae_device *shdev = container_of(sh_chan->common.device, - struct sh_dmae_device, common); + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int i; @@ -209,8 +207,7 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) { - struct sh_dmae_device *shdev = container_of(sh_chan->common.device, - struct sh_dmae_device, common); + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; u16 __iomem *addr = shdev->dmars; @@ -296,9 +293,7 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) static const struct sh_dmae_slave_config *sh_dmae_find_slave( struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param) { - struct dma_device *dma_dev = sh_chan->common.device; - struct sh_dmae_device *shdev = container_of(dma_dev, - struct sh_dmae_device, common); + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int i; diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 5ae9fc512180..6c73b654a5c3 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h @@ -52,5 +52,7 @@ struct sh_dmae_device { #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) +#define to_sh_dev(chan) container_of(chan->common.device,\ + struct sh_dmae_device, common) #endif /* __DMA_SHDMA_H */ -- cgit v1.2.3 From 1d2c0980262e70f5643df34493ffd7e608282c16 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 16 Jun 2011 05:08:18 +0000 Subject: dmaengine: shdma: tidyup spin_unlock_bh on sh_chan_xfer_ld_queue Having multiple spin_unlock_bh calls in the same function is not readable. 
Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- drivers/dma/shdma.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index d2fb16d31bb9..3d22eb82289d 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -766,10 +766,8 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) spin_lock_bh(&sh_chan->desc_lock); /* DMA work check */ - if (dmae_is_busy(sh_chan)) { - spin_unlock_bh(&sh_chan->desc_lock); - return; - } + if (dmae_is_busy(sh_chan)) + goto sh_chan_xfer_ld_queue_end; /* Find the first not transferred descriptor */ list_for_each_entry(desc, &sh_chan->ld_queue, node) @@ -783,6 +781,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) break; } +sh_chan_xfer_ld_queue_end: spin_unlock_bh(&sh_chan->desc_lock); } -- cgit v1.2.3 From 090b91805a97f58a7deff0f2b40aad1cc2f1b7e0 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 16 Jun 2011 05:08:28 +0000 Subject: dmaengine: shdma: fixup parameter definition on dmae_set_dmars chan_pdata->dmars_bit is unsigned int Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- drivers/dma/shdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 3d22eb82289d..41a21b322960 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -211,7 +211,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) struct sh_dmae_pdata *pdata = shdev->pdata; const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; u16 __iomem *addr = shdev->dmars; - int shift = chan_pdata->dmars_bit; + unsigned int shift = chan_pdata->dmars_bit; if (dmae_is_busy(sh_chan)) return -EBUSY; -- cgit v1.2.3 From 36715cef0770b7e2547892b7c3197fc024274630 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sat, 11 Jun 2011 17:53:57 -0600 Subject: writeback: skip tmpfs early in balance_dirty_pages_ratelimited_nr() This helps prevent tmpfs dirtiers from skewing the per-cpu bdp_ratelimits. Acked-by: Jan Kara Signed-off-by: Wu Fengguang --- mm/page-writeback.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index b2529f8f8be0..1965d05a29cc 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -490,9 +490,6 @@ static void balance_dirty_pages(struct address_space *mapping, bool dirty_exceeded = false; struct backing_dev_info *bdi = mapping->backing_dev_info; - if (!bdi_cap_account_dirty(bdi)) - return; - for (;;) { struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, @@ -631,9 +628,13 @@ static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0; void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, unsigned long nr_pages_dirtied) { + struct backing_dev_info *bdi = mapping->backing_dev_info; unsigned long ratelimit; unsigned long *p; + if (!bdi_cap_account_dirty(bdi)) + return; + ratelimit = ratelimit_pages; if (mapping->backing_dev_info->dirty_exceeded) ratelimit = 8; -- cgit v1.2.3 From 08ef2e427b59393d68a65b16e97e894b662a5573 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 20 Jun 2011 12:24:53 +0900 Subject: sh: Fix up build fallout from serial merge. This fixes up build issues for SH7720/SH7722/SH7750 that crept in with the serial rework. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/serial-sh7720.c | 1 + arch/sh/kernel/cpu/sh4/setup-sh7750.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7720.c b/arch/sh/kernel/cpu/sh3/serial-sh7720.c index 8234e1e7abd9..8832c526cdf9 100644 --- a/arch/sh/kernel/cpu/sh3/serial-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/serial-sh7720.c @@ -2,6 +2,7 @@ #include #include #include +#include static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag) { diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index c10db5b96e59..98cc0c794c76 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -38,7 +38,7 @@ static struct platform_device rtc_device = { static struct plat_sci_port sci_platform_data = { .mapbase = 0xffe00000, - .port_reg = 0xffe0001C + .port_reg = 0xffe0001C, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_TE | SCSCR_RE, .scbrr_algo_id = SCBRR_ALGO_2, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 863249dbf05b..278a0e572158 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -22,6 +22,7 @@ #include #include +#include static const struct sh_dmae_slave_config sh7722_dmae_slaves[] = { { -- cgit v1.2.3 From b85a3ef4ac65169b65fd2fe9bec7912bbf475ba4 Mon Sep 17 00:00:00 2001 From: John Linn Date: Mon, 20 Jun 2011 11:47:27 -0600 Subject: ARM: Xilinx: Adding Xilinx board support The 1st board support is minimal to get a system up and running on the Xilinx platform. This platform reuses the clock implementation from plat-versatile, and it depends entirely on CONFIG_OF support. There is only one board support file which obtains all device information from a device tree dtb file which is passed to the kernel at boot time. 
Signed-off-by: John Linn --- Documentation/devicetree/bindings/arm/xilinx.txt | 7 + arch/arm/Kconfig | 14 ++ arch/arm/Makefile | 2 + arch/arm/boot/dts/zynq-ep107.dts | 52 ++++ arch/arm/mach-zynq/Makefile | 6 + arch/arm/mach-zynq/Makefile.boot | 3 + arch/arm/mach-zynq/board_dt.c | 37 +++ arch/arm/mach-zynq/common.c | 102 ++++++++ arch/arm/mach-zynq/common.h | 29 +++ arch/arm/mach-zynq/include/mach/clkdev.h | 32 +++ arch/arm/mach-zynq/include/mach/debug-macro.S | 36 +++ arch/arm/mach-zynq/include/mach/entry-macro.S | 30 +++ arch/arm/mach-zynq/include/mach/hardware.h | 18 ++ arch/arm/mach-zynq/include/mach/io.h | 33 +++ arch/arm/mach-zynq/include/mach/irqs.h | 21 ++ arch/arm/mach-zynq/include/mach/memory.h | 22 ++ arch/arm/mach-zynq/include/mach/system.h | 28 +++ arch/arm/mach-zynq/include/mach/timex.h | 23 ++ arch/arm/mach-zynq/include/mach/uart.h | 25 ++ arch/arm/mach-zynq/include/mach/uncompress.h | 51 ++++ arch/arm/mach-zynq/include/mach/vmalloc.h | 20 ++ arch/arm/mach-zynq/include/mach/zynq_soc.h | 48 ++++ arch/arm/mach-zynq/timer.c | 298 +++++++++++++++++++++++ arch/arm/mm/Kconfig | 2 +- 24 files changed, 938 insertions(+), 1 deletion(-) create mode 100644 Documentation/devicetree/bindings/arm/xilinx.txt create mode 100644 arch/arm/boot/dts/zynq-ep107.dts create mode 100644 arch/arm/mach-zynq/Makefile create mode 100644 arch/arm/mach-zynq/Makefile.boot create mode 100644 arch/arm/mach-zynq/board_dt.c create mode 100644 arch/arm/mach-zynq/common.c create mode 100644 arch/arm/mach-zynq/common.h create mode 100644 arch/arm/mach-zynq/include/mach/clkdev.h create mode 100644 arch/arm/mach-zynq/include/mach/debug-macro.S create mode 100644 arch/arm/mach-zynq/include/mach/entry-macro.S create mode 100644 arch/arm/mach-zynq/include/mach/hardware.h create mode 100644 arch/arm/mach-zynq/include/mach/io.h create mode 100644 arch/arm/mach-zynq/include/mach/irqs.h create mode 100644 arch/arm/mach-zynq/include/mach/memory.h create mode 100644 arch/arm/mach-zynq/include/mach/system.h create mode 100644 arch/arm/mach-zynq/include/mach/timex.h create mode 100644 arch/arm/mach-zynq/include/mach/uart.h create mode 100644 arch/arm/mach-zynq/include/mach/uncompress.h create mode 100644 arch/arm/mach-zynq/include/mach/vmalloc.h create mode 100644 arch/arm/mach-zynq/include/mach/zynq_soc.h create mode 100644 arch/arm/mach-zynq/timer.c diff --git a/Documentation/devicetree/bindings/arm/xilinx.txt b/Documentation/devicetree/bindings/arm/xilinx.txt new file mode 100644 index 000000000000..6f1ed830b4f7 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/xilinx.txt @@ -0,0 +1,7 @@ +Xilinx Zynq EP107 Emulation Platform board + +This board is an emulation platform for the Zynq product which is +based on an ARM Cortex A9 processor. + +Required root node properties: + - compatible = "xlnx,zynq-ep107"; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9adc278a22ab..9e76a75a490b 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -879,6 +879,20 @@ config ARCH_VT8500 select HAVE_PWM help Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip. 
+ +config ARCH_ZYNQ + bool "Xilinx Zynq ARM Cortex A9 Platform" + select CPU_V7 + select GENERIC_TIME + select GENERIC_CLOCKEVENTS + select CLKDEV_LOOKUP + select ARM_GIC + select ARM_AMBA + select ICST + select USE_OF + help + Support for Xilinx Zynq ARM Cortex A9 Platform + endchoice # diff --git a/arch/arm/Makefile b/arch/arm/Makefile index f5b2b390c8f2..999c17aa8571 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -196,6 +196,7 @@ machine-$(CONFIG_MACH_SPEAR300) := spear3xx machine-$(CONFIG_MACH_SPEAR310) := spear3xx machine-$(CONFIG_MACH_SPEAR320) := spear3xx machine-$(CONFIG_MACH_SPEAR600) := spear6xx +machine-$(CONFIG_ARCH_ZYNQ) := zynq # Platform directory name. This list is sorted alphanumerically # by CONFIG_* macro name. @@ -203,6 +204,7 @@ plat-$(CONFIG_ARCH_MXC) := mxc plat-$(CONFIG_ARCH_OMAP) := omap plat-$(CONFIG_ARCH_S3C64XX) := samsung plat-$(CONFIG_ARCH_TCC_926) := tcc +plat-$(CONFIG_ARCH_ZYNQ) := versatile plat-$(CONFIG_PLAT_IOP) := iop plat-$(CONFIG_PLAT_NOMADIK) := nomadik plat-$(CONFIG_PLAT_ORION) := orion diff --git a/arch/arm/boot/dts/zynq-ep107.dts b/arch/arm/boot/dts/zynq-ep107.dts new file mode 100644 index 000000000000..37ca192fb193 --- /dev/null +++ b/arch/arm/boot/dts/zynq-ep107.dts @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/ { + model = "Xilinx Zynq EP107"; + compatible = "xlnx,zynq-ep107"; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&intc>; + + memory { + device_type = "memory"; + reg = <0x0 0x10000000>; + }; + + chosen { + bootargs = "console=ttyPS0,9600 root=/dev/ram rw initrd=0x800000,8M earlyprintk"; + linux,stdout-path = &uart0; + }; + + amba { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + intc: interrupt-controller@f8f01000 { + interrupt-controller; + compatible = "arm,gic"; + reg = <0xF8F01000 0x1000>; + #interrupt-cells = <2>; + }; + + uart0: uart@e0000000 { + compatible = "xlnx,xuartps"; + reg = <0xE0000000 0x1000>; + interrupts = <59 0>; + clock = <50000000>; + }; + }; +}; diff --git a/arch/arm/mach-zynq/Makefile b/arch/arm/mach-zynq/Makefile new file mode 100644 index 000000000000..c550c67aa893 --- /dev/null +++ b/arch/arm/mach-zynq/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the linux kernel. +# + +# Common support +obj-y := common.o timer.o board_dt.o diff --git a/arch/arm/mach-zynq/Makefile.boot b/arch/arm/mach-zynq/Makefile.boot new file mode 100644 index 000000000000..67039c3e0c48 --- /dev/null +++ b/arch/arm/mach-zynq/Makefile.boot @@ -0,0 +1,3 @@ + zreladdr-y := 0x00008000 +params_phys-y := 0x00000100 +initrd_phys-y := 0x00800000 diff --git a/arch/arm/mach-zynq/board_dt.c b/arch/arm/mach-zynq/board_dt.c new file mode 100644 index 000000000000..5b4710d09258 --- /dev/null +++ b/arch/arm/mach-zynq/board_dt.c @@ -0,0 +1,37 @@ +/* + * This file contains code for boards with device tree support. 
+ * + * Copyright (C) 2011 Xilinx + * + * based on arch/arm/mach-realview/core.c + * + * Copyright (C) 1999 - 2003 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include "common.h" + +static const char *xilinx_dt_match[] = { + "xlnx,zynq-ep107", + NULL +}; + +MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") + .map_io = xilinx_map_io, + .init_irq = xilinx_irq_init, + .init_machine = xilinx_init_machine, + .timer = &xttcpss_sys_timer, + .dt_compat = xilinx_dt_match, +MACHINE_END diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c new file mode 100644 index 000000000000..b3ac5c2e12dc --- /dev/null +++ b/arch/arm/mach-zynq/common.c @@ -0,0 +1,102 @@ +/* + * This file contains common code that is intended to be used across + * boards so that it's not replicated. + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include "common.h" + +static struct of_device_id zynq_of_bus_ids[] __initdata = { + { .compatible = "simple-bus", }, + {} +}; + +/** + * xilinx_init_machine() - System specific initialization, intended to be + * called from board specific initialization. + */ +void __init xilinx_init_machine(void) +{ +#ifdef CONFIG_CACHE_L2X0 + /* + * 64KB way size, 8-way associativity, parity disabled + */ + l2x0_init(PL310_L2CC_BASE, 0x02060000, 0xF0F0FFFF); +#endif + + of_platform_bus_probe(NULL, zynq_of_bus_ids, NULL); +} + +/** + * xilinx_irq_init() - Interrupt controller initialization for the GIC. + */ +void __init xilinx_irq_init(void) +{ + gic_init(0, 29, SCU_GIC_DIST_BASE, SCU_GIC_CPU_BASE); +} + +/* The minimum devices needed to be mapped before the VM system is up and + * running include the GIC, UART and Timer Counter. + */ + +static struct map_desc io_desc[] __initdata = { + { + .virtual = TTC0_VIRT, + .pfn = __phys_to_pfn(TTC0_PHYS), + .length = SZ_4K, + .type = MT_DEVICE, + }, { + .virtual = SCU_PERIPH_VIRT, + .pfn = __phys_to_pfn(SCU_PERIPH_PHYS), + .length = SZ_8K, + .type = MT_DEVICE, + }, { + .virtual = PL310_L2CC_VIRT, + .pfn = __phys_to_pfn(PL310_L2CC_PHYS), + .length = SZ_4K, + .type = MT_DEVICE, + }, + +#ifdef CONFIG_DEBUG_LL + { + .virtual = UART0_VIRT, + .pfn = __phys_to_pfn(UART0_PHYS), + .length = SZ_4K, + .type = MT_DEVICE, + }, +#endif + +}; + +/** + * xilinx_map_io() - Create memory mappings needed for early I/O. 
+ */ +void __init xilinx_map_io(void) +{ + iotable_init(io_desc, ARRAY_SIZE(io_desc)); +} diff --git a/arch/arm/mach-zynq/common.h b/arch/arm/mach-zynq/common.h new file mode 100644 index 000000000000..bca21968f80b --- /dev/null +++ b/arch/arm/mach-zynq/common.h @@ -0,0 +1,29 @@ +/* + * This file contains common function prototypes to avoid externs + * in the c files. + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MACH_ZYNQ_COMMON_H__ +#define __MACH_ZYNQ_COMMON_H__ + +#include +#include + +extern void xilinx_init_machine(void); +extern void xilinx_irq_init(void); +extern void xilinx_map_io(void); + +extern struct sys_timer xttcpss_sys_timer; + +#endif diff --git a/arch/arm/mach-zynq/include/mach/clkdev.h b/arch/arm/mach-zynq/include/mach/clkdev.h new file mode 100644 index 000000000000..c6e73d81a459 --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/clkdev.h @@ -0,0 +1,32 @@ +/* + * arch/arm/mach-zynq/include/mach/clkdev.h + * + * Copyright (C) 2011 Xilinx, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __MACH_CLKDEV_H__ +#define __MACH_CLKDEV_H__ + +#include + +struct clk { + unsigned long rate; + const struct clk_ops *ops; + const struct icst_params *params; + void __iomem *vcoreg; +}; + +#define __clk_get(clk) ({ 1; }) +#define __clk_put(clk) do { } while (0) + +#endif diff --git a/arch/arm/mach-zynq/include/mach/debug-macro.S b/arch/arm/mach-zynq/include/mach/debug-macro.S new file mode 100644 index 000000000000..9f664d5eb81d --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/debug-macro.S @@ -0,0 +1,36 @@ +/* arch/arm/mach-zynq/include/mach/debug-macro.S + * + * Debugging macro include header + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + + .macro addruart, rp, rv + ldr \rp, =LL_UART_PADDR @ physical + ldr \rv, =LL_UART_VADDR @ virtual + .endm + + .macro senduart,rd,rx + str \rd, [\rx, #UART_FIFO_OFFSET] @ TXDATA + .endm + + .macro waituart,rd,rx + .endm + + .macro busyuart,rd,rx +1002: ldr \rd, [\rx, #UART_SR_OFFSET] @ get status register + tst \rd, #UART_SR_TXFULL @ + bne 1002b @ wait if FIFO is full + .endm diff --git a/arch/arm/mach-zynq/include/mach/entry-macro.S b/arch/arm/mach-zynq/include/mach/entry-macro.S new file mode 100644 index 000000000000..3cfc01b37461 --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/entry-macro.S @@ -0,0 +1,30 @@ +/* + * arch/arm/mach-zynq/include/mach/entry-macro.S + * + * Low-level IRQ helper macros + * + * Copyright (C) 2011 Xilinx + * + * based on arch/plat-mxc/include/mach/entry-macro.S + * + * Copyright (C) 2007 Lennert Buytenhek + * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + + .macro disable_fiq + .endm + + .macro arch_ret_to_user, tmp1, tmp2 + .endm diff --git a/arch/arm/mach-zynq/include/mach/hardware.h b/arch/arm/mach-zynq/include/mach/hardware.h new file mode 100644 index 000000000000..d558d8a94be7 --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/hardware.h @@ -0,0 +1,18 @@ +/* arch/arm/mach-zynq/include/mach/hardware.h + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MACH_HARDWARE_H__ +#define __MACH_HARDWARE_H__ + +#endif diff --git a/arch/arm/mach-zynq/include/mach/io.h b/arch/arm/mach-zynq/include/mach/io.h new file mode 100644 index 000000000000..39d9885e0e9a --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/io.h @@ -0,0 +1,33 @@ +/* arch/arm/mach-zynq/include/mach/io.h + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MACH_IO_H__ +#define __MACH_IO_H__ + +/* Allow IO space to be anywhere in the memory */ + +#define IO_SPACE_LIMIT 0xffff + +/* IO address mapping macros, nothing special at this time but required */ + +#ifdef __ASSEMBLER__ +#define IOMEM(x) (x) +#else +#define IOMEM(x) ((void __force __iomem *)(x)) +#endif + +#define __io(a) __typesafe_io(a) +#define __mem_pci(a) (a) + +#endif diff --git a/arch/arm/mach-zynq/include/mach/irqs.h b/arch/arm/mach-zynq/include/mach/irqs.h new file mode 100644 index 000000000000..5fb04fd3bac8 --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/irqs.h @@ -0,0 +1,21 @@ +/* arch/arm/mach-zynq/include/mach/irqs.h + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MACH_IRQS_H +#define __MACH_IRQS_H + +#define ARCH_NR_GPIOS 118 +#define NR_IRQS (128 + ARCH_NR_GPIOS) + +#endif diff --git a/arch/arm/mach-zynq/include/mach/memory.h b/arch/arm/mach-zynq/include/mach/memory.h new file mode 100644 index 000000000000..35a92634dcc1 --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/memory.h @@ -0,0 +1,22 @@ +/* arch/arm/mach-zynq/include/mach/memory.h + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MACH_MEMORY_H__ +#define __MACH_MEMORY_H__ + +#include + +#define PLAT_PHYS_OFFSET UL(0x0) + +#endif diff --git a/arch/arm/mach-zynq/include/mach/system.h b/arch/arm/mach-zynq/include/mach/system.h new file mode 100644 index 000000000000..1b84d705c675 --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/system.h @@ -0,0 +1,28 @@ +/* arch/arm/mach-zynq/include/mach/system.h + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MACH_SYSTEM_H__ +#define __MACH_SYSTEM_H__ + +static inline void arch_idle(void) +{ + cpu_do_idle(); +} + +static inline void arch_reset(char mode, const char *cmd) +{ + /* Add architecture specific reset processing here */ +} + +#endif diff --git a/arch/arm/mach-zynq/include/mach/timex.h b/arch/arm/mach-zynq/include/mach/timex.h new file mode 100644 index 000000000000..6c0245e42a5e --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/timex.h @@ -0,0 +1,23 @@ +/* arch/arm/mach-zynq/include/mach/timex.h + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MACH_TIMEX_H__ +#define __MACH_TIMEX_H__ + +/* the following is needed for the system to build but will be removed + in the future, the value is not important but won't hurt +*/ +#define CLOCK_TICK_RATE (100 * HZ) + +#endif diff --git a/arch/arm/mach-zynq/include/mach/uart.h b/arch/arm/mach-zynq/include/mach/uart.h new file mode 100644 index 000000000000..5c47c97156f3 --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/uart.h @@ -0,0 +1,25 @@ +/* arch/arm/mach-zynq/include/mach/uart.h + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MACH_UART_H__ +#define __MACH_UART_H__ + +#define UART_CR_OFFSET 0x00 /* Control Register [8:0] */ +#define UART_SR_OFFSET 0x2C /* Channel Status [11:0] */ +#define UART_FIFO_OFFSET 0x30 /* FIFO [15:0] or [7:0] */ + +#define UART_SR_TXFULL 0x00000010 /* TX FIFO full */ +#define UART_SR_TXEMPTY 0x00000008 /* TX FIFO empty */ + +#endif diff --git a/arch/arm/mach-zynq/include/mach/uncompress.h b/arch/arm/mach-zynq/include/mach/uncompress.h new file mode 100644 index 000000000000..af4e8447bfa3 --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/uncompress.h @@ -0,0 +1,51 @@ +/* arch/arm/mach-zynq/include/mach/uncompress.h + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MACH_UNCOMPRESS_H__ +#define __MACH_UNCOMPRESS_H__ + +#include +#include +#include +#include + +void arch_decomp_setup(void) +{ +} + +static inline void flush(void) +{ + /* + * Wait while the FIFO is not empty + */ + while (!(__raw_readl(IOMEM(LL_UART_PADDR + UART_SR_OFFSET)) & + UART_SR_TXEMPTY)) + cpu_relax(); +} + +#define arch_decomp_wdog() + +static void putc(char ch) +{ + /* + * Wait for room in the FIFO, then write the char into the FIFO + */ + while (__raw_readl(IOMEM(LL_UART_PADDR + UART_SR_OFFSET)) & + UART_SR_TXFULL) + cpu_relax(); + + __raw_writel(ch, IOMEM(LL_UART_PADDR + UART_FIFO_OFFSET)); +} + +#endif diff --git a/arch/arm/mach-zynq/include/mach/vmalloc.h b/arch/arm/mach-zynq/include/mach/vmalloc.h new file mode 100644 index 000000000000..2398eff1e8b8 --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/vmalloc.h @@ -0,0 +1,20 @@ +/* arch/arm/mach-zynq/include/mach/vmalloc.h + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MACH_VMALLOC_H__ +#define __MACH_VMALLOC_H__ + +#define VMALLOC_END 0xE0000000UL + +#endif diff --git a/arch/arm/mach-zynq/include/mach/zynq_soc.h b/arch/arm/mach-zynq/include/mach/zynq_soc.h new file mode 100644 index 000000000000..d0d3f8fb06dd --- /dev/null +++ b/arch/arm/mach-zynq/include/mach/zynq_soc.h @@ -0,0 +1,48 @@ +/* arch/arm/mach-zynq/include/mach/zynq_soc.h + * + * Copyright (C) 2011 Xilinx + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MACH_XILINX_SOC_H__ +#define __MACH_XILINX_SOC_H__ + +#define PERIPHERAL_CLOCK_RATE 2500000 + +/* For now, all mappings are flat (physical = virtual) + */ +#define UART0_PHYS 0xE0000000 +#define UART0_VIRT UART0_PHYS + +#define TTC0_PHYS 0xF8001000 +#define TTC0_VIRT TTC0_PHYS + +#define PL310_L2CC_PHYS 0xF8F02000 +#define PL310_L2CC_VIRT PL310_L2CC_PHYS + +#define SCU_PERIPH_PHYS 0xF8F00000 +#define SCU_PERIPH_VIRT SCU_PERIPH_PHYS + +/* The following are intended for the devices that are mapped early */ + +#define TTC0_BASE IOMEM(TTC0_VIRT) +#define SCU_PERIPH_BASE IOMEM(SCU_PERIPH_VIRT) +#define SCU_GIC_CPU_BASE (SCU_PERIPH_BASE + 0x100) +#define SCU_GIC_DIST_BASE (SCU_PERIPH_BASE + 0x1000) +#define PL310_L2CC_BASE IOMEM(PL310_L2CC_VIRT) + +/* + * Mandatory for CONFIG_LL_DEBUG, UART is mapped virtual = physical + */ +#define LL_UART_PADDR UART0_PHYS +#define LL_UART_VADDR UART0_VIRT + +#endif diff --git a/arch/arm/mach-zynq/timer.c b/arch/arm/mach-zynq/timer.c new file mode 100644 index 000000000000..c2c96cc7d6e7 --- /dev/null +++ b/arch/arm/mach-zynq/timer.c @@ -0,0 +1,298 @@ +/* + * This file contains driver for the Xilinx PS Timer Counter IP. 
+ * + * Copyright (C) 2011 Xilinx + * + * based on arch/mips/kernel/time.c timer driver + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "common.h" + +#define IRQ_TIMERCOUNTER0 42 + +/* + * This driver configures the 2 16-bit count-up timers as follows: + * + * T1: Timer 1, clocksource for generic timekeeping + * T2: Timer 2, clockevent source for hrtimers + * T3: Timer 3, + * + * The input frequency to the timer module for emulation is 2.5MHz which is + * common to all the timer channels (T1, T2, and T3). With a pre-scaler of 32, + * the timers are clocked at 78.125KHz (12.8 us resolution). + * + * The input frequency to the timer module in silicon will be 200MHz. With the + * pre-scaler of 32, the timers are clocked at 6.25MHz (160ns resolution). + */ +#define XTTCPSS_CLOCKSOURCE 0 /* Timer 1 as a generic timekeeping */ +#define XTTCPSS_CLOCKEVENT 1 /* Timer 2 as a clock event */ + +#define XTTCPSS_TIMER_BASE TTC0_BASE +#define XTTCPCC_EVENT_TIMER_IRQ (IRQ_TIMERCOUNTER0 + 1) +/* + * Timer Register Offset Definitions of Timer 1, Increment base address by 4 + * and use same offsets for Timer 2 + */ +#define XTTCPSS_CLK_CNTRL_OFFSET 0x00 /* Clock Control Reg, RW */ +#define XTTCPSS_CNT_CNTRL_OFFSET 0x0C /* Counter Control Reg, RW */ +#define XTTCPSS_COUNT_VAL_OFFSET 0x18 /* Counter Value Reg, RO */ +#define XTTCPSS_INTR_VAL_OFFSET 0x24 /* Interval Count Reg, RW */ +#define XTTCPSS_MATCH_1_OFFSET 0x30 /* Match 1 Value Reg, RW */ +#define XTTCPSS_MATCH_2_OFFSET 0x3C /* Match 2 Value Reg, RW */ +#define XTTCPSS_MATCH_3_OFFSET 0x48 /* Match 3 Value Reg, RW */ +#define XTTCPSS_ISR_OFFSET 0x54 /* Interrupt Status Reg, RO */ +#define XTTCPSS_IER_OFFSET 0x60 /* Interrupt Enable Reg, RW */ + +#define XTTCPSS_CNT_CNTRL_DISABLE_MASK 0x1 + +/* Setup the timers to use pre-scaling */ + +#define TIMER_RATE (PERIPHERAL_CLOCK_RATE / 32) + +/** + * struct xttcpss_timer - This definition defines local timer structure + * + * @base_addr: Base address of timer + **/ +struct xttcpss_timer { + void __iomem *base_addr; +}; + +static struct xttcpss_timer timers[2]; +static struct clock_event_device xttcpss_clockevent; + +/** + * xttcpss_set_interval - Set the timer interval value + * + * @timer: Pointer to the timer instance + * @cycles: Timer interval ticks + **/ +static void xttcpss_set_interval(struct xttcpss_timer *timer, + unsigned long cycles) +{ + u32 ctrl_reg; + + /* Disable the counter, set the counter value and re-enable counter */ + ctrl_reg = __raw_readl(timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET); + ctrl_reg |= XTTCPSS_CNT_CNTRL_DISABLE_MASK; + __raw_writel(ctrl_reg, timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET); + + __raw_writel(cycles, timer->base_addr + XTTCPSS_INTR_VAL_OFFSET); + + /* Reset the counter (0x10) so that it starts from 0, one-shot + mode makes this needed for timing to be right. 
*/ + ctrl_reg |= 0x10; + ctrl_reg &= ~XTTCPSS_CNT_CNTRL_DISABLE_MASK; + __raw_writel(ctrl_reg, timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET); +} + +/** + * xttcpss_clock_event_interrupt - Clock event timer interrupt handler + * + * @irq: IRQ number of the Timer + * @dev_id: void pointer to the xttcpss_timer instance + * + * returns: Always IRQ_HANDLED - success + **/ +static irqreturn_t xttcpss_clock_event_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = &xttcpss_clockevent; + struct xttcpss_timer *timer = dev_id; + + /* Acknowledge the interrupt and call event handler */ + __raw_writel(__raw_readl(timer->base_addr + XTTCPSS_ISR_OFFSET), + timer->base_addr + XTTCPSS_ISR_OFFSET); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct irqaction event_timer_irq = { + .name = "xttcpss clockevent", + .flags = IRQF_DISABLED | IRQF_TIMER, + .handler = xttcpss_clock_event_interrupt, +}; + +/** + * xttcpss_timer_hardware_init - Initialize the timer hardware + * + * Initialize the hardware to start the clock source, get the clock + * event timer ready to use, and hook up the interrupt. + **/ +static void __init xttcpss_timer_hardware_init(void) +{ + /* Setup the clock source counter to be an incrementing counter + * with no interrupt and it rolls over at 0xFFFF. Pre-scale + it by 32 also. Let it start running now. + */ + timers[XTTCPSS_CLOCKSOURCE].base_addr = XTTCPSS_TIMER_BASE; + + __raw_writel(0x0, timers[XTTCPSS_CLOCKSOURCE].base_addr + + XTTCPSS_IER_OFFSET); + __raw_writel(0x9, timers[XTTCPSS_CLOCKSOURCE].base_addr + + XTTCPSS_CLK_CNTRL_OFFSET); + __raw_writel(0x10, timers[XTTCPSS_CLOCKSOURCE].base_addr + + XTTCPSS_CNT_CNTRL_OFFSET); + + /* Setup the clock event timer to be an interval timer which + * is prescaled by 32 using the interval interrupt. Leave it + * disabled for now. 
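 * At that rate (2.5 MHz emulation clock / 32 prescaler = 78.125 kHz, i.e.
 * 12.8 us per tick) the 16-bit interval register spans at most about
 * 0xfffe / 78125 Hz, roughly 0.84 s, which is the bound that
 * clockevent_delta2ns(0xfffe, ...) further down converts to max_delta_ns.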
+ */ + + timers[XTTCPSS_CLOCKEVENT].base_addr = XTTCPSS_TIMER_BASE + 4; + + __raw_writel(0x23, timers[XTTCPSS_CLOCKEVENT].base_addr + + XTTCPSS_CNT_CNTRL_OFFSET); + __raw_writel(0x9, timers[XTTCPSS_CLOCKEVENT].base_addr + + XTTCPSS_CLK_CNTRL_OFFSET); + __raw_writel(0x1, timers[XTTCPSS_CLOCKEVENT].base_addr + + XTTCPSS_IER_OFFSET); + + /* Setup IRQ the clock event timer */ + event_timer_irq.dev_id = &timers[XTTCPSS_CLOCKEVENT]; + setup_irq(XTTCPCC_EVENT_TIMER_IRQ, &event_timer_irq); +} + +/** + * __raw_readl_cycles - Reads the timer counter register + * + * returns: Current timer counter register value + **/ +static cycle_t __raw_readl_cycles(struct clocksource *cs) +{ + struct xttcpss_timer *timer = &timers[XTTCPSS_CLOCKSOURCE]; + + return (cycle_t)__raw_readl(timer->base_addr + + XTTCPSS_COUNT_VAL_OFFSET); +} + + +/* + * Instantiate and initialize the clock source structure + */ +static struct clocksource clocksource_xttcpss = { + .name = "xttcpss_timer1", + .rating = 200, /* Reasonable clock source */ + .read = __raw_readl_cycles, + .mask = CLOCKSOURCE_MASK(16), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + + +/** + * xttcpss_set_next_event - Sets the time interval for next event + * + * @cycles: Timer interval ticks + * @evt: Address of clock event instance + * + * returns: Always 0 - success + **/ +static int xttcpss_set_next_event(unsigned long cycles, + struct clock_event_device *evt) +{ + struct xttcpss_timer *timer = &timers[XTTCPSS_CLOCKEVENT]; + + xttcpss_set_interval(timer, cycles); + return 0; +} + +/** + * xttcpss_set_mode - Sets the mode of timer + * + * @mode: Mode to be set + * @evt: Address of clock event instance + **/ +static void xttcpss_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + struct xttcpss_timer *timer = &timers[XTTCPSS_CLOCKEVENT]; + u32 ctrl_reg; + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + xttcpss_set_interval(timer, TIMER_RATE / HZ); + break; + case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + ctrl_reg = __raw_readl(timer->base_addr + + XTTCPSS_CNT_CNTRL_OFFSET); + ctrl_reg |= XTTCPSS_CNT_CNTRL_DISABLE_MASK; + __raw_writel(ctrl_reg, + timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET); + break; + case CLOCK_EVT_MODE_RESUME: + ctrl_reg = __raw_readl(timer->base_addr + + XTTCPSS_CNT_CNTRL_OFFSET); + ctrl_reg &= ~XTTCPSS_CNT_CNTRL_DISABLE_MASK; + __raw_writel(ctrl_reg, + timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET); + break; + } +} + +/* + * Instantiate and initialize the clock event structure + */ +static struct clock_event_device xttcpss_clockevent = { + .name = "xttcpss_timer2", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = xttcpss_set_next_event, + .set_mode = xttcpss_set_mode, + .rating = 200, +}; + +/** + * xttcpss_timer_init - Initialize the timer + * + * Initializes the timer hardware and register the clock source and clock event + * timers with Linux kernal timer framework + **/ +static void __init xttcpss_timer_init(void) +{ + xttcpss_timer_hardware_init(); + clocksource_register_hz(&clocksource_xttcpss, TIMER_RATE); + + /* Calculate the parameters to allow the clockevent to operate using + integer math + */ + clockevents_calc_mult_shift(&xttcpss_clockevent, TIMER_RATE, 4); + + xttcpss_clockevent.max_delta_ns = + clockevent_delta2ns(0xfffe, &xttcpss_clockevent); + xttcpss_clockevent.min_delta_ns = + clockevent_delta2ns(1, &xttcpss_clockevent); + + /* Indicate that clock event is on 1st CPU as SMP boot needs it */ + + 
xttcpss_clockevent.cpumask = cpumask_of(0); + clockevents_register_device(&xttcpss_clockevent); +} + +/* + * Instantiate and initialize the system timer structure + */ +struct sys_timer xttcpss_sys_timer = { + .init = xttcpss_timer_init, +}; diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 0074b8dba793..f0469823faaa 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -821,7 +821,7 @@ config CACHE_L2X0 depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ REALVIEW_EB_A9MP || SOC_IMX35 || SOC_IMX31 || MACH_REALVIEW_PBX || \ ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \ - ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE + ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || ARCH_ZYNQ default y select OUTER_CACHE select OUTER_CACHE_SYNC -- cgit v1.2.3 From 304eda32920b5e23f6b9bc12eb40c7dc52a464ba Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 9 Jun 2011 00:24:59 +0000 Subject: drm/gem: add hooks to notify driver when object handle is created/destroyed Nouveau is going to use these hooks to map/unmap objects from a client's private GPU address space. Signed-off-by: Ben Skeggs Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 21 +++++++++++++++++++-- include/drm/drmP.h | 2 ++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 74e4ff578017..bad335966398 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -210,6 +210,8 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle) idr_remove(&filp->object_idr, handle); spin_unlock(&filp->table_lock); + if (dev->driver->gem_close_object) + dev->driver->gem_close_object(obj, filp); drm_gem_object_handle_unreference_unlocked(obj); return 0; @@ -226,7 +228,8 @@ drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep) { - int ret; + struct drm_device *dev = obj->dev; + int ret; /* * Get the user-visible handle using idr. 
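/*
 * Minimal sketch only (names are illustrative, not nouveau's code): a driver
 * that wants the new notifications fills in the two hooks this patch adds,
 * e.g. to map/unmap the object in the client's private GPU address space.
 */
static int mydrv_gem_open_object(struct drm_gem_object *obj,
				 struct drm_file *file_priv)
{
	/* set up per-client state for this handle, e.g. a GPU mapping */
	return 0;
}

static void mydrv_gem_close_object(struct drm_gem_object *obj,
				   struct drm_file *file_priv)
{
	/* undo whatever gem_open_object set up for this handle */
}

static struct drm_driver mydrv_driver = {
	/* ... other callbacks ... */
	.gem_open_object  = mydrv_gem_open_object,
	.gem_close_object = mydrv_gem_close_object,
};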
@@ -247,6 +250,15 @@ again: return ret; drm_gem_object_handle_reference(obj); + + if (dev->driver->gem_open_object) { + ret = dev->driver->gem_open_object(obj, file_priv); + if (ret) { + drm_gem_handle_delete(file_priv, *handlep); + return ret; + } + } + return 0; } EXPORT_SYMBOL(drm_gem_handle_create); @@ -401,7 +413,12 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private) static int drm_gem_object_release_handle(int id, void *ptr, void *data) { + struct drm_file *file_priv = data; struct drm_gem_object *obj = ptr; + struct drm_device *dev = obj->dev; + + if (dev->driver->gem_close_object) + dev->driver->gem_close_object(obj, file_priv); drm_gem_object_handle_unreference_unlocked(obj); @@ -417,7 +434,7 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private) { idr_for_each(&file_private->object_idr, - &drm_gem_object_release_handle, NULL); + &drm_gem_object_release_handle, file_private); idr_remove_all(&file_private->object_idr); idr_destroy(&file_private->object_idr); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 738b3a5faa12..4912cb75ae4c 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -886,6 +886,8 @@ struct drm_driver { */ int (*gem_init_object) (struct drm_gem_object *obj); void (*gem_free_object) (struct drm_gem_object *obj); + int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); + void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); /* vga arb irq handler */ void (*vgaarb_irq)(struct drm_device *dev, bool state); -- cgit v1.2.3 From 20633442eb6ce7b0b55252a24b981afe42b3d361 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 13 Jun 2011 21:33:39 +0000 Subject: drm/radeon/kms: set dma_copy to NULL for r6xx+ No need to assign the same copy callback for both copy blit and dma. 
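A minimal sketch of what the NULL .copy_dma implies for callers; the types and
names here are illustrative, not the actual radeon dispatch code:

	struct copy_funcs {
		int (*copy_blit)(u64 src, u64 dst, unsigned int num_pages);
		int (*copy_dma)(u64 src, u64 dst, unsigned int num_pages);
	};

	static int do_copy(const struct copy_funcs *f, u64 src, u64 dst,
			   unsigned int num_pages)
	{
		/* use a dedicated DMA engine only when the ASIC provides one */
		if (f->copy_dma)
			return f->copy_dma(src, dst, num_pages);
		/* r6xx+ now leaves .copy_dma NULL, so the blit path is used */
		return f->copy_blit(src, dst, num_pages);
	}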
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_asic.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index b2449629537d..df8218bb83a6 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -625,7 +625,7 @@ static struct radeon_asic r600_asic = { .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &r600_cs_parse, .copy_blit = &r600_copy_blit, - .copy_dma = &r600_copy_blit, + .copy_dma = NULL, .copy = &r600_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, @@ -672,7 +672,7 @@ static struct radeon_asic rs780_asic = { .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &r600_cs_parse, .copy_blit = &r600_copy_blit, - .copy_dma = &r600_copy_blit, + .copy_dma = NULL, .copy = &r600_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, @@ -719,7 +719,7 @@ static struct radeon_asic rv770_asic = { .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &r600_cs_parse, .copy_blit = &r600_copy_blit, - .copy_dma = &r600_copy_blit, + .copy_dma = NULL, .copy = &r600_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, @@ -766,7 +766,7 @@ static struct radeon_asic evergreen_asic = { .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &evergreen_cs_parse, .copy_blit = &evergreen_copy_blit, - .copy_dma = &evergreen_copy_blit, + .copy_dma = NULL, .copy = &evergreen_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, @@ -813,7 +813,7 @@ static struct radeon_asic sumo_asic = { .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &evergreen_cs_parse, .copy_blit = &evergreen_copy_blit, - .copy_dma = &evergreen_copy_blit, + .copy_dma = NULL, .copy = &evergreen_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, @@ -860,7 +860,7 @@ static struct radeon_asic btc_asic = { .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &evergreen_cs_parse, .copy_blit = &evergreen_copy_blit, - .copy_dma = &evergreen_copy_blit, + .copy_dma = NULL, .copy = &evergreen_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, @@ -907,7 +907,7 @@ static struct radeon_asic cayman_asic = { .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &evergreen_cs_parse, .copy_blit = &evergreen_copy_blit, - .copy_dma = &evergreen_copy_blit, + .copy_dma = NULL, .copy = &evergreen_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, -- cgit v1.2.3 From 0d74f86f37306da8619eb049d88ab7ee523eec9c Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Wed, 8 Jun 2011 17:06:15 +0000 Subject: ttm: Fix spelling mistakes and remove unused #ifdef . and some comments to make it easier to understand. 
Ackedby: Randy Dunlap [v2: Added some more updates from Randy Dunlap] Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 16 ++++++++-------- include/drm/ttm/ttm_bo_api.h | 3 --- include/drm/ttm/ttm_bo_driver.h | 6 +++--- include/drm/ttm/ttm_memory.h | 2 +- include/drm/ttm/ttm_object.h | 4 ++-- include/drm/ttm/ttm_page_alloc.h | 2 +- 6 files changed, 15 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index d948575717bf..170e751c283e 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -355,7 +355,7 @@ restart: if (nr_free) goto restart; - /* Not allowed to fall tough or break because + /* Not allowed to fall through or break because * following context is inside spinlock while we are * outside here. */ @@ -556,7 +556,7 @@ out: } /** - * Fill the given pool if there isn't enough pages and requested number of + * Fill the given pool if there aren't enough pages and the requested number of * pages is small. */ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, @@ -576,8 +576,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, pool->fill_lock = true; - /* If allocation request is small and there is not enough - * pages in pool we fill the pool first */ + /* If allocation request is small and there are not enough + * pages in a pool we fill the pool up first. */ if (count < _manager->options.small && count > pool->npages) { struct list_head new_pages; @@ -614,9 +614,9 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, } /** - * Cut count nubmer of pages from the pool and put them to return list + * Cut 'count' number of pages from the pool and put them on the return list. * - * @return count of pages still to allocate to fill the request. + * @return count of pages still required to fulfill the request. */ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, struct list_head *pages, int ttm_flags, @@ -637,7 +637,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, goto out; } /* find the last pages to include for requested number of pages. Split - * pool to begin and halves to reduce search space. */ + * pool to begin and halve it to reduce search space. */ if (count <= pool->npages/2) { i = 0; list_for_each(p, &pool->list) { @@ -651,7 +651,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, break; } } - /* Cut count number of pages from pool */ + /* Cut 'count' number of pages from the pool */ list_cut_position(pages, &pool->list, p); pool->npages -= count; count = 0; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 62a0e4c4ceee..42e346985186 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -662,9 +662,6 @@ extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); -#if 0 -#endif - /** * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object. * diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 09af2d746d1c..94eb1434316e 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -78,7 +78,7 @@ struct ttm_backend_func { * * Bind the backend pages into the aperture in the location * indicated by @bo_mem. This function should be able to handle - * differences between aperture- and system page sizes. 
+ * differences between aperture and system page sizes. */ int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem); @@ -88,7 +88,7 @@ struct ttm_backend_func { * @backend: Pointer to a struct ttm_backend. * * Unbind previously bound backend pages. This function should be - * able to handle differences between aperture- and system page sizes. + * able to handle differences between aperture and system page sizes. */ int (*unbind) (struct ttm_backend *backend); @@ -786,7 +786,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev); * ttm_bo_device_init * * @bdev: A pointer to a struct ttm_bo_device to initialize. - * @mem_global: A pointer to an initialized struct ttm_mem_global. + * @glob: A pointer to an initialized struct ttm_bo_global. * @driver: A pointer to a struct ttm_bo_driver set up by the caller. * @file_page_offset: Offset into the device address space that is available * for buffer data. This ensures compatibility with other users of the diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index b199170b3c2c..26c1f78d136f 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -41,7 +41,7 @@ * @do_shrink: The callback function. * * Arguments to the do_shrink functions are intended to be passed using - * inheritance. That is, the argument class derives from struct ttm_mem_srink, + * inheritance. That is, the argument class derives from struct ttm_mem_shrink, * and can be accessed using container_of(). */ diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h index 0d9db099978b..e46054e5255b 100644 --- a/include/drm/ttm/ttm_object.h +++ b/include/drm/ttm/ttm_object.h @@ -111,7 +111,7 @@ struct ttm_object_device; * * @ref_obj_release: A function to be called when a reference object * with another ttm_ref_type than TTM_REF_USAGE is deleted. - * this function may, for example, release a lock held by a user-space + * This function may, for example, release a lock held by a user-space * process. * * This struct is intended to be used as a base struct for objects that @@ -172,7 +172,7 @@ extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file /** * ttm_base_object_unref * - * @p_base: Pointer to a pointer referncing a struct ttm_base_object. + * @p_base: Pointer to a pointer referencing a struct ttm_base_object. * * Decrements the base object refcount and clears the pointer pointed to by * p_base. diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index 8062890f725e..129de12353f1 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -32,7 +32,7 @@ /** * Get count number of pages from pool to pages list. * - * @pages: heado of empty linked list where pages are filled. + * @pages: head of empty linked list where pages are filled. * @flags: ttm flags for page allocation. * @cstate: ttm caching state for the page. * @count: number of pages to allocate. 
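The pool "cut" described in the corrected comments boils down to
list_cut_position(); a stand-alone sketch of that pattern, with the ttm
locking and page accounting left out:

	#include <linux/list.h>

	/*
	 * Move the first @n entries of @pool onto @out. Returns how many
	 * entries are still missing (nothing is moved in that case) when
	 * the pool holds fewer than @n.
	 */
	static unsigned int pool_take(struct list_head *pool,
				      struct list_head *out, unsigned int n)
	{
		struct list_head *p;
		unsigned int i = 0;

		list_for_each(p, pool) {
			if (++i == n)
				break;
		}
		if (i < n)
			return n - i;	/* caller must allocate the rest */

		/* one O(1) splice: entries 1..n move, the pool keeps the tail */
		list_cut_position(out, pool, p);
		return 0;
	}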
-- cgit v1.2.3 From 033b5650010652c069494df58424c4b98412fe3b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 8 Jun 2011 15:26:45 -0400 Subject: drm/radeon/kms: add initial CS checker support for compute - Add some new compute regs - Add new dispatch packets for evergreen/cayman Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen_cs.c | 57 ++++++++++++++++++++++++++++++- drivers/gpu/drm/radeon/evergreend.h | 2 ++ drivers/gpu/drm/radeon/r600_cs.c | 9 +++++ drivers/gpu/drm/radeon/radeon_drv.c | 2 +- drivers/gpu/drm/radeon/reg_srcs/cayman | 2 ++ drivers/gpu/drm/radeon/reg_srcs/evergreen | 3 ++ drivers/gpu/drm/radeon/reg_srcs/r600 | 1 + 7 files changed, 74 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 23d36417158d..189e86522b5b 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -856,7 +856,6 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 case SQ_PGM_START_PS: case SQ_PGM_START_HS: case SQ_PGM_START_LS: - case GDS_ADDR_BASE: case SQ_CONST_MEM_BASE: case SQ_ALU_CONST_CACHE_GS_0: case SQ_ALU_CONST_CACHE_GS_1: @@ -946,6 +945,34 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; + case SX_MEMORY_EXPORT_BASE: + if (p->rdev->family >= CHIP_CAYMAN) { + dev_warn(p->dev, "bad SET_CONFIG_REG " + "0x%04X\n", reg); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONFIG_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + break; + case CAYMAN_SX_SCATTER_EXPORT_BASE: + if (p->rdev->family < CHIP_CAYMAN) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + break; default: dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; @@ -1153,6 +1180,34 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return r; } break; + case PACKET3_DISPATCH_DIRECT: + if (pkt->count != 3) { + DRM_ERROR("bad DISPATCH_DIRECT\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + return r; + } + break; + case PACKET3_DISPATCH_INDIRECT: + if (pkt->count != 1) { + DRM_ERROR("bad DISPATCH_INDIRECT\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad DISPATCH_INDIRECT\n"); + return -EINVAL; + } + ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; case PACKET3_WAIT_REG_MEM: if (pkt->count != 5) { DRM_ERROR("bad WAIT_REG_MEM\n"); diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 1636e3449825..321c822065fc 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -351,6 +351,7 @@ #define COLOR_BUFFER_SIZE(x) ((x) << 0) #define POSITION_BUFFER_SIZE(x) ((x) << 8) #define SMX_BUFFER_SIZE(x) ((x) << 16) +#define SX_MEMORY_EXPORT_BASE 0x9010 #define SX_MISC 
0x28350 #define CB_PERF_CTR0_SEL_0 0x9A20 @@ -1122,6 +1123,7 @@ #define CAYMAN_PA_SC_AA_CONFIG 0x28BE0 #define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0 #define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7 +#define CAYMAN_SX_SCATTER_EXPORT_BASE 0x28358 /* cayman packet3 addition */ #define CAYMAN_PACKET3_DEALLOC_STATE 0x14 diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 909bda8dd550..db8ef1905d5f 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -1200,6 +1200,15 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; + case SX_MEMORY_EXPORT_BASE: + r = r600_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONFIG_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + break; default: dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 73dfbe8e5f9e..cbb4584a4a23 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -50,7 +50,7 @@ * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query - * 2.10.0 - fusion 2D tiling + * 2.10.0 - fusion 2D tiling, initial compute support for the CS checker */ #define KMS_DRIVER_MAJOR 2 #define KMS_DRIVER_MINOR 10 diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 0aa8e85a9457..2316977eb924 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman @@ -208,6 +208,7 @@ cayman 0x9400 0x0002834C PA_SC_VPORT_ZMAX_15 0x00028350 SX_MISC 0x00028354 SX_SURFACE_SYNC +0x0002835C SX_SCATTER_EXPORT_SIZE 0x00028380 SQ_VTX_SEMANTIC_0 0x00028384 SQ_VTX_SEMANTIC_1 0x00028388 SQ_VTX_SEMANTIC_2 @@ -432,6 +433,7 @@ cayman 0x9400 0x00028700 SPI_STACK_MGMT 0x00028704 SPI_WAVE_MGMT_1 0x00028708 SPI_WAVE_MGMT_2 +0x00028720 GDS_ADDR_BASE 0x00028724 GDS_ADDR_SIZE 0x00028780 CB_BLEND0_CONTROL 0x00028784 CB_BLEND1_CONTROL diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index 0e28cae7ea43..161737a28c23 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -44,6 +44,7 @@ evergreen 0x9400 0x00008E28 SQ_STATIC_THREAD_MGMT_3 0x00008E2C SQ_LDS_RESOURCE_MGMT 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS +0x00009014 SX_MEMORY_EXPORT_SIZE 0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 0x00009508 TA_CNTL_AUX @@ -442,7 +443,9 @@ evergreen 0x9400 0x000286EC SPI_COMPUTE_NUM_THREAD_X 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z +0x00028720 GDS_ADDR_BASE 0x00028724 GDS_ADDR_SIZE +0x00028728 GDS_ORDERED_WAVE_PER_SE 0x00028780 CB_BLEND0_CONTROL 0x00028784 CB_BLEND1_CONTROL 0x00028788 CB_BLEND2_CONTROL diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index ea49752ee99c..0380c5c15f80 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -429,6 +429,7 @@ r600 0x9400 0x00028438 SX_ALPHA_REF 0x00028410 SX_ALPHA_TEST_CONTROL 0x00028350 SX_MISC +0x00009014 SX_MEMORY_EXPORT_SIZE 0x00009604 
TC_INVALIDATE 0x00009400 TD_FILTER4 0x00009404 TD_FILTER4_1 -- cgit v1.2.3 From 5899a723b336da241b492583d7e55f1055f8f3b3 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Fri, 17 Jun 2011 08:20:40 +0000 Subject: dmaengine: shdma: add chcr_write/read function CHCR register position is not same in all DMAC. This patch adds new "chcr_offset" to decide it. Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- drivers/dma/shdma.c | 35 +++++++++++++++++++++++++++-------- drivers/dma/shdma.h | 1 + include/linux/sh_dma.h | 1 + 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 41a21b322960..40900c1cee9a 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -78,6 +78,20 @@ static void dmaor_write(struct sh_dmae_device *shdev, u16 data) __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); } +static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) +{ + struct sh_dmae_device *shdev = to_sh_dev(sh_dc); + + __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); +} + +static u32 chcr_read(struct sh_dmae_chan *sh_dc) +{ + struct sh_dmae_device *shdev = to_sh_dev(sh_dc); + + return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); +} + /* * Reset DMA controller * @@ -120,7 +134,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) { - u32 chcr = sh_dmae_readl(sh_chan, CHCR); + u32 chcr = chcr_read(sh_chan); if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) return true; /* working */ @@ -167,18 +181,18 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) static void dmae_start(struct sh_dmae_chan *sh_chan) { - u32 chcr = sh_dmae_readl(sh_chan, CHCR); + u32 chcr = chcr_read(sh_chan); chcr |= CHCR_DE | CHCR_IE; - sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); + chcr_write(sh_chan, chcr & ~CHCR_TE); } static void dmae_halt(struct sh_dmae_chan *sh_chan) { - u32 chcr = sh_dmae_readl(sh_chan, CHCR); + u32 chcr = chcr_read(sh_chan); chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); - sh_dmae_writel(sh_chan, chcr, CHCR); + chcr_write(sh_chan, chcr); } static void dmae_init(struct sh_dmae_chan *sh_chan) @@ -190,7 +204,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan) u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, LOG2_DEFAULT_XFER_SIZE); sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); - sh_dmae_writel(sh_chan, chcr, CHCR); + chcr_write(sh_chan, chcr); } static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) @@ -200,7 +214,7 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) return -EBUSY; sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); - sh_dmae_writel(sh_chan, val, CHCR); + chcr_write(sh_chan, val); return 0; } @@ -840,7 +854,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data) spin_lock(&sh_chan->desc_lock); - chcr = sh_dmae_readl(sh_chan, CHCR); + chcr = chcr_read(sh_chan); if (chcr & CHCR_TE) { /* DMA stop */ @@ -1138,6 +1152,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev) /* platform data */ shdev->pdata = pdata; + if (pdata->chcr_offset) + shdev->chcr_offset = pdata->chcr_offset; + else + shdev->chcr_offset = CHCR; + platform_set_drvdata(pdev, shdev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 6c73b654a5c3..6f064cad6c0f 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h @@ -47,6 +47,7 @@ struct sh_dmae_device { struct list_head node; u32 __iomem *chan_reg; u16 __iomem *dmars; + 
unsigned int chcr_offset; }; #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h index b08cd4efa15c..41fe4c2d6481 100644 --- a/include/linux/sh_dma.h +++ b/include/linux/sh_dma.h @@ -62,6 +62,7 @@ struct sh_dmae_pdata { const unsigned int *ts_shift; int ts_shift_num; u16 dmaor_init; + unsigned int chcr_offset; }; /* DMA register */ -- cgit v1.2.3 From 67c6269e5c998b53c2c08ce2befbbe20a7b6f57f Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Fri, 17 Jun 2011 08:20:51 +0000 Subject: dmaengine: shdma: add chcr_ie_bit IE bit position on CHCR register is not same in all DMAC. This patch adds new "chcr_ie_bit" to decide it. Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- drivers/dma/shdma.c | 11 +++++++++-- drivers/dma/shdma.h | 1 + include/linux/sh_dma.h | 1 + 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 40900c1cee9a..9412de3ef899 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -181,17 +181,19 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) static void dmae_start(struct sh_dmae_chan *sh_chan) { + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); u32 chcr = chcr_read(sh_chan); - chcr |= CHCR_DE | CHCR_IE; + chcr |= CHCR_DE | shdev->chcr_ie_bit; chcr_write(sh_chan, chcr & ~CHCR_TE); } static void dmae_halt(struct sh_dmae_chan *sh_chan) { + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); u32 chcr = chcr_read(sh_chan); - chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); + chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); chcr_write(sh_chan, chcr); } @@ -1157,6 +1159,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev) else shdev->chcr_offset = CHCR; + if (pdata->chcr_ie_bit) + shdev->chcr_ie_bit = pdata->chcr_ie_bit; + else + shdev->chcr_ie_bit = CHCR_IE; + platform_set_drvdata(pdev, shdev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 6f064cad6c0f..dc56576f9fdb 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h @@ -48,6 +48,7 @@ struct sh_dmae_device { u32 __iomem *chan_reg; u16 __iomem *dmars; unsigned int chcr_offset; + u32 chcr_ie_bit; }; #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h index 41fe4c2d6481..96803aa7b6bd 100644 --- a/include/linux/sh_dma.h +++ b/include/linux/sh_dma.h @@ -63,6 +63,7 @@ struct sh_dmae_pdata { int ts_shift_num; u16 dmaor_init; unsigned int chcr_offset; + u32 chcr_ie_bit; }; /* DMA register */ -- cgit v1.2.3 From e76c3af873025f5a704d56a28882be761e15657b Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Fri, 17 Jun 2011 08:20:56 +0000 Subject: dmaengine: shdma: add dmaor_is_32bit flag Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- drivers/dma/shdma.c | 14 ++++++++++++-- include/linux/sh_dma.h | 2 ++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 9412de3ef899..6a21cd843ab7 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -70,12 +70,22 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) static u16 dmaor_read(struct sh_dmae_device *shdev) { - return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); + u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); + + if (shdev->pdata->dmaor_is_32bit) + return __raw_readl(addr); + else + return __raw_readw(addr); } static void dmaor_write(struct 
sh_dmae_device *shdev, u16 data) { - __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); + u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); + + if (shdev->pdata->dmaor_is_32bit) + __raw_writel(data, addr); + else + __raw_writew(data, addr); } static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h index 96803aa7b6bd..f25afc61e1c6 100644 --- a/include/linux/sh_dma.h +++ b/include/linux/sh_dma.h @@ -64,6 +64,8 @@ struct sh_dmae_pdata { u16 dmaor_init; unsigned int chcr_offset; u32 chcr_ie_bit; + + unsigned int dmaor_is_32bit:1; }; /* DMA register */ -- cgit v1.2.3 From 260bf2c5f69f419b04b6861ca91565b5fb16ce48 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Fri, 17 Jun 2011 08:21:05 +0000 Subject: dmaengine: shdma: add .needs_tend_set / .no_dmars flags Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- drivers/dma/shdma.c | 6 ++++++ include/linux/sh_dma.h | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 6a21cd843ab7..0f3ec8d57a7a 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -194,6 +194,9 @@ static void dmae_start(struct sh_dmae_chan *sh_chan) struct sh_dmae_device *shdev = to_sh_dev(sh_chan); u32 chcr = chcr_read(sh_chan); + if (shdev->pdata->needs_tend_set) + sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); + chcr |= CHCR_DE | shdev->chcr_ie_bit; chcr_write(sh_chan, chcr & ~CHCR_TE); } @@ -242,6 +245,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) if (dmae_is_busy(sh_chan)) return -EBUSY; + if (pdata->no_dmars) + return 0; + /* in the case of a missing DMARS resource use first memory window */ if (!addr) addr = (u16 __iomem *)shdev->chan_reg; diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h index f25afc61e1c6..cb2dd118cc0f 100644 --- a/include/linux/sh_dma.h +++ b/include/linux/sh_dma.h @@ -66,6 +66,8 @@ struct sh_dmae_pdata { u32 chcr_ie_bit; unsigned int dmaor_is_32bit:1; + unsigned int needs_tend_set:1; + unsigned int no_dmars:1; }; /* DMA register */ @@ -75,6 +77,8 @@ struct sh_dmae_pdata { #define CHCR 0x0C #define DMAOR 0x40 +#define TEND 0x18 /* USB-DMAC */ + /* DMAOR definitions */ #define DMAOR_AE 0x00000004 #define DMAOR_NMIF 0x00000002 -- cgit v1.2.3 From 9372da5073705fe991f0254baf47f82d491c83ff Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 31 Mar 2011 09:07:20 +0200 Subject: mach-ux500: add HREFv60 Kconfig option This is necessary to have any use of the HREFv60 code. Signed-off-by: Linus Walleij --- arch/arm/mach-ux500/Kconfig | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index f8b9392ee347..96d546cef062 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig @@ -20,7 +20,7 @@ config UX500_SOC_DB8500 endmenu -menu "Ux500 target platform" +menu "Ux500 target platform (boards)" config MACH_U8500 bool "U8500 Development platform" @@ -29,6 +29,12 @@ config MACH_U8500 help Include support for the mop500 development platform. +config MACH_HREFV60 + bool "U85000 Development platform, HREFv60 version" + depends on UX500_SOC_DB8500 + help + Include support for the HREFv60 new development platform. 
+ config MACH_U5500 bool "U5500 Development platform" depends on UX500_SOC_DB5500 -- cgit v1.2.3 From f727a05a2c90cfe44749004718bc5a4ef3569b34 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 27 Apr 2011 12:55:37 +0200 Subject: mach-ux500: fix HREFv60 regression This fixes a regression on the HREFv60 ux500 hardware: the wrong level shifter was addressed in the MMCI vdd handler, trying to reconfigure an unclaimed GPIO pin. Signed-off-by: Linus Walleij --- arch/arm/mach-ux500/board-mop500-sdi.c | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index 7c6cb4fa47a9..f8b195063b62 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c @@ -32,13 +32,32 @@ #define MCI_DATA31DIREN (1 << 5) #define MCI_FBCLKEN (1 << 7) +/* GPIO pins used by the sdi0 level shifter */ +static int sdi0_en = -1; +static int sdi0_vsel = -1; + static u32 mop500_sdi0_vdd_handler(struct device *dev, unsigned int vdd, unsigned char power_mode) { - if (power_mode == MMC_POWER_UP) - gpio_set_value_cansleep(GPIO_SDMMC_EN, 1); - else if (power_mode == MMC_POWER_OFF) - gpio_set_value_cansleep(GPIO_SDMMC_EN, 0); + switch (power_mode) { + case MMC_POWER_UP: + case MMC_POWER_ON: + /* + * Level shifter voltage should depend on vdd to when deciding + * on either 1.8V or 2.9V. Once the decision has been made the + * level shifter must be disabled and re-enabled with a changed + * select signal in order to switch the voltage. Since there is + * no framework support yet for indicating 1.8V in vdd, use the + * default 2.9V. + */ + gpio_direction_output(sdi0_vsel, 0); + gpio_direction_output(sdi0_en, 1); + break; + case MMC_POWER_OFF: + gpio_direction_output(sdi0_vsel, 0); + gpio_direction_output(sdi0_en, 0); + break; + } return MCI_FBCLKEN | MCI_CMDDIREN | MCI_DATA0DIREN | MCI_DATA2DIREN | MCI_DATA31DIREN; @@ -77,10 +96,6 @@ static struct mmci_platform_data mop500_sdi0_data = { #endif }; -/* GPIO pins used by the sdi0 level shifter */ -static int sdi0_en = -1; -static int sdi0_vsel = -1; - static void sdi0_configure(void) { int ret; @@ -210,6 +225,7 @@ void __init mop500_sdi_init(void) sdi0_vsel = HREFV60_SDMMC_1V8_3V_GPIO; sdi0_configure(); } + /* * On boards with the TC35892 GPIO expander, sdi0 will finally * be added when the TC35892 initializes and calls -- cgit v1.2.3 From 02a734373b998efdcb9d28d8c3aa77e549bb38c0 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 30 Mar 2011 16:00:39 +0200 Subject: mach-ux500: correct MMC/SDI parameters We cannot clock the MMCI blocks more than 50 MHz. A bug prevented us from seeing the effect of actually driving them to 100 MHz, which indeed resulted failure, on the external SD card. 
Signed-off-by: Linus Walleij --- arch/arm/mach-ux500/board-mop500-sdi.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index f8b195063b62..5fbd6bc63cb1 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c @@ -86,8 +86,10 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = { static struct mmci_platform_data mop500_sdi0_data = { .vdd_handler = mop500_sdi0_vdd_handler, .ocr_mask = MMC_VDD_29_30, - .f_max = 100000000, - .capabilities = MMC_CAP_4_BIT_DATA, + .f_max = 50000000, + .capabilities = MMC_CAP_4_BIT_DATA | + MMC_CAP_SD_HIGHSPEED | + MMC_CAP_MMC_HIGHSPEED, .gpio_wp = -1, #ifdef CONFIG_STE_DMA40 .dma_filter = stedma40_filter, @@ -155,7 +157,7 @@ static struct stedma40_chan_cfg mop500_sdi2_dma_cfg_tx = { static struct mmci_platform_data mop500_sdi2_data = { .ocr_mask = MMC_VDD_165_195, - .f_max = 100000000, + .f_max = 50000000, .capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, .gpio_cd = -1, .gpio_wp = -1, @@ -192,7 +194,7 @@ static struct stedma40_chan_cfg mop500_sdi4_dma_cfg_tx = { static struct mmci_platform_data mop500_sdi4_data = { .ocr_mask = MMC_VDD_29_30, - .f_max = 100000000, + .f_max = 50000000, .capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_MMC_HIGHSPEED, .gpio_cd = -1, -- cgit v1.2.3 From 451a5edf0ed33f8ca9468ed89dc2a068d09e97be Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 1 Mar 2011 16:09:52 +0100 Subject: mach-ux500: activate USB in the U8500 defconfig Activate the new USB stuff so we atleast get some compile coverage for this stuff. Signed-off-by: Linus Walleij --- arch/arm/configs/u8500_defconfig | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index a5cce242a775..e1d602029a4d 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -67,7 +67,11 @@ CONFIG_AB8500_CORE=y CONFIG_REGULATOR=y CONFIG_REGULATOR_AB8500=y # CONFIG_HID_SUPPORT is not set -# CONFIG_USB_SUPPORT is not set +CONFIG_USB_MUSB_HDRC=y +CONFIG_USB_GADGET_MUSB_HDRC=y +CONFIG_MUSB_PIO_ONLY=y +CONFIG_USB_GADGET=y +CONFIG_AB8500_USB=y CONFIG_MMC=y CONFIG_MMC_ARMMMCI=y CONFIG_NEW_LEDS=y -- cgit v1.2.3 From db24520f905430bd15eb49b5d9810ed445efe73a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 4 Apr 2011 10:44:51 +0200 Subject: mach-ux500: complete regulator constraints for MOP500 board This board now has complete regulation constraints and can turn off unused regulators. For the moment we need to wire VAUX1 (V-DISPLAY rail) always on since it somehow affects the external MMC. 
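An illustration of the distinction this patch relies on (field values are made
up, not taken from the board file): once regulator_has_full_constraints() is
declared, the core may disable any unused regulator that is not marked
always_on, so boot_on alone would let V-DISPLAY be switched off late in boot.

	#include <linux/regulator/machine.h>

	static struct regulator_init_data example_vaux = {
		.constraints = {
			.min_uV		= 1100000,
			.max_uV		= 2900000,
			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE |
					  REGULATOR_CHANGE_STATUS,
			.boot_on	= 1,	/* already on when the kernel starts */
			.always_on	= 1,	/* the core must never disable it */
		},
	};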
Cc: Liam Girdwood Cc: Mark Brown Signed-off-by: Linus Walleij --- arch/arm/mach-ux500/board-mop500-regulators.c | 9 ++++++++- arch/arm/mach-ux500/board-mop500.c | 3 +++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c index 9ed0f90cfe23..c0bc833df903 100644 --- a/arch/arm/mach-ux500/board-mop500-regulators.c +++ b/arch/arm/mach-ux500/board-mop500-regulators.c @@ -272,7 +272,14 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { .max_uV = 2900000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS, - .boot_on = 1, /* must be on for display */ + .boot_on = 1, /* display is on at boot */ + /* + * This voltage cannot be disabled right now because + * it is somehow affecting the external MMC + * functionality, though that typically will use + * AUX3. + */ + .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers), .consumer_supplies = ab8500_vaux1_consumers, diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index bb26f40493e6..c64d6aa1355c 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -458,6 +458,9 @@ static void __init mop500_init_machine(void) i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); + + /* This board has full regulator constraints */ + regulator_has_full_constraints(); } MACHINE_START(U8500, "ST-Ericsson MOP500 platform") -- cgit v1.2.3 From dd367e9d06d672a549526f2d3b69a36a120b1563 Mon Sep 17 00:00:00 2001 From: Mattias Wallin Date: Thu, 9 Jun 2011 11:50:35 +0200 Subject: mach-ux500: iomap PRCMU TCDM memory The PRCMU TCDM memory needs to be iomapped for the PRCMU to work properly. 
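Behind the __IO_DEV_DESC() entry added below is an ordinary ARM static mapping: a map_desc registered early through iotable_init() so the region is mapped before drivers run, which is what the PRCMU code needs for its TCDM. A generic, hypothetical sketch of that mechanism (the addresses are invented, not the DB5500 ones):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/sizes.h>
#include <asm/memory.h>
#include <asm/mach/map.h>

/* One 4 KiB device window; a real platform derives .virtual from its own
 * IO_ADDRESS()-style scheme, as __IO_DEV_DESC() does on ux500. */
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual = 0xf0150000UL,
		.pfn	 = __phys_to_pfn(0x80150000),
		.length	 = SZ_4K,
		.type	 = MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}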
Signed-off-by: Mattias Wallin Signed-off-by: Linus Walleij --- arch/arm/mach-ux500/cpu-db5500.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c index c01bc19e3c5e..22705d246fc7 100644 --- a/arch/arm/mach-ux500/cpu-db5500.c +++ b/arch/arm/mach-ux500/cpu-db5500.c @@ -44,6 +44,7 @@ static struct map_desc u5500_io_desc[] __initdata = { __IO_DEV_DESC(U5500_GPIO3_BASE, SZ_4K), __IO_DEV_DESC(U5500_GPIO4_BASE, SZ_4K), __IO_DEV_DESC(U5500_PRCMU_BASE, SZ_4K), + __IO_DEV_DESC(U5500_PRCMU_TCDM_BASE, SZ_4K), }; static struct resource db5500_pmu_resources[] = { -- cgit v1.2.3 From afe48049ab1d0ca83afe45f9d5116bf4507741eb Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Fri, 17 Jun 2011 08:21:10 +0000 Subject: ARM: mach-shmobile: sh7372: Add USB-DMAC support Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/clock-sh7372.c | 5 +- arch/arm/mach-shmobile/include/mach/sh7372.h | 4 + arch/arm/mach-shmobile/setup-sh7372.c | 146 +++++++++++++++++++++++++++ 3 files changed, 154 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index c0800d83971e..c239ab10c95b 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c @@ -509,7 +509,7 @@ enum { MSTP001, MSTP118, MSTP117, MSTP116, MSTP113, MSTP106, MSTP101, MSTP100, MSTP223, - MSTP218, MSTP217, MSTP216, + MSTP214, MSTP218, MSTP217, MSTP216, MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403, @@ -538,6 +538,7 @@ static struct clk mstp_clks[MSTP_NR] = { [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */ [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */ [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */ + [MSTP214] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 14, 0), /* USBDMAC */ [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */ @@ -633,6 +634,8 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */ CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */ CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */ + CLKDEV_DEV_ID("sh-dma-engine.3", &mstp_clks[MSTP214]), /* USB-DMAC0 */ + CLKDEV_DEV_ID("sh-dma-engine.4", &mstp_clks[MSTP214]), /* USB-DMAC1 */ CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index df20d7670172..51db9d3a2cac 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -458,6 +458,10 @@ enum { SHDMA_SLAVE_SDHI2_TX, SHDMA_SLAVE_MMCIF_RX, SHDMA_SLAVE_MMCIF_TX, + SHDMA_SLAVE_USB0_TX, + SHDMA_SLAVE_USB0_RX, + SHDMA_SLAVE_USB1_TX, + SHDMA_SLAVE_USB1_RX, }; extern struct clk sh7372_extal1_clk; diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index cd807eea69e2..f2a58d48bfb4 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -602,6 +602,150 @@ static struct 
platform_device dma2_device = { }, }; +/* + * USB-DMAC + */ + +unsigned int usbts_shift[] = {3, 4, 5}; + +enum { + XMIT_SZ_8BYTE = 0, + XMIT_SZ_16BYTE = 1, + XMIT_SZ_32BYTE = 2, +}; + +#define USBTS_INDEX2VAL(i) (((i) & 3) << 6) + +static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = { + { + .offset = 0, + }, { + .offset = 0x20, + }, +}; + +/* USB DMAC0 */ +static const struct sh_dmae_slave_config sh7372_usb_dmae0_slaves[] = { + { + .slave_id = SHDMA_SLAVE_USB0_TX, + .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), + }, { + .slave_id = SHDMA_SLAVE_USB0_RX, + .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), + }, +}; + +static struct sh_dmae_pdata usb_dma0_platform_data = { + .slave = sh7372_usb_dmae0_slaves, + .slave_num = ARRAY_SIZE(sh7372_usb_dmae0_slaves), + .channel = sh7372_usb_dmae_channels, + .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels), + .ts_low_shift = 6, + .ts_low_mask = 0xc0, + .ts_high_shift = 0, + .ts_high_mask = 0, + .ts_shift = usbts_shift, + .ts_shift_num = ARRAY_SIZE(usbts_shift), + .dmaor_init = DMAOR_DME, + .chcr_offset = 0x14, + .chcr_ie_bit = 1 << 5, + .dmaor_is_32bit = 1, + .needs_tend_set = 1, + .no_dmars = 1, +}; + +static struct resource sh7372_usb_dmae0_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xe68a0020, + .end = 0xe68a0064 - 1, + .flags = IORESOURCE_MEM, + }, + { + /* VCR/SWR/DMICR */ + .start = 0xe68a0000, + .end = 0xe68a0014 - 1, + .flags = IORESOURCE_MEM, + }, + { + /* IRQ for channels */ + .start = evt2irq(0x0a00), + .end = evt2irq(0x0a00), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usb_dma0_device = { + .name = "sh-dma-engine", + .id = 3, + .resource = sh7372_usb_dmae0_resources, + .num_resources = ARRAY_SIZE(sh7372_usb_dmae0_resources), + .dev = { + .platform_data = &usb_dma0_platform_data, + }, +}; + +/* USB DMAC1 */ +static const struct sh_dmae_slave_config sh7372_usb_dmae1_slaves[] = { + { + .slave_id = SHDMA_SLAVE_USB1_TX, + .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), + }, { + .slave_id = SHDMA_SLAVE_USB1_RX, + .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), + }, +}; + +static struct sh_dmae_pdata usb_dma1_platform_data = { + .slave = sh7372_usb_dmae1_slaves, + .slave_num = ARRAY_SIZE(sh7372_usb_dmae1_slaves), + .channel = sh7372_usb_dmae_channels, + .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels), + .ts_low_shift = 6, + .ts_low_mask = 0xc0, + .ts_high_shift = 0, + .ts_high_mask = 0, + .ts_shift = usbts_shift, + .ts_shift_num = ARRAY_SIZE(usbts_shift), + .dmaor_init = DMAOR_DME, + .chcr_offset = 0x14, + .chcr_ie_bit = 1 << 5, + .dmaor_is_32bit = 1, + .needs_tend_set = 1, + .no_dmars = 1, +}; + +static struct resource sh7372_usb_dmae1_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xe68c0020, + .end = 0xe68c0064 - 1, + .flags = IORESOURCE_MEM, + }, + { + /* VCR/SWR/DMICR */ + .start = 0xe68c0000, + .end = 0xe68c0014 - 1, + .flags = IORESOURCE_MEM, + }, + { + /* IRQ for channels */ + .start = evt2irq(0x1d00), + .end = evt2irq(0x1d00), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usb_dma1_device = { + .name = "sh-dma-engine", + .id = 4, + .resource = sh7372_usb_dmae1_resources, + .num_resources = ARRAY_SIZE(sh7372_usb_dmae1_resources), + .dev = { + .platform_data = &usb_dma1_platform_data, + }, +}; + /* VPU */ static struct uio_info vpu_platform_data = { .name = "VPU5HG", @@ -829,6 +973,8 @@ static struct platform_device *sh7372_late_devices[] __initdata = { &dma0_device, &dma1_device, &dma2_device, + &usb_dma0_device, + &usb_dma1_device, &vpu_device, &veu0_device, 
&veu1_device, -- cgit v1.2.3 From fef95faeae9fa5f605fbad8693e2d6e2171f5ad4 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 21 Jun 2011 04:20:57 -0700 Subject: Input: qt1070 - remove obsolete cleanup for clientdata A few new i2c-drivers came into the kernel which clear the clientdata pointer on exit or error. This is not required anymore since the core will do it for us. Signed-off-by: Wolfram Sang Acked-by: Jean Delvare Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/qt1070.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c index ca7b89196ab7..b21bf5b876bb 100644 --- a/drivers/input/keyboard/qt1070.c +++ b/drivers/input/keyboard/qt1070.c @@ -239,8 +239,6 @@ static int __devexit qt1070_remove(struct i2c_client *client) input_unregister_device(data->input); kfree(data); - i2c_set_clientdata(client, NULL); - return 0; } -- cgit v1.2.3 From 61cf3813d32411b23d5df8a650bbd2aa89b4618c Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Tue, 21 Jun 2011 04:20:57 -0700 Subject: Input: lm8323 - convert to threaded IRQ There's no need for that workqueue anymore. Get rid of it and move to threaded IRQs instead. Signed-off-by: Felipe Balbi Tested-by: Leigh Brown Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/lm8323.c | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c index 71f744a8e686..3b21f426ebb1 100644 --- a/drivers/input/keyboard/lm8323.c +++ b/drivers/input/keyboard/lm8323.c @@ -146,7 +146,6 @@ struct lm8323_chip { /* device lock */ struct mutex lock; struct i2c_client *client; - struct work_struct work; struct input_dev *idev; bool kp_enabled; bool pm_suspend; @@ -162,7 +161,6 @@ struct lm8323_chip { #define client_to_lm8323(c) container_of(c, struct lm8323_chip, client) #define dev_to_lm8323(d) container_of(d, struct lm8323_chip, client->dev) -#define work_to_lm8323(w) container_of(w, struct lm8323_chip, work) #define cdev_to_pwm(c) container_of(c, struct lm8323_pwm, cdev) #define work_to_pwm(w) container_of(w, struct lm8323_pwm, work) @@ -375,9 +373,9 @@ static void pwm_done(struct lm8323_pwm *pwm) * Bottom half: handle the interrupt by posting key events, or dealing with * errors appropriately. */ -static void lm8323_work(struct work_struct *work) +static irqreturn_t lm8323_irq(int irq, void *_lm) { - struct lm8323_chip *lm = work_to_lm8323(work); + struct lm8323_chip *lm = _lm; u8 ints; int i; @@ -409,16 +407,6 @@ static void lm8323_work(struct work_struct *work) } mutex_unlock(&lm->lock); -} - -/* - * We cannot use I2C in interrupt context, so we just schedule work. 
- */ -static irqreturn_t lm8323_irq(int irq, void *data) -{ - struct lm8323_chip *lm = data; - - schedule_work(&lm->work); return IRQ_HANDLED; } @@ -675,7 +663,6 @@ static int __devinit lm8323_probe(struct i2c_client *client, lm->client = client; lm->idev = idev; mutex_init(&lm->lock); - INIT_WORK(&lm->work, lm8323_work); lm->size_x = pdata->size_x; lm->size_y = pdata->size_y; @@ -746,9 +733,8 @@ static int __devinit lm8323_probe(struct i2c_client *client, goto fail3; } - err = request_irq(client->irq, lm8323_irq, - IRQF_TRIGGER_FALLING | IRQF_DISABLED, - "lm8323", lm); + err = request_threaded_irq(client->irq, NULL, lm8323_irq, + IRQF_TRIGGER_FALLING, "lm8323", lm); if (err) { dev_err(&client->dev, "could not get IRQ %d\n", client->irq); goto fail4; @@ -783,7 +769,6 @@ static int __devexit lm8323_remove(struct i2c_client *client) disable_irq_wake(client->irq); free_irq(client->irq, lm); - cancel_work_sync(&lm->work); input_unregister_device(lm->idev); -- cgit v1.2.3 From eaa499aebf6265f18ffc836ead30059031c6d7a7 Mon Sep 17 00:00:00 2001 From: Leigh Brown Date: Tue, 21 Jun 2011 04:25:21 -0700 Subject: Input: lm8323 - use oneshot level triggered interrupts According to the data sheet the interrupt should be level rather than edge triggered. This fixes the issue of the Nokia N810 keypad stopping responding if multiple key events occur in quick succession. Signed-off-by: Leigh Brown Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/lm8323.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c index 3b21f426ebb1..ab0acaf7fe8f 100644 --- a/drivers/input/keyboard/lm8323.c +++ b/drivers/input/keyboard/lm8323.c @@ -734,7 +734,7 @@ static int __devinit lm8323_probe(struct i2c_client *client, } err = request_threaded_irq(client->irq, NULL, lm8323_irq, - IRQF_TRIGGER_FALLING, "lm8323", lm); + IRQF_TRIGGER_LOW|IRQF_ONESHOT, "lm8323", lm); if (err) { dev_err(&client->dev, "could not get IRQ %d\n", client->irq); goto fail4; -- cgit v1.2.3 From 7e2ecdf438bb479e2b4667fc16b1a84d6348da04 Mon Sep 17 00:00:00 2001 From: David Jander Date: Tue, 21 Jun 2011 14:26:18 -0700 Subject: Input: gpio_keys - switch to using threaded IRQs Use a threaded interrupt handler in order to permit the handler to use a GPIO driver that causes things like I2C transactions being done inside the handler context. 
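The lm8323 and gpio-keys conversions follow the same recipe: drop the private workqueue and let the IRQ core run the handler in its own kernel thread, where sleeping operations such as I2C transfers or expander GPIO accesses are legal. A generic sketch of that recipe with hypothetical names (example_keypad and the status register offset are made up):

#include <linux/i2c.h>
#include <linux/interrupt.h>

struct example_keypad {
	struct i2c_client *client;
};

/* Runs in a kernel thread, so sleeping calls such as I2C transfers are fine */
static irqreturn_t example_irq_thread(int irq, void *dev_id)
{
	struct example_keypad *kp = dev_id;

	i2c_smbus_read_byte_data(kp->client, 0x00); /* e.g. read a status register */
	/* ... decode and report input events ... */
	return IRQ_HANDLED;
}

static int example_request_irq(struct example_keypad *kp, int irq)
{
	/*
	 * No primary handler (NULL) -- the core supplies one.  IRQF_ONESHOT
	 * keeps the line masked until the thread has finished, which is what
	 * a level-triggered source like the lm8323 needs.
	 */
	return request_threaded_irq(irq, NULL, example_irq_thread,
				    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				    "example-keypad", kp);
}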
Signed-off-by: David Jander Acked-by: Grant Likely Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/gpio_keys.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 6e6145b9a4c1..6d0e2f64122b 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -415,7 +415,7 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev, if (!button->can_disable) irqflags |= IRQF_SHARED; - error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata); + error = request_threaded_irq(irq, NULL, gpio_keys_isr, irqflags, desc, bdata); if (error < 0) { dev_err(dev, "Unable to claim irq %d; error %d\n", irq, error); @@ -649,5 +649,5 @@ module_exit(gpio_keys_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Phil Blundell "); -MODULE_DESCRIPTION("Keyboard driver for CPU GPIOs"); +MODULE_DESCRIPTION("Keyboard driver for GPIOs"); MODULE_ALIAS("platform:gpio-keys"); -- cgit v1.2.3 From fabadbc754cf461e8d68e5f8ff53f287dcee41b2 Mon Sep 17 00:00:00 2001 From: Magnus Hörlin Date: Tue, 21 Jun 2011 14:40:30 -0700 Subject: Input: xpad - add support for two more dance pads and a guitar MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Magnus Hörlin Reviewed-by: Christoph Fritz Signed-off-by: Dmitry Torokhov --- drivers/input/joystick/xpad.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 56abf3d0e911..e91838c83dbc 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -154,10 +154,13 @@ static const struct xpad_device { { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 }, { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, + { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, @@ -236,9 +239,10 @@ static struct usb_device_id xpad_table [] = { XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ - XPAD_XBOX360_VENDOR(0x1bad), /* Rock Band Drums */ + XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ { } }; -- cgit v1.2.3 From 71c86ce59791bcd67af937bbea719a508079d7c2 Mon Sep 17 00:00:00 2001 From: Ping Cheng Date: Wed, 22 Jun 2011 01:02:51 -0700 
Subject: Input: wacom - Cintiq 21UX2 does not have menu strips So don't set ABS_RX/ABS_RY for them. Signed-off-by: Ping Cheng Signed-off-by: Dmitry Torokhov --- drivers/input/tablet/wacom_wac.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 08ba5ad9c9be..cc6c917e1164 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1129,8 +1129,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(BTN_0 + i, input_dev->keybit); __set_bit(BTN_TOOL_FINGER, input_dev->keybit); - input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); - input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); + if (wacom_wac->features.type != WACOM_21UX2) { + input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); + input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); + } + input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); wacom_setup_cintiq(wacom_wac); break; -- cgit v1.2.3 From 1483f5513b2d215216ad56c618b42454c5bc1e4d Mon Sep 17 00:00:00 2001 From: Aristeu Rozanski Date: Wed, 22 Jun 2011 01:17:17 -0700 Subject: Input: wacom - use only one interface with DTU-2231 The Wacom DTU-2231 tablet has two interfaces on its default configuration and both have HID class, leading to the creation of two input devices instead of one. Only the first one is used, so filter out the second. Signed-off-by: Aristeu Rozanski Signed-off-by: Dmitry Torokhov --- drivers/input/tablet/wacom_wac.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index cc6c917e1164..2ea0d2e55a4e 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -15,6 +15,7 @@ #include "wacom_wac.h" #include "wacom.h" #include +#include /* resolution for penabled devices */ #define WACOM_PL_RES 20 @@ -1486,6 +1487,11 @@ static const struct wacom_features wacom_features_0x6004 = USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \ .driver_info = (kernel_ulong_t)&wacom_features_##prod +#define USB_DEVICE_DETAILED(prod, class, sub, proto) \ + USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_WACOM, prod, class, \ + sub, proto), \ + .driver_info = (kernel_ulong_t)&wacom_features_##prod + #define USB_DEVICE_LENOVO(prod) \ USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \ .driver_info = (kernel_ulong_t)&wacom_features_##prod @@ -1548,7 +1554,13 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xC5) }, { USB_DEVICE_WACOM(0xC6) }, { USB_DEVICE_WACOM(0xC7) }, - { USB_DEVICE_WACOM(0xCE) }, + /* + * DTU-2231 has two interfaces on the same configuration, + * only one is used. 
+ */ + { USB_DEVICE_DETAILED(0xCE, USB_CLASS_HID, + USB_INTERFACE_SUBCLASS_BOOT, + USB_INTERFACE_PROTOCOL_MOUSE) }, { USB_DEVICE_WACOM(0xD0) }, { USB_DEVICE_WACOM(0xD1) }, { USB_DEVICE_WACOM(0xD2) }, -- cgit v1.2.3 From 3ead8b5ddbe6ca8e79b24535f4119c9d4ffd91e3 Mon Sep 17 00:00:00 2001 From: Eric Miao Date: Wed, 22 Jun 2011 01:02:50 -0700 Subject: Input: add support for mma8450 accelerometer Signed-off-by: Sammy He Signed-off-by: Eric Miao Signed-off-by: Dmitry Torokhov --- drivers/input/misc/Kconfig | 11 ++ drivers/input/misc/Makefile | 2 +- drivers/input/misc/mma8450.c | 256 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 268 insertions(+), 1 deletion(-) create mode 100644 drivers/input/misc/mma8450.c diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 45dc6aa62ba4..0f22918ad9ce 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -100,6 +100,17 @@ config INPUT_MAX8925_ONKEY To compile this driver as a module, choose M here: the module will be called max8925_onkey. +config INPUT_MMA8450 + tristate "MMA8450 - Freescale's 3-Axis, 8/12-bit Digital Accelerometer" + depends on I2C + select INPUT_POLLDEV + help + Say Y here if you want to support Freescale's MMA8450 Accelerometer + through I2C interface. + + To compile this driver as a module, choose M here: the + module will be called mma8450. + config INPUT_APANEL tristate "Fujitsu Lifebook Application Panel buttons" depends on X86 && I2C && LEDS_CLASS diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 38efb2cb182b..99953c3c442c 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o +obj-$(CONFIG_INPUT_MMA8450) += mma8450.o obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o @@ -45,4 +46,3 @@ obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o obj-$(CONFIG_INPUT_YEALINK) += yealink.o - diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c new file mode 100644 index 000000000000..20f8f9284f02 --- /dev/null +++ b/drivers/input/misc/mma8450.c @@ -0,0 +1,256 @@ +/* + * Driver for Freescale's 3-Axis Accelerometer MMA8450 + * + * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include + +#define MMA8450_DRV_NAME "mma8450" + +#define MODE_CHANGE_DELAY_MS 100 +#define POLL_INTERVAL 100 +#define POLL_INTERVAL_MAX 500 + +/* register definitions */ +#define MMA8450_STATUS 0x00 +#define MMA8450_STATUS_ZXYDR 0x08 + +#define MMA8450_OUT_X8 0x01 +#define MMA8450_OUT_Y8 0x02 +#define MMA8450_OUT_Z8 0x03 + +#define MMA8450_OUT_X_LSB 0x05 +#define MMA8450_OUT_X_MSB 0x06 +#define MMA8450_OUT_Y_LSB 0x07 +#define MMA8450_OUT_Y_MSB 0x08 +#define MMA8450_OUT_Z_LSB 0x09 +#define MMA8450_OUT_Z_MSB 0x0a + +#define MMA8450_XYZ_DATA_CFG 0x16 + +#define MMA8450_CTRL_REG1 0x38 +#define MMA8450_CTRL_REG2 0x39 + +/* mma8450 status */ +struct mma8450 { + struct i2c_client *client; + struct input_polled_dev *idev; +}; + +static int mma8450_read(struct mma8450 *m, unsigned off) +{ + struct i2c_client *c = m->client; + int ret; + + ret = i2c_smbus_read_byte_data(c, off); + if (ret < 0) + dev_err(&c->dev, + "failed to read register 0x%02x, error %d\n", + off, ret); + + return ret; +} + +static int mma8450_write(struct mma8450 *m, unsigned off, u8 v) +{ + struct i2c_client *c = m->client; + int error; + + error = i2c_smbus_write_byte_data(c, off, v); + if (error < 0) { + dev_err(&c->dev, + "failed to write to register 0x%02x, error %d\n", + off, error); + return error; + } + + return 0; +} + +static int mma8450_read_xyz(struct mma8450 *m, int *x, int *y, int *z) +{ + struct i2c_client *c = m->client; + u8 buff[6]; + int err; + + err = i2c_smbus_read_i2c_block_data(c, MMA8450_OUT_X_LSB, 6, buff); + if (err < 0) { + dev_err(&c->dev, + "failed to read block data at 0x%02x, error %d\n", + MMA8450_OUT_X_LSB, err); + return err; + } + + *x = ((buff[1] << 4) & 0xff0) | (buff[0] & 0xf); + *y = ((buff[3] << 4) & 0xff0) | (buff[2] & 0xf); + *z = ((buff[5] << 4) & 0xff0) | (buff[4] & 0xf); + + return 0; +} + +static void mma8450_poll(struct input_polled_dev *dev) +{ + struct mma8450 *m = dev->private; + int x, y, z; + int ret; + int err; + + ret = mma8450_read(m, MMA8450_STATUS); + if (ret < 0) + return; + + if (!(ret & MMA8450_STATUS_ZXYDR)) + return; + + err = mma8450_read_xyz(m, &x, &y, &z); + if (err) + return; + + input_report_abs(dev->input, ABS_X, x); + input_report_abs(dev->input, ABS_Y, y); + input_report_abs(dev->input, ABS_Z, z); + input_sync(dev->input); +} + +/* Initialize the MMA8450 chip */ +static void mma8450_open(struct input_polled_dev *dev) +{ + struct mma8450 *m = dev->private; + int err; + + /* enable all events from X/Y/Z, no FIFO */ + err = mma8450_write(m, MMA8450_XYZ_DATA_CFG, 0x07); + if (err) + return; + + /* + * Sleep mode poll rate - 50Hz + * System output data rate - 400Hz + * Full scale selection - Active, +/- 2G + */ + err = mma8450_write(m, MMA8450_CTRL_REG1, 0x01); + if (err < 0) + return; + + msleep(MODE_CHANGE_DELAY_MS); +} + +static void mma8450_close(struct input_polled_dev *dev) +{ + struct mma8450 *m = dev->private; + + mma8450_write(m, MMA8450_CTRL_REG1, 0x00); + mma8450_write(m, MMA8450_CTRL_REG2, 0x01); +} + +/* + * I2C init/probing/exit functions + */ +static int __devinit mma8450_probe(struct i2c_client *c, + const struct i2c_device_id *id) +{ + struct input_polled_dev *idev; + struct mma8450 *m; + int err; + + m = kzalloc(sizeof(struct mma8450), GFP_KERNEL); + idev = input_allocate_polled_device(); + if (!m || !idev) { + err = -ENOMEM; + goto err_free_mem; + } + + m->client = c; + m->idev = idev; + + idev->private = m; + idev->input->name = MMA8450_DRV_NAME; + idev->input->id.bustype = BUS_I2C; + 
idev->poll = mma8450_poll; + idev->poll_interval = POLL_INTERVAL; + idev->poll_interval_max = POLL_INTERVAL_MAX; + idev->open = mma8450_open; + idev->close = mma8450_close; + + __set_bit(EV_ABS, idev->input->evbit); + input_set_abs_params(idev->input, ABS_X, -2048, 2047, 32, 32); + input_set_abs_params(idev->input, ABS_Y, -2048, 2047, 32, 32); + input_set_abs_params(idev->input, ABS_Z, -2048, 2047, 32, 32); + + err = input_register_polled_device(idev); + if (err) { + dev_err(&c->dev, "failed to register polled input device\n"); + goto err_free_mem; + } + + return 0; + +err_free_mem: + input_free_polled_device(idev); + kfree(m); + return err; +} + +static int __devexit mma8450_remove(struct i2c_client *c) +{ + struct mma8450 *m = i2c_get_clientdata(c); + struct input_polled_dev *idev = m->idev; + + input_unregister_polled_device(idev); + input_free_polled_device(idev); + kfree(m); + + return 0; +} + +static const struct i2c_device_id mma8450_id[] = { + { MMA8450_DRV_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mma8450_id); + +static struct i2c_driver mma8450_driver = { + .driver = { + .name = MMA8450_DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = mma8450_probe, + .remove = __devexit_p(mma8450_remove), + .id_table = mma8450_id, +}; + +static int __init mma8450_init(void) +{ + return i2c_add_driver(&mma8450_driver); +} +module_init(mma8450_init); + +static void __exit mma8450_exit(void) +{ + i2c_del_driver(&mma8450_driver); +} +module_exit(mma8450_exit); + +MODULE_AUTHOR("Freescale Semiconductor, Inc."); +MODULE_DESCRIPTION("MMA8450 3-Axis Accelerometer Driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 58402b6e397555884d926fd22fb40ccf6fe68b46 Mon Sep 17 00:00:00 2001 From: Robert Marklund Date: Mon, 20 Jun 2011 13:30:35 +0200 Subject: mach-x500: fix SECTION warnings in uib Fix some simple section warning noise. 
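The warning fixed below is purely a matter of attribute placement: written between the struct keyword and its tag, __initdata ends up annotating the type rather than the variable, so the table is not actually placed in .init.data as intended. A hypothetical, minimal illustration (not the real mop500 table):

#include <linux/init.h>

struct example_uib {
	const char *name;
};

/* Misplaced: the attribute splits "struct example_uib" and applies to the
 * type, which the compiler warns about instead of honouring. */
static struct __initdata example_uib wrong_table[] = {
	{ .name = "st-uib" },
};

/* Correct: the attribute annotates the variable, so it really lands in
 * .init.data and is discarded after boot. */
static struct example_uib __initdata right_table[] = {
	{ .name = "st-uib" },
};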
Signed-off-by: Robert Marklund --- arch/arm/mach-ux500/board-mop500-uib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-ux500/board-mop500-uib.c b/arch/arm/mach-ux500/board-mop500-uib.c index 69cce41f602a..5af36aa56c08 100644 --- a/arch/arm/mach-ux500/board-mop500-uib.c +++ b/arch/arm/mach-ux500/board-mop500-uib.c @@ -25,7 +25,7 @@ struct uib { void (*init)(void); }; -static struct __initdata uib mop500_uibs[] = { +static struct uib __initdata mop500_uibs[] = { [STUIB] = { .name = "ST-UIB", .option = "stuib", -- cgit v1.2.3 From 74cfad188b9e7e063ddb8d74fa20b38cbad10d79 Mon Sep 17 00:00:00 2001 From: Martin Peres Date: Thu, 12 May 2011 22:40:47 +0200 Subject: drm/nvc0: Read temperature on Fermi like we do on nv84+ Signed-off-by: Martin Peres Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 144f79a350ae..dd6f30574a76 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -415,6 +415,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->vram.get = nvc0_vram_new; engine->vram.put = nv50_vram_del; engine->vram.flags_valid = nvc0_vram_flags_valid; + engine->pm.temp_get = nv84_temp_get; break; default: NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); -- cgit v1.2.3 From 310ff414b32e66d832307bedd99ba75908e4e36d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 24 May 2011 11:47:09 +1000 Subject: drm/nvc0/fb: allocate page for some unknown PFFB object Fixes DMAR faults during accel, more than likely a similar problem to what was solved on nv50 previously. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_fb.c | 68 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 66 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c index 26a996025dd2..08e6b118f021 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fb.c +++ b/drivers/gpu/drm/nouveau/nvc0_fb.c @@ -1,5 +1,5 @@ /* - * Copyright 2010 Red Hat Inc. + * Copyright 2011 Red Hat Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,16 +23,80 @@ */ #include "drmP.h" - +#include "drm.h" #include "nouveau_drv.h" +#include "nouveau_drm.h" + +struct nvc0_fb_priv { + struct page *r100c10_page; + dma_addr_t r100c10; +}; + +static void +nvc0_fb_destroy(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + struct nvc0_fb_priv *priv = pfb->priv; + + if (priv->r100c10_page) { + pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->r100c10_page); + } + + kfree(priv); + pfb->priv = NULL; +} + +static int +nvc0_fb_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + struct nvc0_fb_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + pfb->priv = priv; + + priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!priv->r100c10_page) { + nvc0_fb_destroy(dev); + return -ENOMEM; + } + + priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) { + nvc0_fb_destroy(dev); + return -EFAULT; + } + + return 0; +} int nvc0_fb_init(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvc0_fb_priv *priv; + int ret; + + if (!dev_priv->engine.fb.priv) { + ret = nvc0_fb_create(dev); + if (ret) + return ret; + } + priv = dev_priv->engine.fb.priv; + + nv_wr32(dev, 0x100c10, priv->r100c10 >> 8); return 0; } void nvc0_fb_takedown(struct drm_device *dev) { + nvc0_fb_destroy(dev); } -- cgit v1.2.3 From 068da16198ad09343b0c3647d26f81683921bc84 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 24 May 2011 11:54:42 +1000 Subject: drm/nvc0/fifo: fix typos in unload_context Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_fifo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c index fb4f5943e01b..6f9f341c3e86 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fifo.c +++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c @@ -210,10 +210,10 @@ nvc0_fifo_unload_context(struct drm_device *dev) int i; for (i = 0; i < 128; i++) { - if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1)) + if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1)) continue; - nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000); + nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000); nv_wr32(dev, 0x002634, i); if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", -- cgit v1.2.3 From 847adea2c701b519b43d8c958c5082a22eeba346 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 24 May 2011 14:37:41 +1000 Subject: drm/nvc0/gr: macro to determine fermi class, will use it in a few places Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_graph.c | 13 +++++++------ drivers/gpu/drm/nouveau/nvc0_graph.h | 22 ++++++++++++++++++++++ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index ca6db204d644..a57fba3da941 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -675,13 +675,10 @@ nvc0_graph_create(struct drm_device *dev) struct drm_nouveau_private *dev_priv = 
dev->dev_private; struct nvc0_graph_priv *priv; int ret, gpc, i; + u32 fermi; - switch (dev_priv->chipset) { - case 0xc0: - case 0xc3: - case 0xc4: - break; - default: + fermi = nvc0_graph_class(dev); + if (!fermi) { NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n"); return 0; } @@ -770,6 +767,10 @@ nvc0_graph_create(struct drm_device *dev) NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip); NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ + if (fermi >= 0x9197) + NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */ + if (fermi >= 0x9297) + NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */ NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */ return 0; diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h index f5d184e0689d..2b667d4e88ca 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.h +++ b/drivers/gpu/drm/nouveau/nvc0_graph.h @@ -72,4 +72,26 @@ struct nvc0_graph_chan { int nvc0_grctx_generate(struct nouveau_channel *); +/* nvc0_graph.c uses this also to determine supported chipsets */ +static inline u32 +nvc0_graph_class(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + switch (dev_priv->chipset) { + case 0xc0: + case 0xc3: + case 0xc4: + return 0x9097; +#if 0 + case 0xc1: + return 0x9197; + case 0xc8: + return 0x9297; +#endif + default: + return 0; + } +} + #endif -- cgit v1.2.3 From 6f376460e42220dfd44711cff3ef8d0309e277d4 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 24 May 2011 14:46:06 +1000 Subject: drm/nvc0/gr: 0x9197/0x9297 state init Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_grctx.c | 171 +++++++++-------------------------- 1 file changed, 43 insertions(+), 128 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index 6df066114133..b74f84cc0c53 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -45,6 +45,9 @@ nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data) static void nvc0_grctx_generate_9097(struct drm_device *dev) { + u32 fermi = nvc0_graph_class(dev); + u32 mthd; + nv_mthd(dev, 0x9097, 0x0800, 0x00000000); nv_mthd(dev, 0x9097, 0x0840, 0x00000000); nv_mthd(dev, 0x9097, 0x0880, 0x00000000); @@ -824,134 +827,10 @@ nvc0_grctx_generate_9097(struct drm_device *dev) nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001); nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001); nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001); - nv_mthd(dev, 0x9097, 0x3400, 0x00000000); - nv_mthd(dev, 0x9097, 0x3404, 0x00000000); - nv_mthd(dev, 0x9097, 0x3408, 0x00000000); - nv_mthd(dev, 0x9097, 0x340c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3410, 0x00000000); - nv_mthd(dev, 0x9097, 0x3414, 0x00000000); - nv_mthd(dev, 0x9097, 0x3418, 0x00000000); - nv_mthd(dev, 0x9097, 0x341c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3420, 0x00000000); - nv_mthd(dev, 0x9097, 0x3424, 0x00000000); - nv_mthd(dev, 0x9097, 0x3428, 0x00000000); - nv_mthd(dev, 0x9097, 0x342c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3430, 0x00000000); - nv_mthd(dev, 0x9097, 0x3434, 0x00000000); - nv_mthd(dev, 0x9097, 0x3438, 0x00000000); - nv_mthd(dev, 0x9097, 0x343c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3440, 0x00000000); - nv_mthd(dev, 0x9097, 0x3444, 0x00000000); - nv_mthd(dev, 0x9097, 0x3448, 0x00000000); - nv_mthd(dev, 0x9097, 0x344c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3450, 0x00000000); - nv_mthd(dev, 0x9097, 0x3454, 0x00000000); - nv_mthd(dev, 0x9097, 0x3458, 0x00000000); - nv_mthd(dev, 0x9097, 0x345c, 0x00000000); - nv_mthd(dev, 
0x9097, 0x3460, 0x00000000); - nv_mthd(dev, 0x9097, 0x3464, 0x00000000); - nv_mthd(dev, 0x9097, 0x3468, 0x00000000); - nv_mthd(dev, 0x9097, 0x346c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3470, 0x00000000); - nv_mthd(dev, 0x9097, 0x3474, 0x00000000); - nv_mthd(dev, 0x9097, 0x3478, 0x00000000); - nv_mthd(dev, 0x9097, 0x347c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3480, 0x00000000); - nv_mthd(dev, 0x9097, 0x3484, 0x00000000); - nv_mthd(dev, 0x9097, 0x3488, 0x00000000); - nv_mthd(dev, 0x9097, 0x348c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3490, 0x00000000); - nv_mthd(dev, 0x9097, 0x3494, 0x00000000); - nv_mthd(dev, 0x9097, 0x3498, 0x00000000); - nv_mthd(dev, 0x9097, 0x349c, 0x00000000); - nv_mthd(dev, 0x9097, 0x34a0, 0x00000000); - nv_mthd(dev, 0x9097, 0x34a4, 0x00000000); - nv_mthd(dev, 0x9097, 0x34a8, 0x00000000); - nv_mthd(dev, 0x9097, 0x34ac, 0x00000000); - nv_mthd(dev, 0x9097, 0x34b0, 0x00000000); - nv_mthd(dev, 0x9097, 0x34b4, 0x00000000); - nv_mthd(dev, 0x9097, 0x34b8, 0x00000000); - nv_mthd(dev, 0x9097, 0x34bc, 0x00000000); - nv_mthd(dev, 0x9097, 0x34c0, 0x00000000); - nv_mthd(dev, 0x9097, 0x34c4, 0x00000000); - nv_mthd(dev, 0x9097, 0x34c8, 0x00000000); - nv_mthd(dev, 0x9097, 0x34cc, 0x00000000); - nv_mthd(dev, 0x9097, 0x34d0, 0x00000000); - nv_mthd(dev, 0x9097, 0x34d4, 0x00000000); - nv_mthd(dev, 0x9097, 0x34d8, 0x00000000); - nv_mthd(dev, 0x9097, 0x34dc, 0x00000000); - nv_mthd(dev, 0x9097, 0x34e0, 0x00000000); - nv_mthd(dev, 0x9097, 0x34e4, 0x00000000); - nv_mthd(dev, 0x9097, 0x34e8, 0x00000000); - nv_mthd(dev, 0x9097, 0x34ec, 0x00000000); - nv_mthd(dev, 0x9097, 0x34f0, 0x00000000); - nv_mthd(dev, 0x9097, 0x34f4, 0x00000000); - nv_mthd(dev, 0x9097, 0x34f8, 0x00000000); - nv_mthd(dev, 0x9097, 0x34fc, 0x00000000); - nv_mthd(dev, 0x9097, 0x3500, 0x00000000); - nv_mthd(dev, 0x9097, 0x3504, 0x00000000); - nv_mthd(dev, 0x9097, 0x3508, 0x00000000); - nv_mthd(dev, 0x9097, 0x350c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3510, 0x00000000); - nv_mthd(dev, 0x9097, 0x3514, 0x00000000); - nv_mthd(dev, 0x9097, 0x3518, 0x00000000); - nv_mthd(dev, 0x9097, 0x351c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3520, 0x00000000); - nv_mthd(dev, 0x9097, 0x3524, 0x00000000); - nv_mthd(dev, 0x9097, 0x3528, 0x00000000); - nv_mthd(dev, 0x9097, 0x352c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3530, 0x00000000); - nv_mthd(dev, 0x9097, 0x3534, 0x00000000); - nv_mthd(dev, 0x9097, 0x3538, 0x00000000); - nv_mthd(dev, 0x9097, 0x353c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3540, 0x00000000); - nv_mthd(dev, 0x9097, 0x3544, 0x00000000); - nv_mthd(dev, 0x9097, 0x3548, 0x00000000); - nv_mthd(dev, 0x9097, 0x354c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3550, 0x00000000); - nv_mthd(dev, 0x9097, 0x3554, 0x00000000); - nv_mthd(dev, 0x9097, 0x3558, 0x00000000); - nv_mthd(dev, 0x9097, 0x355c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3560, 0x00000000); - nv_mthd(dev, 0x9097, 0x3564, 0x00000000); - nv_mthd(dev, 0x9097, 0x3568, 0x00000000); - nv_mthd(dev, 0x9097, 0x356c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3570, 0x00000000); - nv_mthd(dev, 0x9097, 0x3574, 0x00000000); - nv_mthd(dev, 0x9097, 0x3578, 0x00000000); - nv_mthd(dev, 0x9097, 0x357c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3580, 0x00000000); - nv_mthd(dev, 0x9097, 0x3584, 0x00000000); - nv_mthd(dev, 0x9097, 0x3588, 0x00000000); - nv_mthd(dev, 0x9097, 0x358c, 0x00000000); - nv_mthd(dev, 0x9097, 0x3590, 0x00000000); - nv_mthd(dev, 0x9097, 0x3594, 0x00000000); - nv_mthd(dev, 0x9097, 0x3598, 0x00000000); - nv_mthd(dev, 0x9097, 0x359c, 0x00000000); - nv_mthd(dev, 0x9097, 0x35a0, 0x00000000); - 
nv_mthd(dev, 0x9097, 0x35a4, 0x00000000); - nv_mthd(dev, 0x9097, 0x35a8, 0x00000000); - nv_mthd(dev, 0x9097, 0x35ac, 0x00000000); - nv_mthd(dev, 0x9097, 0x35b0, 0x00000000); - nv_mthd(dev, 0x9097, 0x35b4, 0x00000000); - nv_mthd(dev, 0x9097, 0x35b8, 0x00000000); - nv_mthd(dev, 0x9097, 0x35bc, 0x00000000); - nv_mthd(dev, 0x9097, 0x35c0, 0x00000000); - nv_mthd(dev, 0x9097, 0x35c4, 0x00000000); - nv_mthd(dev, 0x9097, 0x35c8, 0x00000000); - nv_mthd(dev, 0x9097, 0x35cc, 0x00000000); - nv_mthd(dev, 0x9097, 0x35d0, 0x00000000); - nv_mthd(dev, 0x9097, 0x35d4, 0x00000000); - nv_mthd(dev, 0x9097, 0x35d8, 0x00000000); - nv_mthd(dev, 0x9097, 0x35dc, 0x00000000); - nv_mthd(dev, 0x9097, 0x35e0, 0x00000000); - nv_mthd(dev, 0x9097, 0x35e4, 0x00000000); - nv_mthd(dev, 0x9097, 0x35e8, 0x00000000); - nv_mthd(dev, 0x9097, 0x35ec, 0x00000000); - nv_mthd(dev, 0x9097, 0x35f0, 0x00000000); - nv_mthd(dev, 0x9097, 0x35f4, 0x00000000); - nv_mthd(dev, 0x9097, 0x35f8, 0x00000000); - nv_mthd(dev, 0x9097, 0x35fc, 0x00000000); + if (fermi == 0x9097) { + for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) + nv_mthd(dev, 0x9097, mthd, 0x00000000); + } nv_mthd(dev, 0x9097, 0x030c, 0x00000001); nv_mthd(dev, 0x9097, 0x1944, 0x00000000); nv_mthd(dev, 0x9097, 0x1514, 0x00000000); @@ -1320,6 +1199,37 @@ nvc0_grctx_generate_9097(struct drm_device *dev) nv_mthd(dev, 0x9097, 0x3410, 0x80002006); } +static void +nvc0_grctx_generate_9197(struct drm_device *dev) +{ + u32 fermi = nvc0_graph_class(dev); + u32 mthd; + + if (fermi == 0x9197) { + for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) + nv_mthd(dev, 0x9197, mthd, 0x00000000); + } + nv_mthd(dev, 0x9297, 0x02e4, 0x0000b001); +} + +static void +nvc0_grctx_generate_9297(struct drm_device *dev) +{ + u32 fermi = nvc0_graph_class(dev); + u32 mthd; + + if (fermi == 0x9297) { + for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) + nv_mthd(dev, 0x9297, mthd, 0x00000000); + } + nv_mthd(dev, 0x9297, 0x036c, 0x00000000); + nv_mthd(dev, 0x9297, 0x0370, 0x00000000); + nv_mthd(dev, 0x9297, 0x07a4, 0x00000000); + nv_mthd(dev, 0x9297, 0x07a8, 0x00000000); + nv_mthd(dev, 0x9297, 0x0374, 0x00000000); + nv_mthd(dev, 0x9297, 0x0378, 0x00000020); +} + static void nvc0_grctx_generate_902d(struct drm_device *dev) { @@ -1801,6 +1711,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; struct drm_device *dev = chan->dev; int i, gpc, tp, id; + u32 fermi = nvc0_graph_class(dev); u32 r000260, tmp; r000260 = nv_rd32(dev, 0x000260); @@ -2865,6 +2776,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan) nv_wr32(dev, 0x404154, 0x00000400); nvc0_grctx_generate_9097(dev); + if (fermi >= 0x9197) + nvc0_grctx_generate_9197(dev); + if (fermi >= 0x9297) + nvc0_grctx_generate_9297(dev); nvc0_grctx_generate_902d(dev); nvc0_grctx_generate_9039(dev); nvc0_grctx_generate_90c0(dev); -- cgit v1.2.3 From e1b89b1ca59f558d4f7ec18e0b6a8eb34437c8d9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 24 May 2011 14:57:53 +1000 Subject: drm/nvc0/gr: some initial state modifications Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_graph.c | 2 +- drivers/gpu/drm/nouveau/nvc0_grctx.c | 16 ++++------------ 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index a57fba3da941..c99b3caa568c 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -159,7 +159,7 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) nv_wo32(grch->mmio, i++ 
* 4, 0x00405830); nv_wo32(grch->mmio, i++ * 4, magic); for (gpc = 0; gpc < priv->gpc_nr; gpc++) { - for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) { + for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) { u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); nv_wo32(grch->mmio, i++ * 4, reg); nv_wo32(grch->mmio, i++ * 4, magic); diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index b74f84cc0c53..3ac376235d29 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -1531,21 +1531,11 @@ nvc0_grctx_generate_ccache(struct drm_device *dev) static void nvc0_grctx_generate_rop(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - /* ROPC_BROADCAST */ nv_wr32(dev, 0x408800, 0x02802a3c); nv_wr32(dev, 0x408804, 0x00000040); nv_wr32(dev, 0x408808, 0x0003e00d); - switch (dev_priv->chipset) { - case 0xc0: - nv_wr32(dev, 0x408900, 0x0080b801); - break; - case 0xc3: - case 0xc4: - nv_wr32(dev, 0x408900, 0x3080b801); - break; - } + nv_wr32(dev, 0x408900, 0x3080b801); nv_wr32(dev, 0x408904, 0x02000001); nv_wr32(dev, 0x408908, 0x00c80929); nv_wr32(dev, 0x40890c, 0x00000000); @@ -1639,6 +1629,8 @@ nvc0_grctx_generate_tp(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; /* GPC_BROADCAST.TP_BROADCAST */ + nv_wr32(dev, 0x419818, 0x00000000); + nv_wr32(dev, 0x41983c, 0x00038bc7); nv_wr32(dev, 0x419848, 0x00000000); nv_wr32(dev, 0x419864, 0x0000012a); nv_wr32(dev, 0x419888, 0x00000000); @@ -1665,7 +1657,7 @@ nvc0_grctx_generate_tp(struct drm_device *dev) nv_wr32(dev, 0x419c04, 0x00000006); nv_wr32(dev, 0x419c08, 0x00000002); nv_wr32(dev, 0x419c20, 0x00000000); - nv_wr32(dev, 0x419cbc, 0x28137606); + nv_wr32(dev, 0x419cb0, 0x00060048); nv_wr32(dev, 0x419ce8, 0x00000000); nv_wr32(dev, 0x419cf4, 0x00000183); nv_wr32(dev, 0x419d20, 0x02180000); -- cgit v1.2.3 From b53a2d06496d9de109620e4fe136b654bb0ce249 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 24 May 2011 15:44:37 +1000 Subject: drm/nvc0/gr: enable 0xc8/0xce support, no idea if it works or not.. 
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_graph.h | 3 ++- drivers/gpu/drm/nouveau/nvc0_grctx.c | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h index 2b667d4e88ca..f067ed232f97 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.h +++ b/drivers/gpu/drm/nouveau/nvc0_graph.h @@ -82,13 +82,14 @@ nvc0_graph_class(struct drm_device *dev) case 0xc0: case 0xc3: case 0xc4: + case 0xce: /* guess, mmio trace shows only 0x9097 state */ return 0x9097; #if 0 case 0xc1: return 0x9197; +#endif case 0xc8: return 0x9297; -#endif default: return 0; } diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index 3ac376235d29..562a0cd950ee 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -1642,8 +1642,8 @@ nvc0_grctx_generate_tp(struct drm_device *dev) nv_wr32(dev, 0x419a14, 0x00000200); nv_wr32(dev, 0x419a1c, 0x00000000); nv_wr32(dev, 0x419a20, 0x00000800); - if (dev_priv->chipset != 0xc0) - nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */ + if (dev_priv->chipset != 0xc0 && dev_priv->chipset != 0xc8) + nv_wr32(dev, 0x00419ac4, 0x0007f440); nv_wr32(dev, 0x419b00, 0x0a418820); nv_wr32(dev, 0x419b04, 0x062080e6); nv_wr32(dev, 0x419b08, 0x020398a4); @@ -1657,7 +1657,7 @@ nvc0_grctx_generate_tp(struct drm_device *dev) nv_wr32(dev, 0x419c04, 0x00000006); nv_wr32(dev, 0x419c08, 0x00000002); nv_wr32(dev, 0x419c20, 0x00000000); - nv_wr32(dev, 0x419cb0, 0x00060048); + nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048 nv_wr32(dev, 0x419ce8, 0x00000000); nv_wr32(dev, 0x419cf4, 0x00000183); nv_wr32(dev, 0x419d20, 0x02180000); @@ -1687,11 +1687,11 @@ nvc0_grctx_generate_tp(struct drm_device *dev) nv_wr32(dev, 0x419e8c, 0x00000000); nv_wr32(dev, 0x419e90, 0x00000000); nv_wr32(dev, 0x419e98, 0x00000000); - if (dev_priv->chipset != 0xc0) + if (dev_priv->chipset != 0xc0 && dev_priv->chipset != 0xc8) nv_wr32(dev, 0x419ee0, 0x00011110); nv_wr32(dev, 0x419f50, 0x00000000); nv_wr32(dev, 0x419f54, 0x00000000); - if (dev_priv->chipset != 0xc0) + if (dev_priv->chipset != 0xc0 && dev_priv->chipset != 0xc8) nv_wr32(dev, 0x419f58, 0x00000000); } -- cgit v1.2.3 From d4409cc7e26b5f20b38a791e4c29b6c221e95acf Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 24 May 2011 16:06:42 +1000 Subject: drm/nvc1/gr: switch on acceleration support There's issues with certain 3D apps still, unknown whether this is a kernel issue or not.. It does appear that it may be in the 3D driver however. 
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_graph.h | 2 -- drivers/gpu/drm/nouveau/nvc0_grctx.c | 49 +++++++++++++++++++++++++++--------- 2 files changed, 37 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h index f067ed232f97..fa2f9cb470ad 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.h +++ b/drivers/gpu/drm/nouveau/nvc0_graph.h @@ -84,10 +84,8 @@ nvc0_graph_class(struct drm_device *dev) case 0xc4: case 0xce: /* guess, mmio trace shows only 0x9097 state */ return 0x9097; -#if 0 case 0xc1: return 0x9197; -#endif case 0xc8: return 0x9297; default: diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index 562a0cd950ee..e99ebb011c05 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -1469,8 +1469,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *dev) static void nvc0_grctx_generate_shaders(struct drm_device *dev) { - nv_wr32(dev, 0x405800, 0x078000bf); - nv_wr32(dev, 0x405830, 0x02180000); + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->chipset != 0xc1) { + nv_wr32(dev, 0x405800, 0x078000bf); + nv_wr32(dev, 0x405830, 0x02180000); + } else { + nv_wr32(dev, 0x405800, 0x0f8000bf); + nv_wr32(dev, 0x405830, 0x02180218); + } nv_wr32(dev, 0x405834, 0x00000000); nv_wr32(dev, 0x405838, 0x00000000); nv_wr32(dev, 0x405854, 0x00000000); @@ -1496,10 +1503,16 @@ nvc0_grctx_generate_unk60xx(struct drm_device *dev) static void nvc0_grctx_generate_unk64xx(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + nv_wr32(dev, 0x4064a8, 0x00000000); nv_wr32(dev, 0x4064ac, 0x00003fff); nv_wr32(dev, 0x4064b4, 0x00000000); nv_wr32(dev, 0x4064b8, 0x00000000); + if (dev_priv->chipset == 0xc1) { + nv_wr32(dev, 0x4064c0, 0x80140078); + nv_wr32(dev, 0x4064c4, 0x0086ffff); + } } static void @@ -1531,12 +1544,15 @@ nvc0_grctx_generate_ccache(struct drm_device *dev) static void nvc0_grctx_generate_rop(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + int chipset = dev_priv->chipset; + /* ROPC_BROADCAST */ nv_wr32(dev, 0x408800, 0x02802a3c); nv_wr32(dev, 0x408804, 0x00000040); - nv_wr32(dev, 0x408808, 0x0003e00d); + nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005); nv_wr32(dev, 0x408900, 0x3080b801); - nv_wr32(dev, 0x408904, 0x02000001); + nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001); nv_wr32(dev, 0x408908, 0x00c80929); nv_wr32(dev, 0x40890c, 0x00000000); nv_wr32(dev, 0x408980, 0x0000011d); @@ -1545,6 +1561,8 @@ nvc0_grctx_generate_rop(struct drm_device *dev) static void nvc0_grctx_generate_gpc(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + int chipset = dev_priv->chipset; int i; /* GPC_BROADCAST */ @@ -1576,7 +1594,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev) nv_wr32(dev, 0x41880c, 0x00000000); nv_wr32(dev, 0x418810, 0x00000000); nv_wr32(dev, 0x418828, 0x00008442); - nv_wr32(dev, 0x418830, 0x00000001); + nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001); nv_wr32(dev, 0x4188d8, 0x00000008); nv_wr32(dev, 0x4188e0, 0x01000000); nv_wr32(dev, 0x4188e8, 0x00000000); @@ -1584,7 +1602,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev) nv_wr32(dev, 0x4188f0, 0x00000000); nv_wr32(dev, 0x4188f4, 0x00000000); nv_wr32(dev, 0x4188f8, 0x00000000); - nv_wr32(dev, 0x4188fc, 0x00100000); + nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 
0x00100000 : 0x00100018); nv_wr32(dev, 0x41891c, 0x00ff00ff); nv_wr32(dev, 0x418924, 0x00000000); nv_wr32(dev, 0x418928, 0x00ffff00); @@ -1615,6 +1633,8 @@ nvc0_grctx_generate_gpc(struct drm_device *dev) nv_wr32(dev, 0x418c24, 0x00000000); nv_wr32(dev, 0x418c28, 0x00000000); nv_wr32(dev, 0x418c2c, 0x00000000); + if (chipset == 0xc1) + nv_wr32(dev, 0x418c6c, 0x00000001); nv_wr32(dev, 0x418c80, 0x20200004); nv_wr32(dev, 0x418c8c, 0x00000001); nv_wr32(dev, 0x419000, 0x00000780); @@ -1627,12 +1647,13 @@ static void nvc0_grctx_generate_tp(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + int chipset = dev_priv->chipset; /* GPC_BROADCAST.TP_BROADCAST */ nv_wr32(dev, 0x419818, 0x00000000); nv_wr32(dev, 0x41983c, 0x00038bc7); nv_wr32(dev, 0x419848, 0x00000000); - nv_wr32(dev, 0x419864, 0x0000012a); + nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129); nv_wr32(dev, 0x419888, 0x00000000); nv_wr32(dev, 0x419a00, 0x000001f0); nv_wr32(dev, 0x419a04, 0x00000001); @@ -1642,7 +1663,7 @@ nvc0_grctx_generate_tp(struct drm_device *dev) nv_wr32(dev, 0x419a14, 0x00000200); nv_wr32(dev, 0x419a1c, 0x00000000); nv_wr32(dev, 0x419a20, 0x00000800); - if (dev_priv->chipset != 0xc0 && dev_priv->chipset != 0xc8) + if (chipset != 0xc0 && chipset != 0xc8) nv_wr32(dev, 0x00419ac4, 0x0007f440); nv_wr32(dev, 0x419b00, 0x0a418820); nv_wr32(dev, 0x419b04, 0x062080e6); @@ -1651,7 +1672,7 @@ nvc0_grctx_generate_tp(struct drm_device *dev) nv_wr32(dev, 0x419b10, 0x0a418820); nv_wr32(dev, 0x419b14, 0x000000e6); nv_wr32(dev, 0x419bd0, 0x00900103); - nv_wr32(dev, 0x419be0, 0x00000001); + nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001); nv_wr32(dev, 0x419be4, 0x00000000); nv_wr32(dev, 0x419c00, 0x00000002); nv_wr32(dev, 0x419c04, 0x00000006); @@ -1660,8 +1681,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev) nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048 nv_wr32(dev, 0x419ce8, 0x00000000); nv_wr32(dev, 0x419cf4, 0x00000183); - nv_wr32(dev, 0x419d20, 0x02180000); + nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 
0x02180000 : 0x12180000); nv_wr32(dev, 0x419d24, 0x00001fff); + if (chipset == 0xc1) + nv_wr32(dev, 0x419d44, 0x02180218); nv_wr32(dev, 0x419e04, 0x00000000); nv_wr32(dev, 0x419e08, 0x00000000); nv_wr32(dev, 0x419e0c, 0x00000000); @@ -1687,11 +1710,11 @@ nvc0_grctx_generate_tp(struct drm_device *dev) nv_wr32(dev, 0x419e8c, 0x00000000); nv_wr32(dev, 0x419e90, 0x00000000); nv_wr32(dev, 0x419e98, 0x00000000); - if (dev_priv->chipset != 0xc0 && dev_priv->chipset != 0xc8) + if (chipset != 0xc0 && chipset != 0xc8) nv_wr32(dev, 0x419ee0, 0x00011110); nv_wr32(dev, 0x419f50, 0x00000000); nv_wr32(dev, 0x419f54, 0x00000000); - if (dev_priv->chipset != 0xc0 && dev_priv->chipset != 0xc8) + if (chipset != 0xc0 && chipset != 0xc8) nv_wr32(dev, 0x419f58, 0x00000000); } @@ -2536,6 +2559,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan) nv_icmd(dev, 0x0000053f, 0xffff0000); nv_icmd(dev, 0x00000585, 0x0000003f); nv_icmd(dev, 0x00000576, 0x00000003); + if (dev_priv->chipset == 0xc1) + nv_icmd(dev, 0x0000057b, 0x00000059); nv_icmd(dev, 0x00000586, 0x00000040); nv_icmd(dev, 0x00000582, 0x00000080); nv_icmd(dev, 0x00000583, 0x00000080); -- cgit v1.2.3 From aba99a8400e0b1ca9e6306e3a71013cc7a25bc29 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 25 May 2011 14:48:50 +1000 Subject: drm/nouveau: default to noaccel on 0xc1/0xc8/0xce for now Until we know these should work properly, would much rather default to noaccel than risk giving people corruption/hangs out of the box.. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.c | 2 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_state.c | 26 ++++++++++++++++++++++---- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 02c6f37d8bd7..bdee1a6956e7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -73,7 +73,7 @@ int nouveau_ignorelid = 0; module_param_named(ignorelid, nouveau_ignorelid, int, 0400); MODULE_PARM_DESC(noaccel, "Disable all acceleration"); -int nouveau_noaccel = 0; +int nouveau_noaccel = -1; module_param_named(noaccel, nouveau_noaccel, int, 0400); MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 9c56331941e2..276fac7b7569 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -608,6 +608,7 @@ enum nouveau_card_type { struct drm_nouveau_private { struct drm_device *dev; + bool noaccel; /* the card type, takes NV_* as values */ enum nouveau_card_type card_type; diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index dd6f30574a76..f65811c3eb4d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -565,7 +565,7 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_timer; - if (!nouveau_noaccel) { + if (!dev_priv->noaccel) { switch (dev_priv->card_type) { case NV_04: nv04_graph_create(dev); @@ -677,10 +677,10 @@ out_vblank: drm_vblank_cleanup(dev); engine->display.destroy(dev); out_fifo: - if (!nouveau_noaccel) + if (!dev_priv->noaccel) engine->fifo.takedown(dev); out_engine: - if (!nouveau_noaccel) { + if (!dev_priv->noaccel) { for (e = e - 1; e >= 0; e--) { if (!dev_priv->eng[e]) continue; @@ -725,7 +725,7 @@ static void nouveau_card_takedown(struct drm_device *dev) nouveau_channel_put_unlocked(&dev_priv->channel); } - 
if (!nouveau_noaccel) { + if (!dev_priv->noaccel) { engine->fifo.takedown(dev); for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { if (dev_priv->eng[e]) { @@ -936,6 +936,24 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", dev_priv->card_type, reg0); + /* Determine whether we'll attempt acceleration or not, some + * cards are disabled by default here due to them being known + * non-functional, or never been tested due to lack of hw. + */ + dev_priv->noaccel = !!nouveau_noaccel; + if (nouveau_noaccel == -1) { + switch (dev_priv->chipset) { + case 0xc1: /* known broken */ + case 0xc8: /* never tested */ + case 0xce: /* never tested */ + dev_priv->noaccel = true; + break; + default: + dev_priv->noaccel = false; + break; + } + } + ret = nouveau_remove_conflicting_drivers(dev); if (ret) goto err_mmio; -- cgit v1.2.3 From f8522fc80f2e0392fc44b069f61721bd25907270 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 25 May 2011 17:22:43 +1000 Subject: drm/nvc0: fix suspend/resume of PGRAPH/PCOPYn We need the physical VRAM address in vinst, even for objects mapped into a vm, as the gpuobj suspend/resume code uses PMEM to access the object. Previously, vinst was overloaded to mean "VRAM address" for !VM objects, and "VM address" for VM objects, causing the wrong data to be accessed during suspend/resume. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 7 ++++--- drivers/gpu/drm/nouveau/nv50_instmem.c | 3 +-- drivers/gpu/drm/nouveau/nvc0_copy.c | 4 ++-- drivers/gpu/drm/nouveau/nvc0_graph.c | 20 ++++++++++---------- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 276fac7b7569..7136ad34921d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -176,9 +176,10 @@ struct nouveau_gpuobj { uint32_t flags; u32 size; - u32 pinst; - u32 cinst; - u64 vinst; + u32 pinst; /* PRAMIN BAR offset */ + u32 cinst; /* Channel offset */ + u64 vinst; /* VRAM address */ + u64 linst; /* VM address */ uint32_t engine; uint32_t class; diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 4f95a1e5822e..ccea671346c9 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -305,7 +305,6 @@ struct nv50_gpuobj_node { u32 align; }; - int nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) { @@ -345,7 +344,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) } nouveau_vm_map(&node->chan_vma, node->vram); - gpuobj->vinst = node->chan_vma.offset; + gpuobj->linst = node->chan_vma.offset; } gpuobj->size = size; diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c index 208fa7ab3f42..02c00bbeb9e5 100644 --- a/drivers/gpu/drm/nouveau/nvc0_copy.c +++ b/drivers/gpu/drm/nouveau/nvc0_copy.c @@ -54,8 +54,8 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine) if (ret) return ret; - nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst)); - nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst)); + nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst)); + nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst)); dev_priv->engine.instmem.flush(dev); chan->engctx[engine] = ctx; diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index c99b3caa568c..6c06d6636a3c 100644 --- 
a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -131,27 +131,27 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) nv_wo32(grch->mmio, i++ * 4, 0x00408004); - nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); + nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8); nv_wo32(grch->mmio, i++ * 4, 0x00408008); nv_wo32(grch->mmio, i++ * 4, 0x80000018); nv_wo32(grch->mmio, i++ * 4, 0x0040800c); - nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); + nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8); nv_wo32(grch->mmio, i++ * 4, 0x00408010); nv_wo32(grch->mmio, i++ * 4, 0x80000000); nv_wo32(grch->mmio, i++ * 4, 0x00418810); - nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12); + nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12); nv_wo32(grch->mmio, i++ * 4, 0x00419848); - nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12); + nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12); nv_wo32(grch->mmio, i++ * 4, 0x00419004); - nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); + nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8); nv_wo32(grch->mmio, i++ * 4, 0x00419008); nv_wo32(grch->mmio, i++ * 4, 0x00000000); nv_wo32(grch->mmio, i++ * 4, 0x00418808); - nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); + nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8); nv_wo32(grch->mmio, i++ * 4, 0x0041880c); nv_wo32(grch->mmio, i++ * 4, 0x80000018); @@ -197,8 +197,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine) if (ret) goto error; - nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4); - nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst)); + nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4); + nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst)); pinstmem->flush(dev); if (!priv->grctx_vals) { @@ -213,8 +213,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine) nv_wo32(grctx, 0xf4, 0); nv_wo32(grctx, 0xf8, 0); nv_wo32(grctx, 0x10, grch->mmio_nr); - nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst)); - nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst)); + nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst)); + nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst)); nv_wo32(grctx, 0x1c, 1); nv_wo32(grctx, 0x20, 0); nv_wo32(grctx, 0x28, 0); -- cgit v1.2.3 From 0411de854898a2402cf4bd915bed7ec9a6b76f9a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 25 May 2011 18:32:44 +1000 Subject: drm/nvc0/gr: import and use our own fuc by default The ability to use NVIDIA's fuc has been retained *temporarily* in order to better debug any issues that may be lingering in our initial attempt at writing this ucode. Once I'm fairly confident we're okay, it'll be removed. There's a number of things not implemented by this fuc currently, but most of it is sets of state that our context setup would not have used anyway. No doubt we'll find out what they're for at some point, and implement it if required. This has been tested on 0xc0/0xc4 thus far, and from what I could tell it worked as well as NVIDIA's. It's also been tested on 0xc1, but even with NVIDIA's fuc that chipset doesn't work correctly with nouveau yet. 0xc3/0xc8/0xce should in theory be supported too, but I don't have the hardware to check that. There's no doubt numerous bugs to squash yet, please report any! 
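
For reference, the choice between the built-in fuc and NVIDIA's external
firmware is made at module load time through the new "ctxfw" parameter this
patch adds (perms 0400, so it cannot be changed after load). A minimal usage
sketch, assuming the driver is loaded as the usual nouveau module:

    # default: use the built-in HUB/GPC ucode from nvc0_grhub.fuc.h / nvc0_grgpc.fuc.h
    modprobe nouveau ctxfw=0

    # fall back to the external fuc409c/fuc409d/fuc41ac/fuc41ad firmware files
    modprobe nouveau ctxfw=1

(or nouveau.ctxfw=1 on the kernel command line when the driver is built in).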
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.c | 4 + drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nvc0_graph.c | 185 +++++-- drivers/gpu/drm/nouveau/nvc0_graph.fuc | 400 +++++++++++++++ drivers/gpu/drm/nouveau/nvc0_grgpc.fuc | 474 +++++++++++++++++ drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h | 483 ++++++++++++++++++ drivers/gpu/drm/nouveau/nvc0_grhub.fuc | 808 +++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h | 838 +++++++++++++++++++++++++++++++ 8 files changed, 3157 insertions(+), 36 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvc0_graph.fuc create mode 100644 drivers/gpu/drm/nouveau/nvc0_grgpc.fuc create mode 100644 drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h create mode 100644 drivers/gpu/drm/nouveau/nvc0_grhub.fuc create mode 100644 drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index bdee1a6956e7..7e25f5a6db96 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -119,6 +119,10 @@ MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n"); int nouveau_msi; module_param_named(msi, nouveau_msi, int, 0400); +MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n"); +int nouveau_ctxfw; +module_param_named(ctxfw, nouveau_ctxfw, int, 0400); + int nouveau_fbpercrtc; #if 0 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 7136ad34921d..86eb3f40c4f8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -786,6 +786,7 @@ extern int nouveau_override_conntype; extern char *nouveau_perflvl; extern int nouveau_perflvl_wr; extern int nouveau_msi; +extern int nouveau_ctxfw; extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); extern int nouveau_pci_resume(struct pci_dev *pdev); diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index 6c06d6636a3c..c5aa7e7aea0d 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -28,7 +28,34 @@ #include "nouveau_drv.h" #include "nouveau_mm.h" + #include "nvc0_graph.h" +#include "nvc0_grhub.fuc.h" +#include "nvc0_grgpc.fuc.h" + +static void +nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base) +{ + NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base, + nv_rd32(dev, base + 0x400)); + NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, + nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804), + nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c)); + NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, + nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814), + nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c)); +} + +static void +nvc0_graph_ctxctl_debug(struct drm_device *dev) +{ + u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff; + u32 gpc; + + nvc0_graph_ctxctl_debug_unit(dev, 0x409000); + for (gpc = 0; gpc < gpcnr; gpc++) + nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000)); +} static int nvc0_graph_load_context(struct nouveau_channel *chan) @@ -72,13 +99,24 @@ nvc0_graph_construct_context(struct nouveau_channel *chan) if (!ctx) return -ENOMEM; - nvc0_graph_load_context(chan); - - nv_wo32(grch->grctx, 0x1c, 1); - nv_wo32(grch->grctx, 0x20, 0); - nv_wo32(grch->grctx, 0x28, 0); - nv_wo32(grch->grctx, 0x2c, 0); - dev_priv->engine.instmem.flush(dev); + if 
(!nouveau_ctxfw) { + nv_wr32(dev, 0x409840, 0x80000000); + nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); + nv_wr32(dev, 0x409504, 0x00000001); + if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { + NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n"); + nvc0_graph_ctxctl_debug(dev); + return -EBUSY; + } + } else { + nvc0_graph_load_context(chan); + + nv_wo32(grch->grctx, 0x1c, 1); + nv_wo32(grch->grctx, 0x20, 0); + nv_wo32(grch->grctx, 0x28, 0); + nv_wo32(grch->grctx, 0x2c, 0); + dev_priv->engine.instmem.flush(dev); + } ret = nvc0_grctx_generate(chan); if (ret) { @@ -86,10 +124,21 @@ nvc0_graph_construct_context(struct nouveau_channel *chan) return ret; } - ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst); - if (ret) { - kfree(ctx); - return ret; + if (!nouveau_ctxfw) { + nv_wr32(dev, 0x409840, 0x80000000); + nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); + nv_wr32(dev, 0x409504, 0x00000002); + if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { + NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n"); + nvc0_graph_ctxctl_debug(dev); + return -EBUSY; + } + } else { + ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst); + if (ret) { + kfree(ctx); + return ret; + } } for (i = 0; i < priv->grctx_size; i += 4) @@ -210,15 +259,20 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine) for (i = 0; i < priv->grctx_size; i += 4) nv_wo32(grctx, i, priv->grctx_vals[i / 4]); - nv_wo32(grctx, 0xf4, 0); - nv_wo32(grctx, 0xf8, 0); - nv_wo32(grctx, 0x10, grch->mmio_nr); - nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst)); - nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst)); - nv_wo32(grctx, 0x1c, 1); - nv_wo32(grctx, 0x20, 0); - nv_wo32(grctx, 0x28, 0); - nv_wo32(grctx, 0x2c, 0); + if (!nouveau_ctxfw) { + nv_wo32(grctx, 0x00, grch->mmio_nr); + nv_wo32(grctx, 0x04, grch->mmio->linst >> 8); + } else { + nv_wo32(grctx, 0xf4, 0); + nv_wo32(grctx, 0xf8, 0); + nv_wo32(grctx, 0x10, grch->mmio_nr); + nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst)); + nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst)); + nv_wo32(grctx, 0x1c, 1); + nv_wo32(grctx, 0x20, 0); + nv_wo32(grctx, 0x28, 0); + nv_wo32(grctx, 0x2c, 0); + } pinstmem->flush(dev); return 0; @@ -419,8 +473,51 @@ nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base, static int nvc0_graph_init_ctxctl(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); u32 r000260; + int i; + + if (!nouveau_ctxfw) { + /* load HUB microcode */ + r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); + nv_wr32(dev, 0x4091c0, 0x01000000); + for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++) + nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]); + + nv_wr32(dev, 0x409180, 0x01000000); + for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(dev, 0x409188, i >> 6); + nv_wr32(dev, 0x409184, nvc0_grhub_code[i]); + } + + /* load GPC microcode */ + nv_wr32(dev, 0x41a1c0, 0x01000000); + for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++) + nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]); + + nv_wr32(dev, 0x41a180, 0x01000000); + for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(dev, 0x41a188, i >> 6); + nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]); + } + nv_wr32(dev, 0x000260, r000260); + + /* start HUB ucode running, it'll init the GPCs */ + nv_wr32(dev, 0x409800, dev_priv->chipset); + nv_wr32(dev, 0x40910c, 0x00000000); + nv_wr32(dev, 0x409100, 0x00000002); + if 
(!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { + NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n"); + nvc0_graph_ctxctl_debug(dev); + return -EBUSY; + } + + priv->grctx_size = nv_rd32(dev, 0x409804); + return 0; + } /* load fuc microcode */ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); @@ -527,6 +624,22 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) return i; } +static void +nvc0_graph_ctxctl_isr(struct drm_device *dev) +{ + u32 ustat = nv_rd32(dev, 0x409c18); + + if (ustat & 0x00000001) + NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n"); + if (ustat & 0x00080000) + NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n"); + if (ustat & ~0x00080001) + NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat); + + nvc0_graph_ctxctl_debug(dev); + nv_wr32(dev, 0x409c20, ustat); +} + static void nvc0_graph_isr(struct drm_device *dev) { @@ -578,11 +691,7 @@ nvc0_graph_isr(struct drm_device *dev) } if (stat & 0x00080000) { - u32 ustat = nv_rd32(dev, 0x409c18); - - NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat); - - nv_wr32(dev, 0x409c20, ustat); + nvc0_graph_ctxctl_isr(dev); nv_wr32(dev, 0x400100, 0x00080000); stat &= ~0x00080000; } @@ -651,10 +760,12 @@ nvc0_graph_destroy(struct drm_device *dev, int engine) { struct nvc0_graph_priv *priv = nv_engine(dev, engine); - nvc0_graph_destroy_fw(&priv->fuc409c); - nvc0_graph_destroy_fw(&priv->fuc409d); - nvc0_graph_destroy_fw(&priv->fuc41ac); - nvc0_graph_destroy_fw(&priv->fuc41ad); + if (nouveau_ctxfw) { + nvc0_graph_destroy_fw(&priv->fuc409c); + nvc0_graph_destroy_fw(&priv->fuc409d); + nvc0_graph_destroy_fw(&priv->fuc41ac); + nvc0_graph_destroy_fw(&priv->fuc41ad); + } nouveau_irq_unregister(dev, 12); nouveau_irq_unregister(dev, 25); @@ -698,15 +809,17 @@ nvc0_graph_create(struct drm_device *dev) nouveau_irq_register(dev, 12, nvc0_graph_isr); nouveau_irq_register(dev, 25, nvc0_runk140_isr); - if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) || - nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) || - nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) || - nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) { - ret = 0; - goto error; + if (nouveau_ctxfw) { + NV_INFO(dev, "PGRAPH: using external firmware\n"); + if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) || + nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) || + nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) || + nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) { + ret = 0; + goto error; + } } - ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4); if (ret) goto error; diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc new file mode 100644 index 000000000000..2a4b6dc8f9de --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc @@ -0,0 +1,400 @@ +/* fuc microcode util functions for nvc0 PGRAPH + * + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)') +define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))') + +ifdef(`include_code', ` +// Error codes +define(`E_BAD_COMMAND', 0x01) +define(`E_CMD_OVERFLOW', 0x02) + +// Util macros to help with debugging ucode hangs etc +define(`T_WAIT', 0) +define(`T_MMCTX', 1) +define(`T_STRWAIT', 2) +define(`T_STRINIT', 3) +define(`T_AUTO', 4) +define(`T_CHAN', 5) +define(`T_LOAD', 6) +define(`T_SAVE', 7) +define(`T_LCHAN', 8) +define(`T_LCTXH', 9) + +define(`trace_set', ` + mov $r8 0x83c + shl b32 $r8 6 + clear b32 $r9 + bset $r9 $1 + iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7] +') + +define(`trace_clr', ` + mov $r8 0x85c + shl b32 $r8 6 + clear b32 $r9 + bset $r9 $1 + iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7] +') + +// queue_put - add request to queue +// +// In : $r13 queue pointer +// $r14 command +// $r15 data +// +queue_put: + // make sure we have space.. + ld b32 $r8 D[$r13 + 0x0] // GET + ld b32 $r9 D[$r13 + 0x4] // PUT + xor $r8 8 + cmpu b32 $r8 $r9 + bra ne queue_put_next + mov $r15 E_CMD_OVERFLOW + call error + ret + + // store cmd/data on queue + queue_put_next: + and $r8 $r9 7 + shl b32 $r8 3 + add b32 $r8 $r13 + add b32 $r8 8 + st b32 D[$r8 + 0x0] $r14 + st b32 D[$r8 + 0x4] $r15 + + // update PUT + add b32 $r9 1 + and $r9 0xf + st b32 D[$r13 + 0x4] $r9 + ret + +// queue_get - fetch request from queue +// +// In : $r13 queue pointer +// +// Out: $p1 clear on success (data available) +// $r14 command +// $r15 data +// +queue_get: + bset $flags $p1 + ld b32 $r8 D[$r13 + 0x0] // GET + ld b32 $r9 D[$r13 + 0x4] // PUT + cmpu b32 $r8 $r9 + bra e queue_get_done + // fetch first cmd/data pair + and $r9 $r8 7 + shl b32 $r9 3 + add b32 $r9 $r13 + add b32 $r9 8 + ld b32 $r14 D[$r9 + 0x0] + ld b32 $r15 D[$r9 + 0x4] + + // update GET + add b32 $r8 1 + and $r8 0xf + st b32 D[$r13 + 0x0] $r8 + bclr $flags $p1 +queue_get_done: + ret + +// nv_rd32 - read 32-bit value from nv register +// +// In : $r14 register +// Out: $r15 value +// +nv_rd32: + mov $r11 0x728 + shl b32 $r11 6 + mov b32 $r12 $r14 + bset $r12 31 // MMIO_CTRL_PENDING + iowr I[$r11 + 0x000] $r12 // MMIO_CTRL + nv_rd32_wait: + iord $r12 I[$r11 + 0x000] + xbit $r12 $r12 31 + bra ne nv_rd32_wait + mov $r10 6 // DONE_MMIO_RD + call wait_doneo + iord $r15 I[$r11 + 0x100] // MMIO_RDVAL + ret + +// nv_wr32 - write 32-bit value to nv register +// +// In : $r14 register +// $r15 value +// +nv_wr32: + mov $r11 0x728 + shl b32 $r11 6 + iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL + mov b32 $r12 $r14 + bset $r12 31 // MMIO_CTRL_PENDING + bset $r12 30 // MMIO_CTRL_WRITE + iowr I[$r11 + 0x000] $r12 // MMIO_CTRL + nv_wr32_wait: + iord $r12 I[$r11 + 0x000] + xbit $r12 $r12 31 + bra ne nv_wr32_wait + ret + +// (re)set watchdog timer +// +// In : $r15 timeout +// +watchdog_reset: + mov $r8 0x430 + shl b32 $r8 6 + bset $r15 31 + iowr I[$r8 + 0x000] $r15 + ret + +// clear watchdog timer +watchdog_clear: + mov $r8 0x430 + shl b32 $r8 6 + iowr I[$r8 + 0x000] $r0 + ret + +// wait_done{z,o} - 
wait on FUC_DONE bit to become clear/set +// +// In : $r10 bit to wait on +// +define(`wait_done', ` +$1: + trace_set(T_WAIT); + mov $r8 0x818 + shl b32 $r8 6 + iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit + wait_done_$1: + mov $r8 0x400 + shl b32 $r8 6 + iord $r8 I[$r8 + 0x000] // DONE + xbit $r8 $r8 $r10 + bra $2 wait_done_$1 + trace_clr(T_WAIT) + ret +') +wait_done(wait_donez, ne) +wait_done(wait_doneo, e) + +// mmctx_size - determine size of a mmio list transfer +// +// In : $r14 mmio list head +// $r15 mmio list tail +// Out: $r15 transfer size (in bytes) +// +mmctx_size: + clear b32 $r9 + nv_mmctx_size_loop: + ld b32 $r8 D[$r14] + shr b32 $r8 26 + add b32 $r8 1 + shl b32 $r8 2 + add b32 $r9 $r8 + add b32 $r14 4 + cmpu b32 $r14 $r15 + bra ne nv_mmctx_size_loop + mov b32 $r15 $r9 + ret + +// mmctx_xfer - execute a list of mmio transfers +// +// In : $r10 flags +// bit 0: direction (0 = save, 1 = load) +// bit 1: set if first transfer +// bit 2: set if last transfer +// $r11 base +// $r12 mmio list head +// $r13 mmio list tail +// $r14 multi_stride +// $r15 multi_mask +// +mmctx_xfer: + trace_set(T_MMCTX) + mov $r8 0x710 + shl b32 $r8 6 + clear b32 $r9 + or $r11 $r11 + bra e mmctx_base_disabled + iowr I[$r8 + 0x000] $r11 // MMCTX_BASE + bset $r9 0 // BASE_EN + mmctx_base_disabled: + or $r14 $r14 + bra e mmctx_multi_disabled + iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE + iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK + bset $r9 1 // MULTI_EN + mmctx_multi_disabled: + add b32 $r8 0x100 + + xbit $r11 $r10 0 + shl b32 $r11 16 // DIR + bset $r11 12 // QLIMIT = 0x10 + xbit $r14 $r10 1 + shl b32 $r14 17 + or $r11 $r14 // START_TRIGGER + iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL + + // loop over the mmio list, and send requests to the hw + mmctx_exec_loop: + // wait for space in mmctx queue + mmctx_wait_free: + iord $r14 I[$r8 + 0x000] // MMCTX_CTRL + and $r14 0x1f + bra e mmctx_wait_free + + // queue up an entry + ld b32 $r14 D[$r12] + or $r14 $r9 + iowr I[$r8 + 0x300] $r14 + add b32 $r12 4 + cmpu b32 $r12 $r13 + bra ne mmctx_exec_loop + + xbit $r11 $r10 2 + bra ne mmctx_stop + // wait for queue to empty + mmctx_fini_wait: + iord $r11 I[$r8 + 0x000] // MMCTX_CTRL + and $r11 0x1f + cmpu b32 $r11 0x10 + bra ne mmctx_fini_wait + mov $r10 2 // DONE_MMCTX + call wait_donez + bra mmctx_done + mmctx_stop: + xbit $r11 $r10 0 + shl b32 $r11 16 // DIR + bset $r11 12 // QLIMIT = 0x10 + bset $r11 18 // STOP_TRIGGER + iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL + mmctx_stop_wait: + // wait for STOP_TRIGGER to clear + iord $r11 I[$r8 + 0x000] // MMCTX_CTRL + xbit $r11 $r11 18 + bra ne mmctx_stop_wait + mmctx_done: + trace_clr(T_MMCTX) + ret + +// Wait for DONE_STRAND +// +strand_wait: + push $r10 + mov $r10 2 + call wait_donez + pop $r10 + ret + +// unknown - call before issuing strand commands +// +strand_pre: + mov $r8 0x4afc + sethi $r8 0x20000 + mov $r9 0xc + iowr I[$r8] $r9 + call strand_wait + ret + +// unknown - call after issuing strand commands +// +strand_post: + mov $r8 0x4afc + sethi $r8 0x20000 + mov $r9 0xd + iowr I[$r8] $r9 + call strand_wait + ret + +// Selects strand set?! 
+// +// In: $r14 id +// +strand_set: + mov $r10 0x4ffc + sethi $r10 0x20000 + sub b32 $r11 $r10 0x500 + mov $r12 0xf + iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf + mov $r12 0xb + iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb + call strand_wait + iowr I[$r10 + 0x000] $r14 // 0x93c = + mov $r12 0xa + iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa + call strand_wait + ret + +// Initialise strand context data +// +// In : $r15 context base +// Out: $r15 context size (in bytes) +// +// Strandset(?) 3 hardcoded currently +// +strand_ctx_init: + trace_set(T_STRINIT) + call strand_pre + mov $r14 3 + call strand_set + mov $r10 0x46fc + sethi $r10 0x20000 + add b32 $r11 $r10 0x400 + iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0 + mov $r12 1 + iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE + call strand_wait + sub b32 $r12 $r0 1 + iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff + mov $r12 2 + iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT + call strand_wait + call strand_post + + // read the size of each strand, poke the context offset of + // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry + // about it later then. + mov $r8 0x880 + shl b32 $r8 6 + iord $r9 I[$r8 + 0x000] // STRANDS + add b32 $r8 0x2200 + shr b32 $r14 $r15 8 + ctx_init_strand_loop: + iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE + iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE + iord $r10 I[$r8 + 0x200] // STRAND_SIZE + shr b32 $r10 6 + add b32 $r10 1 + add b32 $r14 $r10 + add b32 $r8 4 + sub b32 $r9 1 + bra ne ctx_init_strand_loop + + shl b32 $r14 8 + sub b32 $r15 $r14 $r15 + trace_clr(T_STRINIT) + ret +') diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc new file mode 100644 index 000000000000..0ec2add72a76 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc @@ -0,0 +1,474 @@ +/* fuc microcode for nvc0 PGRAPH/GPC + * + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +/* To build: + * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h + */ + +/* TODO + * - bracket certain functions with scratch writes, useful for debugging + * - watchdog timer around ctx operations + */ + +.section nvc0_grgpc_data +include(`nvc0_graph.fuc') +gpc_id: .b32 0 +gpc_mmio_list_head: .b32 0 +gpc_mmio_list_tail: .b32 0 + +tpc_count: .b32 0 +tpc_mask: .b32 0 +tpc_mmio_list_head: .b32 0 +tpc_mmio_list_tail: .b32 0 + +cmd_queue: queue_init + +// chipset descriptions +chipsets: +.b8 0xc0 0 0 0 +.b16 nvc0_gpc_mmio_head +.b16 nvc0_gpc_mmio_tail +.b16 nvc0_tpc_mmio_head +.b16 nvc0_tpc_mmio_tail +.b8 0xc1 0 0 0 +.b16 nvc0_gpc_mmio_head +.b16 nvc1_gpc_mmio_tail +.b16 nvc0_tpc_mmio_head +.b16 nvc1_tpc_mmio_tail +.b8 0xc3 0 0 0 +.b16 nvc0_gpc_mmio_head +.b16 nvc0_gpc_mmio_tail +.b16 nvc0_tpc_mmio_head +.b16 nvc3_tpc_mmio_tail +.b8 0xc4 0 0 0 +.b16 nvc0_gpc_mmio_head +.b16 nvc0_gpc_mmio_tail +.b16 nvc0_tpc_mmio_head +.b16 nvc3_tpc_mmio_tail +.b8 0xc8 0 0 0 +.b16 nvc0_gpc_mmio_head +.b16 nvc0_gpc_mmio_tail +.b16 nvc0_tpc_mmio_head +.b16 nvc0_tpc_mmio_tail +.b8 0xce 0 0 0 +.b16 nvc0_gpc_mmio_head +.b16 nvc0_gpc_mmio_tail +.b16 nvc0_tpc_mmio_head +.b16 nvc3_tpc_mmio_tail +.b8 0 0 0 0 + +// GPC mmio lists +nvc0_gpc_mmio_head: +mmctx_data(0x000380, 1) +mmctx_data(0x000400, 6) +mmctx_data(0x000450, 9) +mmctx_data(0x000600, 1) +mmctx_data(0x000684, 1) +mmctx_data(0x000700, 5) +mmctx_data(0x000800, 1) +mmctx_data(0x000808, 3) +mmctx_data(0x000828, 1) +mmctx_data(0x000830, 1) +mmctx_data(0x0008d8, 1) +mmctx_data(0x0008e0, 1) +mmctx_data(0x0008e8, 6) +mmctx_data(0x00091c, 1) +mmctx_data(0x000924, 3) +mmctx_data(0x000b00, 1) +mmctx_data(0x000b08, 6) +mmctx_data(0x000bb8, 1) +mmctx_data(0x000c08, 1) +mmctx_data(0x000c10, 8) +mmctx_data(0x000c80, 1) +mmctx_data(0x000c8c, 1) +mmctx_data(0x001000, 3) +mmctx_data(0x001014, 1) +nvc0_gpc_mmio_tail: +mmctx_data(0x000c6c, 1); +nvc1_gpc_mmio_tail: + +// TPC mmio lists +nvc0_tpc_mmio_head: +mmctx_data(0x000018, 1) +mmctx_data(0x00003c, 1) +mmctx_data(0x000048, 1) +mmctx_data(0x000064, 1) +mmctx_data(0x000088, 1) +mmctx_data(0x000200, 6) +mmctx_data(0x00021c, 2) +mmctx_data(0x000300, 6) +mmctx_data(0x0003d0, 1) +mmctx_data(0x0003e0, 2) +mmctx_data(0x000400, 3) +mmctx_data(0x000420, 1) +mmctx_data(0x0004b0, 1) +mmctx_data(0x0004e8, 1) +mmctx_data(0x0004f4, 1) +mmctx_data(0x000520, 2) +mmctx_data(0x000604, 4) +mmctx_data(0x000644, 20) +mmctx_data(0x000698, 1) +mmctx_data(0x000750, 2) +nvc0_tpc_mmio_tail: +mmctx_data(0x000758, 1) +mmctx_data(0x0002c4, 1) +mmctx_data(0x0004bc, 1) +mmctx_data(0x0006e0, 1) +nvc3_tpc_mmio_tail: +mmctx_data(0x000544, 1) +nvc1_tpc_mmio_tail: + + +.section nvc0_grgpc_code +bra init +define(`include_code') +include(`nvc0_graph.fuc') + +// reports an exception to the host +// +// In: $r15 error code (see nvc0_graph.fuc) +// +error: + push $r14 + mov $r14 -0x67ec // 0x9814 + sethi $r14 0x400000 + call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code + add b32 $r14 0x41c + mov $r15 1 + call nv_wr32 // HUB_CTXCTL_INTR_UP_SET + pop $r14 + ret + +// GPC fuc initialisation, executed by triggering ucode start, will +// fall through to main loop after completion. +// +// Input: +// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... 
sigh) +// CC_SCRATCH[1]: context base +// +// Output: +// CC_SCRATCH[0]: +// 31:31: set to signal completion +// CC_SCRATCH[1]: +// 31:0: GPC context size +// +init: + clear b32 $r0 + mov $sp $r0 + + // enable fifo access + mov $r1 0x1200 + mov $r2 2 + iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE + + // setup i0 handler, and route all interrupts to it + mov $r1 ih + mov $iv0 $r1 + mov $r1 0x400 + iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH + + // enable fifo interrupt + mov $r2 4 + iowr I[$r1 + 0x000] $r2 // INTR_EN_SET + + // enable interrupts + bset $flags ie0 + + // figure out which GPC we are, and how many TPCs we have + mov $r1 0x608 + shl b32 $r1 6 + iord $r2 I[$r1 + 0x000] // UNITS + mov $r3 1 + and $r2 0x1f + shl b32 $r3 $r2 + sub b32 $r3 1 + st b32 D[$r0 + tpc_count] $r2 + st b32 D[$r0 + tpc_mask] $r3 + add b32 $r1 0x400 + iord $r2 I[$r1 + 0x000] // MYINDEX + st b32 D[$r0 + gpc_id] $r2 + + // find context data for this chipset + mov $r2 0x800 + shl b32 $r2 6 + iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] + mov $r1 chipsets - 12 + init_find_chipset: + add b32 $r1 12 + ld b32 $r3 D[$r1 + 0x00] + cmpu b32 $r3 $r2 + bra e init_context + cmpu b32 $r3 0 + bra ne init_find_chipset + // unknown chipset + ret + + // initialise context base, and size tracking + init_context: + mov $r2 0x800 + shl b32 $r2 6 + iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base + clear b32 $r3 // track GPC context size here + + // set mmctx base addresses now so we don't have to do it later, + // they don't currently ever change + mov $r4 0x700 + shl b32 $r4 6 + shr b32 $r5 $r2 8 + iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE + iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE + + // calculate GPC mmio context size, store the chipset-specific + // mmio list pointers somewhere we can get at them later without + // re-parsing the chipset list + clear b32 $r14 + clear b32 $r15 + ld b16 $r14 D[$r1 + 4] + ld b16 $r15 D[$r1 + 6] + st b16 D[$r0 + gpc_mmio_list_head] $r14 + st b16 D[$r0 + gpc_mmio_list_tail] $r15 + call mmctx_size + add b32 $r2 $r15 + add b32 $r3 $r15 + + // calculate per-TPC mmio context size, store the list pointers + ld b16 $r14 D[$r1 + 8] + ld b16 $r15 D[$r1 + 10] + st b16 D[$r0 + tpc_mmio_list_head] $r14 + st b16 D[$r0 + tpc_mmio_list_tail] $r15 + call mmctx_size + ld b32 $r14 D[$r0 + tpc_count] + mulu $r14 $r15 + add b32 $r2 $r14 + add b32 $r3 $r14 + + // round up base/size to 256 byte boundary (for strand SWBASE) + add b32 $r4 0x1300 + shr b32 $r3 2 + iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!? 
+ shr b32 $r2 8 + shr b32 $r3 6 + add b32 $r2 1 + add b32 $r3 1 + shl b32 $r2 8 + shl b32 $r3 8 + + // calculate size of strand context data + mov b32 $r15 $r2 + call strand_ctx_init + add b32 $r3 $r15 + + // save context size, and tell HUB we're done + mov $r1 0x800 + shl b32 $r1 6 + iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size + add b32 $r1 0x800 + clear b32 $r2 + bset $r2 31 + iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 + +// Main program loop, very simple, sleeps until woken up by the interrupt +// handler, pulls a command from the queue and executes its handler +// +main: + bset $flags $p0 + sleep $p0 + mov $r13 cmd_queue + call queue_get + bra $p1 main + + // 0x0000-0x0003 are all context transfers + cmpu b32 $r14 0x04 + bra nc main_not_ctx_xfer + // fetch $flags and mask off $p1/$p2 + mov $r1 $flags + mov $r2 0x0006 + not b32 $r2 + and $r1 $r2 + // set $p1/$p2 according to transfer type + shl b32 $r14 1 + or $r1 $r14 + mov $flags $r1 + // transfer context data + call ctx_xfer + bra main + + main_not_ctx_xfer: + shl b32 $r15 $r14 16 + or $r15 E_BAD_COMMAND + call error + bra main + +// interrupt handler +ih: + push $r8 + mov $r8 $flags + push $r8 + push $r9 + push $r10 + push $r11 + push $r13 + push $r14 + push $r15 + + // incoming fifo command? + iord $r10 I[$r0 + 0x200] // INTR + and $r11 $r10 0x00000004 + bra e ih_no_fifo + // queue incoming fifo command for later processing + mov $r11 0x1900 + mov $r13 cmd_queue + iord $r14 I[$r11 + 0x100] // FIFO_CMD + iord $r15 I[$r11 + 0x000] // FIFO_DATA + call queue_put + add b32 $r11 0x400 + mov $r14 1 + iowr I[$r11 + 0x000] $r14 // FIFO_ACK + + // ack, and wake up main() + ih_no_fifo: + iowr I[$r0 + 0x100] $r10 // INTR_ACK + + pop $r15 + pop $r14 + pop $r13 + pop $r11 + pop $r10 + pop $r9 + pop $r8 + mov $flags $r8 + pop $r8 + bclr $flags $p0 + iret + +// Set this GPC's bit in HUB_BAR, used to signal completion of various +// activities to the HUB fuc +// +hub_barrier_done: + mov $r15 1 + ld b32 $r14 D[$r0 + gpc_id] + shl b32 $r15 $r14 + mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET + sethi $r14 0x400000 + call nv_wr32 + ret + +// Disables various things, waits a bit, and re-enables them.. +// +// Not sure how exactly this helps, perhaps "ENABLE" is not such a +// good description for the bits we turn off? Anyways, without this, +// funny things happen. 
+// +ctx_redswitch: + mov $r14 0x614 + shl b32 $r14 6 + mov $r15 0x020 + iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER + mov $r15 8 + ctx_redswitch_delay: + sub b32 $r15 1 + bra ne ctx_redswitch_delay + mov $r15 0xa20 + iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER + ret + +// Transfer GPC context data between GPU and storage area +// +// In: $r15 context base address +// $p1 clear on save, set on load +// $p2 set if opposite direction done/will be done, so: +// on save it means: "a load will follow this save" +// on load it means: "a save preceeded this load" +// +ctx_xfer: + // set context base address + mov $r1 0xa04 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r15// MEM_BASE + bra not $p1 ctx_xfer_not_load + call ctx_redswitch + ctx_xfer_not_load: + + // strands + mov $r1 0x4afc + sethi $r1 0x20000 + mov $r2 0xc + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c + call strand_wait + mov $r2 0x47fc + sethi $r2 0x20000 + iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 + xbit $r2 $flags $p1 + add b32 $r2 3 + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) + + // mmio context + xbit $r10 $flags $p1 // direction + or $r10 2 // first + mov $r11 0x0000 + sethi $r11 0x500000 + ld b32 $r12 D[$r0 + gpc_id] + shl b32 $r12 15 + add b32 $r11 $r12 // base = NV_PGRAPH_GPCn + ld b32 $r12 D[$r0 + gpc_mmio_list_head] + ld b32 $r13 D[$r0 + gpc_mmio_list_tail] + mov $r14 0 // not multi + call mmctx_xfer + + // per-TPC mmio context + xbit $r10 $flags $p1 // direction + or $r10 4 // last + mov $r11 0x4000 + sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0 + ld b32 $r12 D[$r0 + gpc_id] + shl b32 $r12 15 + add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0 + ld b32 $r12 D[$r0 + tpc_mmio_list_head] + ld b32 $r13 D[$r0 + tpc_mmio_list_tail] + ld b32 $r15 D[$r0 + tpc_mask] + mov $r14 0x800 // stride = 0x800 + call mmctx_xfer + + // wait for strands to finish + call strand_wait + + // if load, or a save without a load following, do some + // unknown stuff that's done after finishing a block of + // strand commands + bra $p1 ctx_xfer_post + bra not $p2 ctx_xfer_done + ctx_xfer_post: + mov $r1 0x4afc + sethi $r1 0x20000 + mov $r2 0xd + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d + call strand_wait + + // mark completion in HUB's barrier + ctx_xfer_done: + call hub_barrier_done + ret + +.align 256 diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h new file mode 100644 index 000000000000..1896c898f5ba --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h @@ -0,0 +1,483 @@ +uint32_t nvc0_grgpc_data[] = { + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x000000c0, + 0x011000b0, + 0x01640114, + 0x000000c1, + 0x011400b0, + 0x01780114, + 0x000000c3, + 0x011000b0, + 0x01740114, + 0x000000c4, + 0x011000b0, + 0x01740114, + 0x000000c8, + 0x011000b0, + 0x01640114, + 0x000000ce, + 0x011000b0, + 0x01740114, + 0x00000000, + 0x00000380, + 0x14000400, + 0x20000450, + 0x00000600, + 0x00000684, + 0x10000700, + 0x00000800, + 0x08000808, + 0x00000828, + 0x00000830, + 0x000008d8, + 0x000008e0, + 0x140008e8, + 0x0000091c, + 0x08000924, + 0x00000b00, + 0x14000b08, + 0x00000bb8, + 0x00000c08, + 0x1c000c10, + 0x00000c80, + 0x00000c8c, + 0x08001000, + 0x00001014, + 0x00000c6c, + 
0x00000018, + 0x0000003c, + 0x00000048, + 0x00000064, + 0x00000088, + 0x14000200, + 0x0400021c, + 0x14000300, + 0x000003d0, + 0x040003e0, + 0x08000400, + 0x00000420, + 0x000004b0, + 0x000004e8, + 0x000004f4, + 0x04000520, + 0x0c000604, + 0x4c000644, + 0x00000698, + 0x04000750, + 0x00000758, + 0x000002c4, + 0x000004bc, + 0x000006e0, + 0x00000544, +}; + +uint32_t nvc0_grgpc_code[] = { + 0x03060ef5, + 0x9800d898, + 0x86f001d9, + 0x0489b808, + 0xf00c1bf4, + 0x21f502f7, + 0x00f802ec, + 0xb60798c4, + 0x8dbb0384, + 0x0880b600, + 0x80008e80, + 0x90b6018f, + 0x0f94f001, + 0xf801d980, + 0x0131f400, + 0x9800d898, + 0x89b801d9, + 0x210bf404, + 0xb60789c4, + 0x9dbb0394, + 0x0890b600, + 0x98009e98, + 0x80b6019f, + 0x0f84f001, + 0xf400d880, + 0x00f80132, + 0x0728b7f1, + 0xb906b4b6, + 0xc9f002ec, + 0x00bcd01f, + 0xc800bccf, + 0x1bf41fcc, + 0x06a7f0fa, + 0x010321f5, + 0xf840bfcf, + 0x28b7f100, + 0x06b4b607, + 0xb980bfd0, + 0xc9f002ec, + 0x1ec9f01f, + 0xcf00bcd0, + 0xccc800bc, + 0xfa1bf41f, + 0x87f100f8, + 0x84b60430, + 0x1ff9f006, + 0xf8008fd0, + 0x3087f100, + 0x0684b604, + 0xf80080d0, + 0x3c87f100, + 0x0684b608, + 0x99f094bd, + 0x0089d000, + 0x081887f1, + 0xd00684b6, + 0x87f1008a, + 0x84b60400, + 0x0088cf06, + 0xf4888aff, + 0x87f1f31b, + 0x84b6085c, + 0xf094bd06, + 0x89d00099, + 0xf100f800, + 0xb6083c87, + 0x94bd0684, + 0xd00099f0, + 0x87f10089, + 0x84b60818, + 0x008ad006, + 0x040087f1, + 0xcf0684b6, + 0x8aff0088, + 0xf30bf488, + 0x085c87f1, + 0xbd0684b6, + 0x0099f094, + 0xf80089d0, + 0x9894bd00, + 0x85b600e8, + 0x0180b61a, + 0xbb0284b6, + 0xe0b60098, + 0x04efb804, + 0xb9eb1bf4, + 0x00f8029f, + 0x083c87f1, + 0xbd0684b6, + 0x0199f094, + 0xf10089d0, + 0xb6071087, + 0x94bd0684, + 0xf405bbfd, + 0x8bd0090b, + 0x0099f000, + 0xf405eefd, + 0x8ed00c0b, + 0xc08fd080, + 0xb70199f0, + 0xc8010080, + 0xb4b600ab, + 0x0cb9f010, + 0xb601aec8, + 0xbefd11e4, + 0x008bd005, + 0xf0008ecf, + 0x0bf41fe4, + 0x00ce98fa, + 0xd005e9fd, + 0xc0b6c08e, + 0x04cdb804, + 0xc8e81bf4, + 0x1bf402ab, + 0x008bcf18, + 0xb01fb4f0, + 0x1bf410b4, + 0x02a7f0f7, + 0xf4c921f4, + 0xabc81b0e, + 0x10b4b600, + 0xf00cb9f0, + 0x8bd012b9, + 0x008bcf00, + 0xf412bbc8, + 0x87f1fa1b, + 0x84b6085c, + 0xf094bd06, + 0x89d00199, + 0xf900f800, + 0x02a7f0a0, + 0xfcc921f4, + 0xf100f8a0, + 0xf04afc87, + 0x97f00283, + 0x0089d00c, + 0x020721f5, + 0x87f100f8, + 0x83f04afc, + 0x0d97f002, + 0xf50089d0, + 0xf8020721, + 0xfca7f100, + 0x02a3f04f, + 0x0500aba2, + 0xd00fc7f0, + 0xc7f000ac, + 0x00bcd00b, + 0x020721f5, + 0xf000aed0, + 0xbcd00ac7, + 0x0721f500, + 0xf100f802, + 0xb6083c87, + 0x94bd0684, + 0xd00399f0, + 0x21f50089, + 0xe7f00213, + 0x3921f503, + 0xfca7f102, + 0x02a3f046, + 0x0400aba0, + 0xf040a0d0, + 0xbcd001c7, + 0x0721f500, + 0x010c9202, + 0xf000acd0, + 0xbcd002c7, + 0x0721f500, + 0x2621f502, + 0x8087f102, + 0x0684b608, + 0xb70089cf, + 0x95220080, + 0x8ed008fe, + 0x408ed000, + 0xb6808acf, + 0xa0b606a5, + 0x00eabb01, + 0xb60480b6, + 0x1bf40192, + 0x08e4b6e8, + 0xf1f2efbc, + 0xb6085c87, + 0x94bd0684, + 0xd00399f0, + 0x00f80089, + 0xe7f1e0f9, + 0xe3f09814, + 0x8d21f440, + 0x041ce0b7, + 0xf401f7f0, + 0xe0fc8d21, + 0x04bd00f8, + 0xf10004fe, + 0xf0120017, + 0x12d00227, + 0x3e17f100, + 0x0010fe04, + 0x040017f1, + 0xf0c010d0, + 0x12d00427, + 0x1031f400, + 0x060817f1, + 0xcf0614b6, + 0x37f00012, + 0x1f24f001, + 0xb60432bb, + 0x02800132, + 0x04038003, + 0x040010b7, + 0x800012cf, + 0x27f10002, + 0x24b60800, + 0x0022cf06, + 0xb65817f0, + 0x13980c10, + 0x0432b800, + 0xb00b0bf4, + 0x1bf40034, + 0xf100f8f1, + 0xb6080027, + 0x22cf0624, + 0xf134bd40, + 0xb6070047, + 0x25950644, + 
0x0045d008, + 0xbd4045d0, + 0x58f4bde4, + 0x1f58021e, + 0x020e4003, + 0xf5040f40, + 0xbb013d21, + 0x3fbb002f, + 0x041e5800, + 0x40051f58, + 0x0f400a0e, + 0x3d21f50c, + 0x030e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0040b700, + 0x0235b613, + 0xb60043d0, + 0x35b60825, + 0x0120b606, + 0xb60130b6, + 0x34b60824, + 0x022fb908, + 0x026321f5, + 0xf1003fbb, + 0xb6080017, + 0x13d00614, + 0x0010b740, + 0xf024bd08, + 0x12d01f29, + 0x0031f400, + 0xf00028f4, + 0x21f41cd7, + 0xf401f439, + 0xf404e4b0, + 0x81fe1e18, + 0x0627f001, + 0x12fd20bd, + 0x01e4b604, + 0xfe051efd, + 0x21f50018, + 0x0ef404c3, + 0x10ef94d3, + 0xf501f5f0, + 0xf402ec21, + 0x80f9c60e, + 0xf90188fe, + 0xf990f980, + 0xf9b0f9a0, + 0xf9e0f9d0, + 0x800acff0, + 0xf404abc4, + 0xb7f11d0b, + 0xd7f01900, + 0x40becf1c, + 0xf400bfcf, + 0xb0b70421, + 0xe7f00400, + 0x00bed001, + 0xfc400ad0, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, + 0xf001f800, + 0x0e9801f7, + 0x04febb00, + 0x9418e7f1, + 0xf440e3f0, + 0x00f88d21, + 0x0614e7f1, + 0xf006e4b6, + 0xefd020f7, + 0x08f7f000, + 0xf401f2b6, + 0xf7f1fd1b, + 0xefd00a20, + 0xf100f800, + 0xb60a0417, + 0x1fd00614, + 0x0711f400, + 0x04a421f5, + 0x4afc17f1, + 0xf00213f0, + 0x12d00c27, + 0x0721f500, + 0xfc27f102, + 0x0223f047, + 0xf00020d0, + 0x20b6012c, + 0x0012d003, + 0xf001acf0, + 0xb7f002a5, + 0x50b3f000, + 0xb6000c98, + 0xbcbb0fc4, + 0x010c9800, + 0xf0020d98, + 0x21f500e7, + 0xacf0015c, + 0x04a5f001, + 0x4000b7f1, + 0x9850b3f0, + 0xc4b6000c, + 0x00bcbb0f, + 0x98050c98, + 0x0f98060d, + 0x00e7f104, + 0x5c21f508, + 0x0721f501, + 0x0601f402, + 0xf11412f4, + 0xf04afc17, + 0x27f00213, + 0x0012d00d, + 0x020721f5, + 0x048f21f5, + 0x000000f8, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +}; diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc new file mode 100644 index 000000000000..a1a599124cf4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc @@ -0,0 +1,808 @@ +/* fuc microcode for nvc0 PGRAPH/HUB + * + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +/* To build: + * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h + */ + +.section nvc0_grhub_data +include(`nvc0_graph.fuc') +gpc_count: .b32 0 +rop_count: .b32 0 +cmd_queue: queue_init +hub_mmio_list_head: .b32 0 +hub_mmio_list_tail: .b32 0 + +ctx_current: .b32 0 + +chipsets: +.b8 0xc0 0 0 0 +.b16 nvc0_hub_mmio_head +.b16 nvc0_hub_mmio_tail +.b8 0xc1 0 0 0 +.b16 nvc0_hub_mmio_head +.b16 nvc1_hub_mmio_tail +.b8 0xc3 0 0 0 +.b16 nvc0_hub_mmio_head +.b16 nvc0_hub_mmio_tail +.b8 0xc4 0 0 0 +.b16 nvc0_hub_mmio_head +.b16 nvc0_hub_mmio_tail +.b8 0xc8 0 0 0 +.b16 nvc0_hub_mmio_head +.b16 nvc0_hub_mmio_tail +.b8 0xce 0 0 0 +.b16 nvc0_hub_mmio_head +.b16 nvc0_hub_mmio_tail +.b8 0 0 0 0 + +nvc0_hub_mmio_head: +mmctx_data(0x17e91c, 2) +mmctx_data(0x400204, 2) +mmctx_data(0x404004, 11) +mmctx_data(0x404044, 1) +mmctx_data(0x404094, 14) +mmctx_data(0x4040d0, 7) +mmctx_data(0x4040f8, 1) +mmctx_data(0x404130, 3) +mmctx_data(0x404150, 3) +mmctx_data(0x404164, 2) +mmctx_data(0x404174, 3) +mmctx_data(0x404200, 8) +mmctx_data(0x404404, 14) +mmctx_data(0x404460, 4) +mmctx_data(0x404480, 1) +mmctx_data(0x404498, 1) +mmctx_data(0x404604, 4) +mmctx_data(0x404618, 32) +mmctx_data(0x404698, 21) +mmctx_data(0x4046f0, 2) +mmctx_data(0x404700, 22) +mmctx_data(0x405800, 1) +mmctx_data(0x405830, 3) +mmctx_data(0x405854, 1) +mmctx_data(0x405870, 4) +mmctx_data(0x405a00, 2) +mmctx_data(0x405a18, 1) +mmctx_data(0x406020, 1) +mmctx_data(0x406028, 4) +mmctx_data(0x4064a8, 2) +mmctx_data(0x4064b4, 2) +mmctx_data(0x407804, 1) +mmctx_data(0x40780c, 6) +mmctx_data(0x4078bc, 1) +mmctx_data(0x408000, 7) +mmctx_data(0x408064, 1) +mmctx_data(0x408800, 3) +mmctx_data(0x408900, 4) +mmctx_data(0x408980, 1) +nvc0_hub_mmio_tail: +mmctx_data(0x4064c0, 2) +nvc1_hub_mmio_tail: + +.align 256 +chan_data: +chan_mmio_count: .b32 0 +chan_mmio_address: .b32 0 + +.align 256 +xfer_data: .b32 0 + +.section nvc0_grhub_code +bra init +define(`include_code') +include(`nvc0_graph.fuc') + +// reports an exception to the host +// +// In: $r15 error code (see nvc0_graph.fuc) +// +error: + push $r14 + mov $r14 0x814 + shl b32 $r14 6 + iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code + mov $r14 0xc1c + shl b32 $r14 6 + mov $r15 1 + iowr I[$r14 + 0x000] $r15 // INTR_UP_SET + pop $r14 + ret + +// HUB fuc initialisation, executed by triggering ucode start, will +// fall through to main loop after completion. +// +// Input: +// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh) +// +// Output: +// CC_SCRATCH[0]: +// 31:31: set to signal completion +// CC_SCRATCH[1]: +// 31:0: total PGRAPH context size +// +init: + clear b32 $r0 + mov $sp $r0 + mov $xdbase $r0 + + // enable fifo access + mov $r1 0x1200 + mov $r2 2 + iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE + + // setup i0 handler, and route all interrupts to it + mov $r1 ih + mov $iv0 $r1 + mov $r1 0x400 + iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH + + // route HUB_CHANNEL_SWITCH to fuc interrupt 8 + mov $r3 0x404 + shl b32 $r3 6 + mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8 + iowr I[$r3 + 0x000] $r2 + + // not sure what these are, route them because NVIDIA does, and + // the IRQ handler will signal the host if we ever get one.. 
we + // may find out if/why we need to handle these if so.. + // + mov $r2 0x2004 + iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9 + mov $r2 0x200b + iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10 + mov $r2 0x200c + iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15 + + // enable all INTR_UP interrupts + mov $r2 0xc24 + shl b32 $r2 6 + not b32 $r3 $r0 + iowr I[$r2] $r3 + + // enable fifo, ctxsw, 9, 10, 15 interrupts + mov $r2 -0x78fc // 0x8704 + sethi $r2 0 + iowr I[$r1 + 0x000] $r2 // INTR_EN_SET + + // fifo level triggered, rest edge + sub b32 $r1 0x100 + mov $r2 4 + iowr I[$r1] $r2 + + // enable interrupts + bset $flags ie0 + + // fetch enabled GPC/ROP counts + mov $r14 -0x69fc // 0x409604 + sethi $r14 0x400000 + call nv_rd32 + extr $r1 $r15 16:20 + st b32 D[$r0 + rop_count] $r1 + and $r15 0x1f + st b32 D[$r0 + gpc_count] $r15 + + // set BAR_REQMASK to GPC mask + mov $r1 1 + shl b32 $r1 $r15 + sub b32 $r1 1 + mov $r2 0x40c + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r1 + iowr I[$r2 + 0x100] $r1 + + // find context data for this chipset + mov $r2 0x800 + shl b32 $r2 6 + iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] + mov $r15 chipsets - 8 + init_find_chipset: + add b32 $r15 8 + ld b32 $r3 D[$r15 + 0x00] + cmpu b32 $r3 $r2 + bra e init_context + cmpu b32 $r3 0 + bra ne init_find_chipset + // unknown chipset + ret + + // context size calculation, reserve first 256 bytes for use by fuc + init_context: + mov $r1 256 + + // calculate size of mmio context data + ld b16 $r14 D[$r15 + 4] + ld b16 $r15 D[$r15 + 6] + sethi $r14 0 + st b32 D[$r0 + hub_mmio_list_head] $r14 + st b32 D[$r0 + hub_mmio_list_tail] $r15 + call mmctx_size + + // set mmctx base addresses now so we don't have to do it later, + // they don't (currently) ever change + mov $r3 0x700 + shl b32 $r3 6 + shr b32 $r4 $r1 8 + iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE + iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE + add b32 $r3 0x1300 + add b32 $r1 $r15 + shr b32 $r15 2 + iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!? + + // strands, base offset needs to be aligned to 256 bytes + shr b32 $r1 8 + add b32 $r1 1 + shl b32 $r1 8 + mov b32 $r15 $r1 + call strand_ctx_init + add b32 $r1 $r15 + + // initialise each GPC in sequence by passing in the offset of its + // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which + // has previously been uploaded by the host) running. + // + // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31 + // when it has completed, and return the size of its context data + // in GPCn_CC_SCRATCH[1] + // + ld b32 $r3 D[$r0 + gpc_count] + mov $r4 0x2000 + sethi $r4 0x500000 + init_gpc: + // setup, and start GPC ucode running + add b32 $r14 $r4 0x804 + mov b32 $r15 $r1 + call nv_wr32 // CC_SCRATCH[1] = ctx offset + add b32 $r14 $r4 0x800 + mov b32 $r15 $r2 + call nv_wr32 // CC_SCRATCH[0] = chipset + add b32 $r14 $r4 0x10c + clear b32 $r15 + call nv_wr32 + add b32 $r14 $r4 0x104 + call nv_wr32 // ENTRY + add b32 $r14 $r4 0x100 + mov $r15 2 // CTRL_START_TRIGGER + call nv_wr32 // CTRL + + // wait for it to complete, and adjust context size + add b32 $r14 $r4 0x800 + init_gpc_wait: + call nv_rd32 + xbit $r15 $r15 31 + bra e init_gpc_wait + add b32 $r14 $r4 0x804 + call nv_rd32 + add b32 $r1 $r15 + + // next! 
+ add b32 $r4 0x8000 + sub b32 $r3 1 + bra ne init_gpc + + // save context size, and tell host we're ready + mov $r2 0x800 + shl b32 $r2 6 + iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size + add b32 $r2 0x800 + clear b32 $r1 + bset $r1 31 + iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000 + +// Main program loop, very simple, sleeps until woken up by the interrupt +// handler, pulls a command from the queue and executes its handler +// +main: + // sleep until we have something to do + bset $flags $p0 + sleep $p0 + mov $r13 cmd_queue + call queue_get + bra $p1 main + + // context switch, requested by GPU? + cmpu b32 $r14 0x4001 + bra ne main_not_ctx_switch + trace_set(T_AUTO) + mov $r1 0xb00 + shl b32 $r1 6 + iord $r2 I[$r1 + 0x100] // CHAN_NEXT + iord $r1 I[$r1 + 0x000] // CHAN_CUR + + xbit $r3 $r1 31 + bra e chsw_no_prev + xbit $r3 $r2 31 + bra e chsw_prev_no_next + push $r2 + mov b32 $r2 $r1 + trace_set(T_SAVE) + bclr $flags $p1 + bset $flags $p2 + call ctx_xfer + trace_clr(T_SAVE); + pop $r2 + trace_set(T_LOAD); + bset $flags $p1 + call ctx_xfer + trace_clr(T_LOAD); + bra chsw_done + chsw_prev_no_next: + push $r2 + mov b32 $r2 $r1 + bclr $flags $p1 + bclr $flags $p2 + call ctx_xfer + pop $r2 + mov $r1 0xb00 + shl b32 $r1 6 + iowr I[$r1] $r2 + bra chsw_done + chsw_no_prev: + xbit $r3 $r2 31 + bra e chsw_done + bset $flags $p1 + bclr $flags $p2 + call ctx_xfer + + // ack the context switch request + chsw_done: + mov $r1 0xb0c + shl b32 $r1 6 + mov $r2 1 + iowr I[$r1 + 0x000] $r2 // 0x409b0c + trace_clr(T_AUTO) + bra main + + // request to set current channel? (*not* a context switch) + main_not_ctx_switch: + cmpu b32 $r14 0x0001 + bra ne main_not_ctx_chan + mov b32 $r2 $r15 + call ctx_chan + bra main_done + + // request to store current channel context? + main_not_ctx_chan: + cmpu b32 $r14 0x0002 + bra ne main_not_ctx_save + trace_set(T_SAVE) + bclr $flags $p1 + bclr $flags $p2 + call ctx_xfer + trace_clr(T_SAVE) + bra main_done + + main_not_ctx_save: + shl b32 $r15 $r14 16 + or $r15 E_BAD_COMMAND + call error + bra main + + main_done: + mov $r1 0x820 + shl b32 $r1 6 + clear b32 $r2 + bset $r2 31 + iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 + bra main + +// interrupt handler +ih: + push $r8 + mov $r8 $flags + push $r8 + push $r9 + push $r10 + push $r11 + push $r13 + push $r14 + push $r15 + + // incoming fifo command? + iord $r10 I[$r0 + 0x200] // INTR + and $r11 $r10 0x00000004 + bra e ih_no_fifo + // queue incoming fifo command for later processing + mov $r11 0x1900 + mov $r13 cmd_queue + iord $r14 I[$r11 + 0x100] // FIFO_CMD + iord $r15 I[$r11 + 0x000] // FIFO_DATA + call queue_put + add b32 $r11 0x400 + mov $r14 1 + iowr I[$r11 + 0x000] $r14 // FIFO_ACK + + // context switch request? 
+ ih_no_fifo: + and $r11 $r10 0x00000100 + bra e ih_no_ctxsw + // enqueue a context switch for later processing + mov $r13 cmd_queue + mov $r14 0x4001 + call queue_put + + // anything we didn't handle, bring it to the host's attention + ih_no_ctxsw: + mov $r11 0x104 + not b32 $r11 + and $r11 $r10 $r11 + bra e ih_no_other + mov $r10 0xc1c + shl b32 $r10 6 + iowr I[$r10] $r11 // INTR_UP_SET + + // ack, and wake up main() + ih_no_other: + iowr I[$r0 + 0x100] $r10 // INTR_ACK + + pop $r15 + pop $r14 + pop $r13 + pop $r11 + pop $r10 + pop $r9 + pop $r8 + mov $flags $r8 + pop $r8 + bclr $flags $p0 + iret + +// Not real sure, but, MEM_CMD 7 will hang forever if this isn't done +ctx_4160s: + mov $r14 0x4160 + sethi $r14 0x400000 + mov $r15 1 + call nv_wr32 + ctx_4160s_wait: + call nv_rd32 + xbit $r15 $r15 4 + bra e ctx_4160s_wait + ret + +// Without clearing again at end of xfer, some things cause PGRAPH +// to hang with STATUS=0x00000007 until it's cleared.. fbcon can +// still function with it set however... +ctx_4160c: + mov $r14 0x4160 + sethi $r14 0x400000 + clear b32 $r15 + call nv_wr32 + ret + +// Again, not real sure +// +// In: $r15 value to set 0x404170 to +// +ctx_4170s: + mov $r14 0x4170 + sethi $r14 0x400000 + or $r15 0x10 + call nv_wr32 + ret + +// Waits for a ctx_4170s() call to complete +// +ctx_4170w: + mov $r14 0x4170 + sethi $r14 0x400000 + call nv_rd32 + and $r15 0x10 + bra ne ctx_4170w + ret + +// Disables various things, waits a bit, and re-enables them.. +// +// Not sure how exactly this helps, perhaps "ENABLE" is not such a +// good description for the bits we turn off? Anyways, without this, +// funny things happen. +// +ctx_redswitch: + mov $r14 0x614 + shl b32 $r14 6 + mov $r15 0x270 + iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL + mov $r15 8 + ctx_redswitch_delay: + sub b32 $r15 1 + bra ne ctx_redswitch_delay + mov $r15 0x770 + iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL + ret + +// Not a clue what this is for, except that unless the value is 0x10, the +// strand context is saved (and presumably restored) incorrectly.. +// +// In: $r15 value to set to (0x00/0x10 are used) +// +ctx_86c: + mov $r14 0x86c + shl b32 $r14 6 + iowr I[$r14] $r15 // HUB(0x86c) = val + mov $r14 -0x75ec + sethi $r14 0x400000 + call nv_wr32 // ROP(0xa14) = val + mov $r14 -0x5794 + sethi $r14 0x410000 + call nv_wr32 // GPC(0x86c) = val + ret + +// ctx_load - load's a channel's ctxctl data, and selects its vm +// +// In: $r2 channel address +// +ctx_load: + trace_set(T_CHAN) + + // switch to channel, somewhat magic in parts.. 
+ mov $r10 12 // DONE_UNK12 + call wait_donez + mov $r1 0xa24 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r0 // 0x409a24 + mov $r3 0xb00 + shl b32 $r3 6 + iowr I[$r3 + 0x100] $r2 // CHAN_NEXT + mov $r1 0xa0c + shl b32 $r1 6 + mov $r4 7 + iowr I[$r1 + 0x000] $r2 // MEM_CHAN + iowr I[$r1 + 0x100] $r4 // MEM_CMD + ctx_chan_wait_0: + iord $r4 I[$r1 + 0x100] + and $r4 0x1f + bra ne ctx_chan_wait_0 + iowr I[$r3 + 0x000] $r2 // CHAN_CUR + + // load channel header, fetch PGRAPH context pointer + mov $xtargets $r0 + bclr $r2 31 + shl b32 $r2 4 + add b32 $r2 2 + + trace_set(T_LCHAN) + mov $r1 0xa04 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r2 // MEM_BASE + mov $r1 0xa20 + shl b32 $r1 6 + mov $r2 0x0002 + sethi $r2 0x80000000 + iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram + mov $r1 0x10 // chan + 0x0210 + mov $r2 xfer_data + sethi $r2 0x00020000 // 16 bytes + xdld $r1 $r2 + xdwait + trace_clr(T_LCHAN) + + // update current context + ld b32 $r1 D[$r0 + xfer_data + 4] + shl b32 $r1 24 + ld b32 $r2 D[$r0 + xfer_data + 0] + shr b32 $r2 8 + or $r1 $r2 + st b32 D[$r0 + ctx_current] $r1 + + // set transfer base to start of context, and fetch context header + trace_set(T_LCTXH) + mov $r2 0xa04 + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r1 // MEM_BASE + mov $r2 1 + mov $r1 0xa20 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm + mov $r1 chan_data + sethi $r1 0x00060000 // 256 bytes + xdld $r0 $r1 + xdwait + trace_clr(T_LCTXH) + + trace_clr(T_CHAN) + ret + +// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as +// the active channel for ctxctl, but not actually transfer +// any context data. intended for use only during initial +// context construction. +// +// In: $r2 channel address +// +ctx_chan: + call ctx_4160s + call ctx_load + mov $r10 12 // DONE_UNK12 + call wait_donez + mov $r1 0xa10 + shl b32 $r1 6 + mov $r2 5 + iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???) + ctx_chan_wait: + iord $r2 I[$r1 + 0x000] + or $r2 $r2 + bra ne ctx_chan_wait + call ctx_4160c + ret + +// Execute per-context state overrides list +// +// Only executed on the first load of a channel. Might want to look into +// removing this and having the host directly modify the channel's context +// to change this state... The nouveau DRM already builds this list as +// it's definitely needed for NVIDIA's, so we may as well use it for now +// +// Input: $r1 mmio list length +// +ctx_mmio_exec: + // set transfer base to be the mmio list + ld b32 $r3 D[$r0 + chan_mmio_address] + mov $r2 0xa04 + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r3 // MEM_BASE + + clear b32 $r3 + ctx_mmio_loop: + // fetch next 256 bytes of mmio list if necessary + and $r4 $r3 0xff + bra ne ctx_mmio_pull + mov $r5 xfer_data + sethi $r5 0x00060000 // 256 bytes + xdld $r3 $r5 + xdwait + + // execute a single list entry + ctx_mmio_pull: + ld b32 $r14 D[$r4 + xfer_data + 0x00] + ld b32 $r15 D[$r4 + xfer_data + 0x04] + call nv_wr32 + + // next! 
+ add b32 $r3 8 + sub b32 $r1 1 + bra ne ctx_mmio_loop + + // set transfer base back to the current context + ctx_mmio_done: + ld b32 $r3 D[$r0 + ctx_current] + iowr I[$r2 + 0x000] $r3 // MEM_BASE + + // disable the mmio list now, we don't need/want to execute it again + st b32 D[$r0 + chan_mmio_count] $r0 + mov $r1 chan_data + sethi $r1 0x00060000 // 256 bytes + xdst $r0 $r1 + xdwait + ret + +// Transfer HUB context data between GPU and storage area +// +// In: $r2 channel address +// $p1 clear on save, set on load +// $p2 set if opposite direction done/will be done, so: +// on save it means: "a load will follow this save" +// on load it means: "a save preceeded this load" +// +ctx_xfer: + bra not $p1 ctx_xfer_pre + bra $p2 ctx_xfer_pre_load + ctx_xfer_pre: + mov $r15 0x10 + call ctx_86c + call ctx_4160s + bra not $p1 ctx_xfer_exec + + ctx_xfer_pre_load: + mov $r15 2 + call ctx_4170s + call ctx_4170w + call ctx_redswitch + clear b32 $r15 + call ctx_4170s + call ctx_load + + // fetch context pointer, and initiate xfer on all GPCs + ctx_xfer_exec: + ld b32 $r1 D[$r0 + ctx_current] + mov $r2 0x414 + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset + mov $r14 -0x5b00 + sethi $r14 0x410000 + mov b32 $r15 $r1 + call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer + add b32 $r14 4 + xbit $r15 $flags $p1 + xbit $r2 $flags $p2 + shl b32 $r2 1 + or $r15 $r2 + call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type) + + // strands + mov $r1 0x4afc + sethi $r1 0x20000 + mov $r2 0xc + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c + call strand_wait + mov $r2 0x47fc + sethi $r2 0x20000 + iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 + xbit $r2 $flags $p1 + add b32 $r2 3 + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) + + // mmio context + xbit $r10 $flags $p1 // direction + or $r10 6 // first, last + mov $r11 0 // base = 0 + ld b32 $r12 D[$r0 + hub_mmio_list_head] + ld b32 $r13 D[$r0 + hub_mmio_list_tail] + mov $r14 0 // not multi + call mmctx_xfer + + // wait for GPCs to all complete + mov $r10 8 // DONE_BAR + call wait_doneo + + // wait for strand xfer to complete + call strand_wait + + // post-op + bra $p1 ctx_xfer_post + mov $r10 12 // DONE_UNK12 + call wait_donez + mov $r1 0xa10 + shl b32 $r1 6 + mov $r2 5 + iowr I[$r1] $r2 // MEM_CMD + ctx_xfer_post_save_wait: + iord $r2 I[$r1] + or $r2 $r2 + bra ne ctx_xfer_post_save_wait + + bra $p2 ctx_xfer_done + ctx_xfer_post: + mov $r15 2 + call ctx_4170s + clear b32 $r15 + call ctx_86c + call strand_post + call ctx_4170w + clear b32 $r15 + call ctx_4170s + + bra not $p1 ctx_xfer_no_post_mmio + ld b32 $r1 D[$r0 + chan_mmio_count] + or $r1 $r1 + bra e ctx_xfer_no_post_mmio + call ctx_mmio_exec + + ctx_xfer_no_post_mmio: + call ctx_4160c + + ctx_xfer_done: + ret + +.align 256 diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h new file mode 100644 index 000000000000..b3b541b6d044 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h @@ -0,0 +1,838 @@ +uint32_t nvc0_grhub_data[] = { + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x000000c0, + 0x012c0090, + 0x000000c1, + 0x01300090, + 0x000000c3, + 0x012c0090, + 0x000000c4, + 0x012c0090, + 0x000000c8, + 0x012c0090, + 0x000000ce, + 0x012c0090, + 0x00000000, + 
0x0417e91c, + 0x04400204, + 0x28404004, + 0x00404044, + 0x34404094, + 0x184040d0, + 0x004040f8, + 0x08404130, + 0x08404150, + 0x04404164, + 0x08404174, + 0x1c404200, + 0x34404404, + 0x0c404460, + 0x00404480, + 0x00404498, + 0x0c404604, + 0x7c404618, + 0x50404698, + 0x044046f0, + 0x54404700, + 0x00405800, + 0x08405830, + 0x00405854, + 0x0c405870, + 0x04405a00, + 0x00405a18, + 0x00406020, + 0x0c406028, + 0x044064a8, + 0x044064b4, + 0x00407804, + 0x1440780c, + 0x004078bc, + 0x18408000, + 0x00408064, + 0x08408800, + 0x0c408900, + 0x00408980, + 0x044064c0, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +}; + +uint32_t nvc0_grhub_code[] = { + 0x03090ef5, + 0x9800d898, + 0x86f001d9, + 0x0489b808, + 0xf00c1bf4, + 0x21f502f7, + 0x00f802ec, + 0xb60798c4, + 0x8dbb0384, + 0x0880b600, + 0x80008e80, + 0x90b6018f, + 0x0f94f001, + 0xf801d980, + 0x0131f400, + 0x9800d898, + 0x89b801d9, + 0x210bf404, + 0xb60789c4, + 0x9dbb0394, + 0x0890b600, + 0x98009e98, + 0x80b6019f, + 0x0f84f001, + 0xf400d880, + 0x00f80132, + 0x0728b7f1, + 0xb906b4b6, + 0xc9f002ec, + 0x00bcd01f, + 0xc800bccf, + 0x1bf41fcc, + 0x06a7f0fa, + 0x010321f5, + 0xf840bfcf, + 0x28b7f100, + 0x06b4b607, + 0xb980bfd0, + 0xc9f002ec, + 0x1ec9f01f, + 0xcf00bcd0, + 0xccc800bc, + 0xfa1bf41f, + 0x87f100f8, + 0x84b60430, + 0x1ff9f006, + 0xf8008fd0, + 0x3087f100, + 0x0684b604, + 0xf80080d0, + 0x3c87f100, + 0x0684b608, + 0x99f094bd, + 0x0089d000, + 0x081887f1, + 0xd00684b6, + 0x87f1008a, + 0x84b60400, + 0x0088cf06, + 0xf4888aff, + 0x87f1f31b, + 0x84b6085c, + 0xf094bd06, + 0x89d00099, + 0xf100f800, + 0xb6083c87, + 0x94bd0684, + 0xd00099f0, + 0x87f10089, + 0x84b60818, + 0x008ad006, + 0x040087f1, + 0xcf0684b6, + 0x8aff0088, + 0xf30bf488, + 0x085c87f1, + 0xbd0684b6, + 0x0099f094, + 0xf80089d0, + 0x9894bd00, + 0x85b600e8, + 0x0180b61a, + 0xbb0284b6, + 0xe0b60098, + 0x04efb804, + 0xb9eb1bf4, + 0x00f8029f, + 0x083c87f1, + 0xbd0684b6, + 0x0199f094, + 0xf10089d0, + 0xb6071087, + 0x94bd0684, + 0xf405bbfd, + 
0x8bd0090b, + 0x0099f000, + 0xf405eefd, + 0x8ed00c0b, + 0xc08fd080, + 0xb70199f0, + 0xc8010080, + 0xb4b600ab, + 0x0cb9f010, + 0xb601aec8, + 0xbefd11e4, + 0x008bd005, + 0xf0008ecf, + 0x0bf41fe4, + 0x00ce98fa, + 0xd005e9fd, + 0xc0b6c08e, + 0x04cdb804, + 0xc8e81bf4, + 0x1bf402ab, + 0x008bcf18, + 0xb01fb4f0, + 0x1bf410b4, + 0x02a7f0f7, + 0xf4c921f4, + 0xabc81b0e, + 0x10b4b600, + 0xf00cb9f0, + 0x8bd012b9, + 0x008bcf00, + 0xf412bbc8, + 0x87f1fa1b, + 0x84b6085c, + 0xf094bd06, + 0x89d00199, + 0xf900f800, + 0x02a7f0a0, + 0xfcc921f4, + 0xf100f8a0, + 0xf04afc87, + 0x97f00283, + 0x0089d00c, + 0x020721f5, + 0x87f100f8, + 0x83f04afc, + 0x0d97f002, + 0xf50089d0, + 0xf8020721, + 0xfca7f100, + 0x02a3f04f, + 0x0500aba2, + 0xd00fc7f0, + 0xc7f000ac, + 0x00bcd00b, + 0x020721f5, + 0xf000aed0, + 0xbcd00ac7, + 0x0721f500, + 0xf100f802, + 0xb6083c87, + 0x94bd0684, + 0xd00399f0, + 0x21f50089, + 0xe7f00213, + 0x3921f503, + 0xfca7f102, + 0x02a3f046, + 0x0400aba0, + 0xf040a0d0, + 0xbcd001c7, + 0x0721f500, + 0x010c9202, + 0xf000acd0, + 0xbcd002c7, + 0x0721f500, + 0x2621f502, + 0x8087f102, + 0x0684b608, + 0xb70089cf, + 0x95220080, + 0x8ed008fe, + 0x408ed000, + 0xb6808acf, + 0xa0b606a5, + 0x00eabb01, + 0xb60480b6, + 0x1bf40192, + 0x08e4b6e8, + 0xf1f2efbc, + 0xb6085c87, + 0x94bd0684, + 0xd00399f0, + 0x00f80089, + 0xe7f1e0f9, + 0xe4b60814, + 0x00efd006, + 0x0c1ce7f1, + 0xf006e4b6, + 0xefd001f7, + 0xf8e0fc00, + 0xfe04bd00, + 0x07fe0004, + 0x0017f100, + 0x0227f012, + 0xf10012d0, + 0xfe05b917, + 0x17f10010, + 0x10d00400, + 0x0437f1c0, + 0x0634b604, + 0x200327f1, + 0xf10032d0, + 0xd0200427, + 0x27f10132, + 0x32d0200b, + 0x0c27f102, + 0x0732d020, + 0x0c2427f1, + 0xb90624b6, + 0x23d00003, + 0x0427f100, + 0x0023f087, + 0xb70012d0, + 0xf0010012, + 0x12d00427, + 0x1031f400, + 0x9604e7f1, + 0xf440e3f0, + 0xf1c76821, + 0x01018090, + 0x801ff4f0, + 0x17f0000f, + 0x041fbb01, + 0xf10112b6, + 0xb6040c27, + 0x21d00624, + 0x4021d000, + 0x080027f1, + 0xcf0624b6, + 0xf7f00022, + 0x08f0b654, + 0xb800f398, + 0x0bf40432, + 0x0034b00b, + 0xf8f11bf4, + 0x0017f100, + 0x02fe5801, + 0xf003ff58, + 0x0e8000e3, + 0x150f8014, + 0x013d21f5, + 0x070037f1, + 0x950634b6, + 0x34d00814, + 0x4034d000, + 0x130030b7, + 0xb6001fbb, + 0x3fd002f5, + 0x0815b600, + 0xb60110b6, + 0x1fb90814, + 0x6321f502, + 0x001fbb02, + 0xf1000398, + 0xf0200047, + 0x4ea05043, + 0x1fb90804, + 0x8d21f402, + 0x08004ea0, + 0xf4022fb9, + 0x4ea08d21, + 0xf4bd010c, + 0xa08d21f4, + 0xf401044e, + 0x4ea08d21, + 0xf7f00100, + 0x8d21f402, + 0x08004ea0, + 0xc86821f4, + 0x0bf41fff, + 0x044ea0fa, + 0x6821f408, + 0xb7001fbb, + 0xb6800040, + 0x1bf40132, + 0x0027f1b4, + 0x0624b608, + 0xb74021d0, + 0xbd080020, + 0x1f19f014, + 0xf40021d0, + 0x28f40031, + 0x08d7f000, + 0xf43921f4, + 0xe4b1f401, + 0x1bf54001, + 0x87f100d1, + 0x84b6083c, + 0xf094bd06, + 0x89d00499, + 0x0017f100, + 0x0614b60b, + 0xcf4012cf, + 0x13c80011, + 0x7e0bf41f, + 0xf41f23c8, + 0x20f95a0b, + 0xf10212b9, + 0xb6083c87, + 0x94bd0684, + 0xd00799f0, + 0x32f40089, + 0x0231f401, + 0x082921f5, + 0x085c87f1, + 0xbd0684b6, + 0x0799f094, + 0xfc0089d0, + 0x3c87f120, + 0x0684b608, + 0x99f094bd, + 0x0089d006, + 0xf50131f4, + 0xf1082921, + 0xb6085c87, + 0x94bd0684, + 0xd00699f0, + 0x0ef40089, + 0xb920f931, + 0x32f40212, + 0x0232f401, + 0x082921f5, + 0x17f120fc, + 0x14b60b00, + 0x0012d006, + 0xc8130ef4, + 0x0bf41f23, + 0x0131f40d, + 0xf50232f4, + 0xf1082921, + 0xb60b0c17, + 0x27f00614, + 0x0012d001, + 0x085c87f1, + 0xbd0684b6, + 0x0499f094, + 0xf50089d0, + 0xb0ff200e, + 0x1bf401e4, + 0x02f2b90d, + 0x07b521f5, + 0xb0420ef4, + 0x1bf402e4, + 0x3c87f12e, 
+ 0x0684b608, + 0x99f094bd, + 0x0089d007, + 0xf40132f4, + 0x21f50232, + 0x87f10829, + 0x84b6085c, + 0xf094bd06, + 0x89d00799, + 0x110ef400, + 0xf010ef94, + 0x21f501f5, + 0x0ef502ec, + 0x17f1fed1, + 0x14b60820, + 0xf024bd06, + 0x12d01f29, + 0xbe0ef500, + 0xfe80f9fe, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xc4800acf, + 0x0bf404ab, + 0x00b7f11d, + 0x08d7f019, + 0xcf40becf, + 0x21f400bf, + 0x00b0b704, + 0x01e7f004, + 0xe400bed0, + 0xf40100ab, + 0xd7f00d0b, + 0x01e7f108, + 0x0421f440, + 0x0104b7f1, + 0xabffb0bd, + 0x0d0bf4b4, + 0x0c1ca7f1, + 0xd006a4b6, + 0x0ad000ab, + 0xfcf0fc40, + 0xfcd0fce0, + 0xfca0fcb0, + 0xfe80fc90, + 0x80fc0088, + 0xf80032f4, + 0x60e7f101, + 0x40e3f041, + 0xf401f7f0, + 0x21f48d21, + 0x04ffc868, + 0xf8fa0bf4, + 0x60e7f100, + 0x40e3f041, + 0x21f4f4bd, + 0xf100f88d, + 0xf04170e7, + 0xf5f040e3, + 0x8d21f410, + 0xe7f100f8, + 0xe3f04170, + 0x6821f440, + 0xf410f4f0, + 0x00f8f31b, + 0x0614e7f1, + 0xf106e4b6, + 0xd00270f7, + 0xf7f000ef, + 0x01f2b608, + 0xf1fd1bf4, + 0xd00770f7, + 0x00f800ef, + 0x086ce7f1, + 0xd006e4b6, + 0xe7f100ef, + 0xe3f08a14, + 0x8d21f440, + 0xa86ce7f1, + 0xf441e3f0, + 0x00f88d21, + 0x083c87f1, + 0xbd0684b6, + 0x0599f094, + 0xf00089d0, + 0x21f40ca7, + 0x2417f1c9, + 0x0614b60a, + 0xf10010d0, + 0xb60b0037, + 0x32d00634, + 0x0c17f140, + 0x0614b60a, + 0xd00747f0, + 0x14d00012, + 0x4014cf40, + 0xf41f44f0, + 0x32d0fa1b, + 0x000bfe00, + 0xb61f2af0, + 0x20b60424, + 0x3c87f102, + 0x0684b608, + 0x99f094bd, + 0x0089d008, + 0x0a0417f1, + 0xd00614b6, + 0x17f10012, + 0x14b60a20, + 0x0227f006, + 0x800023f1, + 0xf00012d0, + 0x27f11017, + 0x23f00300, + 0x0512fa02, + 0x87f103f8, + 0x84b6085c, + 0xf094bd06, + 0x89d00899, + 0xc1019800, + 0x981814b6, + 0x25b6c002, + 0x0512fd08, + 0xf1160180, + 0xb6083c87, + 0x94bd0684, + 0xd00999f0, + 0x27f10089, + 0x24b60a04, + 0x0021d006, + 0xf10127f0, + 0xb60a2017, + 0x12d00614, + 0x0017f100, + 0x0613f002, + 0xf80501fa, + 0x5c87f103, + 0x0684b608, + 0x99f094bd, + 0x0089d009, + 0x085c87f1, + 0xbd0684b6, + 0x0599f094, + 0xf80089d0, + 0x3121f500, + 0xb821f506, + 0x0ca7f006, + 0xf1c921f4, + 0xb60a1017, + 0x27f00614, + 0x0012d005, + 0xfd0012cf, + 0x1bf40522, + 0x4921f5fa, + 0x9800f806, + 0x27f18103, + 0x24b60a04, + 0x0023d006, + 0x34c434bd, + 0x0f1bf4ff, + 0x030057f1, + 0xfa0653f0, + 0x03f80535, + 0x98c04e98, + 0x21f4c14f, + 0x0830b68d, + 0xf40112b6, + 0x0398df1b, + 0x0023d016, + 0xf1800080, + 0xf0020017, + 0x01fa0613, + 0xf803f806, + 0x0611f400, + 0xf01102f4, + 0x21f510f7, + 0x21f50698, + 0x11f40631, + 0x02f7f01c, + 0x065721f5, + 0x066621f5, + 0x067821f5, + 0x21f5f4bd, + 0x21f50657, + 0x019806b8, + 0x1427f116, + 0x0624b604, + 0xf10020d0, + 0xf0a500e7, + 0x1fb941e3, + 0x8d21f402, + 0xf004e0b6, + 0x2cf001fc, + 0x0124b602, + 0xf405f2fd, + 0x17f18d21, + 0x13f04afc, + 0x0c27f002, + 0xf50012d0, + 0xf1020721, + 0xf047fc27, + 0x20d00223, + 0x012cf000, + 0xd00320b6, + 0xacf00012, + 0x06a5f001, + 0x9800b7f0, + 0x0d98140c, + 0x00e7f015, + 0x015c21f5, + 0xf508a7f0, + 0xf5010321, + 0xf4020721, + 0xa7f02201, + 0xc921f40c, + 0x0a1017f1, + 0xf00614b6, + 0x12d00527, + 0x0012cf00, + 0xf40522fd, + 0x02f4fa1b, + 0x02f7f032, + 0x065721f5, + 0x21f5f4bd, + 0x21f50698, + 0x21f50226, + 0xf4bd0666, + 0x065721f5, + 0x981011f4, + 0x11fd8001, + 0x070bf405, + 0x07df21f5, + 0x064921f5, + 0x000000f8, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +}; -- cgit v1.2.3 From a219997a3b17baea478c9e99ac62b8b6b78b15d4 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 26 May 2011 10:54:05 +1000 Subject: drm/nvc0/gr: add some missing magics for 0xc1/0xc8/0xce Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_graph.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index c5aa7e7aea0d..314e938d1525 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -865,6 +865,18 @@ nvc0_graph_create(struct drm_device *dev) priv->magic_not_rop_nr = 0x01; priv->magicgpc918 = 0x00124925; break; + case 0xc1: /* 2/0/0/0, 1 */ + priv->magic_not_rop_nr = 0x01; + priv->magicgpc918 = 0x00400000; + break; + case 0xc8: /* 4/4/3/4, 5 */ + priv->magic_not_rop_nr = 0x06; + priv->magicgpc918 = 0x00088889; + break; + case 0xce: /* 4/4/0/0, 4 */ + priv->magic_not_rop_nr = 0x03; + priv->magicgpc918 = 0x00100000; + break; } if (!priv->magic_not_rop_nr) { -- cgit v1.2.3 From 066d65db11fe4049bae52199c4b76fd6d9e95d47 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 26 May 2011 12:12:43 +1000 Subject: drm/nvc0/gr: calculate magicgpc918 ourselves Not a clue what it is yet, but we get the same numbers as NVIDIA now. My 465 didn't seem to care to greatly *what* I bashed into these registers.. 
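As a quick sanity check (a minimal userspace sketch, not part of the patch; the TP totals are taken from the per-chipset comments in nvc0_graph.c), DIV_ROUND_UP(0x00800000, tp_total) reproduces the hardcoded table this patch removes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* board name, total TP count (sum of the per-GPC counts) */
	static const struct { const char *board; unsigned int tps; } t[] = {
		{ "450", 4 }, { "460", 7 }, { "465", 11 },
		{ "470", 14 }, { "480", 15 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("%s: %08x\n", t[i].board,
		       DIV_ROUND_UP(0x00800000u, t[i].tps));
	return 0;	/* 00200000 00124925 000ba2e9 00092493 00088889 */
}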
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_graph.c | 22 +++------------------- drivers/gpu/drm/nouveau/nvc0_graph.h | 3 +-- 2 files changed, 4 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index 314e938d1525..68b25ca4015c 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -350,6 +350,7 @@ static void nvc0_graph_init_gpc_0(struct drm_device *dev) { struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); + const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total); u32 data[TP_MAX / 8]; u8 tpnr[GPC_MAX]; int i, gpc, tpc; @@ -361,13 +362,6 @@ nvc0_graph_init_gpc_0(struct drm_device *dev) * 465: 3/4/4/0 4 7 * 470: 3/3/4/4 5 5 * 480: 3/4/4/4 6 6 - * - * magicgpc918 - * 450: 00200000 00000000001000000000000000000000 - * 460: 00124925 00000000000100100100100100100101 - * 465: 000ba2e9 00000000000010111010001011101001 - * 470: 00092493 00000000000010010010010010010011 - * 480: 00088889 00000000000010001000100010001001 */ memset(data, 0x00, sizeof(data)); @@ -390,10 +384,10 @@ nvc0_graph_init_gpc_0(struct drm_device *dev) nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | priv->tp_nr[gpc]); nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total); - nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918); + nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918); } - nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918); + nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918); nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr); } @@ -845,37 +839,28 @@ nvc0_graph_create(struct drm_device *dev) case 0xc0: if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */ priv->magic_not_rop_nr = 0x07; - /* filled values up to tp_total, the rest 0 */ - priv->magicgpc918 = 0x000ba2e9; } else if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */ priv->magic_not_rop_nr = 0x05; - priv->magicgpc918 = 0x00092493; } else if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */ priv->magic_not_rop_nr = 0x06; - priv->magicgpc918 = 0x00088889; } break; case 0xc3: /* 450, 4/0/0/0, 2 */ priv->magic_not_rop_nr = 0x03; - priv->magicgpc918 = 0x00200000; break; case 0xc4: /* 460, 3/4/0/0, 4 */ priv->magic_not_rop_nr = 0x01; - priv->magicgpc918 = 0x00124925; break; case 0xc1: /* 2/0/0/0, 1 */ priv->magic_not_rop_nr = 0x01; - priv->magicgpc918 = 0x00400000; break; case 0xc8: /* 4/4/3/4, 5 */ priv->magic_not_rop_nr = 0x06; - priv->magicgpc918 = 0x00088889; break; case 0xce: /* 4/4/0/0, 4 */ priv->magic_not_rop_nr = 0x03; - priv->magicgpc918 = 0x00100000; break; } @@ -885,7 +870,6 @@ nvc0_graph_create(struct drm_device *dev) priv->tp_nr[3], priv->rop_nr); /* use 0xc3's values... 
*/ priv->magic_not_rop_nr = 0x03; - priv->magicgpc918 = 0x00200000; } NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h index fa2f9cb470ad..55689e997286 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.h +++ b/drivers/gpu/drm/nouveau/nvc0_graph.h @@ -57,8 +57,7 @@ struct nvc0_graph_priv { struct nouveau_gpuobj *unk4188b4; struct nouveau_gpuobj *unk4188b8; - u8 magic_not_rop_nr; - u32 magicgpc918; + u8 magic_not_rop_nr; }; struct nvc0_graph_chan { -- cgit v1.2.3 From 2b6f1c5f17305dd5d9b1c566be31101a66fae4c9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 27 May 2011 08:36:45 +1000 Subject: drm/nvc0/gr: fix typo in class9197 init Reported-by: Christoph Bumiller Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_grctx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index e99ebb011c05..f2f9825ab0a6 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -1209,7 +1209,7 @@ nvc0_grctx_generate_9197(struct drm_device *dev) for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) nv_mthd(dev, 0x9197, mthd, 0x00000000); } - nv_mthd(dev, 0x9297, 0x02e4, 0x0000b001); + nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001); } static void -- cgit v1.2.3 From 752ab0a0927add86c8b6604e01147041509b2af4 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 27 May 2011 15:50:59 +1000 Subject: drm/nvc0/gr: fill in some more data for 0xc1/0xc8/0xce Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_grctx.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index f2f9825ab0a6..31018eaf5279 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -1783,10 +1783,11 @@ nvc0_grctx_generate(struct nouveau_channel *chan) nv_wr32(dev, 0x40587c, 0x00000000); if (1) { - const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 }; + const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0, + 16, 0, 0, 0, 0, 0, 8, 0 }; u8 max = chipset_tp_max[dev_priv->chipset & 0x0f]; u8 tpnr[GPC_MAX]; - u8 data[32]; + u8 data[TP_MAX]; memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); memset(data, 0x1f, sizeof(data)); -- cgit v1.2.3 From ad830d23d31a51997ca0780dddbe919eb1bfb879 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 27 May 2011 16:18:10 +1000 Subject: drm/nouveau: log if accel is disabled by default on a chipset Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index f65811c3eb4d..a0e17340e145 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -946,6 +946,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) case 0xc1: /* known broken */ case 0xc8: /* never tested */ case 0xce: /* never tested */ + NV_INFO(dev, "acceleration disabled by default, pass " + "noaccel=0 to force enable\n"); dev_priv->noaccel = true; break; default: -- cgit v1.2.3 From e540afc32585664840506a7198966d18318381af Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 30 May 2011 12:53:37 +1000 Subject: drm/nv50: DCB table quirks for another busted XFX board Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git 
a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 729d5fd7c88d..e93e3148b8e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -6377,6 +6377,37 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) } } + /* Some other twisted XFX board (rhbz#694914) + * + * The DVI/VGA encoder combo that's supposed to represent the + * DVI-I connector actually point at two different ones, and + * the HDMI connector ends up paired with the VGA instead. + * + * Connector table is missing anything for VGA at all, pointing it + * an invalid conntab entry 2 so we figure it out ourself. + */ + if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) { + if (idx == 0) { + *conn = 0x02002300; /* VGA, connector 2 */ + *conf = 0x00000028; + } else + if (idx == 1) { + *conn = 0x01010312; /* DVI, connector 0 */ + *conf = 0x00020030; + } else + if (idx == 2) { + *conn = 0x04020310; /* VGA, connector 0 */ + *conf = 0x00000028; + } else + if (idx == 3) { + *conn = 0x02021322; /* HDMI, connector 1 */ + *conf = 0x00020010; + } else { + *conn = 0x0000000e; /* EOL */ + *conf = 0x00000000; + } + } + return true; } -- cgit v1.2.3 From 1562ffde94fc232e5b7d6d32f43abb3e25468dac Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Jun 2011 14:11:10 +1000 Subject: drm/nouveau: silence error for missing dac loadval table There's lots of boards (all recent ones) that don't have this anymore, so punt the message to debug loglevel. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index e93e3148b8e8..ff339df6f007 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -5186,7 +5186,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st load_table_ptr = ROM16(bios->data[bitentry->offset]); if (load_table_ptr == 0x0) { - NV_ERROR(dev, "Pointer to BIT loadval table invalid\n"); + NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n"); return -EINVAL; } -- cgit v1.2.3 From 3f0a68d8f8ba9d6c0cd9df948fbba90944c3da62 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 31 May 2011 11:11:28 +1000 Subject: drm/nouveau: allocate structure to store per-client data Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.c | 2 ++ drivers/gpu/drm/nouveau/nouveau_drv.h | 10 +++++++++- drivers/gpu/drm/nouveau/nouveau_state.c | 21 +++++++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 7e25f5a6db96..76cd287c7cec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -393,7 +393,9 @@ static struct drm_driver driver = { .firstopen = nouveau_firstopen, .lastclose = nouveau_lastclose, .unload = nouveau_unload, + .open = nouveau_open, .preclose = nouveau_preclose, + .postclose = nouveau_postclose, #if defined(CONFIG_DRM_NOUVEAU_DEBUG) .debugfs_init = nouveau_debugfs_init, .debugfs_cleanup = nouveau_debugfs_takedown, diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 86eb3f40c4f8..a378a9648198 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -46,9 +46,15 @@ #include "ttm/ttm_module.h" struct nouveau_fpriv { - struct ttm_object_file *tfile; + spinlock_t lock; 
}; +static inline struct nouveau_fpriv * +nouveau_fpriv(struct drm_file *file_priv) +{ + return file_priv ? file_priv->driver_priv : NULL; +} + #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) #include "nouveau_drm.h" @@ -792,7 +798,9 @@ extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); extern int nouveau_pci_resume(struct pci_dev *pdev); /* nouveau_state.c */ +extern int nouveau_open(struct drm_device *, struct drm_file *); extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); +extern void nouveau_postclose(struct drm_device *, struct drm_file *); extern int nouveau_load(struct drm_device *, unsigned long flags); extern int nouveau_firstopen(struct drm_device *); extern void nouveau_lastclose(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index a0e17340e145..9aa96b9375ae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -764,6 +764,20 @@ static void nouveau_card_takedown(struct drm_device *dev) vga_client_register(dev->pdev, NULL, NULL, NULL); } +int +nouveau_open(struct drm_device *dev, struct drm_file *file_priv) +{ + struct nouveau_fpriv *fpriv; + + fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); + if (unlikely(!fpriv)) + return -ENOMEM; + + spin_lock_init(&fpriv->lock); + file_priv->driver_priv = fpriv; + return 0; +} + /* here a client dies, release the stuff that was allocated for its * file_priv */ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) @@ -771,6 +785,13 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) nouveau_channel_cleanup(dev, file_priv); } +void +nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv) +{ + struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); + kfree(fpriv); +} + /* first module load, setup the mmio/fb mapping */ /* KMS: we need mmio at load time, not when the first drm client opens. */ int nouveau_firstopen(struct drm_device *dev) -- cgit v1.2.3 From f8656f0baa316d1f08e224248e0b40ade85a4e80 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 31 May 2011 11:12:55 +1000 Subject: drm/nouveau: use NULL file_priv for DRM-created channels Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 9aa96b9375ae..9965063beb69 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -449,8 +449,8 @@ nouveau_card_init_channel(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; - ret = nouveau_channel_alloc(dev, &dev_priv->channel, - (struct drm_file *)-2, NvDmaFB, NvDmaTT); + ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL, + NvDmaFB, NvDmaTT); if (ret) return ret; -- cgit v1.2.3 From e8a863c10f4ca47e942886dddf70c35e3c2d5dd6 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Jun 2011 19:18:48 +1000 Subject: drm/nouveau: store a per-client channel list Removes the need to disable IRQs to lookup channel struct on every pushbuf ioctl, among others. 
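The effect on the lookup path, reduced to a sketch (a condensed illustration only; field and function names mirror the diff below, and the real code also takes a channel reference and chan->mutex before returning):

	/* before: index into a global array under dev_priv->channels.lock,
	 * taken with IRQs disabled */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* after: walk the (short) per-client list under a plain spinlock */
	spin_lock(&fpriv->lock);
	list_for_each_entry(chan, &fpriv->channels, list)
		if (chan->id == id)
			break;
	spin_unlock(&fpriv->lock);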
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 42 ++++++++++++++++-------------- drivers/gpu/drm/nouveau/nouveau_drv.h | 4 ++- drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +-- drivers/gpu/drm/nouveau/nouveau_notifier.c | 2 +- drivers/gpu/drm/nouveau/nouveau_object.c | 4 +-- drivers/gpu/drm/nouveau/nouveau_state.c | 2 ++ 6 files changed, 32 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index a7583a8ddb01..764dd672112a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -121,6 +121,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); struct nouveau_channel *chan; unsigned long flags; int ret; @@ -220,6 +221,11 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, nouveau_debugfs_channel_init(chan); NV_DEBUG(dev, "channel %d initialised\n", chan->id); + if (fpriv) { + spin_lock(&fpriv->lock); + list_add(&chan->list, &fpriv->channels); + spin_unlock(&fpriv->lock); + } *chan_ret = chan; return 0; } @@ -236,29 +242,23 @@ nouveau_channel_get_unlocked(struct nouveau_channel *ref) } struct nouveau_channel * -nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) +nouveau_channel_get(struct drm_file *file_priv, int id) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); struct nouveau_channel *chan; - unsigned long flags; - if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR)) - return ERR_PTR(-EINVAL); - - spin_lock_irqsave(&dev_priv->channels.lock, flags); - chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]); - spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - - if (unlikely(!chan)) - return ERR_PTR(-EINVAL); - - if (unlikely(file_priv && chan->file_priv != file_priv)) { - nouveau_channel_put_unlocked(&chan); - return ERR_PTR(-EINVAL); + spin_lock(&fpriv->lock); + list_for_each_entry(chan, &fpriv->channels, list) { + if (chan->id == id) { + chan = nouveau_channel_get_unlocked(chan); + spin_unlock(&fpriv->lock); + mutex_lock(&chan->mutex); + return chan; + } } + spin_unlock(&fpriv->lock); - mutex_lock(&chan->mutex); - return chan; + return ERR_PTR(-EINVAL); } void @@ -383,10 +383,11 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); for (i = 0; i < engine->fifo.channels; i++) { - chan = nouveau_channel_get(dev, file_priv, i); + chan = nouveau_channel_get(file_priv, i); if (IS_ERR(chan)) continue; + list_del(&chan->list); atomic_dec(&chan->users); nouveau_channel_put(&chan); } @@ -459,10 +460,11 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, struct drm_nouveau_channel_free *req = data; struct nouveau_channel *chan; - chan = nouveau_channel_get(dev, file_priv, req->channel); + chan = nouveau_channel_get(file_priv, req->channel); if (IS_ERR(chan)) return PTR_ERR(chan); + list_del(&chan->list); atomic_dec(&chan->users); nouveau_channel_put(&chan); return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index a378a9648198..633f1e6d421f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -47,6 +47,7 @@ struct 
nouveau_fpriv { spinlock_t lock; + struct list_head channels; }; static inline struct nouveau_fpriv * @@ -208,6 +209,7 @@ enum nouveau_channel_mutex_class { struct nouveau_channel { struct drm_device *dev; + struct list_head list; int id; /* references to the channel data structure */ @@ -858,7 +860,7 @@ extern int nouveau_channel_alloc(struct drm_device *dev, extern struct nouveau_channel * nouveau_channel_get_unlocked(struct nouveau_channel *); extern struct nouveau_channel * -nouveau_channel_get(struct drm_device *, struct drm_file *, int id); +nouveau_channel_get(struct drm_file *, int id); extern void nouveau_channel_put_unlocked(struct nouveau_channel **); extern void nouveau_channel_put(struct nouveau_channel **); extern void nouveau_channel_ref(struct nouveau_channel *chan, diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index b52e46018245..2bd8d6da9c3d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -139,7 +139,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, } if (req->channel_hint) { - chan = nouveau_channel_get(dev, file_priv, req->channel_hint); + chan = nouveau_channel_get(file_priv, req->channel_hint); if (IS_ERR(chan)) return PTR_ERR(chan); } @@ -548,7 +548,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, struct nouveau_fence *fence = NULL; int i, j, ret = 0, do_reloc = 0; - chan = nouveau_channel_get(dev, file_priv, req->channel); + chan = nouveau_channel_get(file_priv, req->channel); if (IS_ERR(chan)) return PTR_ERR(chan); diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 5b39718ae1f8..104b93b9f852 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -183,7 +183,7 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, if (unlikely(dev_priv->card_type >= NV_C0)) return -EINVAL; - chan = nouveau_channel_get(dev, file_priv, na->channel); + chan = nouveau_channel_get(file_priv, na->channel); if (IS_ERR(chan)) return PTR_ERR(chan); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 8f97016f5b26..8c9895827875 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -909,7 +909,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, if (init->handle == ~0) return -EINVAL; - chan = nouveau_channel_get(dev, file_priv, init->channel); + chan = nouveau_channel_get(file_priv, init->channel); if (IS_ERR(chan)) return PTR_ERR(chan); @@ -936,7 +936,7 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, struct nouveau_channel *chan; int ret; - chan = nouveau_channel_get(dev, file_priv, objfree->channel); + chan = nouveau_channel_get(file_priv, objfree->channel); if (IS_ERR(chan)) return PTR_ERR(chan); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 9965063beb69..b38b28066836 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -774,6 +774,8 @@ nouveau_open(struct drm_device *dev, struct drm_file *file_priv) return -ENOMEM; spin_lock_init(&fpriv->lock); + INIT_LIST_HEAD(&fpriv->channels); + file_priv->driver_priv = fpriv; return 0; } -- cgit v1.2.3 From d359d51cba731a72fede6c09b1d567de02f081dc Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Jun 2011 15:47:39 +1000 Subject: drm/nouveau: no need to update 
bo.offset from vma after validate On chipsets using nouveau_vm, the virtual address stays constant, so the value set at bo creation time is fine. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2ad49cbf7c8b..633f724b6baf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -312,8 +312,6 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, if (ret) return ret; - if (nvbo->vma.node) - nvbo->bo.offset = nvbo->vma.offset; return 0; } -- cgit v1.2.3 From b79181cbad3ab40509ea6c985b940d48d90abc0b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Jun 2011 09:57:27 +1000 Subject: drm/nv50-nvc0/vm: don't touch chan_vm Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_vm.c | 2 +- drivers/gpu/drm/nouveau/nvc0_vm.c | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 1a0dd491a0e4..40b84f22d819 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c @@ -156,7 +156,7 @@ nv50_vm_flush(struct nouveau_vm *vm) pinstmem->flush(vm->dev); /* BAR */ - if (vm != dev_priv->chan_vm) { + if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) { nv50_vm_flush_engine(vm->dev, 6); return; } diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c index a179e6c55afb..9e352944a35a 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vm.c +++ b/drivers/gpu/drm/nouveau/nvc0_vm.c @@ -105,7 +105,11 @@ nvc0_vm_flush(struct nouveau_vm *vm) struct drm_device *dev = vm->dev; struct nouveau_vm_pgd *vpgd; unsigned long flags; - u32 engine = (dev_priv->chan_vm == vm) ? 
1 : 5; + u32 engine; + + engine = 1; + if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) + engine |= 4; pinstmem->flush(vm->dev); -- cgit v1.2.3 From fe32b16e7998bae28209834c0f7c21766d7524ec Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Jun 2011 10:07:08 +1000 Subject: drm/nv50-nvc0/vm: take client reference on shared channel vm Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_state.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 633f1e6d421f..a49953654a23 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -48,6 +48,7 @@ struct nouveau_fpriv { spinlock_t lock; struct list_head channels; + struct nouveau_vm *vm; }; static inline struct nouveau_fpriv * diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index b38b28066836..12b34710a76f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -767,6 +767,7 @@ static void nouveau_card_takedown(struct drm_device *dev) int nouveau_open(struct drm_device *dev, struct drm_file *file_priv) { + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fpriv *fpriv; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); @@ -776,6 +777,9 @@ nouveau_open(struct drm_device *dev, struct drm_file *file_priv) spin_lock_init(&fpriv->lock); INIT_LIST_HEAD(&fpriv->channels); + if (dev_priv->card_type >= NV_50) + nouveau_vm_ref(dev_priv->chan_vm, &fpriv->vm, NULL); + file_priv->driver_priv = fpriv; return 0; } @@ -791,6 +795,7 @@ void nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv) { struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); + nouveau_vm_ref(NULL, &fpriv->vm, NULL); kfree(fpriv); } -- cgit v1.2.3 From 0320d7910b8905c7a99b3b0be369f91129a59f2f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Jun 2011 10:20:52 +1000 Subject: drm/nv50-nvc0/chan: inherit vm from fpriv, rather than chan_vm directly Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_object.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 8c9895827875..fe95766d03bb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -696,13 +696,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv); + struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm; struct nouveau_gpuobj *vram = NULL, *tt = NULL; int ret, i; NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); - if (dev_priv->card_type == NV_C0) { - struct nouveau_vm *vm = dev_priv->chan_vm; struct nouveau_vm_pgd *vpgd; ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, @@ -731,7 +731,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, * - Allocate per-channel page-directory * - Link with shared channel VM */ - if (dev_priv->chan_vm) { + if (vm) { u32 pgd_offs = (dev_priv->chipset == 0x50) ? 
0x1400 : 0x0200; u64 vm_vinst = chan->ramin->vinst + pgd_offs; u32 vm_pinst = chan->ramin->pinst; @@ -744,7 +744,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, if (ret) return ret; - nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd); + nouveau_vm_ref(vm, &chan->vm, chan->vm_pd); } /* RAMHT */ -- cgit v1.2.3 From f6d4e62145b597c6249c1dc9c1c3ecd66ba165f0 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 12:25:36 +1000 Subject: drm/nouveau: remove 'chan' argument from nouveau_gem_new Userspace hasn't passed us a channel_hint for a long long time now, and there isn't actually a need to do so anymore anyway. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 7 +++---- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_gem.c | 19 +++++-------------- drivers/gpu/drm/nouveau/nouveau_notifier.c | 2 +- 4 files changed, 11 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index a49953654a23..21cb385842b5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1324,10 +1324,9 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj) } /* nouveau_gem.c */ -extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, - int size, int align, uint32_t domain, - uint32_t tile_mode, uint32_t tile_flags, - struct nouveau_bo **); +extern int nouveau_gem_new(struct drm_device *, int size, int align, + uint32_t domain, uint32_t tile_mode, + uint32_t tile_flags, struct nouveau_bo **); extern int nouveau_gem_object_new(struct drm_gem_object *); extern void nouveau_gem_object_del(struct drm_gem_object *); extern int nouveau_gem_ioctl_new(struct drm_device *, void *, diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 39aee6d4daf8..59ad9600e555 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -296,8 +296,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, size = mode_cmd.pitch * mode_cmd.height; size = roundup(size, PAGE_SIZE); - ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, - NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo); + ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM, + 0, 0x0000, &nvbo); if (ret) { NV_ERROR(dev, "failed to allocate framebuffer\n"); goto out; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 2bd8d6da9c3d..b4218d719770 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -60,9 +60,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem) } int -nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, - int size, int align, uint32_t domain, uint32_t tile_mode, - uint32_t tile_flags, struct nouveau_bo **pnvbo) +nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, + uint32_t tile_mode, uint32_t tile_flags, + struct nouveau_bo **pnvbo) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_bo *nvbo; @@ -76,7 +76,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) flags |= TTM_PL_FLAG_SYSTEM; - ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode, + ret = nouveau_bo_new(dev, NULL, size, align, flags, tile_mode, tile_flags, pnvbo); if (ret) return ret; @@ -127,7 +127,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void 
*data, struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_gem_new *req = data; struct nouveau_bo *nvbo = NULL; - struct nouveau_channel *chan = NULL; int ret = 0; if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) @@ -138,17 +137,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, return -EINVAL; } - if (req->channel_hint) { - chan = nouveau_channel_get(file_priv, req->channel_hint); - if (IS_ERR(chan)) - return PTR_ERR(chan); - } - - ret = nouveau_gem_new(dev, chan, req->info.size, req->align, + ret = nouveau_gem_new(dev, req->info.size, req->align, req->info.domain, req->info.tile_mode, req->info.tile_flags, &nvbo); - if (chan) - nouveau_channel_put(&chan); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 104b93b9f852..29190e845fd7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -46,7 +46,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) ttmpl = TTM_PL_FLAG_TT; } - ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy); + ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy); if (ret) return ret; -- cgit v1.2.3 From 639212d01157266d9ee0b904fbc9f4a556e1c711 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Jun 2011 16:18:26 +1000 Subject: drm/nouveau/gem: implement stub hooks for GEM object open/close Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.c | 2 ++ drivers/gpu/drm/nouveau/nouveau_drv.h | 3 +++ drivers/gpu/drm/nouveau/nouveau_gem.c | 20 ++++++++++++++++++++ 3 files changed, 25 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 76cd287c7cec..4e481c39f5a4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -426,6 +426,8 @@ static struct drm_driver driver = { .gem_init_object = nouveau_gem_object_new, .gem_free_object = nouveau_gem_object_del, + .gem_open_object = nouveau_gem_object_open, + .gem_close_object = nouveau_gem_object_close, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 21cb385842b5..e8357c969adb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1329,6 +1329,9 @@ extern int nouveau_gem_new(struct drm_device *, int size, int align, uint32_t tile_flags, struct nouveau_bo **); extern int nouveau_gem_object_new(struct drm_gem_object *); extern void nouveau_gem_object_del(struct drm_gem_object *); +extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *); +extern void nouveau_gem_object_close(struct drm_gem_object *, + struct drm_file *); extern int nouveau_gem_ioctl_new(struct drm_device *, void *, struct drm_file *); extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index b4218d719770..da3c8bbd9d8f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -59,6 +59,26 @@ nouveau_gem_object_del(struct drm_gem_object *gem) kfree(gem); } +int +nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) +{ + struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); + + if (!fpriv->vm) + return 0; + + return 0; +} + +void +nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) +{ + struct 
nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); + + if (!fpriv->vm) + return; +} + int nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, uint32_t tile_mode, uint32_t tile_flags, -- cgit v1.2.3 From b7cb6c01ee549b6c7c365c92f156983d346295a3 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Jun 2011 11:34:27 +1000 Subject: drm/nouveau: modify gpuobj/ntfy takedown ordering gpuobj really needs splitting into channel/gpuobj code instead... Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 4 +++- drivers/gpu/drm/nouveau/nouveau_object.c | 2 -- drivers/gpu/drm/nouveau/nv50_evo.c | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 764dd672112a..96ac906cfee5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -27,6 +27,7 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nouveau_dma.h" +#include "nouveau_ramht.h" static int nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) @@ -316,8 +317,9 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) nouveau_bo_unpin(chan->pushbuf_bo); nouveau_bo_ref(NULL, &chan->pushbuf_bo); } - nouveau_gpuobj_channel_takedown(chan); + nouveau_ramht_ref(NULL, &chan->ramht, chan); nouveau_notifier_takedown_channel(chan); + nouveau_gpuobj_channel_takedown(chan); nouveau_channel_ref(NULL, pchan); } diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index fe95766d03bb..c56ac93aee72 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -844,8 +844,6 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); - nouveau_ramht_ref(NULL, &chan->ramht, chan); - nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_pd); diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index c8e83c1a4de8..18c61929c450 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -38,6 +38,7 @@ nv50_evo_channel_del(struct nouveau_channel **pevo) return; *pevo = NULL; + nouveau_ramht_ref(NULL, &evo->ramht, evo); nouveau_gpuobj_channel_takedown(evo); nouveau_bo_unmap(evo->pushbuf_bo); nouveau_bo_ref(NULL, &evo->pushbuf_bo); -- cgit v1.2.3 From dd6a46cc922bec58e9c73782cd59f50a239c4fa7 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Jun 2011 13:59:44 +1000 Subject: drm/nouveau: initialise any vm for a channel before pushbuf/ntfy Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 114 +++++++++++++----------------- 1 file changed, 48 insertions(+), 66 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 96ac906cfee5..23bd0c4f70b1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -30,14 +30,31 @@ #include "nouveau_ramht.h" static int -nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) +nouveau_channel_pushbuf_init(struct nouveau_channel *chan) { + u32 mem = nouveau_vram_pushbuf ? 
TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT; struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_bo *pb = chan->pushbuf_bo; - struct nouveau_gpuobj *pushbuf = NULL; - int ret = 0; + int ret; + + /* allocate buffer object */ + ret = nouveau_bo_new(dev, NULL, 65536, 0, mem, 0, 0, &chan->pushbuf_bo); + if (ret) + goto out; + ret = nouveau_bo_pin(chan->pushbuf_bo, mem); + if (ret) + goto out; + + ret = nouveau_bo_map(chan->pushbuf_bo); + if (ret) + goto out; + + /* create DMA object covering the entire memtype where the push + * buffer resides, userspace can submit its own push buffers from + * anywhere within the same memtype. + */ + chan->pushbuf_base = chan->pushbuf_bo->bo.mem.start << PAGE_SHIFT; if (dev_priv->card_type >= NV_50) { if (dev_priv->card_type < NV_C0) { ret = nouveau_gpuobj_dma_new(chan, @@ -45,23 +62,23 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) (1ULL << 40), NV_MEM_ACCESS_RO, NV_MEM_TARGET_VM, - &pushbuf); + &chan->pushbuf); } - chan->pushbuf_base = pb->bo.offset; + chan->pushbuf_base = chan->pushbuf_bo->bo.offset; } else - if (pb->bo.mem.mem_type == TTM_PL_TT) { + if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->gart_info.aper_size, NV_MEM_ACCESS_RO, - NV_MEM_TARGET_GART, &pushbuf); - chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; + NV_MEM_TARGET_GART, + &chan->pushbuf); } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->fb_available_size, NV_MEM_ACCESS_RO, - NV_MEM_TARGET_VRAM, &pushbuf); - chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; + NV_MEM_TARGET_VRAM, + &chan->pushbuf); } else { /* NV04 cmdbuf hack, from original ddx.. 
not sure of it's * exact reason for existing :) PCI access to cmdbuf in @@ -71,47 +88,21 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) pci_resource_start(dev->pdev, 1), dev_priv->fb_available_size, NV_MEM_ACCESS_RO, - NV_MEM_TARGET_PCI, &pushbuf); - chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; - } - - nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); - nouveau_gpuobj_ref(NULL, &pushbuf); - return ret; -} - -static struct nouveau_bo * -nouveau_channel_user_pushbuf_alloc(struct drm_device *dev) -{ - struct nouveau_bo *pushbuf = NULL; - int location, ret; - - if (nouveau_vram_pushbuf) - location = TTM_PL_FLAG_VRAM; - else - location = TTM_PL_FLAG_TT; - - ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf); - if (ret) { - NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret); - return NULL; + NV_MEM_TARGET_PCI, + &chan->pushbuf); } - ret = nouveau_bo_pin(pushbuf, location); +out: if (ret) { - NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret); - nouveau_bo_ref(NULL, &pushbuf); - return NULL; - } - - ret = nouveau_bo_map(pushbuf); - if (ret) { - nouveau_bo_unpin(pushbuf); - nouveau_bo_ref(NULL, &pushbuf); - return NULL; + NV_ERROR(dev, "error initialising pushbuf: %d\n", ret); + nouveau_gpuobj_ref(NULL, &chan->pushbuf); + if (chan->pushbuf_bo) { + nouveau_bo_unmap(chan->pushbuf_bo); + nouveau_bo_ref(NULL, &chan->pushbuf_bo); + } } - return pushbuf; + return 0; } /* allocates and initializes a fifo for user space consumption */ @@ -162,19 +153,14 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, INIT_LIST_HEAD(&chan->nvsw.flip); INIT_LIST_HEAD(&chan->fence.pending); - /* Allocate DMA push buffer */ - chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); - if (!chan->pushbuf_bo) { - ret = -ENOMEM; - NV_ERROR(dev, "pushbuf %d\n", ret); + /* setup channel's memory and vm */ + ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); + if (ret) { + NV_ERROR(dev, "gpuobj %d\n", ret); nouveau_channel_put(&chan); return ret; } - nouveau_dma_pre_init(chan); - chan->user_put = 0x40; - chan->user_get = 0x44; - /* Allocate space for per-channel fixed notifier memory */ ret = nouveau_notifier_init_channel(chan); if (ret) { @@ -183,21 +169,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, return ret; } - /* Setup channel's default objects */ - ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); + /* Allocate DMA push buffer */ + ret = nouveau_channel_pushbuf_init(chan); if (ret) { - NV_ERROR(dev, "gpuobj %d\n", ret); + NV_ERROR(dev, "pushbuf %d\n", ret); nouveau_channel_put(&chan); return ret; } - /* Create a dma object for the push buffer */ - ret = nouveau_channel_pushbuf_ctxdma_init(chan); - if (ret) { - NV_ERROR(dev, "pbctxdma %d\n", ret); - nouveau_channel_put(&chan); - return ret; - } + nouveau_dma_pre_init(chan); + chan->user_put = 0x40; + chan->user_get = 0x44; /* disable the fifo caches */ pfifo->reassign(dev, false); -- cgit v1.2.3 From 6e32fedc8b50d3571bdec4e9849e45659ac96599 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Jun 2011 14:23:30 +1000 Subject: drm/nouveau: will need to specify channel for vm-ful gpuobj allocations Abuses existing gpuobj_new() chan argument for this, which in turn forces all NVOBJ_FLAG_VM allocations to be done from the global heap, not suballocated from the channel's private heap. Not a problem though in practise. 
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 9 ++++++--- drivers/gpu/drm/nouveau/nouveau_object.c | 4 ++-- drivers/gpu/drm/nouveau/nv04_instmem.c | 3 ++- drivers/gpu/drm/nouveau/nv50_instmem.c | 5 +++-- drivers/gpu/drm/nouveau/nvc0_copy.c | 2 +- drivers/gpu/drm/nouveau/nvc0_graph.c | 10 +++++----- 6 files changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e8357c969adb..0f5396602afb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -324,7 +324,8 @@ struct nouveau_instmem_engine { int (*suspend)(struct drm_device *dev); void (*resume)(struct drm_device *dev); - int (*get)(struct nouveau_gpuobj *, u32 size, u32 align); + int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *, + u32 size, u32 align); void (*put)(struct nouveau_gpuobj *); int (*map)(struct nouveau_gpuobj *); void (*unmap)(struct nouveau_gpuobj *); @@ -1183,7 +1184,8 @@ extern int nv04_instmem_init(struct drm_device *); extern void nv04_instmem_takedown(struct drm_device *); extern int nv04_instmem_suspend(struct drm_device *); extern void nv04_instmem_resume(struct drm_device *); -extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); +extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *, + u32 size, u32 align); extern void nv04_instmem_put(struct nouveau_gpuobj *); extern int nv04_instmem_map(struct nouveau_gpuobj *); extern void nv04_instmem_unmap(struct nouveau_gpuobj *); @@ -1194,7 +1196,8 @@ extern int nv50_instmem_init(struct drm_device *); extern void nv50_instmem_takedown(struct drm_device *); extern int nv50_instmem_suspend(struct drm_device *); extern void nv50_instmem_resume(struct drm_device *); -extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); +extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *, + u32 size, u32 align); extern void nv50_instmem_put(struct nouveau_gpuobj *); extern int nv50_instmem_map(struct nouveau_gpuobj *); extern void nv50_instmem_unmap(struct nouveau_gpuobj *); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index c56ac93aee72..ab4be9ca43cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -191,7 +191,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); spin_unlock(&dev_priv->ramin_lock); - if (chan) { + if (!(flags & NVOBJ_FLAG_VM) && chan) { ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); if (ramin) ramin = drm_mm_get_block(ramin, size, align); @@ -208,7 +208,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, gpuobj->vinst = ramin->start + chan->ramin->vinst; gpuobj->node = ramin; } else { - ret = instmem->get(gpuobj, size, align); + ret = instmem->get(gpuobj, chan, size, align); if (ret) { nouveau_gpuobj_ref(NULL, &gpuobj); return ret; diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index b8611b955313..ae36bfc84853 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -112,7 +112,8 @@ nv04_instmem_resume(struct drm_device *dev) } int -nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) +nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan, + u32 size, u32 align) { struct 
drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; struct drm_mm_node *ramin = NULL; diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index ccea671346c9..a7c12c94a5a6 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -306,7 +306,8 @@ struct nv50_gpuobj_node { }; int -nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) +nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan, + u32 size, u32 align) { struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -335,7 +336,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER)) flags |= NV_MEM_ACCESS_SYS; - ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags, + ret = nouveau_vm_get(chan->vm, size, 12, flags, &node->chan_vma); if (ret) { vram->put(dev, &node->vram); diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c index 02c00bbeb9e5..5ebcd74244db 100644 --- a/drivers/gpu/drm/nouveau/nvc0_copy.c +++ b/drivers/gpu/drm/nouveau/nvc0_copy.c @@ -48,7 +48,7 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine) struct nouveau_gpuobj *ctx = NULL; int ret; - ret = nouveau_gpuobj_new(dev, NULL, 256, 256, + ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER | NVOBJ_FLAG_ZERO_ALLOC, &ctx); if (ret) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index 68b25ca4015c..56aa92fddc05 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -157,23 +157,23 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) int i = 0, gpc, tp, ret; u32 magic; - ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM, + ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, &grch->unk408004); if (ret) return ret; - ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM, + ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM, &grch->unk40800c); if (ret) return ret; - ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, + ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096, NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER, &grch->unk418810); if (ret) return ret; - ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM, + ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM, &grch->mmio); if (ret) return ret; @@ -235,7 +235,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine) return -ENOMEM; chan->engctx[NVOBJ_ENGINE_GR] = grch; - ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, + ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256, NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC, &grch->grctx); if (ret) -- cgit v1.2.3 From 111af5c100fa0c0b94301f719dd22dab87f5d0a4 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Jun 2011 14:55:39 +1000 Subject: drm/nouveau: skip move_notify() if bo does not have a vma attached Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 633f724b6baf..12d264b57e1d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -842,13 +842,11 @@ out: static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { - struct drm_nouveau_private *dev_priv = 
nouveau_bdev(bo->bdev); struct nouveau_mem *node = new_mem->mm_node; struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_vma *vma = &nvbo->vma; - struct nouveau_vm *vm = vma->vm; - if (dev_priv->card_type < NV_50) + if (!vma->vm) return; switch (new_mem->mem_type) { @@ -856,7 +854,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) nouveau_vm_map(vma, node); break; case TTM_PL_TT: - if (vma->node->type != vm->spg_shift) { + if (vma->node->type != vma->vm->spg_shift) { nouveau_vm_unmap(vma); vma = &node->tmp_vma; } -- cgit v1.2.3 From f91bac5bf694e8060b7473fb0aefb8de09aa9595 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Jun 2011 14:15:46 +1000 Subject: drm/nouveau: store bo's page size in nouveau_bo Was previously assuming a page size of 4KiB unless a VMA was present to override it. Eventually, a buffer won't necessarily have a VMA at all at some stages of its life, so we need to store this info elsewhere. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 27 ++++++++++++--------------- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_mem.c | 6 ++---- 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 12d264b57e1d..71d01ce6598e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -58,7 +58,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) static void nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, - int *align, int *size, int *page_shift) + int *align, int *size) { struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); @@ -82,17 +82,8 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, } } } else { - if (likely(dev_priv->chan_vm)) { - if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024) - *page_shift = dev_priv->chan_vm->lpg_shift; - else - *page_shift = dev_priv->chan_vm->spg_shift; - } else { - *page_shift = 12; - } - - *size = roundup(*size, (1 << *page_shift)); - *align = max((1 << *page_shift), *align); + *size = roundup(*size, (1 << nvbo->page_shift)); + *align = max((1 << nvbo->page_shift), *align); } *size = roundup(*size, PAGE_SIZE); @@ -105,7 +96,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_bo *nvbo; - int ret = 0, page_shift = 0; + int ret; nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); if (!nvbo) @@ -116,11 +107,17 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, nvbo->tile_flags = tile_flags; nvbo->bo.bdev = &dev_priv->ttm.bdev; - nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift); + nvbo->page_shift = 12; + if (dev_priv->bar1_vm) { + if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) + nvbo->page_shift = dev_priv->bar1_vm->lpg_shift; + } + + nouveau_bo_fixup_align(nvbo, flags, &align, &size); align >>= PAGE_SHIFT; if (dev_priv->chan_vm) { - ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift, + ret = nouveau_vm_get(dev_priv->chan_vm, size, nvbo->page_shift, NV_MEM_ACCESS_RW, &nvbo->vma); if (ret) { kfree(nvbo); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 0f5396602afb..1439188c3422 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -116,6 +116,7 @@ struct nouveau_bo { struct nouveau_channel *channel; struct nouveau_vma vma; + unsigned page_shift; uint32_t tile_mode; 
uint32_t tile_flags; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 5ee14d216ce8..f55b51be1bf1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -794,7 +794,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, int ret; if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) - size_nc = 1 << nvbo->vma.node->type; + size_nc = 1 << nvbo->page_shift; ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, mem->page_alignment << PAGE_SHIFT, size_nc, @@ -804,9 +804,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, return (ret == -ENOSPC) ? 0 : ret; } - node->page_shift = 12; - if (nvbo->vma.node) - node->page_shift = nvbo->vma.node->type; + node->page_shift = nvbo->page_shift; mem->mm_node = node; mem->start = node->offset >> PAGE_SHIFT; -- cgit v1.2.3 From d2f96666c56a501c5b74c645d81918b7805d46ce Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Jun 2011 20:54:42 +1000 Subject: drm/nouveau: create temp vmas for both src and dst of bo moves Greatly simplifies a number of things, particularly once per-client GPU address spaces are involved. May add this back later once I know what things'll look like. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 116 +++++++++++++--------------------- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 +- drivers/gpu/drm/nouveau/nouveau_mem.c | 53 ++++++---------- 3 files changed, 64 insertions(+), 107 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 71d01ce6598e..a04998492bb9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -496,19 +496,12 @@ static int nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) { - struct nouveau_mem *old_node = old_mem->mm_node; - struct nouveau_mem *new_node = new_mem->mm_node; - struct nouveau_bo *nvbo = nouveau_bo(bo); + struct nouveau_mem *node = old_mem->mm_node; + u64 src_offset = node->vma[0].offset; + u64 dst_offset = node->vma[1].offset; u32 page_count = new_mem->num_pages; - u64 src_offset, dst_offset; int ret; - src_offset = old_node->tmp_vma.offset; - if (new_node->tmp_vma.node) - dst_offset = new_node->tmp_vma.offset; - else - dst_offset = nvbo->vma.offset; - page_count = new_mem->num_pages; while (page_count) { int line_count = (page_count > 2047) ? 
2047 : page_count; @@ -542,19 +535,13 @@ static int nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) { - struct nouveau_mem *old_node = old_mem->mm_node; - struct nouveau_mem *new_node = new_mem->mm_node; + struct nouveau_mem *node = old_mem->mm_node; struct nouveau_bo *nvbo = nouveau_bo(bo); u64 length = (new_mem->num_pages << PAGE_SHIFT); - u64 src_offset, dst_offset; + u64 src_offset = node->vma[0].offset; + u64 dst_offset = node->vma[1].offset; int ret; - src_offset = old_node->tmp_vma.offset; - if (new_node->tmp_vma.node) - dst_offset = new_node->tmp_vma.offset; - else - dst_offset = nvbo->vma.offset; - while (length) { u32 amount, stride, height; @@ -689,6 +676,27 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, return 0; } +static int +nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo, + struct ttm_mem_reg *mem, struct nouveau_vma *vma) +{ + struct nouveau_mem *node = mem->mm_node; + int ret; + + ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT, + node->page_shift, NV_MEM_ACCESS_RO, vma); + if (ret) + return ret; + + if (mem->mem_type == TTM_PL_VRAM) + nouveau_vm_map(vma, node); + else + nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, + node, node->pages); + + return 0; +} + static int nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, bool no_wait_reserve, bool no_wait_gpu, @@ -706,31 +714,20 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); } - /* create temporary vma for old memory, this will get cleaned - * up after ttm destroys the ttm_mem_reg + /* create temporary vmas for the transfer and attach them to the + * old nouveau_mem node, these will get cleaned up after ttm has + * destroyed the ttm_mem_reg */ if (dev_priv->card_type >= NV_50) { struct nouveau_mem *node = old_mem->mm_node; - if (!node->tmp_vma.node) { - u32 page_shift = nvbo->vma.node->type; - if (old_mem->mem_type == TTM_PL_TT) - page_shift = nvbo->vma.vm->spg_shift; - - ret = nouveau_vm_get(chan->vm, - old_mem->num_pages << PAGE_SHIFT, - page_shift, NV_MEM_ACCESS_RO, - &node->tmp_vma); - if (ret) - goto out; - } - if (old_mem->mem_type == TTM_PL_VRAM) - nouveau_vm_map(&node->tmp_vma, node); - else { - nouveau_vm_map_sg(&node->tmp_vma, 0, - old_mem->num_pages << PAGE_SHIFT, - node, node->pages); - } + ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]); + if (ret) + goto out; + + ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]); + if (ret) + goto out; } if (dev_priv->card_type < NV_50) @@ -757,7 +754,6 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; struct ttm_placement placement; struct ttm_mem_reg tmp_mem; @@ -777,23 +773,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) goto out; - if (dev_priv->card_type >= NV_50) { - struct nouveau_bo *nvbo = nouveau_bo(bo); - struct nouveau_mem *node = tmp_mem.mm_node; - struct nouveau_vma *vma = &nvbo->vma; - if (vma->node->type != vma->vm->spg_shift) - vma = &node->tmp_vma; - nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT, - node, node->pages); - } - ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, 
no_wait_gpu, &tmp_mem); - - if (dev_priv->card_type >= NV_50) { - struct nouveau_bo *nvbo = nouveau_bo(bo); - nouveau_vm_unmap(&nvbo->vma); - } - if (ret) goto out; @@ -846,21 +826,15 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) if (!vma->vm) return; - switch (new_mem->mem_type) { - case TTM_PL_VRAM: - nouveau_vm_map(vma, node); - break; - case TTM_PL_TT: - if (vma->node->type != vma->vm->spg_shift) { - nouveau_vm_unmap(vma); - vma = &node->tmp_vma; - } - nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT, - node, node->pages); - break; - default: + if (new_mem->mem_type == TTM_PL_VRAM) { + nouveau_vm_map(&nvbo->vma, new_mem->mm_node); + } else + if (new_mem->mem_type == TTM_PL_TT && + nvbo->page_shift == nvbo->vma.vm->spg_shift) { + nouveau_vm_map_sg(&nvbo->vma, 0, new_mem-> + num_pages << PAGE_SHIFT, node, node->pages); + } else { nouveau_vm_unmap(&nvbo->vma); - break; } } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 1439188c3422..d7083d5ffd02 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -77,7 +77,7 @@ struct nouveau_mem { struct drm_device *dev; struct nouveau_vma bar_vma; - struct nouveau_vma tmp_vma; + struct nouveau_vma vma[2]; u8 page_shift; struct drm_mm_node *tag; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index f55b51be1bf1..9d9605644175 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -762,20 +762,29 @@ nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) return 0; } +static inline void +nouveau_mem_node_cleanup(struct nouveau_mem *node) +{ + if (node->vma[0].node) { + nouveau_vm_unmap(&node->vma[0]); + nouveau_vm_put(&node->vma[0]); + } + + if (node->vma[1].node) { + nouveau_vm_unmap(&node->vma[1]); + nouveau_vm_put(&node->vma[1]); + } +} + static void nouveau_vram_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); struct nouveau_vram_engine *vram = &dev_priv->engine.vram; - struct nouveau_mem *node = mem->mm_node; struct drm_device *dev = dev_priv->dev; - if (node->tmp_vma.node) { - nouveau_vm_unmap(&node->tmp_vma); - nouveau_vm_put(&node->tmp_vma); - } - + nouveau_mem_node_cleanup(mem->mm_node); vram->put(dev, (struct nouveau_mem **)&mem->mm_node); } @@ -860,15 +869,9 @@ static void nouveau_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { - struct nouveau_mem *node = mem->mm_node; - - if (node->tmp_vma.node) { - nouveau_vm_unmap(&node->tmp_vma); - nouveau_vm_put(&node->tmp_vma); - } - + nouveau_mem_node_cleanup(mem->mm_node); mem->mm_node = NULL; - kfree(node); + kfree(mem->mm_node); } static int @@ -878,11 +881,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); - struct nouveau_bo *nvbo = nouveau_bo(bo); - struct nouveau_vma *vma = &nvbo->vma; - struct nouveau_vm *vm = vma->vm; struct nouveau_mem *node; - int ret; if (unlikely((mem->num_pages << PAGE_SHIFT) >= dev_priv->gart_info.aper_size)) @@ -891,24 +890,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; + node->page_shift = 12; - /* This node must be for evicting large-paged VRAM - * to system memory. 
Due to a nv50 limitation of - * not being able to mix large/small pages within - * the same PDE, we need to create a temporary - * small-paged VMA for the eviction. - */ - if (vma->node->type != vm->spg_shift) { - ret = nouveau_vm_get(vm, (u64)vma->node->length << 12, - vm->spg_shift, NV_MEM_ACCESS_RW, - &node->tmp_vma); - if (ret) { - kfree(node); - return ret; - } - } - - node->page_shift = nvbo->vma.node->type; mem->mm_node = node; mem->start = 0; return 0; -- cgit v1.2.3 From 07533ea549e725360209f958bb916085f18ff5b9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 11:02:38 +1000 Subject: drm/nouveau: convert some bo.offset use to vma.offset Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 2 -- drivers/gpu/drm/nouveau/nouveau_channel.c | 2 +- drivers/gpu/drm/nouveau/nouveau_dma.c | 2 +- drivers/gpu/drm/nouveau/nouveau_gem.c | 6 +++++- drivers/gpu/drm/nouveau/nvc0_fbcon.c | 4 ++-- 5 files changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index a04998492bb9..46c0914991dc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -138,8 +138,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, } nvbo->channel = NULL; - if (nvbo->vma.node) - nvbo->bo.offset = nvbo->vma.offset; *pnvbo = nvbo; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 23bd0c4f70b1..d199097b8918 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -64,7 +64,7 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan) NV_MEM_TARGET_VM, &chan->pushbuf); } - chan->pushbuf_base = chan->pushbuf_bo->bo.offset; + chan->pushbuf_base = chan->pushbuf_bo->vma.offset; } else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 568caedd7216..4b294295f5e0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -167,7 +167,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, int delta, int length) { struct nouveau_bo *pb = chan->pushbuf_bo; - uint64_t offset = bo->bo.offset + delta; + uint64_t offset = bo->vma.offset + delta; int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; BUG_ON(chan->dma.ib_free < 1); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index da3c8bbd9d8f..d314f34f39d6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -125,6 +125,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, static int nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) { + struct drm_nouveau_private *dev_priv = gem->dev->dev_private; struct nouveau_bo *nvbo = nouveau_gem_object(gem); if (nvbo->bo.mem.mem_type == TTM_PL_TT) @@ -133,7 +134,10 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; - rep->offset = nvbo->bo.offset; + if (dev_priv->card_type < NV_50) + rep->offset = nvbo->bo.offset; + else + rep->offset = nvbo->vma.offset; rep->map_handle = nvbo->bo.addr_space_offset; rep->tile_mode = nvbo->tile_mode; rep->tile_flags = nvbo->tile_flags; diff --git 
a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index fa5d4c234383..4606398858ed 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -203,8 +203,8 @@ nvc0_fbcon_accel_init(struct fb_info *info) BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); OUT_RING (chan, 0x0000902d); BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); - OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset)); - OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset)); + OUT_RING (chan, upper_32_bits(chan->notifier_bo->vma.offset)); + OUT_RING (chan, lower_32_bits(chan->notifier_bo->vma.offset)); BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1); OUT_RING (chan, 0); BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1); -- cgit v1.2.3 From 180cc30637b47dafa26e3202a41964b5ebdab365 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 11:24:14 +1000 Subject: drm/nouveau: convert bo.mem.start usage to bo.offset Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 1 - drivers/gpu/drm/nouveau/nouveau_channel.c | 2 +- drivers/gpu/drm/nouveau/nouveau_drv.c | 2 +- drivers/gpu/drm/nouveau/nouveau_notifier.c | 2 +- drivers/gpu/drm/nouveau/nouveau_object.c | 2 +- drivers/gpu/drm/nouveau/nv50_crtc.c | 6 +++--- drivers/gpu/drm/nouveau/nv50_display.c | 2 +- drivers/gpu/drm/nouveau/nv50_evo.c | 5 ++--- 8 files changed, 10 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 46c0914991dc..ae1f0e46e481 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -433,7 +433,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, TTM_MEMTYPE_FLAG_CMA; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - man->gpu_offset = dev_priv->gart_info.aper_base; break; default: NV_ERROR(dev, "Unknown GART type: %d\n", diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index d199097b8918..d0e458336c75 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -54,7 +54,7 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan) * buffer resides, userspace can submit its own push buffers from * anywhere within the same memtype. 
*/ - chan->pushbuf_base = chan->pushbuf_bo->bo.mem.start << PAGE_SHIFT; + chan->pushbuf_base = chan->pushbuf_bo->bo.offset; if (dev_priv->card_type >= NV_50) { if (dev_priv->card_type < NV_C0) { ret = nouveau_gpuobj_dma_new(chan, diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 4e481c39f5a4..8256370e5938 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -358,7 +358,7 @@ nouveau_pci_resume(struct pci_dev *pdev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT; + u32 offset = nv_crtc->cursor.nvbo->bo.offset; nv_crtc->cursor.set_offset(nv_crtc, offset); nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 29190e845fd7..81b54e0bb822 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -122,7 +122,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, target = NV_MEM_TARGET_VRAM; else target = NV_MEM_TARGET_GART; - offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; + offset = chan->notifier_bo->bo.offset; } else { target = NV_MEM_TARGET_VM; offset = chan->notifier_bo->vma.offset; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index ab4be9ca43cc..37e6ca8990c6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -768,7 +768,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_gpuobj *sem = NULL; struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i]; - u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT; + u64 offset = dispc->sem.bo->bo.offset; ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff, NV_MEM_ACCESS_RW, diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index ebabacf38da9..115b780247ee 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -104,7 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) OUT_RING(evo, nv_crtc->lut.depth == 8 ? 
NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON); - OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8); + OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8); if (dev_priv->chipset != 0x50) { BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); OUT_RING(evo, NvEvoVRAM); @@ -372,7 +372,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, nouveau_bo_unmap(cursor); - nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT); + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset); nv_crtc->cursor.show(nv_crtc, true); out: @@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, } } - nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT; + nv_crtc->fb.offset = fb->nvbo->bo.offset; nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 08da478ba544..93857e6c662d 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -484,7 +484,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, OUT_RING (evo, 0x00000000); OUT_RING (evo, 0x00000000); BEGIN_RING(evo, 0, 0x0800, 5); - OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8); + OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8); OUT_RING (evo, 0); OUT_RING (evo, (fb->height << 16) | fb->width); OUT_RING (evo, nv_fb->r_pitch); diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index 18c61929c450..9bba97f15b04 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -154,7 +154,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo) { struct drm_device *dev = evo->dev; int id = evo->id, ret, i; - u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT; + u64 pushbuf = evo->pushbuf_bo->bo.offset; u32 tmp; tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); @@ -335,13 +335,12 @@ nv50_evo_create(struct drm_device *dev) ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM, 0, 0x0000, &dispc->sem.bo); if (!ret) { - offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT; - ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); if (!ret) ret = nouveau_bo_map(dispc->sem.bo); if (ret) nouveau_bo_ref(NULL, &dispc->sem.bo); + offset = dispc->sem.bo->bo.offset; } if (ret) -- cgit v1.2.3 From a3fcd0a975c4ae272c3e5db0632479633cef19ef Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 11:12:39 +1000 Subject: drm/nv50-nvc0: completely disable relocs GPU virtual addresses are constant now so this should never be getting hit anyway and userspace shouldn't break from them being ignored. This is being done in preference to teaching the code how to deal with BOs that exist at different virtual addresses within separate VMs. 
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_gem.c | 37 +++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index d314f34f39d6..2d1de2427bc3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -333,6 +333,7 @@ static int validate_list(struct nouveau_channel *chan, struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) { + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct drm_nouveau_gem_pushbuf_bo __user *upbbo = (void __force __user *)(uintptr_t)user_pbbo_ptr; struct drm_device *dev = chan->dev; @@ -371,24 +372,26 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, return ret; } - if (nvbo->bo.offset == b->presumed.offset && - ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && - b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || - (nvbo->bo.mem.mem_type == TTM_PL_TT && - b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) - continue; + if (dev_priv->card_type < NV_50) { + if (nvbo->bo.offset == b->presumed.offset && + ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && + b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || + (nvbo->bo.mem.mem_type == TTM_PL_TT && + b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) + continue; - if (nvbo->bo.mem.mem_type == TTM_PL_TT) - b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; - else - b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; - b->presumed.offset = nvbo->bo.offset; - b->presumed.valid = 0; - relocs++; - - if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, - &b->presumed, sizeof(b->presumed))) - return -EFAULT; + if (nvbo->bo.mem.mem_type == TTM_PL_TT) + b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; + else + b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; + b->presumed.offset = nvbo->bo.offset; + b->presumed.valid = 0; + relocs++; + + if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, + &b->presumed, sizeof(b->presumed))) + return -EFAULT; + } } return relocs; -- cgit v1.2.3 From fd2871af3d2dad4e07df84941128b0813b5dd34b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Jun 2011 14:07:04 +1000 Subject: drm/nouveau: initial changes to support multiple VMAs per buffer object Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 98 +++++++++++++++++++++++++---------- drivers/gpu/drm/nouveau/nouveau_drv.h | 7 +++ drivers/gpu/drm/nouveau/nouveau_vm.h | 1 + 3 files changed, 80 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index ae1f0e46e481..36f3137b3ae2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -49,10 +49,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) DRM_ERROR("bo %p still attached to GEM object\n", bo); nv10_mem_put_tile_region(dev, nvbo->tile, NULL); - if (nvbo->vma.node) { - nouveau_vm_unmap(&nvbo->vma); - nouveau_vm_put(&nvbo->vma); - } + nouveau_bo_vma_del(nvbo, &nvbo->vma); kfree(nvbo); } @@ -103,6 +100,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, return -ENOMEM; INIT_LIST_HEAD(&nvbo->head); INIT_LIST_HEAD(&nvbo->entry); + INIT_LIST_HEAD(&nvbo->vma_list); nvbo->tile_mode = tile_mode; nvbo->tile_flags = tile_flags; nvbo->bo.bdev = &dev_priv->ttm.bdev; @@ -114,24 +112,22 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, } nouveau_bo_fixup_align(nvbo, flags, &align, &size); - align >>= PAGE_SHIFT; + 
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; + nouveau_bo_placement_set(nvbo, flags, 0); if (dev_priv->chan_vm) { - ret = nouveau_vm_get(dev_priv->chan_vm, size, nvbo->page_shift, - NV_MEM_ACCESS_RW, &nvbo->vma); + ret = nouveau_bo_vma_add(nvbo, dev_priv->chan_vm, &nvbo->vma); if (ret) { kfree(nvbo); return ret; } } - nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; - nouveau_bo_placement_set(nvbo, flags, 0); - nvbo->channel = chan; ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, - ttm_bo_type_device, &nvbo->placement, align, 0, - false, NULL, size, nouveau_bo_del_ttm); + ttm_bo_type_device, &nvbo->placement, + align >> PAGE_SHIFT, 0, false, NULL, size, + nouveau_bo_del_ttm); if (ret) { /* ttm will call nouveau_bo_del_ttm if it fails.. */ return ret; @@ -818,20 +814,20 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct nouveau_mem *node = new_mem->mm_node; struct nouveau_bo *nvbo = nouveau_bo(bo); - struct nouveau_vma *vma = &nvbo->vma; - - if (!vma->vm) - return; - - if (new_mem->mem_type == TTM_PL_VRAM) { - nouveau_vm_map(&nvbo->vma, new_mem->mm_node); - } else - if (new_mem->mem_type == TTM_PL_TT && - nvbo->page_shift == nvbo->vma.vm->spg_shift) { - nouveau_vm_map_sg(&nvbo->vma, 0, new_mem-> - num_pages << PAGE_SHIFT, node, node->pages); - } else { - nouveau_vm_unmap(&nvbo->vma); + struct nouveau_vma *vma; + + list_for_each_entry(vma, &nvbo->vma_list, head) { + if (new_mem->mem_type == TTM_PL_VRAM) { + nouveau_vm_map(vma, new_mem->mm_node); + } else + if (new_mem->mem_type == TTM_PL_TT && + nvbo->page_shift == vma->vm->spg_shift) { + nouveau_vm_map_sg(vma, 0, new_mem-> + num_pages << PAGE_SHIFT, + node, node->pages); + } else { + nouveau_vm_unmap(vma); + } } } @@ -1077,3 +1073,53 @@ struct ttm_bo_driver nouveau_bo_driver = { .io_mem_free = &nouveau_ttm_io_mem_free, }; +struct nouveau_vma * +nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm) +{ + struct nouveau_vma *vma; + list_for_each_entry(vma, &nvbo->vma_list, head) { + if (vma->vm == vm) + return vma; + } + + return NULL; +} + +int +nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm, + struct nouveau_vma *vma) +{ + const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; + struct nouveau_mem *node = nvbo->bo.mem.mm_node; + int ret; + + ret = nouveau_vm_get(vm, size, nvbo->page_shift, + NV_MEM_ACCESS_RW, vma); + if (ret) + return ret; + + if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) + nouveau_vm_map(vma, nvbo->bo.mem.mm_node); + else + if (nvbo->bo.mem.mem_type == TTM_PL_TT) + nouveau_vm_map_sg(vma, 0, size, node, node->pages); + + list_add_tail(&vma->head, &nvbo->vma_list); + return 0; +} + +void +nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma) +{ + if (vma->node) { + if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) { + spin_lock(&nvbo->bo.bdev->fence_lock); + ttm_bo_wait(&nvbo->bo, false, false, false); + spin_unlock(&nvbo->bo.bdev->fence_lock); + nouveau_vm_unmap(vma); + } + + nouveau_vm_put(vma); + list_del(&vma->head); + } +} diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d7083d5ffd02..23be8cb8ff54 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -116,6 +116,7 @@ struct nouveau_bo { struct nouveau_channel *channel; struct nouveau_vma vma; + struct list_head vma_list; unsigned page_shift; uint32_t tile_mode; @@ -1283,6 +1284,12 @@ extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); extern int nouveau_bo_validate(struct 
nouveau_bo *, bool interruptible, bool no_wait_reserve, bool no_wait_gpu); +extern struct nouveau_vma * +nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *); +extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *, + struct nouveau_vma *); +extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *); + /* nouveau_fence.c */ struct nouveau_fence; extern int nouveau_fence_init(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h index c48a9fc2b47b..07d07ff9e28b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.h +++ b/drivers/gpu/drm/nouveau/nouveau_vm.h @@ -41,6 +41,7 @@ struct nouveau_vm_pgd { }; struct nouveau_vma { + struct list_head head; struct nouveau_vm *vm; struct nouveau_mm_node *node; u64 offset; -- cgit v1.2.3 From 45143cb53c793b11b875d555eb96ca32bcbea1c7 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 13:12:44 +1000 Subject: drm/nv50-nvc0: explicitly map fbcon fb into channel vm Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fb.h | 1 + drivers/gpu/drm/nouveau/nouveau_fbcon.c | 11 +++++++++++ drivers/gpu/drm/nouveau/nv50_fbcon.c | 10 +++++----- drivers/gpu/drm/nouveau/nvc0_fbcon.c | 10 +++++----- 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h index a3a88ad00f86..95c843e684bb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fb.h +++ b/drivers/gpu/drm/nouveau/nouveau_fb.h @@ -30,6 +30,7 @@ struct nouveau_framebuffer { struct drm_framebuffer base; struct nouveau_bo *nvbo; + struct nouveau_vma vma; u32 r_dma; u32 r_format; u32 r_pitch; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 59ad9600e555..14a8627efe4d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -279,6 +279,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, struct fb_info *info; struct drm_framebuffer *fb; struct nouveau_framebuffer *nouveau_fb; + struct nouveau_channel *chan; struct nouveau_bo *nvbo; struct drm_mode_fb_cmd mode_cmd; struct pci_dev *pdev = dev->pdev; @@ -318,6 +319,15 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, goto out; } + chan = nouveau_nofbaccel ? 
NULL : dev_priv->channel; + if (chan && dev_priv->card_type >= NV_50) { + ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma); + if (ret) { + NV_ERROR(dev, "failed to map fb into chan: %d\n", ret); + chan = NULL; + } + } + mutex_lock(&dev->struct_mutex); info = framebuffer_alloc(0, device); @@ -448,6 +458,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) if (nouveau_fb->nvbo) { nouveau_bo_unmap(nouveau_fb->nvbo); + nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma); drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); nouveau_fb->nvbo = NULL; } diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 791ded1c5c6d..dc75a7206524 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -159,7 +159,7 @@ nv50_fbcon_accel_init(struct fb_info *info) struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; - struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; + struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; int ret, format; switch (info->var.bits_per_pixel) { @@ -247,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.line_length); OUT_RING(chan, info->var.xres_virtual); OUT_RING(chan, info->var.yres_virtual); - OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); - OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); + OUT_RING(chan, upper_32_bits(fb->vma.offset)); + OUT_RING(chan, lower_32_bits(fb->vma.offset)); BEGIN_RING(chan, NvSub2D, 0x0230, 2); OUT_RING(chan, format); OUT_RING(chan, 1); @@ -256,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.line_length); OUT_RING(chan, info->var.xres_virtual); OUT_RING(chan, info->var.yres_virtual); - OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); - OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); + OUT_RING(chan, upper_32_bits(fb->vma.offset)); + OUT_RING(chan, lower_32_bits(fb->vma.offset)); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index 4606398858ed..5e64a9bcd318 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -159,7 +159,7 @@ nvc0_fbcon_accel_init(struct fb_info *info) struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; - struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; + struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; int ret, format; ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d); @@ -249,8 +249,8 @@ nvc0_fbcon_accel_init(struct fb_info *info) OUT_RING (chan, info->fix.line_length); OUT_RING (chan, info->var.xres_virtual); OUT_RING (chan, info->var.yres_virtual); - OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); - OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); + OUT_RING (chan, upper_32_bits(fb->vma.offset)); + OUT_RING (chan, lower_32_bits(fb->vma.offset)); BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10); OUT_RING (chan, format); OUT_RING (chan, 1); @@ -260,8 +260,8 @@ nvc0_fbcon_accel_init(struct fb_info *info) OUT_RING (chan, info->fix.line_length); OUT_RING (chan, info->var.xres_virtual); OUT_RING (chan, info->var.yres_virtual); - OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); - OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); + OUT_RING (chan, upper_32_bits(fb->vma.offset)); + OUT_RING (chan, lower_32_bits(fb->vma.offset)); FIRE_RING (chan); return 0; -- 
cgit v1.2.3 From 0b7187335fc2f38691cc169b202ff436abbefd68 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 13:17:45 +1000 Subject: drm/nv50-nvc0: explicitly map notifier bo into channel vm Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_notifier.c | 14 ++++++++++++-- drivers/gpu/drm/nouveau/nvc0_fbcon.c | 4 ++-- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 23be8cb8ff54..78d6899a8388 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -249,6 +249,7 @@ struct nouveau_channel { /* Notifier memory */ struct nouveau_bo *notifier_bo; + struct nouveau_vma notifier_vma; struct drm_mm notifier_heap; /* PFIFO context */ diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 81b54e0bb822..6abdbe6530a7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -34,6 +34,7 @@ int nouveau_notifier_init_channel(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_bo *ntfy = NULL; uint32_t flags, ttmpl; int ret; @@ -58,14 +59,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) if (ret) goto out_err; + if (dev_priv->card_type >= NV_50) { + ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma); + if (ret) + goto out_err; + } + ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); if (ret) goto out_err; chan->notifier_bo = ntfy; out_err: - if (ret) + if (ret) { + nouveau_bo_vma_del(ntfy, &chan->notifier_vma); drm_gem_object_unreference_unlocked(ntfy->gem); + } return ret; } @@ -78,6 +87,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) if (!chan->notifier_bo) return; + nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma); nouveau_bo_unmap(chan->notifier_bo); mutex_lock(&dev->struct_mutex); nouveau_bo_unpin(chan->notifier_bo); @@ -125,7 +135,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, offset = chan->notifier_bo->bo.offset; } else { target = NV_MEM_TARGET_VM; - offset = chan->notifier_bo->vma.offset; + offset = chan->notifier_vma.offset; } offset += mem->start; diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index 5e64a9bcd318..a495e48197ca 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -203,8 +203,8 @@ nvc0_fbcon_accel_init(struct fb_info *info) BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); OUT_RING (chan, 0x0000902d); BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); - OUT_RING (chan, upper_32_bits(chan->notifier_bo->vma.offset)); - OUT_RING (chan, lower_32_bits(chan->notifier_bo->vma.offset)); + OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset)); + OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset)); BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1); OUT_RING (chan, 0); BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1); -- cgit v1.2.3 From ce163f6967121d77e3983aa06d416afacf3070c2 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 13:20:43 +1000 Subject: drm/nv50-nvc0: explicitly map pushbuf bo into channel vm Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 9 ++++++++- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c 
b/drivers/gpu/drm/nouveau/nouveau_channel.c index d0e458336c75..c03fa7b6944e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -56,6 +56,11 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan) */ chan->pushbuf_base = chan->pushbuf_bo->bo.offset; if (dev_priv->card_type >= NV_50) { + ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm, + &chan->pushbuf_vma); + if (ret) + goto out; + if (dev_priv->card_type < NV_C0) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, @@ -64,7 +69,7 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan) NV_MEM_TARGET_VM, &chan->pushbuf); } - chan->pushbuf_base = chan->pushbuf_bo->vma.offset; + chan->pushbuf_base = chan->pushbuf_vma.offset; } else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, @@ -95,6 +100,7 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan) out: if (ret) { NV_ERROR(dev, "error initialising pushbuf: %d\n", ret); + nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma); nouveau_gpuobj_ref(NULL, &chan->pushbuf); if (chan->pushbuf_bo) { nouveau_bo_unmap(chan->pushbuf_bo); @@ -295,6 +301,7 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) /* destroy any resources the channel owned */ nouveau_gpuobj_ref(NULL, &chan->pushbuf); if (chan->pushbuf_bo) { + nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma); nouveau_bo_unmap(chan->pushbuf_bo); nouveau_bo_unpin(chan->pushbuf_bo); nouveau_bo_ref(NULL, &chan->pushbuf_bo); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 78d6899a8388..06b90dcacc53 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -245,6 +245,7 @@ struct nouveau_channel { /* DMA push buffer */ struct nouveau_gpuobj *pushbuf; struct nouveau_bo *pushbuf_bo; + struct nouveau_vma pushbuf_vma; uint32_t pushbuf_base; /* Notifier memory */ -- cgit v1.2.3 From d02836b4f5c24d2a38b3bdc10f05251e1f6e111d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 15:21:23 +1000 Subject: drm/nv84-nvc0: explicitly map semaphore buffer into channel vm Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_fence.c | 29 +++++++++++++---------------- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 06b90dcacc53..7e12d4d545b6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -240,6 +240,7 @@ struct nouveau_channel { uint32_t sequence; uint32_t sequence_ack; atomic_t last_sequence_irq; + struct nouveau_vma vma; } fence; /* DMA push buffer */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 7347075ca5b8..9d5c57778453 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -336,6 +336,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema) { struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct nouveau_fence *fence = NULL; + u64 offset = chan->fence.vma.offset + sema->mem->start; int ret; if (dev_priv->chipset < 0x84) { @@ -345,13 +346,10 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema) BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3); OUT_RING (chan, NvSema); - OUT_RING (chan, sema->mem->start); + OUT_RING (chan, 
offset); OUT_RING (chan, 1); } else if (dev_priv->chipset < 0xc0) { - struct nouveau_vma *vma = &dev_priv->fence.bo->vma; - u64 offset = vma->offset + sema->mem->start; - ret = RING_SPACE(chan, 7); if (ret) return ret; @@ -364,9 +362,6 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema) OUT_RING (chan, 1); OUT_RING (chan, 1); /* ACQUIRE_EQ */ } else { - struct nouveau_vma *vma = &dev_priv->fence.bo->vma; - u64 offset = vma->offset + sema->mem->start; - ret = RING_SPACE(chan, 5); if (ret) return ret; @@ -394,6 +389,7 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema) { struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct nouveau_fence *fence = NULL; + u64 offset = chan->fence.vma.offset + sema->mem->start; int ret; if (dev_priv->chipset < 0x84) { @@ -403,14 +399,11 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema) BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2); OUT_RING (chan, NvSema); - OUT_RING (chan, sema->mem->start); + OUT_RING (chan, offset); BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1); OUT_RING (chan, 1); } else if (dev_priv->chipset < 0xc0) { - struct nouveau_vma *vma = &dev_priv->fence.bo->vma; - u64 offset = vma->offset + sema->mem->start; - ret = RING_SPACE(chan, 7); if (ret) return ret; @@ -423,9 +416,6 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema) OUT_RING (chan, 1); OUT_RING (chan, 2); /* RELEASE */ } else { - struct nouveau_vma *vma = &dev_priv->fence.bo->vma; - u64 offset = vma->offset + sema->mem->start; - ret = RING_SPACE(chan, 5); if (ret) return ret; @@ -540,6 +530,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) nouveau_gpuobj_ref(NULL, &obj); if (ret) return ret; + } else { + /* map fence bo into channel's vm */ + ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm, + &chan->fence.vma); + if (ret) + return ret; } INIT_LIST_HEAD(&chan->fence.pending); @@ -551,10 +547,10 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) void nouveau_fence_channel_fini(struct nouveau_channel *chan) { + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct nouveau_fence *tmp, *fence; spin_lock(&chan->fence.lock); - list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { fence->signalled = true; list_del(&fence->entry); @@ -564,8 +560,9 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan) kref_put(&fence->refcount, nouveau_fence_del); } - spin_unlock(&chan->fence.lock); + + nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma); } int -- cgit v1.2.3 From 9f9f51fcb92ba3c1f395e0908407c8c1f5305a31 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 13:23:47 +1000 Subject: drm/nv50-nvc0: lookup pushbuf virtual address on dma_push Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_dma.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 4b294295f5e0..00bc6eaad558 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -167,8 +167,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, int delta, int length) { struct nouveau_bo *pb = chan->pushbuf_bo; - uint64_t offset = bo->vma.offset + delta; + struct nouveau_vma *vma; int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; + u64 offset; + + vma = nouveau_bo_vma_find(bo, chan->vm); + BUG_ON(!vma); + offset = vma->offset + delta; 
BUG_ON(chan->dma.ib_free < 1); nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); -- cgit v1.2.3 From 3d483d575bfba52eae04bf2575239642c26c355a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 15:43:31 +1000 Subject: drm/nvc0: explicitly map PDISP semaphore buffer into each channel's vm Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_object.c | 16 ++++++++++++++++ drivers/gpu/drm/nouveau/nv50_display.c | 4 ++-- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 7e12d4d545b6..4a48d6c25f95 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -293,6 +293,7 @@ struct nouveau_channel { uint32_t sw_subchannel[8]; + struct nouveau_vma dispc_vma[2]; struct { struct nouveau_gpuobj *vblsem; uint32_t vblsem_head; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 37e6ca8990c6..115827133bb7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -717,6 +717,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst)); nv_wo32(chan->ramin, 0x0208, 0xffffffff); nv_wo32(chan->ramin, 0x020c, 0x000000ff); + + for (i = 0; i < 2; i++) { + struct nv50_display_crtc *dispc = + &nv50_display(dev)->crtc[i]; + + ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm, + &chan->dispc_vma[i]); + if (ret) + return ret; + } + return 0; } @@ -841,9 +852,14 @@ void nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; + int i; NV_DEBUG(dev, "ch%d\n", chan->id); + for (i = 0; i < 2; i++) { + struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i]; + nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]); + } nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_pd); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 93857e6c662d..db1a5f4b711d 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -415,8 +415,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* synchronise with the rendering channel, if necessary */ if (likely(chan)) { - u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset; - ret = RING_SPACE(chan, 10); if (ret) { WIND_RING(evo); @@ -438,6 +436,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, else OUT_RING (chan, chan->vram_handle); } else { + u64 offset = chan->dispc_vma[nv_crtc->index].offset; + offset += dispc->sem.offset; BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4); OUT_RING (chan, upper_32_bits(offset)); OUT_RING (chan, lower_32_bits(offset)); -- cgit v1.2.3 From e758a3111914af7ee4351be86f1ac0efe87ed06e Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 13:28:11 +1000 Subject: drm/nouveau: fixup gem_info ioctl to return client-specific bo virtual Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_gem.c | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 2d1de2427bc3..05ca72ed7e30 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -123,21 +123,28 @@ nouveau_gem_new(struct drm_device *dev, int size, int 
align, uint32_t domain, } static int -nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) +nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, + struct drm_nouveau_gem_info *rep) { - struct drm_nouveau_private *dev_priv = gem->dev->dev_private; + struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_vma *vma; if (nvbo->bo.mem.mem_type == TTM_PL_TT) rep->domain = NOUVEAU_GEM_DOMAIN_GART; else rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; + rep->offset = nvbo->bo.offset; + if (fpriv->vm) { + vma = nouveau_bo_vma_find(nvbo, fpriv->vm); + if (!vma) + return -EINVAL; + + rep->offset = vma->offset; + } + rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; - if (dev_priv->card_type < NV_50) - rep->offset = nvbo->bo.offset; - else - rep->offset = nvbo->vma.offset; rep->map_handle = nvbo->bo.addr_space_offset; rep->tile_mode = nvbo->tile_mode; rep->tile_flags = nvbo->tile_flags; @@ -167,14 +174,15 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, if (ret) return ret; - ret = nouveau_gem_info(nvbo->gem, &req->info); - if (ret) - goto out; - ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); + if (ret == 0) { + ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info); + if (ret) + drm_gem_handle_delete(file_priv, req->info.handle); + } + /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(nvbo->gem); -out: return ret; } @@ -800,7 +808,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data, if (!gem) return -ENOENT; - ret = nouveau_gem_info(gem, req); + ret = nouveau_gem_info(file_priv, gem, req); drm_gem_object_unreference_unlocked(gem); return ret; } -- cgit v1.2.3 From 7375c95b343aa575940704a38482a334ea87ac6c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 14:21:29 +1000 Subject: drm/nouveau: remove 'chan' argument from nouveau_bo_new Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 8 +++----- drivers/gpu/drm/nouveau/nouveau_channel.c | 2 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 7 +++---- drivers/gpu/drm/nouveau/nouveau_fence.c | 2 +- drivers/gpu/drm/nouveau/nouveau_gem.c | 2 +- drivers/gpu/drm/nouveau/nouveau_mem.c | 2 +- drivers/gpu/drm/nouveau/nv04_crtc.c | 2 +- drivers/gpu/drm/nouveau/nv50_crtc.c | 4 ++-- drivers/gpu/drm/nouveau/nv50_evo.c | 4 ++-- 9 files changed, 15 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 36f3137b3ae2..49af4072c0f6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -87,9 +87,9 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, } int -nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, - int size, int align, uint32_t flags, uint32_t tile_mode, - uint32_t tile_flags, struct nouveau_bo **pnvbo) +nouveau_bo_new(struct drm_device *dev, int size, int align, + uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, + struct nouveau_bo **pnvbo) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_bo *nvbo; @@ -123,7 +123,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, } } - nvbo->channel = chan; ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, ttm_bo_type_device, &nvbo->placement, align >> PAGE_SHIFT, 0, false, NULL, size, @@ -132,7 +131,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, /* ttm will call nouveau_bo_del_ttm if it 
fails.. */ return ret; } - nvbo->channel = NULL; *pnvbo = nvbo; return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index c03fa7b6944e..b0d753f45bbd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -38,7 +38,7 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan) int ret; /* allocate buffer object */ - ret = nouveau_bo_new(dev, NULL, 65536, 0, mem, 0, 0, &chan->pushbuf_bo); + ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo); if (ret) goto out; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 4a48d6c25f95..bdb682d613d3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1270,10 +1270,9 @@ extern int nv04_crtc_create(struct drm_device *, int index); /* nouveau_bo.c */ extern struct ttm_bo_driver nouveau_bo_driver; -extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *, - int size, int align, uint32_t flags, - uint32_t tile_mode, uint32_t tile_flags, - struct nouveau_bo **); +extern int nouveau_bo_new(struct drm_device *, int size, int align, + uint32_t flags, uint32_t tile_mode, + uint32_t tile_flags, struct nouveau_bo **); extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); extern int nouveau_bo_unpin(struct nouveau_bo *); extern int nouveau_bo_map(struct nouveau_bo *); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 9d5c57778453..8d02d875376d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -574,7 +574,7 @@ nouveau_fence_init(struct drm_device *dev) /* Create a shared VRAM heap for cross-channel sync. 
*/ if (USE_SEMA(dev)) { - ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM, 0, 0, &dev_priv->fence.bo); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 05ca72ed7e30..3e1c7010e076 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -96,7 +96,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) flags |= TTM_PL_FLAG_SYSTEM; - ret = nouveau_bo_new(dev, NULL, size, align, flags, tile_mode, + ret = nouveau_bo_new(dev, size, align, flags, tile_mode, tile_flags, pnvbo); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 9d9605644175..976887dc2bab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -479,7 +479,7 @@ nouveau_mem_vram_init(struct drm_device *dev) } if (dev_priv->card_type < NV_50) { - ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM, 0, 0, &dev_priv->vga_ram); if (ret == 0) ret = nouveau_bo_pin(dev_priv->vga_ram, diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index f1a3ae491995..118261d4927a 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -1035,7 +1035,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); - ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 0, 0x0000, &nv_crtc->cursor.nvbo); if (!ret) { ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 115b780247ee..46ad59ea2185 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -747,7 +747,7 @@ nv50_crtc_create(struct drm_device *dev, int index) } nv_crtc->lut.depth = 0; - ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM, 0, 0x0000, &nv_crtc->lut.nvbo); if (!ret) { ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); @@ -773,7 +773,7 @@ nv50_crtc_create(struct drm_device *dev, int index) drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); - ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 0, 0x0000, &nv_crtc->cursor.nvbo); if (!ret) { ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index 9bba97f15b04..c99d9751880c 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -117,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid, evo->user_get = 4; evo->user_put = 0; - ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, + ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, &evo->pushbuf_bo); if (ret == 0) ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); @@ -332,7 +332,7 @@ nv50_evo_create(struct drm_device *dev) if (ret) goto err; - ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM, 
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 0, 0x0000, &dispc->sem.bo); if (!ret) { ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); -- cgit v1.2.3 From 2fd3db6f1457050bdebf97e45147ce6827e1742a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 15:25:12 +1000 Subject: drm/nouveau: remove implicit mapping of every bo into chan_vm Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 10 +-------- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 - drivers/gpu/drm/nouveau/nouveau_gem.c | 42 ++++++++++++++++++++++++++++++++++- drivers/gpu/drm/nouveau/nouveau_vm.h | 1 + 4 files changed, 43 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 49af4072c0f6..890d50e4d682 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -49,7 +49,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) DRM_ERROR("bo %p still attached to GEM object\n", bo); nv10_mem_put_tile_region(dev, nvbo->tile, NULL); - nouveau_bo_vma_del(nvbo, &nvbo->vma); kfree(nvbo); } @@ -115,14 +114,6 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; nouveau_bo_placement_set(nvbo, flags, 0); - if (dev_priv->chan_vm) { - ret = nouveau_bo_vma_add(nvbo, dev_priv->chan_vm, &nvbo->vma); - if (ret) { - kfree(nvbo); - return ret; - } - } - ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, ttm_bo_type_device, &nvbo->placement, align >> PAGE_SHIFT, 0, false, NULL, size, @@ -1103,6 +1094,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm, nouveau_vm_map_sg(vma, 0, size, node, node->pages); list_add_tail(&vma->head, &nvbo->vma_list); + vma->refcount = 1; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index bdb682d613d3..bbea0452dca7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -115,7 +115,6 @@ struct nouveau_bo { struct nouveau_channel *channel; - struct nouveau_vma vma; struct list_head vma_list; unsigned page_shift; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 3e1c7010e076..022393777805 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -63,20 +63,60 @@ int nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) { struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); + struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_vma *vma; + int ret; if (!fpriv->vm) return 0; - return 0; + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + if (ret) + return ret; + + vma = nouveau_bo_vma_find(nvbo, fpriv->vm); + if (!vma) { + vma = kzalloc(sizeof(*vma), GFP_KERNEL); + if (!vma) { + ret = -ENOMEM; + goto out; + } + + ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma); + if (ret) { + kfree(vma); + goto out; + } + } else { + vma->refcount++; + } + +out: + ttm_bo_unreserve(&nvbo->bo); + return ret; } void nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) { struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); + struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_vma *vma; + int ret; if (!fpriv->vm) return; + + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + if (ret) + return; + + vma = nouveau_bo_vma_find(nvbo, fpriv->vm); + if (vma) { + if (--vma->refcount == 0) + nouveau_bo_vma_del(nvbo, vma); + } + 
ttm_bo_unreserve(&nvbo->bo); } int diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h index 07d07ff9e28b..579ca8cc223c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.h +++ b/drivers/gpu/drm/nouveau/nouveau_vm.h @@ -42,6 +42,7 @@ struct nouveau_vm_pgd { struct nouveau_vma { struct list_head head; + int refcount; struct nouveau_vm *vm; struct nouveau_mm_node *node; u64 offset; -- cgit v1.2.3 From e41f26e7d18951086611bc78cc35e244bd01d1ca Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 15:35:37 +1000 Subject: drm/nv50: enable use of per-client gpu address space Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 12b34710a76f..91cc2a64d8e0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -769,6 +769,7 @@ nouveau_open(struct drm_device *dev, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fpriv *fpriv; + int ret; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) @@ -777,8 +778,17 @@ nouveau_open(struct drm_device *dev, struct drm_file *file_priv) spin_lock_init(&fpriv->lock); INIT_LIST_HEAD(&fpriv->channels); - if (dev_priv->card_type >= NV_50) + if (dev_priv->card_type == NV_50) { + ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL, + &fpriv->vm); + if (ret) { + kfree(fpriv); + return ret; + } + } else + if (dev_priv->card_type >= NV_C0) { nouveau_vm_ref(dev_priv->chan_vm, &fpriv->vm, NULL); + } file_priv->driver_priv = fpriv; return 0; -- cgit v1.2.3 From ad9ac437a500f8c0822bd5fe139af8ee2c132e15 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Jun 2011 16:18:19 +1000 Subject: drm/nouveau: add some debug output if nouveau_mm busy at destroy time Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_mm.c | 11 +++++++++-- drivers/gpu/drm/nouveau/nouveau_vm.c | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c index 7609756b6faf..1640dec3b823 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.c +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c @@ -158,11 +158,18 @@ int nouveau_mm_fini(struct nouveau_mm **prmm) { struct nouveau_mm *rmm = *prmm; - struct nouveau_mm_node *heap = + struct nouveau_mm_node *node, *heap = list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); - if (!list_is_singular(&rmm->nodes)) + if (!list_is_singular(&rmm->nodes)) { + printk(KERN_ERR "nouveau_mm not empty at destroy time!\n"); + list_for_each_entry(node, &rmm->nodes, nl_entry) { + printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n", + node->type, node->offset, node->length); + } + WARN_ON(1); return -EBUSY; + } kfree(heap); kfree(rmm); diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c index 519a6b4bba46..75ef595db4d5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.c +++ b/drivers/gpu/drm/nouveau/nouveau_vm.c @@ -396,8 +396,8 @@ nouveau_vm_del(struct nouveau_vm *vm) list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { nouveau_vm_unlink(vm, vpgd->obj); } - WARN_ON(nouveau_mm_fini(&vm->mm) != 0); + nouveau_mm_fini(&vm->mm); kfree(vm->pgt); kfree(vm); } -- cgit v1.2.3 From 5de8037ab466d397df17ff72382c33a908f42f6c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 8 Jun 2011 18:17:41 +1000 Subject: drm/nvc0: enable 
per-client address spaces Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_object.c | 74 ++++++++++++++++++++------------ drivers/gpu/drm/nouveau/nouveau_state.c | 7 ++- drivers/gpu/drm/nouveau/nvc0_instmem.c | 14 ++---- 3 files changed, 55 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 115827133bb7..542b451c81f1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -690,46 +690,64 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) return 0; } -int -nouveau_gpuobj_channel_init(struct nouveau_channel *chan, - uint32_t vram_h, uint32_t tt_h) +static int +nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm) { struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv); - struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm; - struct nouveau_gpuobj *vram = NULL, *tt = NULL; + struct nouveau_gpuobj *pgd = NULL; + struct nouveau_vm_pgd *vpgd; int ret, i; - NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); - if (dev_priv->card_type == NV_C0) { - struct nouveau_vm_pgd *vpgd; + ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin); + if (ret) + return ret; - ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, - &chan->ramin); + /* create page directory for this vm if none currently exists, + * will be destroyed automagically when last reference to the + * vm is removed + */ + if (list_empty(&vm->pgd_list)) { + ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd); if (ret) return ret; + } + nouveau_vm_ref(vm, &chan->vm, pgd); + nouveau_gpuobj_ref(NULL, &pgd); - nouveau_vm_ref(vm, &chan->vm, NULL); + /* point channel at vm's page directory */ + vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head); + nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst)); + nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst)); + nv_wo32(chan->ramin, 0x0208, 0xffffffff); + nv_wo32(chan->ramin, 0x020c, 0x000000ff); - vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head); - nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst)); - nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst)); - nv_wo32(chan->ramin, 0x0208, 0xffffffff); - nv_wo32(chan->ramin, 0x020c, 0x000000ff); + /* map display semaphore buffers into channel's vm */ + for (i = 0; i < 2; i++) { + struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i]; - for (i = 0; i < 2; i++) { - struct nv50_display_crtc *dispc = - &nv50_display(dev)->crtc[i]; + ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm, + &chan->dispc_vma[i]); + if (ret) + return ret; + } - ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm, - &chan->dispc_vma[i]); - if (ret) - return ret; - } + return 0; +} - return 0; - } +int +nouveau_gpuobj_channel_init(struct nouveau_channel *chan, + uint32_t vram_h, uint32_t tt_h) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv); + struct nouveau_vm *vm = fpriv ? 
fpriv->vm : dev_priv->chan_vm; + struct nouveau_gpuobj *vram = NULL, *tt = NULL; + int ret, i; + + NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); + if (dev_priv->card_type == NV_C0) + return nvc0_gpuobj_channel_init(chan, vm); /* Allocate a chunk of memory for per-channel object storage */ ret = nouveau_gpuobj_channel_init_pramin(chan); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 91cc2a64d8e0..50507e7e3f58 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -787,7 +787,12 @@ nouveau_open(struct drm_device *dev, struct drm_file *file_priv) } } else if (dev_priv->card_type >= NV_C0) { - nouveau_vm_ref(dev_priv->chan_vm, &fpriv->vm, NULL); + ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, + &fpriv->vm); + if (ret) { + kfree(fpriv); + return ret; + } } file_priv->driver_priv = fpriv; diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 82357d2df1f4..b701c439c92e 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -32,7 +32,6 @@ struct nvc0_instmem_priv { struct nouveau_channel *bar1; struct nouveau_gpuobj *bar3_pgd; struct nouveau_channel *bar3; - struct nouveau_gpuobj *chan_pgd; }; int @@ -181,17 +180,11 @@ nvc0_instmem_init(struct drm_device *dev) goto error; /* channel vm */ - ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm); + ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, + &dev_priv->chan_vm); if (ret) goto error; - ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd); - if (ret) - goto error; - - nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd); - nouveau_vm_ref(NULL, &vm, NULL); - nvc0_instmem_resume(dev); return 0; error: @@ -211,8 +204,7 @@ nvc0_instmem_takedown(struct drm_device *dev) nv_wr32(dev, 0x1704, 0x00000000); nv_wr32(dev, 0x1714, 0x00000000); - nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd); - nouveau_gpuobj_ref(NULL, &priv->chan_pgd); + nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL); nvc0_channel_del(&priv->bar1); nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd); -- cgit v1.2.3 From 06b75e3552394af66cc1ee4bfb5fe01a94929adb Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 8 Jun 2011 18:29:12 +1000 Subject: drm/nouveau: fix display takedown order to match reverse init order Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 50507e7e3f58..27d2a816d2c6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -720,11 +720,16 @@ static void nouveau_card_takedown(struct drm_device *dev) struct nouveau_engine *engine = &dev_priv->engine; int e; + drm_kms_helper_poll_fini(dev); + nouveau_fbcon_fini(dev); + if (dev_priv->channel) { - nouveau_fence_fini(dev); nouveau_channel_put_unlocked(&dev_priv->channel); + nouveau_fence_fini(dev); } + engine->display.destroy(dev); + if (!dev_priv->noaccel) { engine->fifo.takedown(dev); for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { @@ -1063,11 +1068,7 @@ void nouveau_lastclose(struct drm_device *dev) int nouveau_unload(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - drm_kms_helper_poll_fini(dev); - nouveau_fbcon_fini(dev); - 
engine->display.destroy(dev); nouveau_card_takedown(dev); iounmap(dev_priv->mmio); -- cgit v1.2.3 From 15ba79ad44fed84a9dabf6996144789424abae5b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 8 Jun 2011 18:40:34 +1000 Subject: drm/nouveau: shut lockdep up if last vm ref needs to destroy pgd Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_vm.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c index 75ef595db4d5..244fd38fdb84 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.c +++ b/drivers/gpu/drm/nouveau/nouveau_vm.c @@ -369,23 +369,26 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) } static void -nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) +nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) { struct nouveau_vm_pgd *vpgd, *tmp; + struct nouveau_gpuobj *pgd = NULL; - if (!pgd) + if (!mpgd) return; mutex_lock(&vm->mm->mutex); list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { - if (vpgd->obj != pgd) - continue; - - list_del(&vpgd->head); - nouveau_gpuobj_ref(NULL, &vpgd->obj); - kfree(vpgd); + if (vpgd->obj == mpgd) { + pgd = vpgd->obj; + list_del(&vpgd->head); + kfree(vpgd); + break; + } } mutex_unlock(&vm->mm->mutex); + + nouveau_gpuobj_ref(NULL, &pgd); } static void -- cgit v1.2.3 From 24f246ac10ae6a6ae873045387d4501498869f74 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 10 Jun 2011 13:36:08 +1000 Subject: drm/nouveau: rework vram init/fini ordering a little Commit "drm/nouveau: add some debug output if nouveau_mm busy at destroy time" revealed an issue where vram mm takedown would actually fail due to there still being nodes present, causing nouveau to leak a small amount of memory on module unload. This splits TTM/nouveau_mm a bit more cleanly and ensures nouveau_mm fini isn't done until all gpuobjs are also destroyed. 
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 4 +++- drivers/gpu/drm/nouveau/nouveau_mem.c | 30 +++--------------------- drivers/gpu/drm/nouveau/nouveau_mm.h | 1 + drivers/gpu/drm/nouveau/nouveau_state.c | 22 ++++++++++++++---- drivers/gpu/drm/nouveau/nv50_vram.c | 41 ++++++++++++++++++++------------- drivers/gpu/drm/nouveau/nvc0_vram.c | 14 +++++++---- 6 files changed, 59 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index bbea0452dca7..d610edb044c4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -504,7 +504,10 @@ struct nouveau_pm_engine { }; struct nouveau_vram_engine { + struct nouveau_mm *mm; + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *dev); int (*get)(struct drm_device *, u64, u32 align, u32 size_nc, u32 type, struct nouveau_mem **); void (*put)(struct drm_device *, struct nouveau_mem **); @@ -717,7 +720,6 @@ struct drm_nouveau_private { /* VRAM/fb configuration */ uint64_t vram_size; uint64_t vram_sys_base; - u32 vram_rblock_size; uint64_t fb_phys; uint64_t fb_available_size; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 976887dc2bab..765f0e57da78 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -451,10 +451,6 @@ nouveau_mem_vram_init(struct drm_device *dev) dev_priv->ramin_rsvd_vram = 512 * 1024; } - ret = dev_priv->engine.vram.init(dev); - if (ret) - return ret; - NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); if (dev_priv->vram_sys_base) { NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", @@ -729,36 +725,16 @@ nouveau_mem_timing_fini(struct drm_device *dev) } static int -nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size) +nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); - struct nouveau_mm *mm; - u64 size, block, rsvd; - int ret; - - rsvd = (256 * 1024); /* vga memory */ - size = (p_size << PAGE_SHIFT) - rsvd; - block = dev_priv->vram_rblock_size; - - ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12); - if (ret) - return ret; - - man->priv = mm; + /* nothing to do */ return 0; } static int nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) { - struct nouveau_mm *mm = man->priv; - int ret; - - ret = nouveau_mm_fini(&mm); - if (ret) - return ret; - - man->priv = NULL; + /* nothing to do */ return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h index 1f7483aae9a4..b9c016d21553 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.h +++ b/drivers/gpu/drm/nouveau/nouveau_mm.h @@ -52,6 +52,7 @@ int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc, void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *); int nv50_vram_init(struct drm_device *); +void nv50_vram_fini(struct drm_device *); int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, u32 memtype, struct nouveau_mem **); void nv50_vram_del(struct drm_device *, struct nouveau_mem **); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 27d2a816d2c6..49196fa8ea21 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -91,6 +91,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) 
engine->pm.clock_pre = nv04_pm_clock_pre; engine->pm.clock_set = nv04_pm_clock_set; engine->vram.init = nouveau_mem_detect; + engine->vram.takedown = nouveau_stub_takedown; engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x10: @@ -139,6 +140,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_pre = nv04_pm_clock_pre; engine->pm.clock_set = nv04_pm_clock_set; engine->vram.init = nouveau_mem_detect; + engine->vram.takedown = nouveau_stub_takedown; engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x20: @@ -187,6 +189,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_pre = nv04_pm_clock_pre; engine->pm.clock_set = nv04_pm_clock_set; engine->vram.init = nouveau_mem_detect; + engine->vram.takedown = nouveau_stub_takedown; engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x30: @@ -237,6 +240,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.voltage_get = nouveau_voltage_gpio_get; engine->pm.voltage_set = nouveau_voltage_gpio_set; engine->vram.init = nouveau_mem_detect; + engine->vram.takedown = nouveau_stub_takedown; engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x40: @@ -289,6 +293,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.voltage_set = nouveau_voltage_gpio_set; engine->pm.temp_get = nv40_temp_get; engine->vram.init = nouveau_mem_detect; + engine->vram.takedown = nouveau_stub_takedown; engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x50: @@ -366,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) else engine->pm.temp_get = nv40_temp_get; engine->vram.init = nv50_vram_init; + engine->vram.takedown = nv50_vram_fini; engine->vram.get = nv50_vram_new; engine->vram.put = nv50_vram_del; engine->vram.flags_valid = nv50_vram_flags_valid; @@ -412,6 +418,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.irq_unregister = nv50_gpio_irq_unregister; engine->gpio.irq_enable = nv50_gpio_irq_enable; engine->vram.init = nvc0_vram_init; + engine->vram.takedown = nv50_vram_fini; engine->vram.get = nvc0_vram_new; engine->vram.put = nv50_vram_del; engine->vram.flags_valid = nvc0_vram_flags_valid; @@ -529,7 +536,7 @@ nouveau_card_init(struct drm_device *dev) nouveau_pm_init(dev); - ret = nouveau_mem_vram_init(dev); + ret = engine->vram.init(dev); if (ret) goto out_bios; @@ -541,10 +548,14 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_gpuobj; - ret = nouveau_mem_gart_init(dev); + ret = nouveau_mem_vram_init(dev); if (ret) goto out_instmem; + ret = nouveau_mem_gart_init(dev); + if (ret) + goto out_ttmvram; + /* PMC */ ret = engine->mc.init(dev); if (ret) @@ -698,12 +709,14 @@ out_mc: engine->mc.takedown(dev); out_gart: nouveau_mem_gart_fini(dev); +out_ttmvram: + nouveau_mem_vram_fini(dev); out_instmem: engine->instmem.takedown(dev); out_gpuobj: nouveau_gpuobj_takedown(dev); out_vram: - nouveau_mem_vram_fini(dev); + engine->vram.takedown(dev); out_bios: nouveau_pm_fini(dev); nouveau_bios_takedown(dev); @@ -755,10 +768,11 @@ static void nouveau_card_takedown(struct drm_device *dev) ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); mutex_unlock(&dev->struct_mutex); nouveau_mem_gart_fini(dev); + nouveau_mem_vram_fini(dev); engine->instmem.takedown(dev); nouveau_gpuobj_takedown(dev); - nouveau_mem_vram_fini(dev); + engine->vram.takedown(dev); nouveau_irq_fini(dev); drm_vblank_cleanup(dev); diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c 
b/drivers/gpu/drm/nouveau/nv50_vram.c index ffbc3d8cf5be..af32daecd1ed 100644 --- a/drivers/gpu/drm/nouveau/nv50_vram.c +++ b/drivers/gpu/drm/nouveau/nv50_vram.c @@ -51,9 +51,7 @@ void nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; - struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; - struct nouveau_mm *mm = man->priv; + struct nouveau_mm *mm = dev_priv->engine.vram.mm; struct nouveau_mm_node *this; struct nouveau_mem *mem; @@ -84,9 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc, u32 memtype, struct nouveau_mem **pmem) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; - struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; - struct nouveau_mm *mm = man->priv; + struct nouveau_mm *mm = dev_priv->engine.vram.mm; struct nouveau_mm_node *r; struct nouveau_mem *mem; int comp = (memtype & 0x300) >> 8; @@ -190,22 +186,35 @@ int nv50_vram_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; + const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ + const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ + u32 rblock, length; dev_priv->vram_size = nv_rd32(dev, 0x10020c); dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; dev_priv->vram_size &= 0xffffffff00ULL; - switch (dev_priv->chipset) { - case 0xaa: - case 0xac: - case 0xaf: + /* IGPs, no funky reordering happens here, they don't have VRAM */ + if (dev_priv->chipset == 0xaa || + dev_priv->chipset == 0xac || + dev_priv->chipset == 0xaf) { dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12; - dev_priv->vram_rblock_size = 4096; - break; - default: - dev_priv->vram_rblock_size = nv50_vram_rblock(dev); - break; + rblock = 4096 >> 12; + } else { + rblock = nv50_vram_rblock(dev) >> 12; } - return 0; + length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail; + + return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock); +} + +void +nv50_vram_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; + + nouveau_mm_fini(&vram->mm); } diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c index 67c6ec6f34ea..e45a24d84e98 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vram.c +++ b/drivers/gpu/drm/nouveau/nvc0_vram.c @@ -61,9 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin, u32 type, struct nouveau_mem **pmem) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; - struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; - struct nouveau_mm *mm = man->priv; + struct nouveau_mm *mm = dev_priv->engine.vram.mm; struct nouveau_mm_node *r; struct nouveau_mem *mem; int ret; @@ -105,9 +103,15 @@ int nvc0_vram_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; + const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ + const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ + u32 length; dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; dev_priv->vram_size *= nv_rd32(dev, 0x121c74); - dev_priv->vram_rblock_size = 4096; - return 0; + + length = (dev_priv->vram_size >> 12) - rsvd_head - 
rsvd_tail; + + return nouveau_mm_init(&vram->mm, rsvd_head, length, 1); } -- cgit v1.2.3 From bf08bcc6b7260db8eb5ef389e060e4b12bf10cae Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 13 Jun 2011 12:23:35 +1000 Subject: drm/nouveau: fix null pointer deref on pre-nv50 chipsets Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_object.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 542b451c81f1..fb57c1110d24 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -870,16 +870,22 @@ void nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; NV_DEBUG(dev, "ch%d\n", chan->id); - for (i = 0; i < 2; i++) { - struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i]; - nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]); + if (dev_priv->card_type >= NV_50) { + struct nv50_display *disp = nv50_display(dev); + + for (i = 0; i < 2; i++) { + struct nv50_display_crtc *dispc = &disp->crtc[i]; + nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]); + } + + nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); + nouveau_gpuobj_ref(NULL, &chan->vm_pd); } - nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); - nouveau_gpuobj_ref(NULL, &chan->vm_pd); if (drm_mm_initialized(&chan->ramin_heap)) drm_mm_takedown(&chan->ramin_heap); -- cgit v1.2.3 From 63305de75febc2b7f3252a8b9210e2ec84601640 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 17 Jun 2011 19:49:31 +1000 Subject: drm/nouveau: un-blacklist nvce accel Reported working on IRC. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 49196fa8ea21..1d08875dc8a3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -1007,7 +1007,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) switch (dev_priv->chipset) { case 0xc1: /* known broken */ case 0xc8: /* never tested */ - case 0xce: /* never tested */ NV_INFO(dev, "acceleration disabled by default, pass " "noaccel=0 to force enable\n"); dev_priv->noaccel = true; -- cgit v1.2.3 From 0b33c936599d75b8d8ff6868fa0cbd5676d88b89 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 19 Jun 2011 20:50:59 +1000 Subject: drm/nvc0: push prunk140 irq messages to debug loglevel We know they happen, we don't know why. They're annoying, so hide them from users for the moment. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_graph.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index 56aa92fddc05..39e9208a708c 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -709,7 +709,7 @@ nvc0_runk140_isr(struct drm_device *dev) u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0); u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0); - NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1); + NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1); units &= ~(1 << unit); } } -- cgit v1.2.3 From 40ce4279e17e99bb98d02b8746fcf20abff8185b Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Wed, 22 Jun 2011 02:13:23 +0100 Subject: drm/nouveau/temp: Fix signed/unsigned int logic Many (all?) 
of the coefficients related to calculating the correct temperature are signed integers. This patch correctly parses and stores those values. It also ensures that the default offset is 0 (previously 1). Affected cards - the original nv50 and the nv40 family. Signed-off-by: Emil Velikov Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 6 +++--- drivers/gpu/drm/nouveau/nouveau_temp.c | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d610edb044c4..72bfc143eb47 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -461,9 +461,9 @@ struct nouveau_pm_level { struct nouveau_pm_temp_sensor_constants { u16 offset_constant; s16 offset_mult; - u16 offset_div; - u16 slope_mult; - u16 slope_div; + s16 offset_div; + s16 slope_mult; + s16 slope_div; }; struct nouveau_pm_threshold_temp { diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 649b0413b09f..47630fb669d0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -43,7 +43,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) /* Set the default sensor's contants */ sensor->offset_constant = 0; - sensor->offset_mult = 1; + sensor->offset_mult = 0; sensor->offset_div = 1; sensor->slope_mult = 1; sensor->slope_div = 1; @@ -109,7 +109,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) /* Read the entries from the table */ for (i = 0; i < entries; i++) { - u16 value = ROM16(temp[1]); + s16 value = ROM16(temp[1]); switch (temp[0]) { case 0x01: @@ -160,8 +160,8 @@ nv40_sensor_setup(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; - u32 offset = sensor->offset_mult / sensor->offset_div; - u32 sensor_calibration; + s32 offset = sensor->offset_mult / sensor->offset_div; + s32 sensor_calibration; /* set up the sensors */ sensor_calibration = 120 - offset - sensor->offset_constant; -- cgit v1.2.3 From 6d13e9c18843092f3df418b42183f704f6dac053 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Wed, 22 Jun 2011 02:54:39 +0100 Subject: drm/nouveau/temp: Add default calibration values for nv67 Signed-off-by: Emil Velikov Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_temp.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 47630fb669d0..081ca7b03e8a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -99,6 +99,13 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) sensor->slope_mult = 431; sensor->slope_div = 10000; break; + + case 0x67: + sensor->offset_mult = -26149; + sensor->offset_div = 100; + sensor->slope_mult = 484; + sensor->slope_div = 10000; + break; } } -- cgit v1.2.3 From 9a11dd65875f9e2401ded5d9a777574eacab814c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 23 Jun 2011 15:47:32 +1000 Subject: drm/nouveau: fix off-by-one Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_object.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index fb57c1110d24..159b7c437d3f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++
b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -125,7 +125,7 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid, int ret = -EINVAL; spin_lock_irqsave(&dev_priv->channels.lock, flags); - if (chid > 0 && chid < dev_priv->engine.fifo.channels) + if (chid >= 0 && chid < dev_priv->engine.fifo.channels) chan = dev_priv->channels.ptr[chid]; if (chan) ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); -- cgit v1.2.3 From 350abe0379f8106319f73cbcce37bfa2f0bfa05a Mon Sep 17 00:00:00 2001 From: Robert Marklund Date: Mon, 20 Jun 2011 15:55:46 +0200 Subject: mach-ux500: add basic support for snowball board Based on work from Mathieu J. Poirier For more information on snowball please visit http://www.igloocommunity.org Signed-off-by: Robert Marklund --- arch/arm/mach-ux500/board-mop500.c | 154 ++++++++++++++++++++++++++++++++++--- 1 file changed, 145 insertions(+), 9 deletions(-) diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index c64d6aa1355c..e3bbb5506dd1 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -26,8 +26,10 @@ #include #include #include +#include #include +#include #include #include @@ -44,6 +46,26 @@ #include "board-mop500.h" #include "board-mop500-regulators.h" +static struct gpio_led snowball_led_array[] = { + { + .name = "user_led", + .default_trigger = "none", + .gpio = 142, + }, +}; + +static struct gpio_led_platform_data snowball_led_data = { + .leds = snowball_led_array, + .num_leds = ARRAY_SIZE(snowball_led_array), +}; + +static struct platform_device snowball_led_dev = { + .name = "leds-gpio", + .dev = { + .platform_data = &snowball_led_data, + }, +}; + static struct ab8500_gpio_platform_data ab8500_gpio_pdata = { .gpio_base = MOP500_AB8500_GPIO(0), .irq_base = MOP500_AB8500_VIR_GPIO_IRQ_BASE, @@ -66,6 +88,97 @@ static struct ab8500_gpio_platform_data ab8500_gpio_pdata = { 0x7A, 0x00, 0x00}, }; +static struct gpio_keys_button snowball_key_array[] = { + { + .gpio = 32, + .type = EV_KEY, + .code = KEY_1, + .desc = "userpb", + .active_low = 1, + .debounce_interval = 50, + .wakeup = 1, + }, + { + .gpio = 151, + .type = EV_KEY, + .code = KEY_2, + .desc = "extkb1", + .active_low = 1, + .debounce_interval = 50, + .wakeup = 1, + }, + { + .gpio = 152, + .type = EV_KEY, + .code = KEY_3, + .desc = "extkb2", + .active_low = 1, + .debounce_interval = 50, + .wakeup = 1, + }, + { + .gpio = 161, + .type = EV_KEY, + .code = KEY_4, + .desc = "extkb3", + .active_low = 1, + .debounce_interval = 50, + .wakeup = 1, + }, + { + .gpio = 162, + .type = EV_KEY, + .code = KEY_5, + .desc = "extkb4", + .active_low = 1, + .debounce_interval = 50, + .wakeup = 1, + }, +}; + +static struct gpio_keys_platform_data snowball_key_data = { + .buttons = snowball_key_array, + .nbuttons = ARRAY_SIZE(snowball_key_array), +}; + +static struct platform_device snowball_key_dev = { + .name = "gpio-keys", + .id = -1, + .dev = { + .platform_data = &snowball_key_data, + } +}; + +static struct smsc911x_platform_config snowball_sbnet_cfg = { + .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH, + .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, + .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY, + .shift = 1, +}; + +static struct resource sbnet_res[] = { + { + .name = "smsc911x-memory", + .start = (0x5000 << 16), + .end = (0x5000 << 16) + 0xffff, + .flags = IORESOURCE_MEM, + }, + { + .start = NOMADIK_GPIO_TO_IRQ(140), + .end = NOMADIK_GPIO_TO_IRQ(140), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, + }, +}; + +static struct 
platform_device snowball_sbnet_dev = { + .name = "smsc911x", + .num_resources = ARRAY_SIZE(sbnet_res), + .resource = sbnet_res, + .dev = { + .platform_data = &snowball_sbnet_cfg, + }, +}; + static struct ab8500_platform_data ab8500_platdata = { .irq_base = MOP500_AB8500_IRQ_BASE, .regulator_reg_init = ab8500_regulator_reg_init, @@ -292,8 +405,9 @@ static void mop500_prox_deactivate(struct device *dev) } /* add any platform devices here - TODO */ -static struct platform_device *platform_devs[] __initdata = { +static struct platform_device *mop500_platform_devs[] __initdata = { &mop500_gpio_keys_device, + &ab8500_device, }; #ifdef CONFIG_STE_DMA40 @@ -424,6 +538,13 @@ static void __init mop500_uart_init(void) db8500_add_uart2(&uart2_plat); } +static struct platform_device *snowball_platform_devs[] __initdata = { + &snowball_led_dev, + &snowball_key_dev, + &snowball_sbnet_dev, + &ab8500_device, +}; + static void __init mop500_init_machine(void) { int i2c0_devs; @@ -433,24 +554,30 @@ static void __init mop500_init_machine(void) * all these GPIO pins to the internal GPIO controller * instead. */ - if (machine_is_hrefv60()) - mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO; - else - mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR; + if (!machine_is_snowball()) { + if (machine_is_hrefv60()) + mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO; + else + mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR; + } u8500_init_devices(); mop500_pins_init(); - platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); + if (machine_is_snowball()) + platform_add_devices(snowball_platform_devs, + ARRAY_SIZE(snowball_platform_devs)); + else + platform_add_devices(mop500_platform_devs, + ARRAY_SIZE(mop500_platform_devs)); mop500_i2c_init(); - mop500_sdi_init(); + if (!machine_is_snowball()) + mop500_sdi_init(); mop500_spi_init(); mop500_uart_init(); - platform_device_register(&ab8500_device); - i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); if (machine_is_hrefv60()) i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; @@ -480,3 +607,12 @@ MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+") .timer = &ux500_timer, .init_machine = mop500_init_machine, MACHINE_END + +MACHINE_START(SNOWBALL, "Calao Systems Snowball platform") + .boot_params = 0x100, + .map_io = u8500_map_io, + .init_irq = ux500_init_irq, + /* we re-use nomadik timer here */ + .timer = &ux500_timer, + .init_machine = mop500_init_machine, +MACHINE_END -- cgit v1.2.3 From d769d05498f78efdc1adff2075b3a58af40dbb76 Mon Sep 17 00:00:00 2001 From: "Mathieu J. Poirier" Date: Fri, 25 Mar 2011 09:28:56 -0600 Subject: mach-ux500: setting proper uart for snowball The UART setting in uncompress.h changes on the Snowball board. 
Signed-off-by: Mathieu Poirier Signed-off-by: Linus Walleij Signed-off-by: Robert Marklund --- arch/arm/mach-ux500/include/mach/uncompress.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-ux500/include/mach/uncompress.h b/arch/arm/mach-ux500/include/mach/uncompress.h index 088b550c40df..7dd08074c37b 100644 --- a/arch/arm/mach-ux500/include/mach/uncompress.h +++ b/arch/arm/mach-ux500/include/mach/uncompress.h @@ -54,7 +54,8 @@ static inline void arch_decomp_setup(void) if (machine_is_u8500() || machine_is_svp8500v1() || machine_is_svp8500v2() || - machine_is_hrefv60()) + machine_is_hrefv60() || + machine_is_snowball()) ux500_uart_base = U8500_UART2_BASE; else if (machine_is_u5500()) ux500_uart_base = U5500_UART0_BASE; -- cgit v1.2.3 From 885d0fe40fb97d7f394c24ac9c88721015f852b3 Mon Sep 17 00:00:00 2001 From: "Mathieu J. Poirier" Date: Fri, 25 Mar 2011 09:28:58 -0600 Subject: mach-ux500: Add SDI support for snowball board With SDI support for the Snowball we can boot from the SD card. Signed-off-by: Mathieu Poirier Signed-off-by: Linus Walleij Signed-off-by: Robert Marklund --- arch/arm/mach-ux500/board-mop500-sdi.c | 19 ++++++++++++++----- arch/arm/mach-ux500/board-mop500.c | 3 +-- arch/arm/mach-ux500/board-mop500.h | 5 +++++ 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index 5fbd6bc63cb1..d0cb9e5eb87c 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c @@ -216,15 +216,24 @@ void __init mop500_sdi_init(void) /* PoP:ed eMMC on top of DB8500 v1.0 has problems with high speed */ if (!cpu_is_u8500v10()) mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED; - db8500_add_sdi2(&mop500_sdi2_data, periphid); + /* sdi2 on snowball is in ATL_B mode for FSMC (LAN) */ + if (!machine_is_snowball()) + db8500_add_sdi2(&mop500_sdi2_data, periphid); /* On-board eMMC */ db8500_add_sdi4(&mop500_sdi4_data, periphid); - if (machine_is_hrefv60()) { - mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO; - sdi0_en = HREFV60_SDMMC_EN_GPIO; - sdi0_vsel = HREFV60_SDMMC_1V8_3V_GPIO; + if (machine_is_hrefv60() || machine_is_snowball()) { + if (machine_is_hrefv60()) { + mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO; + sdi0_en = HREFV60_SDMMC_EN_GPIO; + sdi0_vsel = HREFV60_SDMMC_1V8_3V_GPIO; + } else if (machine_is_snowball()) { + mop500_sdi0_data.gpio_cd = SNOWBALL_SDMMC_CD_GPIO; + mop500_sdi0_data.cd_invert = true; + sdi0_en = SNOWBALL_SDMMC_EN_GPIO; + sdi0_vsel = SNOWBALL_SDMMC_1V8_3V_GPIO; + } sdi0_configure(); } diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index e3bbb5506dd1..4eead1a0786b 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -573,8 +573,7 @@ static void __init mop500_init_machine(void) ARRAY_SIZE(mop500_platform_devs)); mop500_i2c_init(); - if (!machine_is_snowball()) - mop500_sdi_init(); + mop500_sdi_init(); mop500_spi_init(); mop500_uart_init(); diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index 03a31cc9b084..ee77a8970c33 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h @@ -7,6 +7,11 @@ #ifndef __BOARD_MOP500_H #define __BOARD_MOP500_H +/* snowball GPIO for MMC card */ +#define SNOWBALL_SDMMC_EN_GPIO 217 +#define SNOWBALL_SDMMC_1V8_3V_GPIO 228 +#define SNOWBALL_SDMMC_CD_GPIO 218 + /* HREFv60-specific GPIO assignments, this board has no GPIO expander */ #define 
HREFV60_TOUCH_RST_GPIO 143 #define HREFV60_PROX_SENSE_GPIO 217 -- cgit v1.2.3 From c41fac8aa9cb9ca31a5b3d9ce1f3b0026b83c16d Mon Sep 17 00:00:00 2001 From: Robert Marklund Date: Tue, 21 Jun 2011 09:39:13 +0200 Subject: mach-ux500: Add pin configuration for snowball board This sets up a few GPIO pins and some pinmuxing on platform boot for the Snowball board. Based on work from Mathieu J. Poirier . Signed-off-by: Robert Marklund --- arch/arm/mach-ux500/board-mop500-pins.c | 43 +++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c index fd4cf1ca5efd..7013d003b8fd 100644 --- a/arch/arm/mach-ux500/board-mop500-pins.c +++ b/arch/arm/mach-ux500/board-mop500-pins.c @@ -228,6 +228,46 @@ static pin_cfg_t mop500_pins_hrefv60[] = { }; +static pin_cfg_t snowball_pins[] = { + /* SSP0, to AB8500 */ + GPIO143_SSP0_CLK, + GPIO144_SSP0_FRM, + GPIO145_SSP0_RXD | PIN_PULL_DOWN, + GPIO146_SSP0_TXD, + + /* MMC0: MicroSD card */ + GPIO21_MC0_DAT31DIR | PIN_OUTPUT_HIGH, + + /* MMC2: LAN */ + GPIO86_SM_ADQ0, + GPIO87_SM_ADQ1, + GPIO88_SM_ADQ2, + GPIO89_SM_ADQ3, + GPIO90_SM_ADQ4, + GPIO91_SM_ADQ5, + GPIO92_SM_ADQ6, + GPIO93_SM_ADQ7, + + GPIO94_SM_ADVn, + GPIO95_SM_CS0n, + GPIO96_SM_OEn, + GPIO97_SM_WEn, + + GPIO128_SM_CKO, + GPIO130_SM_FBCLK, + GPIO131_SM_ADQ8, + GPIO132_SM_ADQ9, + GPIO133_SM_ADQ10, + GPIO134_SM_ADQ11, + GPIO135_SM_ADQ12, + GPIO136_SM_ADQ13, + GPIO137_SM_ADQ14, + GPIO138_SM_ADQ15, + + /* RSTn_LAN */ + GPIO141_GPIO | PIN_OUTPUT_HIGH, +}; + void __init mop500_pins_init(void) { nmk_config_pins(mop500_pins_common, @@ -235,6 +275,9 @@ void __init mop500_pins_init(void) if (machine_is_hrefv60()) nmk_config_pins(mop500_pins_hrefv60, ARRAY_SIZE(mop500_pins_hrefv60)); + else if (machine_is_snowball()) + nmk_config_pins(snowball_pins, + ARRAY_SIZE(snowball_pins)); else nmk_config_pins(mop500_pins_default, ARRAY_SIZE(mop500_pins_default)); -- cgit v1.2.3 From 11ab32a734f5ee5d4f4f27f3488b0099447a2dbf Mon Sep 17 00:00:00 2001 From: "Mathieu J. Poirier" Date: Fri, 25 Mar 2011 09:29:01 -0600 Subject: mach-ux500: Kconfig for snowball board This adds the necessary Kconfig entry for a Snowball board. Signed-off-by: Mathieu Poirier Signed-off-by: Robert Marklund --- arch/arm/mach-ux500/Kconfig | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index 96d546cef062..4210cb434dbc 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig @@ -35,6 +35,13 @@ config MACH_HREFV60 help Include support for the HREFv60 new development platform. +config MACH_SNOWBALL + bool "U8500 Snowball platform" + depends on UX500_SOC_DB8500 + select MACH_U8500 + help + Include support for the snowball development platform. + config MACH_U5500 bool "U5500 Development platform" depends on UX500_SOC_DB5500 -- cgit v1.2.3 From c5314877edd2ea9cccca0ca87c9a439a8d356c14 Mon Sep 17 00:00:00 2001 From: Robert Marklund Date: Tue, 21 Jun 2011 14:01:02 +0200 Subject: mach-ux500: add configs for snowball board Adds defconfig entries needed to boot a single ux500 kernel on the Snowball board. 
Signed-off-by: Robert Marklund --- arch/arm/configs/u8500_defconfig | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index e1d602029a4d..97d31a4663da 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -11,12 +11,12 @@ CONFIG_ARCH_U8500=y CONFIG_UX500_SOC_DB5500=y CONFIG_UX500_SOC_DB8500=y CONFIG_MACH_U8500=y +CONFIG_MACH_SNOWBALL=y CONFIG_MACH_U5500=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_SMP=y CONFIG_NR_CPUS=2 -CONFIG_HOTPLUG_CPU=y CONFIG_PREEMPT=y CONFIG_AEABI=y CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8" @@ -25,8 +25,13 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_VFP=y CONFIG_NEON=y CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NETFILTER=y CONFIG_PHONET=y -CONFIG_PHONET_PIPECTRLR=y # CONFIG_WIRELESS is not set CONFIG_CAIF=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -35,6 +40,13 @@ CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_MISC_DEVICES=y CONFIG_AB8500_PWM=y CONFIG_SENSORS_BH1780=y +CONFIG_NETDEVICES=y +CONFIG_SMSC_PHY=y +CONFIG_NET_ETHERNET=y +CONFIG_SMSC911X=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set @@ -49,9 +61,9 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_AB8500_PONKEY=y # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y +# CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -# CONFIG_LEGACY_PTYS is not set CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_NOMADIK=y CONFIG_I2C=y @@ -64,7 +76,6 @@ CONFIG_GPIO_TC3589X=y CONFIG_MFD_STMPE=y CONFIG_MFD_TC3589X=y CONFIG_AB8500_CORE=y -CONFIG_REGULATOR=y CONFIG_REGULATOR_AB8500=y # CONFIG_HID_SUPPORT is not set CONFIG_USB_MUSB_HDRC=y @@ -73,9 +84,11 @@ CONFIG_MUSB_PIO_ONLY=y CONFIG_USB_GADGET=y CONFIG_AB8500_USB=y CONFIG_MMC=y +CONFIG_MMC_CLKGATE=y CONFIG_MMC_ARMMMCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y +CONFIG_LEDS_LM3530=y CONFIG_LEDS_LP5521=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_AB8500=y @@ -83,7 +96,6 @@ CONFIG_RTC_DRV_PL031=y CONFIG_DMADEVICES=y CONFIG_STE_DMA40=y CONFIG_STAGING=y -# CONFIG_STAGING_EXCLUDE_BUILD is not set CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y @@ -95,6 +107,8 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_CONFIGFS_FS=m # CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_MAGIC_SYSRQ=y @@ -103,7 +117,5 @@ CONFIG_DEBUG_KERNEL=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_FTRACE is not set CONFIG_DEBUG_USER=y -CONFIG_DEBUG_ERRORS=y -- cgit v1.2.3 From cde21de148a5bf474bbde59ebf046992fab3c77b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 30 May 2011 15:51:47 +0200 Subject: mach-u300: cleanup clockevent code Use the new clockevents_config_and_register() function to register the U300 clockevent, since that code requires ->cpumask to be set we set this even on this UP system to please the framework. 
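/*
 * For reference, and not part of the patch below: the generic helper this
 * cleanup adopts has the signature
 *
 *   void clockevents_config_and_register(struct clock_event_device *dev,
 *                                        u32 freq,
 *                                        unsigned long min_delta,
 *                                        unsigned long max_delta);
 *
 * It takes the event device, its input clock rate in Hz and the minimum and
 * maximum programmable delta in clock cycles, and derives mult/shift and the
 * nanosecond limits internally, which is why the open-coded
 * clockevents_calc_mult_shift()/clockevent_delta2ns() sequence can be dropped.
 */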
Cc: Thomas Gleixner Signed-off-by: Linus Walleij --- arch/arm/mach-u300/timer.c | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c index 18d7fa0603c2..5f51bdeef0ef 100644 --- a/arch/arm/mach-u300/timer.c +++ b/arch/arm/mach-u300/timer.c @@ -27,9 +27,6 @@ #include #include -/* Be able to sleep for atleast 4 seconds (usually more) */ -#define APPTIMER_MIN_RANGE 4 - /* * APP side special timer registers * This timer contains four timers which can fire an interrupt each. */ @@ -309,11 +306,11 @@ static int u300_set_next_event(unsigned long cycles, /* Use general purpose timer 1 as clock event */ static struct clock_event_device clockevent_u300_1mhz = { - .name = "GPT1", - .rating = 300, /* Reasonably fast and accurate clock event */ - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_next_event = u300_set_next_event, - .set_mode = u300_set_mode, + .name = "GPT1", + .rating = 300, /* Reasonably fast and accurate clock event */ + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = u300_set_next_event, + .set_mode = u300_set_mode, }; /* Clock event timer interrupt handler */ @@ -328,9 +325,9 @@ static irqreturn_t u300_timer_interrupt(int irq, void *dev_id) } static struct irqaction u300_timer_irq = { - .name = "U300 Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, - .handler = u300_timer_interrupt, + .name = "U300 Timer Tick", + .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .handler = u300_timer_interrupt, }; /* @@ -413,16 +410,10 @@ static void __init u300_timer_init(void) "GPT2", rate, 300, 32, clocksource_mmio_readl_up)) pr_err("timer: failed to initialize U300 clock source\n"); - clockevents_calc_mult_shift(&clockevent_u300_1mhz, - rate, APPTIMER_MIN_RANGE); - /* 32bit counter, so 32bits delta is max */ - clockevent_u300_1mhz.max_delta_ns = - clockevent_delta2ns(0xffffffff, &clockevent_u300_1mhz); - /* This timer is slow enough to set for 1 cycle == 1 MHz */ - clockevent_u300_1mhz.min_delta_ns = - clockevent_delta2ns(1, &clockevent_u300_1mhz); - clockevent_u300_1mhz.cpumask = cpumask_of(0); - clockevents_register_device(&clockevent_u300_1mhz); + /* Configure and register the clockevent */ + clockevents_config_and_register(&clockevent_u300_1mhz, rate, + 1, 0xffffffff); + /* * TODO: init and register the rest of the timers too, they can be * used by hrtimers! -- cgit v1.2.3 From 32d55ff91635c22dc853c532ec85dc16663d251f Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 16 Jun 2011 09:28:28 +0200 Subject: mach-u300: set appropriate FIFO trigger levels The U300 just defined the fill level limits for the FIFOs to 1 item out of habit. It can easily handle four.
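The change boils down to raising the PL022 FIFO watermarks in the chip configuration, as in this minimal sketch (only the two affected fields shown; the remaining fields are elided, not defaults to copy):

#include <linux/amba/pl022.h>

static struct pl022_config_chip example_chip_info = {
        /*
         * Service the FIFOs once four entries are available/free rather
         * than after every single word, so the controller is serviced
         * less frequently per transfer.
         */
        .rx_lev_trig = SSP_RX_4_OR_MORE_ELEM,
        .tx_lev_trig = SSP_TX_4_OR_MORE_EMPTY_LOC,
        /* ... remaining fields as in the board's existing chip info ... */
};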
Signed-off-by: Linus Walleij --- arch/arm/mach-u300/spi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/mach-u300/spi.c b/arch/arm/mach-u300/spi.c index 5767208f1c1d..7b597e2b19e2 100644 --- a/arch/arm/mach-u300/spi.c +++ b/arch/arm/mach-u300/spi.c @@ -40,8 +40,8 @@ struct pl022_config_chip dummy_chip_info = { .hierarchy = SSP_MASTER, /* 0 = drive TX even as slave, 1 = do not drive TX as slave */ .slave_tx_disable = 0, - .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, - .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, + .rx_lev_trig = SSP_RX_4_OR_MORE_ELEM, + .tx_lev_trig = SSP_TX_4_OR_MORE_EMPTY_LOC, .ctrl_len = SSP_BITS_12, .wait_state = SSP_MWIRE_WAIT_ZERO, .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, -- cgit v1.2.3 From ef7a474cef00594ccef432ce0840464e51ea4ac0 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 1 Jun 2011 14:44:16 +0200 Subject: mach-ux500: register a clock for the SMP TWD The SMP TWD on the ux500 will change frequency at the same time as the CPU. Loop back the frequency presented from the CPU into a clock that is looked up by the SMP TWD driver with the new cpufreq notifier hook. Signed-off-by: Linus Walleij --- arch/arm/mach-ux500/clock.c | 48 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c index 32ce90840ee1..b4e786a0fa4f 100644 --- a/arch/arm/mach-ux500/clock.c +++ b/arch/arm/mach-ux500/clock.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -759,6 +760,51 @@ err_out: late_initcall(clk_debugfs_init); #endif /* defined(CONFIG_DEBUG_FS) */ +unsigned long clk_smp_twd_rate = 400000000; + +unsigned long clk_smp_twd_get_rate(struct clk *clk) +{ + return clk_smp_twd_rate; +} + +static struct clk clk_smp_twd = { + .get_rate = clk_smp_twd_get_rate, + .name = "smp_twd", +}; + +static struct clk_lookup clk_smp_twd_lookup = { + .dev_id = "smp_twd", + .clk = &clk_smp_twd, +}; + +#ifdef CONFIG_CPU_FREQ + +static int clk_twd_cpufreq_transition(struct notifier_block *nb, + unsigned long state, void *data) +{ + struct cpufreq_freqs *f = data; + + if (state == CPUFREQ_PRECHANGE) { + /* Save frequency in simple Hz */ + clk_smp_twd_rate = f->new * 1000; + } + + return NOTIFY_OK; +} + +static struct notifier_block clk_twd_cpufreq_nb = { + .notifier_call = clk_twd_cpufreq_transition, +}; + +static int clk_init_smp_twd_cpufreq(void) +{ + return cpufreq_register_notifier(&clk_twd_cpufreq_nb, + CPUFREQ_TRANSITION_NOTIFIER); +} +late_initcall(clk_init_smp_twd_cpufreq); + +#endif + int __init clk_init(void) { if (cpu_is_u8500ed()) { @@ -779,6 +825,8 @@ int __init clk_init(void) else clkdev_add_table(u8500_v1_clks, ARRAY_SIZE(u8500_v1_clks)); + clkdev_add(&clk_smp_twd_lookup); + #ifdef CONFIG_DEBUG_FS clk_debugfs_add_table(u8500_common_clks, ARRAY_SIZE(u8500_common_clks)); if (cpu_is_u8500ed()) -- cgit v1.2.3 From 794d78fea51504bad3880d14f354a9847f318f25 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 21 Jun 2011 07:55:12 +0000 Subject: drivers: sh: late disabling of clocks V2 This V2 patch changes the clock disabling behavior during boot. Two different changes are made: 1) Delay disabling of clocks until late in the boot process. This fixes an existing issue where in-use clocks without software reference are disabled by mistake during boot. One example of this is the handling of the Mackerel serial console output that shares clock with the I2C controller. 
2) Write out the "disabled" state to the hardware for clocks that not have been used by the kernel. In other words, make sure so far unused clocks actually get turned off. Signed-off-by: Magnus Damm Acked-by: Simon Horman Signed-off-by: Paul Mundt --- drivers/sh/clk/core.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index 7e9c39951ecb..ebeaa9e9f068 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c @@ -34,6 +34,9 @@ static LIST_HEAD(clock_list); static DEFINE_SPINLOCK(clock_lock); static DEFINE_MUTEX(clock_list_sem); +/* clock disable operations are not passed on to hardware during boot */ +static int allow_disable; + void clk_rate_table_build(struct clk *clk, struct cpufreq_frequency_table *freq_table, int nr_freqs, @@ -228,7 +231,7 @@ static void __clk_disable(struct clk *clk) return; if (!(--clk->usecount)) { - if (likely(clk->ops && clk->ops->disable)) + if (likely(allow_disable && clk->ops && clk->ops->disable)) clk->ops->disable(clk); if (likely(clk->parent)) __clk_disable(clk->parent); @@ -747,3 +750,25 @@ err_out: return err; } late_initcall(clk_debugfs_init); + +static int __init clk_late_init(void) +{ + unsigned long flags; + struct clk *clk; + + /* disable all clocks with zero use count */ + mutex_lock(&clock_list_sem); + spin_lock_irqsave(&clock_lock, flags); + + list_for_each_entry(clk, &clock_list, node) + if (!clk->usecount && clk->ops && clk->ops->disable) + clk->ops->disable(clk); + + /* from now on allow clock disable operations */ + allow_disable = 1; + + spin_unlock_irqrestore(&clock_lock, flags); + mutex_unlock(&clock_list_sem); + return 0; +} +late_initcall(clk_late_init); -- cgit v1.2.3 From 225ca45c3c64964163ea1fa85e2081af85956eed Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 24 Jun 2011 17:35:40 +0900 Subject: sh: clkfwk: Convert to IS_ERR_OR_NULL. Trivial cleanup. Signed-off-by: Paul Mundt --- drivers/sh/clk/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index ebeaa9e9f068..229ad0991f26 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c @@ -396,7 +396,7 @@ int clk_register(struct clk *clk) { int ret; - if (clk == NULL || IS_ERR(clk)) + if (IS_ERR_OR_NULL(clk)) return -EINVAL; /* -- cgit v1.2.3 From 7912825d8b755e6a5b9839eab910f451b0271aba Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 24 Jun 2011 17:36:23 +0900 Subject: sh: Tidy up pre-clkdev clk_get() error handling. clk_get() used to return NULL or an errno value depending on whether a clkdev lookup failed or a clock wasn't found in the primary clock list. As these disjoint paths were unified and everything now is handled via clkdev lookups, the NULL case never makes it out of clk_get(). Update accordingly and always look to the errno value. 
Signed-off-by: Paul Mundt --- arch/sh/boards/board-apsh4a3a.c | 2 +- arch/sh/boards/board-apsh4ad0a.c | 2 +- arch/sh/boards/board-sh7785lcr.c | 2 +- arch/sh/boards/board-urquell.c | 2 +- arch/sh/boards/mach-sdk7786/setup.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c index 8e2a27057bc9..2823619c6006 100644 --- a/arch/sh/boards/board-apsh4a3a.c +++ b/arch/sh/boards/board-apsh4a3a.c @@ -116,7 +116,7 @@ static int apsh4a3a_clk_init(void) int ret; clk = clk_get(NULL, "extal"); - if (!clk || IS_ERR(clk)) + if (IS_ERR(clk)) return PTR_ERR(clk); ret = clk_set_rate(clk, 33333000); clk_put(clk); diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c index e2bd218a054e..b4d6292a9247 100644 --- a/arch/sh/boards/board-apsh4ad0a.c +++ b/arch/sh/boards/board-apsh4ad0a.c @@ -94,7 +94,7 @@ static int apsh4ad0a_clk_init(void) int ret; clk = clk_get(NULL, "extal"); - if (!clk || IS_ERR(clk)) + if (IS_ERR(clk)) return PTR_ERR(clk); ret = clk_set_rate(clk, 33333000); clk_put(clk); diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c index ee65ff05c558..d879848f3cdd 100644 --- a/arch/sh/boards/board-sh7785lcr.c +++ b/arch/sh/boards/board-sh7785lcr.c @@ -299,7 +299,7 @@ static int sh7785lcr_clk_init(void) int ret; clk = clk_get(NULL, "extal"); - if (!clk || IS_ERR(clk)) + if (IS_ERR(clk)) return PTR_ERR(clk); ret = clk_set_rate(clk, 33333333); clk_put(clk); diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c index d81c609decc7..24e3316c5c17 100644 --- a/arch/sh/boards/board-urquell.c +++ b/arch/sh/boards/board-urquell.c @@ -190,7 +190,7 @@ static int urquell_clk_init(void) return -EINVAL; clk = clk_get(NULL, "extal"); - if (!clk || IS_ERR(clk)) + if (IS_ERR(clk)) return PTR_ERR(clk); ret = clk_set_rate(clk, 33333333); clk_put(clk); diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c index 1521aa75ee3a..486d1ac3694c 100644 --- a/arch/sh/boards/mach-sdk7786/setup.c +++ b/arch/sh/boards/mach-sdk7786/setup.c @@ -194,7 +194,7 @@ static int sdk7786_clk_init(void) return -EINVAL; clk = clk_get(NULL, "extal"); - if (!clk || IS_ERR(clk)) + if (IS_ERR(clk)) return PTR_ERR(clk); ret = clk_set_rate(clk, 33333333); clk_put(clk); -- cgit v1.2.3 From a03a202e95fdaa3ff52ccfc2594ec531e5917816 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Mon, 20 Jun 2011 17:02:47 +0200 Subject: dmaengine: failure to get a specific DMA channel is not critical There exist systems with multiple DMA controllers with different capabilities. For example, on some sh-mobile / rmobile systems there are DMA controllers, whose channels can be configured to be used with SD- and MMC-host controllers, serial ports etc. Besides there are also DMA controllers, that can only be used for one special function, e.g., for USB. In such cases the DMA client filter function can just choose to specify to the DMA driver, which channel it needs. Then the .device_alloc_chan_resources() method of the DMA driver will check, whether it can provide that dunction. If not, it will fail and the loop in __dma_request_channel() will continue to the next DMA device, until it finds a suitable one. This works fine with just one minor glitch: the kernel logs error messages like dmaengine: failed to get : (-) after each such non-critical failure. This patch lowers priority of this message to the debug level. 
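For context, a client requests a specific channel along these lines; the filter callback and its cookie are illustrative placeholders, not taken from a real driver:

#include <linux/dmaengine.h>

struct example_slave {
        int wanted_chan_id;     /* hypothetical per-slave cookie */
};

static bool example_filter(struct dma_chan *chan, void *arg)
{
        struct example_slave *slave = arg;

        /* accept only the channel this slave was configured for */
        return chan->chan_id == slave->wanted_chan_id;
}

static struct dma_chan *example_request_channel(struct example_slave *slave)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /*
         * __dma_request_channel() walks all registered DMA devices; when
         * one controller's device_alloc_chan_resources() refuses the
         * request, the loop simply moves on to the next device, which is
         * why that failure is now only worth a pr_debug().
         */
        return dma_request_channel(mask, example_filter, slave);
}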
Reported-by: Kuninori Morimoto Signed-off-by: Guennadi Liakhovetski Tested-by: Kuninori Morimoto Tested-by: Magnus Damm Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 8bcb15fb959d..f7f21a5de3e1 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -509,8 +509,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v dma_chan_name(chan)); list_del_rcu(&device->global_node); } else if (err) - pr_err("dmaengine: failed to get %s: (%d)\n", - dma_chan_name(chan), err); + pr_debug("dmaengine: failed to get %s: (%d)\n", + dma_chan_name(chan), err); else break; if (--device->privatecnt == 0) -- cgit v1.2.3 From ec6452a5ec68498221a0ced3443cefd65b08be36 Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Wed, 8 Jun 2011 01:42:11 -0400 Subject: kconfig: do not overwrite symbol direct dependency in assignment Considering the following configuration: config F bool "F" choice AB bool "AB" config A bool "A" config B bool "B" endchoice if A config D bool default y if F select E config E bool "E" endif if B config D bool default y if F select E config E bool "E" endif The following configuration: CONFIG_F=y CONFIG_A=y # CONFIG_B is not set CONFIG_D=y CONFIG_E=y emits a spurious warning: (D) selects E which has unmet direct dependencies (B) If a symbol appears in two different branch of the tree, it should inherit the dependency of both parent, not just the last one. Reported-by: Yann E. Morin Tested-by: Yann E. Morin Signed-off-by: Arnaud Lacombe Signed-off-by: Michal Marek --- scripts/kconfig/menu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index aab5a1fee5a8..d66008639a43 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c @@ -351,7 +351,7 @@ void menu_finalize(struct menu *parent) last_menu->next = NULL; } - sym->dir_dep.expr = parent->dep; + sym->dir_dep.expr = expr_alloc_or(sym->dir_dep.expr, parent->dep); } for (menu = parent->list; menu; menu = menu->next) { if (sym && sym_is_choice(sym) && -- cgit v1.2.3 From b97c3d9c1655522be3adc5ae1aa153a18467e924 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Fri, 24 Jun 2011 21:02:59 -0700 Subject: drm/i915: i915_gem_object_finish_gtt must always release gtt mmap Even if the object is no longer in the GTT domain, there may still be a user space mapping which needs to be released. Without this fix, render-based text (mostly in firefox) would occasionally get corrupted when the system was under load. 
Signed-off-by: Keith Packard --- drivers/gpu/drm/i915/i915_gem.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b29e0f2b780a..6026817372da 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2155,15 +2155,15 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) { u32 old_write_domain, old_read_domains; - if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) - return; - /* Act a barrier for all accesses through the GTT */ mb(); /* Force a pagefault for domain tracking on next user access */ i915_gem_release_mmap(obj); + if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) + return; + old_read_domains = obj->base.read_domains; old_write_domain = obj->base.write_domain; -- cgit v1.2.3 From 785c4bcc0d88ff006a0b2120815a71e86ecf21ce Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Mon, 23 May 2011 18:33:01 +0200 Subject: ext3: Add fixed tracepoints This commit adds fixed tracepoints to the ext3 code. It is based on ext4 tracepoints, however due to the differences of both file systems, there are some tracepoints missing (those for delaloc and for multi-block allocator) and there are some ext3 specific as well (for reservation windows). Here is a list: ext3_free_inode ext3_request_inode ext3_allocate_inode ext3_evict_inode ext3_drop_inode ext3_mark_inode_dirty ext3_write_begin ext3_ordered_write_end ext3_writeback_write_end ext3_journalled_write_end ext3_ordered_writepage ext3_writeback_writepage ext3_journalled_writepage ext3_readpage ext3_releasepage ext3_invalidatepage ext3_discard_blocks ext3_request_blocks ext3_allocate_blocks ext3_free_blocks ext3_sync_file_enter ext3_sync_file_exit ext3_sync_fs ext3_rsv_window_add ext3_discard_reservation ext3_alloc_new_reservation ext3_reserved ext3_forget ext3_read_block_bitmap ext3_direct_IO_enter ext3_direct_IO_exit ext3_unlink_enter ext3_unlink_exit ext3_truncate_enter ext3_truncate_exit ext3_get_blocks_enter ext3_get_blocks_exit ext3_load_inode Signed-off-by: Lukas Czerner Cc: Jan Kara Signed-off-by: Jan Kara --- fs/ext3/balloc.c | 34 +- fs/ext3/fsync.c | 15 +- fs/ext3/ialloc.c | 4 + fs/ext3/inode.c | 29 ++ fs/ext3/namei.c | 3 + fs/ext3/super.c | 13 + include/trace/events/ext3.h | 864 ++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 946 insertions(+), 16 deletions(-) create mode 100644 include/trace/events/ext3.h diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index fe52297e31ad..f7d111e499ad 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -21,6 +21,7 @@ #include #include #include +#include /* * balloc.c contains the blocks allocation and deallocation routines @@ -161,6 +162,7 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group) desc = ext3_get_group_desc(sb, block_group, NULL); if (!desc) return NULL; + trace_ext3_read_block_bitmap(sb, block_group); bitmap_blk = le32_to_cpu(desc->bg_block_bitmap); bh = sb_getblk(sb, bitmap_blk); if (unlikely(!bh)) { @@ -351,6 +353,7 @@ void ext3_rsv_window_add(struct super_block *sb, struct rb_node * parent = NULL; struct ext3_reserve_window_node *this; + trace_ext3_rsv_window_add(sb, rsv); while (*p) { parent = *p; @@ -476,8 +479,10 @@ void ext3_discard_reservation(struct inode *inode) rsv = &block_i->rsv_window_node; if (!rsv_is_empty(&rsv->rsv_window)) { spin_lock(rsv_lock); - if (!rsv_is_empty(&rsv->rsv_window)) + if (!rsv_is_empty(&rsv->rsv_window)) { + trace_ext3_discard_reservation(inode, rsv); 
rsv_window_remove(inode->i_sb, rsv); + } spin_unlock(rsv_lock); } } @@ -683,14 +688,10 @@ error_return: void ext3_free_blocks(handle_t *handle, struct inode *inode, ext3_fsblk_t block, unsigned long count) { - struct super_block * sb; + struct super_block *sb = inode->i_sb; unsigned long dquot_freed_blocks; - sb = inode->i_sb; - if (!sb) { - printk ("ext3_free_blocks: nonexistent device"); - return; - } + trace_ext3_free_blocks(inode, block, count); ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks); if (dquot_freed_blocks) dquot_free_block(inode, dquot_freed_blocks); @@ -1136,6 +1137,7 @@ static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv, else start_block = grp_goal + group_first_block; + trace_ext3_alloc_new_reservation(sb, start_block); size = my_rsv->rsv_goal_size; if (!rsv_is_empty(&my_rsv->rsv_window)) { @@ -1230,8 +1232,11 @@ retry: * check if the first free block is within the * free space we just reserved */ - if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end) + if (start_block >= my_rsv->rsv_start && + start_block <= my_rsv->rsv_end) { + trace_ext3_reserved(sb, start_block, my_rsv); return 0; /* success */ + } /* * if the first free bit we found is out of the reservable space * continue search for next reservable space, @@ -1514,10 +1519,6 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, *errp = -ENOSPC; sb = inode->i_sb; - if (!sb) { - printk("ext3_new_block: nonexistent device"); - return 0; - } /* * Check quota for allocation of this block. @@ -1528,8 +1529,10 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, return 0; } + trace_ext3_request_blocks(inode, goal, num); + sbi = EXT3_SB(sb); - es = EXT3_SB(sb)->s_es; + es = sbi->s_es; ext3_debug("goal=%lu.\n", goal); /* * Allocate a block from reservation only when @@ -1742,6 +1745,10 @@ allocated: brelse(bitmap_bh); dquot_free_block(inode, *count-num); *count = num; + + trace_ext3_allocate_blocks(inode, goal, num, + (unsigned long long)ret_block); + return ret_block; io_error: @@ -1996,6 +2003,7 @@ ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group, if ((next - start) < minblocks) goto free_extent; + trace_ext3_discard_blocks(sb, discard_block, next - start); /* Send the TRIM command down to the device */ err = sb_issue_discard(sb, discard_block, next - start, GFP_NOFS, 0); diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c index 09b13bb34c94..06a4394d2bc3 100644 --- a/fs/ext3/fsync.c +++ b/fs/ext3/fsync.c @@ -30,6 +30,7 @@ #include #include #include +#include /* * akpm: A new design for ext3_sync_file(). @@ -51,10 +52,13 @@ int ext3_sync_file(struct file *file, int datasync) int ret, needs_barrier = 0; tid_t commit_tid; + J_ASSERT(ext3_journal_current_handle() == NULL); + + trace_ext3_sync_file_enter(file, datasync); + if (inode->i_sb->s_flags & MS_RDONLY) return 0; - J_ASSERT(ext3_journal_current_handle() == NULL); /* * data=writeback,ordered: @@ -70,8 +74,10 @@ int ext3_sync_file(struct file *file, int datasync) * (they were dirtied by commit). But that's OK - the blocks are * safe in-journal, which is all fsync() needs to ensure. 
*/ - if (ext3_should_journal_data(inode)) - return ext3_force_commit(inode->i_sb); + if (ext3_should_journal_data(inode)) { + ret = ext3_force_commit(inode->i_sb); + goto out; + } if (datasync) commit_tid = atomic_read(&ei->i_datasync_tid); @@ -91,5 +97,8 @@ int ext3_sync_file(struct file *file, int datasync) */ if (needs_barrier) blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + +out: + trace_ext3_sync_file_exit(inode, ret); return ret; } diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index bfc2dc43681d..bf09cbf938cc 100644 --- a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -118,6 +119,7 @@ void ext3_free_inode (handle_t *handle, struct inode * inode) ino = inode->i_ino; ext3_debug ("freeing inode %lu\n", ino); + trace_ext3_free_inode(inode); is_directory = S_ISDIR(inode->i_mode); @@ -426,6 +428,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, return ERR_PTR(-EPERM); sb = dir->i_sb; + trace_ext3_request_inode(dir, mode); inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); @@ -601,6 +604,7 @@ got: } ext3_debug("allocating inode %lu\n", inode->i_ino); + trace_ext3_allocate_inode(inode, dir, mode); goto really_out; fail: ext3_std_error(sb, err); diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 3451d23c3bae..3aa05eebe0b8 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -38,6 +38,7 @@ #include #include #include +#include #include "xattr.h" #include "acl.h" @@ -70,6 +71,7 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, might_sleep(); + trace_ext3_forget(inode, is_metadata, blocknr); BUFFER_TRACE(bh, "enter"); jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, " @@ -198,6 +200,7 @@ void ext3_evict_inode (struct inode *inode) handle_t *handle; int want_delete = 0; + trace_ext3_evict_inode(inode); if (!inode->i_nlink && !is_bad_inode(inode)) { dquot_initialize(inode); want_delete = 1; @@ -842,6 +845,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, ext3_fsblk_t first_block = 0; + trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create); J_ASSERT(handle != NULL || create == 0); depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary); @@ -970,6 +974,9 @@ cleanup: } BUFFER_TRACE(bh_result, "returned"); out: + trace_ext3_get_blocks_exit(inode, iblock, + depth ? 
le32_to_cpu(chain[depth-1].key) : 0, + count, err); return err; } @@ -1217,6 +1224,8 @@ static int ext3_write_begin(struct file *file, struct address_space *mapping, * we allocate blocks but write fails for some reason */ int needed_blocks = ext3_writepage_trans_blocks(inode) + 1; + trace_ext3_write_begin(inode, pos, len, flags); + index = pos >> PAGE_CACHE_SHIFT; from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; @@ -1332,6 +1341,7 @@ static int ext3_ordered_write_end(struct file *file, unsigned from, to; int ret = 0, ret2; + trace_ext3_ordered_write_end(inode, pos, len, copied); copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); from = pos & (PAGE_CACHE_SIZE - 1); @@ -1367,6 +1377,7 @@ static int ext3_writeback_write_end(struct file *file, struct inode *inode = file->f_mapping->host; int ret; + trace_ext3_writeback_write_end(inode, pos, len, copied); copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); update_file_sizes(inode, pos, copied); /* @@ -1395,6 +1406,7 @@ static int ext3_journalled_write_end(struct file *file, int partial = 0; unsigned from, to; + trace_ext3_journalled_write_end(inode, pos, len, copied); from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; @@ -1577,6 +1589,7 @@ static int ext3_ordered_writepage(struct page *page, if (ext3_journal_current_handle()) goto out_fail; + trace_ext3_ordered_writepage(page); if (!page_has_buffers(page)) { create_empty_buffers(page, inode->i_sb->s_blocksize, (1 << BH_Dirty)|(1 << BH_Uptodate)); @@ -1647,6 +1660,7 @@ static int ext3_writeback_writepage(struct page *page, if (ext3_journal_current_handle()) goto out_fail; + trace_ext3_writeback_writepage(page); if (page_has_buffers(page)) { if (!walk_page_buffers(NULL, page_buffers(page), 0, PAGE_CACHE_SIZE, NULL, buffer_unmapped)) { @@ -1689,6 +1703,7 @@ static int ext3_journalled_writepage(struct page *page, if (ext3_journal_current_handle()) goto no_write; + trace_ext3_journalled_writepage(page); handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode)); if (IS_ERR(handle)) { ret = PTR_ERR(handle); @@ -1739,6 +1754,7 @@ out_unlock: static int ext3_readpage(struct file *file, struct page *page) { + trace_ext3_readpage(page); return mpage_readpage(page, ext3_get_block); } @@ -1753,6 +1769,8 @@ static void ext3_invalidatepage(struct page *page, unsigned long offset) { journal_t *journal = EXT3_JOURNAL(page->mapping->host); + trace_ext3_invalidatepage(page, offset); + /* * If it's a full truncate we just forget about the pending dirtying */ @@ -1766,6 +1784,7 @@ static int ext3_releasepage(struct page *page, gfp_t wait) { journal_t *journal = EXT3_JOURNAL(page->mapping->host); + trace_ext3_releasepage(page); WARN_ON(PageChecked(page)); if (!page_has_buffers(page)) return 0; @@ -1794,6 +1813,8 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, size_t count = iov_length(iov, nr_segs); int retries = 0; + trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw); + if (rw == WRITE) { loff_t final_size = offset + count; @@ -1868,6 +1889,8 @@ retry: ret = err; } out: + trace_ext3_direct_IO_exit(inode, offset, + iov_length(iov, nr_segs), rw, ret); return ret; } @@ -2446,6 +2469,8 @@ void ext3_truncate(struct inode *inode) unsigned blocksize = inode->i_sb->s_blocksize; struct page *page; + trace_ext3_truncate_enter(inode); + if (!ext3_can_truncate(inode)) goto out_notrans; @@ -2597,6 +2622,7 @@ out_stop: ext3_orphan_del(handle, inode); ext3_journal_stop(handle); + trace_ext3_truncate_exit(inode); return; out_notrans: /* 
@@ -2605,6 +2631,7 @@ out_notrans: */ if (inode->i_nlink) ext3_orphan_del(NULL, inode); + trace_ext3_truncate_exit(inode); } static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb, @@ -2746,6 +2773,7 @@ make_io: * has in-inode xattrs, or we don't have this inode in memory. * Read the block from disk. */ + trace_ext3_load_inode(inode); get_bh(bh); bh->b_end_io = end_buffer_read_sync; submit_bh(READ_META, bh); @@ -3372,6 +3400,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) int err; might_sleep(); + trace_ext3_mark_inode_dirty(inode, _RET_IP_); err = ext3_reserve_inode_write(handle, inode, &iloc); if (!err) err = ext3_mark_iloc_dirty(handle, inode, &iloc); diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 34b6d9bfc48a..51736a4ff0cd 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "namei.h" #include "xattr.h" @@ -2144,6 +2145,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry) struct ext3_dir_entry_2 * de; handle_t *handle; + trace_ext3_unlink_enter(dir, dentry); /* Initialize quotas before so that eventual writes go * in separate transaction */ dquot_initialize(dir); @@ -2189,6 +2191,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry) end_unlink: ext3_journal_stop(handle); brelse (bh); + trace_ext3_unlink_exit(dentry, retval); return retval; } diff --git a/fs/ext3/super.c b/fs/ext3/super.c index aad153ef6b78..662290fb6fff 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -44,6 +44,9 @@ #include "acl.h" #include "namei.h" +#define CREATE_TRACE_POINTS +#include + #ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA #else @@ -497,6 +500,14 @@ static struct inode *ext3_alloc_inode(struct super_block *sb) return &ei->vfs_inode; } +static int ext3_drop_inode(struct inode *inode) +{ + int drop = generic_drop_inode(inode); + + trace_ext3_drop_inode(inode, drop); + return drop; +} + static void ext3_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); @@ -788,6 +799,7 @@ static const struct super_operations ext3_sops = { .destroy_inode = ext3_destroy_inode, .write_inode = ext3_write_inode, .dirty_inode = ext3_dirty_inode, + .drop_inode = ext3_drop_inode, .evict_inode = ext3_evict_inode, .put_super = ext3_put_super, .sync_fs = ext3_sync_fs, @@ -2507,6 +2519,7 @@ static int ext3_sync_fs(struct super_block *sb, int wait) { tid_t target; + trace_ext3_sync_fs(sb, wait); if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) { if (wait) log_wait_commit(EXT3_SB(sb)->s_journal, target); diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h new file mode 100644 index 000000000000..7b53c0573dc9 --- /dev/null +++ b/include/trace/events/ext3.h @@ -0,0 +1,864 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ext3 + +#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EXT3_H + +#include + +TRACE_EVENT(ext3_free_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( umode_t, mode ) + __field( uid_t, uid ) + __field( gid_t, gid ) + __field( blkcnt_t, blocks ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->uid = inode->i_uid; + __entry->gid = inode->i_gid; + __entry->blocks = inode->i_blocks; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu", + 
MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->uid, __entry->gid, + (unsigned long) __entry->blocks) +); + +TRACE_EVENT(ext3_request_inode, + TP_PROTO(struct inode *dir, int mode), + + TP_ARGS(dir, mode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, dir ) + __field( umode_t, mode ) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->dir = dir->i_ino; + __entry->mode = mode; + ), + + TP_printk("dev %d,%d dir %lu mode 0%o", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->dir, __entry->mode) +); + +TRACE_EVENT(ext3_allocate_inode, + TP_PROTO(struct inode *inode, struct inode *dir, int mode), + + TP_ARGS(inode, dir, mode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ino_t, dir ) + __field( umode_t, mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->dir = dir->i_ino; + __entry->mode = mode; + ), + + TP_printk("dev %d,%d ino %lu dir %lu mode 0%o", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long) __entry->dir, __entry->mode) +); + +TRACE_EVENT(ext3_evict_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, nlink ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->nlink = inode->i_nlink; + ), + + TP_printk("dev %d,%d ino %lu nlink %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->nlink) +); + +TRACE_EVENT(ext3_drop_inode, + TP_PROTO(struct inode *inode, int drop), + + TP_ARGS(inode, drop), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, drop ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->drop = drop; + ), + + TP_printk("dev %d,%d ino %lu drop %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->drop) +); + +TRACE_EVENT(ext3_mark_inode_dirty, + TP_PROTO(struct inode *inode, unsigned long IP), + + TP_ARGS(inode, IP), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field(unsigned long, ip ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->ip = IP; + ), + + TP_printk("dev %d,%d ino %lu caller %pF", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, (void *)__entry->ip) +); + +TRACE_EVENT(ext3_write_begin, + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int flags), + + TP_ARGS(inode, pos, len, flags), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, pos ) + __field( unsigned int, len ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = pos; + __entry->len = len; + __entry->flags = flags; + ), + + TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long) __entry->pos, __entry->len, + __entry->flags) +); + +DECLARE_EVENT_CLASS(ext3__write_end, + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, pos ) + __field( unsigned int, 
len ) + __field( unsigned int, copied ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = pos; + __entry->len = len; + __entry->copied = copied; + ), + + TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long) __entry->pos, __entry->len, + __entry->copied) +); + +DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +DECLARE_EVENT_CLASS(ext3__page_op, + TP_PROTO(struct page *page), + + TP_ARGS(page), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( pgoff_t, index ) + + ), + + TP_fast_assign( + __entry->index = page->index; + __entry->ino = page->mapping->host->i_ino; + __entry->dev = page->mapping->host->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu page_index %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->index) +); + +DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_readpage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_releasepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +TRACE_EVENT(ext3_invalidatepage, + TP_PROTO(struct page *page, unsigned long offset), + + TP_ARGS(page, offset), + + TP_STRUCT__entry( + __field( pgoff_t, index ) + __field( unsigned long, offset ) + __field( ino_t, ino ) + __field( dev_t, dev ) + + ), + + TP_fast_assign( + __entry->index = page->index; + __entry->offset = offset; + __entry->ino = page->mapping->host->i_ino; + __entry->dev = page->mapping->host->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu page_index %lu offset %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->index, __entry->offset) +); + +TRACE_EVENT(ext3_discard_blocks, + TP_PROTO(struct super_block *sb, unsigned long blk, + unsigned long count), + + TP_ARGS(sb, blk, count), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, blk ) + __field( unsigned long, count ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->blk = blk; + __entry->count = count; + ), + + TP_printk("dev %d,%d blk %lu count %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->blk, __entry->count) +); + +TRACE_EVENT(ext3_request_blocks, + TP_PROTO(struct inode *inode, unsigned long goal, + unsigned long count), + + TP_ARGS(inode, goal, count), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned long, count ) + __field( unsigned long, goal ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->count = count; + __entry->goal = goal; + ), + + 
TP_printk("dev %d,%d ino %lu count %lu goal %lu ", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->count, __entry->goal) +); + +TRACE_EVENT(ext3_allocate_blocks, + TP_PROTO(struct inode *inode, unsigned long goal, + unsigned long count, unsigned long block), + + TP_ARGS(inode, goal, count, block), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned long, block ) + __field( unsigned long, count ) + __field( unsigned long, goal ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->block = block; + __entry->count = count; + __entry->goal = goal; + ), + + TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->count, __entry->block, + __entry->goal) +); + +TRACE_EVENT(ext3_free_blocks, + TP_PROTO(struct inode *inode, unsigned long block, + unsigned long count), + + TP_ARGS(inode, block, count), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( umode_t, mode ) + __field( unsigned long, block ) + __field( unsigned long, count ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->block = block; + __entry->count = count; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->block, __entry->count) +); + +TRACE_EVENT(ext3_sync_file_enter, + TP_PROTO(struct file *file, int datasync), + + TP_ARGS(file, datasync), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ino_t, parent ) + __field( int, datasync ) + ), + + TP_fast_assign( + struct dentry *dentry = file->f_path.dentry; + + __entry->dev = dentry->d_inode->i_sb->s_dev; + __entry->ino = dentry->d_inode->i_ino; + __entry->datasync = datasync; + __entry->parent = dentry->d_parent->d_inode->i_ino; + ), + + TP_printk("dev %d,%d ino %lu parent %ld datasync %d ", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long) __entry->parent, __entry->datasync) +); + +TRACE_EVENT(ext3_sync_file_exit, + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret), + + TP_STRUCT__entry( + __field( int, ret ) + __field( ino_t, ino ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->ret = ret; + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->ret) +); + +TRACE_EVENT(ext3_sync_fs, + TP_PROTO(struct super_block *sb, int wait), + + TP_ARGS(sb, wait), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, wait ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->wait = wait; + ), + + TP_printk("dev %d,%d wait %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->wait) +); + +TRACE_EVENT(ext3_rsv_window_add, + TP_PROTO(struct super_block *sb, + struct ext3_reserve_window_node *rsv_node), + + TP_ARGS(sb, rsv_node), + + TP_STRUCT__entry( + __field( unsigned long, start ) + __field( unsigned long, end ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->start = rsv_node->rsv_window._rsv_start; + __entry->end = rsv_node->rsv_window._rsv_end; + ), + + TP_printk("dev %d,%d start %lu end %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + 
__entry->start, __entry->end) +); + +TRACE_EVENT(ext3_discard_reservation, + TP_PROTO(struct inode *inode, + struct ext3_reserve_window_node *rsv_node), + + TP_ARGS(inode, rsv_node), + + TP_STRUCT__entry( + __field( unsigned long, start ) + __field( unsigned long, end ) + __field( ino_t, ino ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->start = rsv_node->rsv_window._rsv_start; + __entry->end = rsv_node->rsv_window._rsv_end; + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu start %lu end %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long)__entry->ino, __entry->start, + __entry->end) +); + +TRACE_EVENT(ext3_alloc_new_reservation, + TP_PROTO(struct super_block *sb, unsigned long goal), + + TP_ARGS(sb, goal), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, goal ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->goal = goal; + ), + + TP_printk("dev %d,%d goal %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->goal) +); + +TRACE_EVENT(ext3_reserved, + TP_PROTO(struct super_block *sb, unsigned long block, + struct ext3_reserve_window_node *rsv_node), + + TP_ARGS(sb, block, rsv_node), + + TP_STRUCT__entry( + __field( unsigned long, block ) + __field( unsigned long, start ) + __field( unsigned long, end ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->block = block; + __entry->start = rsv_node->rsv_window._rsv_start; + __entry->end = rsv_node->rsv_window._rsv_end; + __entry->dev = sb->s_dev; + ), + + TP_printk("dev %d,%d block %lu, start %lu end %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->block, __entry->start, __entry->end) +); + +TRACE_EVENT(ext3_forget, + TP_PROTO(struct inode *inode, int is_metadata, unsigned long block), + + TP_ARGS(inode, is_metadata, block), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( umode_t, mode ) + __field( int, is_metadata ) + __field( unsigned long, block ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->is_metadata = is_metadata; + __entry->block = block; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->is_metadata, __entry->block) +); + +TRACE_EVENT(ext3_read_block_bitmap, + TP_PROTO(struct super_block *sb, unsigned int group), + + TP_ARGS(sb, group), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u32, group ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->group = group; + ), + + TP_printk("dev %d,%d group %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->group) +); + +TRACE_EVENT(ext3_direct_IO_enter, + TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw), + + TP_ARGS(inode, offset, len, rw), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( loff_t, pos ) + __field( unsigned long, len ) + __field( int, rw ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->pos = offset; + __entry->len = len; + __entry->rw = rw; + ), + + TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long) __entry->pos, __entry->len, + __entry->rw) +); + +TRACE_EVENT(ext3_direct_IO_exit, + TP_PROTO(struct inode *inode, loff_t offset, unsigned long 
len, + int rw, int ret), + + TP_ARGS(inode, offset, len, rw, ret), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( loff_t, pos ) + __field( unsigned long, len ) + __field( int, rw ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->pos = offset; + __entry->len = len; + __entry->rw = rw; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long) __entry->pos, __entry->len, + __entry->rw, __entry->ret) +); + +TRACE_EVENT(ext3_unlink_enter, + TP_PROTO(struct inode *parent, struct dentry *dentry), + + TP_ARGS(parent, dentry), + + TP_STRUCT__entry( + __field( ino_t, parent ) + __field( ino_t, ino ) + __field( loff_t, size ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->parent = parent->i_ino; + __entry->ino = dentry->d_inode->i_ino; + __entry->size = dentry->d_inode->i_size; + __entry->dev = dentry->d_inode->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu size %lld parent %ld", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long)__entry->size, + (unsigned long) __entry->parent) +); + +TRACE_EVENT(ext3_unlink_exit, + TP_PROTO(struct dentry *dentry, int ret), + + TP_ARGS(dentry, ret), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->ino = dentry->d_inode->i_ino; + __entry->dev = dentry->d_inode->i_sb->s_dev; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->ret) +); + +DECLARE_EVENT_CLASS(ext3__truncate, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( blkcnt_t, blocks ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->blocks = inode->i_blocks; + ), + + TP_printk("dev %d,%d ino %lu blocks %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, (unsigned long) __entry->blocks) +); + +DEFINE_EVENT(ext3__truncate, ext3_truncate_enter, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(ext3__truncate, ext3_truncate_exit, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +TRACE_EVENT(ext3_get_blocks_enter, + TP_PROTO(struct inode *inode, unsigned long lblk, + unsigned long len, int create), + + TP_ARGS(inode, lblk, len, create), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( unsigned long, lblk ) + __field( unsigned long, len ) + __field( int, create ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->lblk = lblk; + __entry->len = len; + __entry->create = create; + ), + + TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->lblk, __entry->len, __entry->create) +); + +TRACE_EVENT(ext3_get_blocks_exit, + TP_PROTO(struct inode *inode, unsigned long lblk, + unsigned long pblk, unsigned long len, int ret), + + TP_ARGS(inode, lblk, pblk, len, ret), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( unsigned long, lblk ) + __field( unsigned long, pblk ) + __field( unsigned long, len ) + __field( int, ret ) + ), + + 
TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->lblk = lblk; + __entry->pblk = pblk; + __entry->len = len; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->lblk, __entry->pblk, + __entry->len, __entry->ret) +); + +TRACE_EVENT(ext3_load_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino) +); + +#endif /* _TRACE_EXT3_H */ + +/* This part must be outside protection */ +#include -- cgit v1.2.3 From 99cb1a318c37bf462c53d43f4dacb7b4896ce0c9 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Mon, 23 May 2011 18:33:02 +0200 Subject: jbd: Add fixed tracepoints This commit adds fixed tracepoint for jbd. It has been based on fixed tracepoints for jbd2, however there are missing those for collecting statistics, since I think that it will require more intrusive patch so I should have its own commit, if someone decide that it is needed. Also there are new tracepoints in __journal_drop_transaction() and journal_update_superblock(). The list of jbd tracepoints: jbd_checkpoint jbd_start_commit jbd_commit_locking jbd_commit_flushing jbd_commit_logging jbd_drop_transaction jbd_end_commit jbd_do_submit_data jbd_cleanup_journal_tail jbd_update_superblock_end Signed-off-by: Lukas Czerner Cc: Jan Kara Signed-off-by: Jan Kara --- fs/jbd/checkpoint.c | 4 + fs/jbd/commit.c | 11 +++ fs/jbd/journal.c | 4 + include/trace/events/jbd.h | 203 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 222 insertions(+) create mode 100644 include/trace/events/jbd.h diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index e4b87bc1fa56..dea7503b47e8 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -22,6 +22,7 @@ #include #include #include +#include /* * Unlink a buffer from a transaction checkpoint list. @@ -358,6 +359,7 @@ int log_do_checkpoint(journal_t *journal) * journal straight away. */ result = cleanup_journal_tail(journal); + trace_jbd_checkpoint(journal, result); jbd_debug(1, "cleanup_journal_tail returned %d\n", result); if (result <= 0) return result; @@ -503,6 +505,7 @@ int cleanup_journal_tail(journal_t *journal) if (blocknr < journal->j_tail) freed = freed + journal->j_last - journal->j_first; + trace_jbd_cleanup_journal_tail(journal, first_tid, blocknr, freed); jbd_debug(1, "Cleaning journal tail from %d to %d (offset %u), " "freeing %u\n", @@ -752,6 +755,7 @@ void __journal_drop_transaction(journal_t *journal, transaction_t *transaction) J_ASSERT(journal->j_committing_transaction != transaction); J_ASSERT(journal->j_running_transaction != transaction); + trace_jbd_drop_transaction(journal, transaction); jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid); kfree(transaction); } diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 72ffa974b0b8..eedd201374a8 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c @@ -21,6 +21,7 @@ #include #include #include +#include /* * Default IO end handler for temporary BJ_IO buffer_heads. 
@@ -204,6 +205,8 @@ write_out_data: if (!trylock_buffer(bh)) { BUFFER_TRACE(bh, "needs blocking lock"); spin_unlock(&journal->j_list_lock); + trace_jbd_do_submit_data(journal, + commit_transaction); /* Write out all data to prevent deadlocks */ journal_do_submit_data(wbuf, bufs, write_op); bufs = 0; @@ -236,6 +239,8 @@ write_out_data: jbd_unlock_bh_state(bh); if (bufs == journal->j_wbufsize) { spin_unlock(&journal->j_list_lock); + trace_jbd_do_submit_data(journal, + commit_transaction); journal_do_submit_data(wbuf, bufs, write_op); bufs = 0; goto write_out_data; @@ -266,6 +271,7 @@ write_out_data: } } spin_unlock(&journal->j_list_lock); + trace_jbd_do_submit_data(journal, commit_transaction); journal_do_submit_data(wbuf, bufs, write_op); return err; @@ -316,12 +322,14 @@ void journal_commit_transaction(journal_t *journal) commit_transaction = journal->j_running_transaction; J_ASSERT(commit_transaction->t_state == T_RUNNING); + trace_jbd_start_commit(journal, commit_transaction); jbd_debug(1, "JBD: starting commit of transaction %d\n", commit_transaction->t_tid); spin_lock(&journal->j_state_lock); commit_transaction->t_state = T_LOCKED; + trace_jbd_commit_locking(journal, commit_transaction); spin_lock(&commit_transaction->t_handle_lock); while (commit_transaction->t_updates) { DEFINE_WAIT(wait); @@ -392,6 +400,7 @@ void journal_commit_transaction(journal_t *journal) */ journal_switch_revoke_table(journal); + trace_jbd_commit_flushing(journal, commit_transaction); commit_transaction->t_state = T_FLUSH; journal->j_committing_transaction = commit_transaction; journal->j_running_transaction = NULL; @@ -493,6 +502,7 @@ void journal_commit_transaction(journal_t *journal) commit_transaction->t_state = T_COMMIT; spin_unlock(&journal->j_state_lock); + trace_jbd_commit_logging(journal, commit_transaction); J_ASSERT(commit_transaction->t_nr_buffers <= commit_transaction->t_outstanding_credits); @@ -946,6 +956,7 @@ restart_loop: } spin_unlock(&journal->j_list_lock); + trace_jbd_end_commit(journal, commit_transaction); jbd_debug(1, "JBD: commit %d complete, head %d\n", journal->j_commit_sequence, journal->j_tail_sequence); diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index e2d4285fbe90..ab019ee77888 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -38,6 +38,9 @@ #include #include +#define CREATE_TRACE_POINTS +#include + #include #include @@ -1065,6 +1068,7 @@ void journal_update_superblock(journal_t *journal, int wait) } else write_dirty_buffer(bh, WRITE); + trace_jbd_update_superblock_end(journal, wait); out: /* If we have just flushed the log (by marking s_start==0), then * any future commit will have to be careful to update the diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h new file mode 100644 index 000000000000..aff64d82d713 --- /dev/null +++ b/include/trace/events/jbd.h @@ -0,0 +1,203 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM jbd + +#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_JBD_H + +#include +#include + +TRACE_EVENT(jbd_checkpoint, + + TP_PROTO(journal_t *journal, int result), + + TP_ARGS(journal, result), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, result ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->result = result; + ), + + TP_printk("dev %d,%d result %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->result) +); + +DECLARE_EVENT_CLASS(jbd_commit, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction), + 
+ TP_STRUCT__entry( + __field( dev_t, dev ) + __field( char, sync_commit ) + __field( int, transaction ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->sync_commit = commit_transaction->t_synchronous_commit; + __entry->transaction = commit_transaction->t_tid; + ), + + TP_printk("dev %d,%d transaction %d sync %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction, __entry->sync_commit) +); + +DEFINE_EVENT(jbd_commit, jbd_start_commit, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd_commit, jbd_commit_locking, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd_commit, jbd_commit_flushing, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd_commit, jbd_commit_logging, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +TRACE_EVENT(jbd_drop_transaction, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( char, sync_commit ) + __field( int, transaction ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->sync_commit = commit_transaction->t_synchronous_commit; + __entry->transaction = commit_transaction->t_tid; + ), + + TP_printk("dev %d,%d transaction %d sync %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction, __entry->sync_commit) +); + +TRACE_EVENT(jbd_end_commit, + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( char, sync_commit ) + __field( int, transaction ) + __field( int, head ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->sync_commit = commit_transaction->t_synchronous_commit; + __entry->transaction = commit_transaction->t_tid; + __entry->head = journal->j_tail_sequence; + ), + + TP_printk("dev %d,%d transaction %d sync %d head %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction, __entry->sync_commit, __entry->head) +); + +TRACE_EVENT(jbd_do_submit_data, + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( char, sync_commit ) + __field( int, transaction ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->sync_commit = commit_transaction->t_synchronous_commit; + __entry->transaction = commit_transaction->t_tid; + ), + + TP_printk("dev %d,%d transaction %d sync %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction, __entry->sync_commit) +); + +TRACE_EVENT(jbd_cleanup_journal_tail, + + TP_PROTO(journal_t *journal, tid_t first_tid, + unsigned long block_nr, unsigned long freed), + + TP_ARGS(journal, first_tid, block_nr, freed), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( tid_t, tail_sequence ) + __field( tid_t, first_tid ) + __field(unsigned long, block_nr ) + __field(unsigned long, freed ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->tail_sequence = journal->j_tail_sequence; + __entry->first_tid = first_tid; + __entry->block_nr = block_nr; + __entry->freed = freed; + ), + + TP_printk("dev %d,%d 
from %u to %u offset %lu freed %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->tail_sequence, __entry->first_tid, + __entry->block_nr, __entry->freed) +); + +TRACE_EVENT(jbd_update_superblock_end, + TP_PROTO(journal_t *journal, int wait), + + TP_ARGS(journal, wait), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, wait ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->wait = wait; + ), + + TP_printk("dev %d,%d wait %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->wait) +); + +#endif /* _TRACE_JBD_H */ + +/* This part must be outside protection */ +#include -- cgit v1.2.3 From 40680f2fa4670ab35ee554822a69dda1a118f966 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 24 May 2011 22:24:47 +0200 Subject: ext3: Convert ext3 to new truncate calling convention Mostly trivial conversion. We fix a bug that IS_IMMUTABLE and IS_APPEND files could not be truncated during failed writes as we change the code. In fact the test is not needed at all because both IS_IMMUTABLE and IS_APPEND is tested in upper layers in do_sys_[f]truncate(), may_write(), etc. Signed-off-by: Jan Kara --- fs/ext3/file.c | 1 - fs/ext3/inode.c | 27 +++++++++++---------------- include/linux/ext3_fs.h | 2 +- 3 files changed, 12 insertions(+), 18 deletions(-) diff --git a/fs/ext3/file.c b/fs/ext3/file.c index f55df0e61cbd..86c8ab343f6f 100644 --- a/fs/ext3/file.c +++ b/fs/ext3/file.c @@ -71,7 +71,6 @@ const struct file_operations ext3_file_operations = { }; const struct inode_operations ext3_file_inode_operations = { - .truncate = ext3_truncate, .setattr = ext3_setattr, #ifdef CONFIG_EXT3_FS_XATTR .setxattr = generic_setxattr, diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 3aa05eebe0b8..b4051c9ac5f2 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -234,12 +234,10 @@ void ext3_evict_inode (struct inode *inode) if (inode->i_blocks) ext3_truncate(inode); /* - * Kill off the orphan record which ext3_truncate created. - * AKPM: I think this can be inside the above `if'. - * Note that ext3_orphan_del() has to be able to cope with the - * deletion of a non-existent orphan - this is because we don't - * know if ext3_truncate() actually created an orphan record. - * (Well, we could do this if we need to, but heck - it works) + * Kill off the orphan record created when the inode lost the last + * link. Note that ext3_orphan_del() has to be able to cope with the + * deletion of a non-existent orphan - ext3_truncate() could + * have removed the record. */ ext3_orphan_del(handle, inode); EXT3_I(inode)->i_dtime = get_seconds(); @@ -890,6 +888,9 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, if (!create || err == -EIO) goto cleanup; + /* + * Block out ext3_truncate while we alter the tree + */ mutex_lock(&ei->truncate_mutex); /* @@ -938,9 +939,6 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, */ count = ext3_blks_to_allocate(partial, indirect_blks, maxblocks, blocks_to_boundary); - /* - * Block out ext3_truncate while we alter the tree - */ err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal, offsets + (partial - chain), partial); @@ -1849,7 +1847,7 @@ retry: loff_t end = offset + iov_length(iov, nr_segs); if (end > isize) - vmtruncate(inode, isize); + ext3_truncate_failed_write(inode); } if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) goto retry; @@ -1863,7 +1861,7 @@ retry: /* This is really bad luck. We've written the data * but cannot extend i_size. 
Truncate allocated blocks * and pretend the write failed... */ - ext3_truncate(inode); + ext3_truncate_failed_write(inode); ret = PTR_ERR(handle); goto out; } @@ -2414,8 +2412,6 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, int ext3_can_truncate(struct inode *inode) { - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) - return 0; if (S_ISREG(inode->i_mode)) return 1; if (S_ISDIR(inode->i_mode)) @@ -3264,9 +3260,8 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { - rc = vmtruncate(inode, attr->ia_size); - if (rc) - goto err_out; + truncate_setsize(inode, attr->ia_size); + ext3_truncate(inode); } setattr_copy(inode, attr); diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 5e06acf95d0f..9aaa3a84d373 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -913,7 +913,7 @@ extern void ext3_dirty_inode(struct inode *, int); extern int ext3_change_inode_journal_flag(struct inode *, int); extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *); extern int ext3_can_truncate(struct inode *inode); -extern void ext3_truncate (struct inode *); +extern void ext3_truncate(struct inode *inode); extern void ext3_set_inode_flags(struct inode *); extern void ext3_get_inode_flags(struct ext3_inode_info *); extern void ext3_set_aops(struct inode *inode); -- cgit v1.2.3 From 05713082ab7690a2b22b044cfc867f346c39cd2d Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 26 May 2011 17:17:18 +0200 Subject: jbd: remove dependency on __GFP_NOFAIL The callers of start_this_handle() (or better ext3_journal_start()) are not really prepared to handle allocation failures. Such failures can for example result in silent data loss when it happens in ext3_..._writepage(). OTOH __GFP_NOFAIL is going away so we just retry allocation in start_this_handle(). This loop is potentially dangerous because the oom killer cannot be invoked for GFP_NOFS allocation, so there is a potential for infinitely looping. But still this is better than silent data loss. 
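The allocation idiom this switches to is worth seeing in isolation. A minimal sketch, assuming only kzalloc() and congestion_wait() as used in the hunk below; the helper name nofs_zalloc_retry() is the editor's and does not exist in the kernel:

	#include <linux/slab.h>
	#include <linux/backing-dev.h>

	/*
	 * Sketch only: retry a GFP_NOFS allocation until it succeeds,
	 * sleeping briefly under memory pressure.  The OOM killer cannot
	 * be invoked for GFP_NOFS, so this may loop for a long time, but
	 * that is still better than returning ENOMEM and losing data.
	 */
	static void *nofs_zalloc_retry(size_t size)
	{
		void *p;

		while (!(p = kzalloc(size, GFP_NOFS)))
			congestion_wait(BLK_RW_ASYNC, HZ / 50);
		return p;
	}
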
Signed-off-by: Jan Kara --- fs/jbd/transaction.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index f7ee81a065da..83a661890868 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -26,6 +26,7 @@ #include #include #include +#include static void __journal_temp_unlink_buffer(struct journal_head *jh); @@ -99,11 +100,10 @@ static int start_this_handle(journal_t *journal, handle_t *handle) alloc_transaction: if (!journal->j_running_transaction) { - new_transaction = kzalloc(sizeof(*new_transaction), - GFP_NOFS|__GFP_NOFAIL); + new_transaction = kzalloc(sizeof(*new_transaction), GFP_NOFS); if (!new_transaction) { - ret = -ENOMEM; - goto out; + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto alloc_transaction; } } -- cgit v1.2.3 From bd5c9e1854e13d0c62a3de29a5fbc15dd6a4d8c6 Mon Sep 17 00:00:00 2001 From: Ding Dinghua Date: Thu, 26 May 2011 10:29:01 +0800 Subject: jbd: fix a bug of leaking jh->b_jcount journal_get_create_access should drop jh->b_jcount in error handling path Signed-off-by: Ding Dinghua Signed-off-by: Jan Kara --- fs/jbd/transaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 83a661890868..dc39efd05d54 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -844,8 +844,8 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh) */ JBUFFER_TRACE(jh, "cancelling revoke"); journal_cancel_revoke(handle, jh); - journal_put_journal_head(jh); out: + journal_put_journal_head(jh); return err; } -- cgit v1.2.3 From ad95c5e9bc8b5885f94dce720137cac8fa8da4c9 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 30 May 2011 13:29:20 +0200 Subject: ext3: Fix oops in ext3_try_to_allocate_with_rsv() Block allocation is called from two places: ext3_get_blocks_handle() and ext3_xattr_block_set(). These two callers are not necessarily synchronized because xattr code holds only xattr_sem and i_mutex, and ext3_get_blocks_handle() may hold only truncate_mutex when called from writepage() path. Block reservation code does not expect two concurrent allocations to happen to the same inode and thus assertions can be triggered or reservation structure corruption can occur. Fix the problem by taking truncate_mutex in xattr code to serialize allocations. CC: Sage Weil CC: stable@kernel.org Reported-by: Fyodor Ustinov Signed-off-by: Jan Kara --- fs/ext3/xattr.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c index 32e6cc23bd9a..d565759d82ee 100644 --- a/fs/ext3/xattr.c +++ b/fs/ext3/xattr.c @@ -803,8 +803,16 @@ inserted: /* We need to allocate a new block */ ext3_fsblk_t goal = ext3_group_first_block_no(sb, EXT3_I(inode)->i_block_group); - ext3_fsblk_t block = ext3_new_block(handle, inode, - goal, &error); + ext3_fsblk_t block; + + /* + * Protect us agaist concurrent allocations to the + * same inode from ext3_..._writepage(). Reservation + * code does not expect racing allocations. 
+ */ + mutex_lock(&EXT3_I(inode)->truncate_mutex); + block = ext3_new_block(handle, inode, goal, &error); + mutex_unlock(&EXT3_I(inode)->truncate_mutex); if (error) goto cleanup; ea_idebug(inode, "creating block %d", block); -- cgit v1.2.3 From fbcc9e624b8dbc7f740fac3906aa261b83398100 Mon Sep 17 00:00:00 2001 From: Petr Uzel Date: Tue, 31 May 2011 11:36:06 +0200 Subject: ext2: include fs.h into ext2_fs.h AC_CHECK_HEADERS([linux/ext2_fs.h]) fails with configure:34666: checking linux/ext2_fs.h usability configure:34666: gcc -std=gnu99 -c -ggdb3 -O0 -Wunreachable-code conftest.c >&5 In file included from conftest.c:406:0: /usr/include/linux/ext2_fs.h: In function 'ext2_mask_flags': /usr/include/linux/ext2_fs.h:182:21: error: 'FS_DIRSYNC_FL' undeclared (first use in this function) /usr/include/linux/ext2_fs.h:182:21: note: each undeclared identifier is reported only once for each function it appears in /usr/include/linux/ext2_fs.h:182:37: error: 'FS_TOPDIR_FL' undeclared (first use in this function) /usr/include/linux/ext2_fs.h:184:19: error: 'FS_NODUMP_FL' undeclared (first use in this function) /usr/include/linux/ext2_fs.h:184:34: error: 'FS_NOATIME_FL' undeclared (first use in this function) It's reasonable to have headers that include all necessary definitions. So fix this by including fs.h into ext2_fs.h. Signed-off-by: Petr Uzel Signed-off-by: Jan Kara --- include/linux/ext2_fs.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h index 2dfa7076e8b6..53792bf36c71 100644 --- a/include/linux/ext2_fs.h +++ b/include/linux/ext2_fs.h @@ -18,6 +18,7 @@ #include #include +#include /* * The second extended filesystem constants/structures -- cgit v1.2.3 From 9008593017069ad513cc7dc78a6c94e8dfddba31 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 1 Jun 2011 23:34:04 +0900 Subject: ext3: use proper little-endian bitops ext3_{set,clear}_bit() is defined as __test_and_{set,clear}_bit_le() for ext3. But all ext3_{set,clear}_bit() calls ignore return values. So these can be replaced with __{set,clear}_bit_le(). This changes ext3_{set,clear}_bit safely, because if someone uses these macros without noticing the change, new ext3_{set,clear}_bit don't have return value and causes compiler errors where the return value is used. This also removes unused ext3_find_first_zero_bit(). Signed-off-by: Akinobu Mita Cc: Jan Kara Cc: Andrew Morton Cc: Andreas Dilger Cc: linux-ext4@vger.kernel.org Signed-off-by: Jan Kara --- include/linux/ext3_fs.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 9aaa3a84d373..8f1f908eddb8 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -418,12 +418,11 @@ struct ext3_inode { #define EXT2_MOUNT_DATA_FLAGS EXT3_MOUNT_DATA_FLAGS #endif -#define ext3_set_bit __test_and_set_bit_le +#define ext3_set_bit __set_bit_le #define ext3_set_bit_atomic ext2_set_bit_atomic -#define ext3_clear_bit __test_and_clear_bit_le +#define ext3_clear_bit __clear_bit_le #define ext3_clear_bit_atomic ext2_clear_bit_atomic #define ext3_test_bit test_bit_le -#define ext3_find_first_zero_bit find_first_zero_bit_le #define ext3_find_next_zero_bit find_next_zero_bit_le /* -- cgit v1.2.3 From ee3e77f18010679a889b3831c2dd931238c12d09 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 3 Jun 2011 21:58:11 +0200 Subject: ext3: Improve truncate error handling New truncate calling convention allows us to handle errors from ext3_block_truncate_page(). 
So reorganize the code so that ext3_block_truncate_page() is called before we change inode size. This also removes unnecessary block zeroing from error recovery after failed buffered writes (zeroing isn't needed because we could have never written non-zero data to disk). We have to be careful and keep zeroing in direct IO write error recovery because there we might have already overwritten end of the last file block. Signed-off-by: Jan Kara --- fs/ext3/inode.c | 101 +++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 63 insertions(+), 38 deletions(-) diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index b4051c9ac5f2..d2e4547c7806 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -43,6 +43,7 @@ #include "acl.h" static int ext3_writepage_trans_blocks(struct inode *inode); +static int ext3_block_truncate_page(struct inode *inode, loff_t from); /* * Test whether an inode is a fast symlink. @@ -1207,6 +1208,16 @@ static void ext3_truncate_failed_write(struct inode *inode) ext3_truncate(inode); } +/* + * Truncate blocks that were not used by direct IO write. We have to zero out + * the last file block as well because direct IO might have written to it. + */ +static void ext3_truncate_failed_direct_write(struct inode *inode) +{ + ext3_block_truncate_page(inode, inode->i_size); + ext3_truncate(inode); +} + static int ext3_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) @@ -1847,7 +1858,7 @@ retry: loff_t end = offset + iov_length(iov, nr_segs); if (end > isize) - ext3_truncate_failed_write(inode); + ext3_truncate_failed_direct_write(inode); } if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) goto retry; @@ -1861,7 +1872,7 @@ retry: /* This is really bad luck. We've written the data * but cannot extend i_size. Truncate allocated blocks * and pretend the write failed... */ - ext3_truncate_failed_write(inode); + ext3_truncate_failed_direct_write(inode); ret = PTR_ERR(handle); goto out; } @@ -1971,17 +1982,24 @@ void ext3_set_aops(struct inode *inode) * This required during truncate. We need to physically zero the tail end * of that block so it doesn't yield old data if the file is later grown. 
*/ -static int ext3_block_truncate_page(handle_t *handle, struct page *page, - struct address_space *mapping, loff_t from) +static int ext3_block_truncate_page(struct inode *inode, loff_t from) { ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned offset = from & (PAGE_CACHE_SIZE - 1); unsigned blocksize, iblock, length, pos; - struct inode *inode = mapping->host; + struct page *page; + handle_t *handle = NULL; struct buffer_head *bh; int err = 0; + /* Truncated on block boundary - nothing to do */ blocksize = inode->i_sb->s_blocksize; + if ((from & (blocksize - 1)) == 0) + return 0; + + page = grab_cache_page(inode->i_mapping, index); + if (!page) + return -ENOMEM; length = blocksize - (offset & (blocksize - 1)); iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); @@ -2026,11 +2044,23 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page, goto unlock; } + /* data=writeback mode doesn't need transaction to zero-out data */ + if (!ext3_should_writeback_data(inode)) { + /* We journal at most one block */ + handle = ext3_journal_start(inode, 1); + if (IS_ERR(handle)) { + clear_highpage(page); + flush_dcache_page(page); + err = PTR_ERR(handle); + goto unlock; + } + } + if (ext3_should_journal_data(inode)) { BUFFER_TRACE(bh, "get write access"); err = ext3_journal_get_write_access(handle, bh); if (err) - goto unlock; + goto stop; } zero_user(page, offset, length); @@ -2044,6 +2074,9 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page, err = ext3_journal_dirty_data(handle, bh); mark_buffer_dirty(bh); } +stop: + if (handle) + ext3_journal_stop(handle); unlock: unlock_page(page); @@ -2455,7 +2488,6 @@ void ext3_truncate(struct inode *inode) struct ext3_inode_info *ei = EXT3_I(inode); __le32 *i_data = ei->i_data; int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb); - struct address_space *mapping = inode->i_mapping; int offsets[4]; Indirect chain[4]; Indirect *partial; @@ -2463,7 +2495,6 @@ void ext3_truncate(struct inode *inode) int n; long last_block; unsigned blocksize = inode->i_sb->s_blocksize; - struct page *page; trace_ext3_truncate_enter(inode); @@ -2473,37 +2504,12 @@ void ext3_truncate(struct inode *inode) if (inode->i_size == 0 && ext3_should_writeback_data(inode)) ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE); - /* - * We have to lock the EOF page here, because lock_page() nests - * outside journal_start(). - */ - if ((inode->i_size & (blocksize - 1)) == 0) { - /* Block boundary? 
Nothing to do */ - page = NULL; - } else { - page = grab_cache_page(mapping, - inode->i_size >> PAGE_CACHE_SHIFT); - if (!page) - goto out_notrans; - } - handle = start_transaction(inode); - if (IS_ERR(handle)) { - if (page) { - clear_highpage(page); - flush_dcache_page(page); - unlock_page(page); - page_cache_release(page); - } + if (IS_ERR(handle)) goto out_notrans; - } last_block = (inode->i_size + blocksize-1) >> EXT3_BLOCK_SIZE_BITS(inode->i_sb); - - if (page) - ext3_block_truncate_page(handle, page, mapping, inode->i_size); - n = ext3_block_to_path(inode, last_block, offsets, NULL); if (n == 0) goto out_stop; /* error */ @@ -3251,11 +3257,30 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) } error = ext3_orphan_add(handle, inode); + if (error) { + ext3_journal_stop(handle); + goto err_out; + } EXT3_I(inode)->i_disksize = attr->ia_size; - rc = ext3_mark_inode_dirty(handle, inode); - if (!error) - error = rc; + error = ext3_mark_inode_dirty(handle, inode); ext3_journal_stop(handle); + if (error) { + /* Some hard fs error must have happened. Bail out. */ + ext3_orphan_del(NULL, inode); + goto err_out; + } + rc = ext3_block_truncate_page(inode, attr->ia_size); + if (rc) { + /* Cleanup orphan list and exit */ + handle = ext3_journal_start(inode, 3); + if (IS_ERR(handle)) { + ext3_orphan_del(NULL, inode); + goto err_out; + } + ext3_orphan_del(handle, inode); + ext3_journal_stop(handle); + goto err_out; + } } if ((attr->ia_valid & ATTR_SIZE) && -- cgit v1.2.3 From ad434017718a725b1695fb2ebfff312cf3693d3b Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Tue, 7 Jun 2011 12:27:05 +0200 Subject: ext3/ext4 Documentation: remove bh/nobh since it has been deprecated Bh and nobh mount option has been deprecated in ext4 (206f7ab4f49a2021fcb8687f25395be77711ddee) and in ext3 (4c4d3901225518ed1a4c938ba15ba09842a00770) so remove those options from documentation. Signed-off-by: Lukas Czerner Reviewed-by: Eric Sandeen Signed-off-by: Jan Kara --- Documentation/filesystems/ext3.txt | 9 --------- Documentation/filesystems/ext4.txt | 23 +++++++---------------- 2 files changed, 7 insertions(+), 25 deletions(-) diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt index 272f80d5f966..aee556031adb 100644 --- a/Documentation/filesystems/ext3.txt +++ b/Documentation/filesystems/ext3.txt @@ -147,15 +147,6 @@ grpjquota= during journal replay. They replace the above package for more details (http://sourceforge.net/projects/linuxquota). -bh (*) ext3 associates buffer heads to data pages to -nobh (a) cache disk block mapping information - (b) link pages into transaction to provide - ordering guarantees. - "bh" option forces use of buffer heads. - "nobh" option tries to avoid associating buffer - heads (supported only for "writeback" mode). - - Specification ============= Ext3 shares all disk implementation with the ext2 filesystem, and adds diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt index 3ae9bc94352a..232a575a0c48 100644 --- a/Documentation/filesystems/ext4.txt +++ b/Documentation/filesystems/ext4.txt @@ -68,12 +68,12 @@ Note: More extensive information for getting started with ext4 can be '-o barriers=[0|1]' mount option for both ext3 and ext4 filesystems for a fair comparison. When tuning ext3 for best benchmark numbers, it is often worthwhile to try changing the data journaling mode; '-o - data=writeback,nobh' can be faster for some workloads. 
(Note - however that running mounted with data=writeback can potentially - leave stale data exposed in recently written files in case of an - unclean shutdown, which could be a security exposure in some - situations.) Configuring the filesystem with a large journal can - also be helpful for metadata-intensive workloads. + data=writeback' can be faster for some workloads. (Note however that + running mounted with data=writeback can potentially leave stale data + exposed in recently written files in case of an unclean shutdown, + which could be a security exposure in some situations.) Configuring + the filesystem with a large journal can also be helpful for + metadata-intensive workloads. 2. Features =========== @@ -272,14 +272,6 @@ grpjquota= during journal replay. They replace the above package for more details (http://sourceforge.net/projects/linuxquota). -bh (*) ext4 associates buffer heads to data pages to -nobh (a) cache disk block mapping information - (b) link pages into transaction to provide - ordering guarantees. - "bh" option forces use of buffer heads. - "nobh" option tries to avoid associating buffer - heads (supported only for "writeback" mode). - stripe=n Number of filesystem blocks that mballoc will try to use for allocation size and alignment. For RAID5/6 systems this should be the number of data @@ -393,8 +385,7 @@ dioread_nolock locking. If the dioread_nolock option is specified write and convert the extent to initialized after IO completes. This approach allows ext4 code to avoid using inode mutex, which improves scalability on high - speed storages. However this does not work with nobh - option and the mount will fail. Nor does it work with + speed storages. However this does not work with data journaling and dioread_nolock option will be ignored with kernel warning. Note that dioread_nolock code path is only used for extent-based files. -- cgit v1.2.3 From 81fe8c62febade6b5d0915269b06a0c50448da27 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Fri, 10 Jun 2011 14:59:05 -0700 Subject: ext3/ioctl.c: silence sparse warnings about different address spaces The 'from' argument for copy_from_user and the 'to' argument for copy_to_user should both be tagged as __user address space. Signed-off-by: H Hartley Sweeten Cc: Andrew Morton Cc: Andreas Dilger Signed-off-by: Jan Kara --- fs/ext3/ioctl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c index f4090bd2f345..c7f43944f160 100644 --- a/fs/ext3/ioctl.c +++ b/fs/ext3/ioctl.c @@ -285,7 +285,7 @@ group_add_out: if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (copy_from_user(&range, (struct fstrim_range *)arg, + if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range))) return -EFAULT; @@ -293,7 +293,7 @@ group_add_out: if (ret < 0) return ret; - if (copy_to_user((struct fstrim_range *)arg, &range, + if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range))) return -EFAULT; -- cgit v1.2.3 From 2c2ea9451fc2a12ee57c8346f0da26969d07ee7f Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Wed, 22 Jun 2011 10:51:09 +0200 Subject: ext3: Return -EINVAL when start is beyond the end of fs in ext3_trim_fs() We should return -EINVAL when the FITRIM parameters are not sane, but currently we are exiting silently if start is beyond the end of the file system. This commit fixes this so we return -EINVAL as other file systems do. 
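The intent is easier to see lifted out of ext3_trim_fs(); the helper below is a simplified restatement by the editor (name and u64 parameters assumed), not kernel code:

	#include <linux/types.h>
	#include <linux/errno.h>

	/* A range starting past the end of the filesystem is a caller
	 * error and gets -EINVAL; a range that merely runs past the end
	 * is clamped and trimmed as before. */
	static int check_trim_range(u64 start, u64 *len, u64 max_blks)
	{
		if (start >= max_blks)
			return -EINVAL;
		if (start + *len > max_blks)
			*len = max_blks - start;
		return 0;
	}
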
Signed-off-by: Lukas Czerner CC: Jan Kara Signed-off-by: Jan Kara --- fs/ext3/balloc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index f7d111e499ad..6386d76f44a7 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -2108,7 +2108,7 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range) if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb))) return -EINVAL; if (start >= max_blks) - goto out; + return -EINVAL; if (start + len > max_blks) len = max_blks - start; @@ -2156,8 +2156,6 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range) if (ret >= 0) ret = 0; - -out: range->len = trimmed * sb->s_blocksize; return ret; -- cgit v1.2.3 From 9617757fb3dc6274b42afd2dcaa4fbc3ef6db98b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 24 Jun 2011 08:18:23 +1000 Subject: drm/nouveau: fix fetching vbios from above 4GiB vram addresses Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index ff339df6f007..1aa73d3957e1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -135,13 +135,14 @@ static void load_vbios_pramin(struct drm_device *dev, uint8_t *data) int i; if (dev_priv->card_type >= NV_50) { - uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8; - - if (!vbios_vram) - vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000; + u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8; + if (!addr) { + addr = (u64)nv_rd32(dev, 0x1700) << 16; + addr += 0xf0000; + } old_bar0_pramin = nv_rd32(dev, 0x1700); - nv_wr32(dev, 0x1700, vbios_vram >> 16); + nv_wr32(dev, 0x1700, addr >> 16); } /* bail if no rom signature */ -- cgit v1.2.3 From 0de53a546b4f7056d1404e40320e57aad723621c Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Thu, 23 Jun 2011 16:35:31 +0200 Subject: drm/nouveau: fix nouveau_mem object leak It's a regression from "drm/nouveau: create temp vmas for both src and dst of bo moves". 
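The leak is purely an ordering problem; a generic illustration (editor's sketch, not the driver code):

	#include <linux/slab.h>

	/*
	 * Clearing the pointer before freeing it turns the kfree() into
	 * kfree(NULL) and leaks the allocation: free first, then clear.
	 * The buggy order was: *node = NULL; kfree(*node);
	 */
	static void free_and_clear(void **node)
	{
		kfree(*node);
		*node = NULL;
	}
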
Signed-off-by: Marcin Slusarz Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_mem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 765f0e57da78..ab79bf8cc83a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -846,8 +846,8 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { nouveau_mem_node_cleanup(mem->mm_node); - mem->mm_node = NULL; kfree(mem->mm_node); + mem->mm_node = NULL; } static int -- cgit v1.2.3 From 8fe198b2c6fd8455db9f07d712ee54e2a1d02783 Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Thu, 23 Jun 2011 16:34:30 +0200 Subject: drm/nouveau: fix nouveau_vma object leak Signed-off-by: Marcin Slusarz Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 022393777805..5f0bc57fdaab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -113,8 +113,10 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) vma = nouveau_bo_vma_find(nvbo, fpriv->vm); if (vma) { - if (--vma->refcount == 0) + if (--vma->refcount == 0) { nouveau_bo_vma_del(nvbo, vma); + kfree(vma); + } } ttm_bo_unreserve(&nvbo->bo); } -- cgit v1.2.3 From 3b40d07d8c4a9dc33ee6e1b4ad1d377309531ffe Mon Sep 17 00:00:00 2001 From: Younes Manton Date: Fri, 24 Jun 2011 01:15:58 -0400 Subject: drm/nouveau: Calculate reserved VRAM for PRAMIN value before use. 'drm/nouveau: rework vram init/fini ordering a little' changed the order of instmem.init() and nouveau_mem_vram_init() which resulted in using ramin_rsvd_vram before it was calculated and failing to init any accel on pre-NV50 cards. 
Since it's only used on Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_mem.c | 28 ---------------------------- drivers/gpu/drm/nouveau/nv04_instmem.c | 25 +++++++++++++++++++++++++ 2 files changed, 25 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index ab79bf8cc83a..81dadeb9debc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -423,34 +423,6 @@ nouveau_mem_vram_init(struct drm_device *dev) return ret; } - /* reserve space at end of VRAM for PRAMIN */ - if (dev_priv->card_type >= NV_50) { - dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024; - } else - if (dev_priv->card_type >= NV_40) { - u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8); - u32 rsvd; - - /* estimate grctx size, the magics come from nv40_grctx.c */ - if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs; - else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs; - else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs; - else rsvd = 0x4a40 * vs; - rsvd += 16 * 1024; - rsvd *= dev_priv->engine.fifo.channels; - - /* pciegart table */ - if (drm_pci_device_is_pcie(dev)) - rsvd += 512 * 1024; - - /* object storage */ - rsvd += 512 * 1024; - - dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096); - } else { - dev_priv->ramin_rsvd_vram = 512 * 1024; - } - NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); if (dev_priv->vram_sys_base) { NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index ae36bfc84853..e2075dec84a3 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -28,6 +28,31 @@ int nv04_instmem_init(struct drm_device *dev) /* RAMIN always available */ dev_priv->ramin_available = true; + /* Reserve space at end of VRAM for PRAMIN */ + if (dev_priv->card_type >= NV_40) { + u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8); + u32 rsvd; + + /* estimate grctx size, the magics come from nv40_grctx.c */ + if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs; + else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs; + else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs; + else rsvd = 0x4a40 * vs; + rsvd += 16 * 1024; + rsvd *= dev_priv->engine.fifo.channels; + + /* pciegart table */ + if (drm_pci_device_is_pcie(dev)) + rsvd += 512 * 1024; + + /* object storage */ + rsvd += 512 * 1024; + + dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096); + } else { + dev_priv->ramin_rsvd_vram = 512 * 1024; + } + /* Setup shared RAMHT */ ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096, NVOBJ_FLAG_ZERO_ALLOC, &ramht); -- cgit v1.2.3 From 60f7ab06651db7d9916c0d9138ed3b12676e920d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 25 Jun 2011 08:54:46 +0300 Subject: drm/nouveau: error paths leak in nvc0_graph_construct_context() Two of these error paths returned without freeing "ctx". 
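The fix below converges on the usual single-exit cleanup shape; a hedged sketch with hypothetical stand-in steps (build_step_one()/build_step_two() are the editor's placeholders for the firmware handshake and context generation calls):

	#include <linux/slab.h>
	#include <linux/errno.h>

	static int build_step_one(void *ctx) { return 0; }	/* stand-in */
	static int build_step_two(void *ctx) { return 0; }	/* stand-in */

	/* Every failure path funnels through one label so the temporary
	 * context buffer cannot leak on the way out. */
	static int build_context(size_t size, void **out)
	{
		void *ctx;
		int ret;

		ctx = kmalloc(size, GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ret = build_step_one(ctx);
		if (ret)
			goto err;
		ret = build_step_two(ctx);
		if (ret)
			goto err;

		*out = ctx;
		return 0;
	err:
		kfree(ctx);
		return ret;
	}
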
Signed-off-by: Dan Carpenter Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_graph.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index 39e9208a708c..3a97431996c5 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -106,7 +106,8 @@ nvc0_graph_construct_context(struct nouveau_channel *chan) if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n"); nvc0_graph_ctxctl_debug(dev); - return -EBUSY; + ret = -EBUSY; + goto err; } } else { nvc0_graph_load_context(chan); @@ -119,10 +120,8 @@ nvc0_graph_construct_context(struct nouveau_channel *chan) } ret = nvc0_grctx_generate(chan); - if (ret) { - kfree(ctx); - return ret; - } + if (ret) + goto err; if (!nouveau_ctxfw) { nv_wr32(dev, 0x409840, 0x80000000); @@ -131,14 +130,13 @@ nvc0_graph_construct_context(struct nouveau_channel *chan) if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n"); nvc0_graph_ctxctl_debug(dev); - return -EBUSY; + ret = -EBUSY; + goto err; } } else { ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst); - if (ret) { - kfree(ctx); - return ret; - } + if (ret) + goto err; } for (i = 0; i < priv->grctx_size; i += 4) @@ -146,6 +144,10 @@ nvc0_graph_construct_context(struct nouveau_channel *chan) priv->grctx_vals = ctx; return 0; + +err: + kfree(ctx); + return ret; } static int -- cgit v1.2.3 From bb189247f35688a3353545902c56290fb7d7754a Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 24 Jun 2011 23:11:59 +0200 Subject: jbd: Fix oops in journal_remove_journal_head() journal_remove_journal_head() can oops when trying to access journal_head returned by bh2jh(). This is caused for example by the following race: TASK1 TASK2 journal_commit_transaction() ... processing t_forget list __journal_refile_buffer(jh); if (!jh->b_transaction) { jbd_unlock_bh_state(bh); journal_try_to_free_buffers() journal_grab_journal_head(bh) jbd_lock_bh_state(bh) __journal_try_to_free_buffer() journal_put_journal_head(jh) journal_remove_journal_head(bh); journal_put_journal_head() in TASK2 sees that b_jcount == 0 and buffer is not part of any transaction and thus frees journal_head before TASK1 gets to doing so. Note that even buffer_head can be released by try_to_free_buffers() after journal_put_journal_head() which adds even larger opportunity for oops (but I didn't see this happen in reality). Fix the problem by making transactions hold their own journal_head reference (in b_jcount). That way we don't have to remove journal_head explicitely via journal_remove_journal_head() and instead just remove journal_head when b_jcount drops to zero. The result of this is that [__]journal_refile_buffer(), [__]journal_unfile_buffer(), and __journal_remove_checkpoint() can free journal_head which needs modification of a few callers. Also we have to be careful because once journal_head is removed, buffer_head might be freed as well. So we have to get our own buffer_head reference where it matters. 
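Stripped of the list manipulation, the new lifetime rule reduces to the reference drop below, a condensed paraphrase of the reworked journal_put_journal_head() in this patch (locking helpers as in fs/jbd/journal.c):

	/*
	 * Every user of a journal_head, including a running or checkpoint
	 * transaction that has the buffer filed, owns one b_jcount
	 * reference; the journal_head is detached and freed only when the
	 * last reference is dropped.
	 */
	void journal_put_journal_head(struct journal_head *jh)
	{
		struct buffer_head *bh = jh2bh(jh);

		jbd_lock_bh_journal_head(bh);
		J_ASSERT_JH(jh, jh->b_jcount > 0);
		--jh->b_jcount;
		if (!jh->b_jcount) {
			__journal_remove_journal_head(bh);	/* frees jh */
			jbd_unlock_bh_journal_head(bh);
			__brelse(bh);	/* drop the bh reference the jh held */
		} else
			jbd_unlock_bh_journal_head(bh);
	}
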
Signed-off-by: Jan Kara --- fs/jbd/checkpoint.c | 27 ++++++++------- fs/jbd/commit.c | 46 ++++++++++++------------- fs/jbd/journal.c | 95 +++++++++++++++++----------------------------------- fs/jbd/transaction.c | 73 ++++++++++++++++++++-------------------- include/linux/jbd.h | 1 - 5 files changed, 104 insertions(+), 138 deletions(-) diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index dea7503b47e8..61655a37c731 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -96,10 +96,14 @@ static int __try_to_free_cp_buf(struct journal_head *jh) if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh) && !buffer_write_io_error(bh)) { + /* + * Get our reference so that bh cannot be freed before + * we unlock it + */ + get_bh(bh); JBUFFER_TRACE(jh, "remove from checkpoint list"); ret = __journal_remove_checkpoint(jh) + 1; jbd_unlock_bh_state(bh); - journal_remove_journal_head(bh); BUFFER_TRACE(bh, "release"); __brelse(bh); } else { @@ -221,8 +225,8 @@ restart: spin_lock(&journal->j_list_lock); goto restart; } + get_bh(bh); if (buffer_locked(bh)) { - get_bh(bh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); wait_on_buffer(bh); @@ -241,7 +245,6 @@ restart: */ released = __journal_remove_checkpoint(jh); jbd_unlock_bh_state(bh); - journal_remove_journal_head(bh); __brelse(bh); } @@ -305,12 +308,12 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh, ret = 1; if (unlikely(buffer_write_io_error(bh))) ret = -EIO; + get_bh(bh); J_ASSERT_JH(jh, !buffer_jbddirty(bh)); BUFFER_TRACE(bh, "remove from checkpoint"); __journal_remove_checkpoint(jh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); - journal_remove_journal_head(bh); __brelse(bh); } else { /* @@ -526,9 +529,9 @@ int cleanup_journal_tail(journal_t *journal) /* * journal_clean_one_cp_list * - * Find all the written-back checkpoint buffers in the given list and release them. + * Find all the written-back checkpoint buffers in the given list and release + * them. * - * Called with the journal locked. * Called with j_list_lock held. * Returns number of bufers reaped (for debug) */ @@ -635,8 +638,8 @@ out: * checkpoint lists. * * The function returns 1 if it frees the transaction, 0 otherwise. + * The function can free jh and bh. * - * This function is called with the journal locked. * This function is called with j_list_lock held. * This function is called with jbd_lock_bh_state(jh2bh(jh)) */ @@ -655,13 +658,14 @@ int __journal_remove_checkpoint(struct journal_head *jh) } journal = transaction->t_journal; + JBUFFER_TRACE(jh, "removing from transaction"); __buffer_unlink(jh); jh->b_cp_transaction = NULL; + journal_put_journal_head(jh); if (transaction->t_checkpoint_list != NULL || transaction->t_checkpoint_io_list != NULL) goto out; - JBUFFER_TRACE(jh, "transaction has no more buffers"); /* * There is one special case to worry about: if we have just pulled the @@ -672,10 +676,8 @@ int __journal_remove_checkpoint(struct journal_head *jh) * The locking here around t_state is a bit sleazy. * See the comment at the end of journal_commit_transaction(). 
*/ - if (transaction->t_state != T_FINISHED) { - JBUFFER_TRACE(jh, "belongs to running/committing transaction"); + if (transaction->t_state != T_FINISHED) goto out; - } /* OK, that was the last buffer for the transaction: we can now safely remove this transaction from the log */ @@ -687,7 +689,6 @@ int __journal_remove_checkpoint(struct journal_head *jh) wake_up(&journal->j_wait_logspace); ret = 1; out: - JBUFFER_TRACE(jh, "exit"); return ret; } @@ -706,6 +707,8 @@ void __journal_insert_checkpoint(struct journal_head *jh, J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh))); J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); + /* Get reference for checkpointing transaction */ + journal_grab_journal_head(jh2bh(jh)); jh->b_cp_transaction = transaction; if (!transaction->t_checkpoint_list) { diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index eedd201374a8..8799207df058 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c @@ -258,10 +258,6 @@ write_out_data: jbd_unlock_bh_state(bh); if (locked) unlock_buffer(bh); - journal_remove_journal_head(bh); - /* One for our safety reference, other for - * journal_remove_journal_head() */ - put_bh(bh); release_data_buffer(bh); } @@ -455,14 +451,9 @@ void journal_commit_transaction(journal_t *journal) } if (buffer_jbd(bh) && bh2jh(bh) == jh && jh->b_transaction == commit_transaction && - jh->b_jlist == BJ_Locked) { + jh->b_jlist == BJ_Locked) __journal_unfile_buffer(jh); - jbd_unlock_bh_state(bh); - journal_remove_journal_head(bh); - put_bh(bh); - } else { - jbd_unlock_bh_state(bh); - } + jbd_unlock_bh_state(bh); release_data_buffer(bh); cond_resched_lock(&journal->j_list_lock); } @@ -807,10 +798,16 @@ restart_loop: while (commit_transaction->t_forget) { transaction_t *cp_transaction; struct buffer_head *bh; + int try_to_free = 0; jh = commit_transaction->t_forget; spin_unlock(&journal->j_list_lock); bh = jh2bh(jh); + /* + * Get a reference so that bh cannot be freed before we are + * done with it. + */ + get_bh(bh); jbd_lock_bh_state(bh); J_ASSERT_JH(jh, jh->b_transaction == commit_transaction || jh->b_transaction == journal->j_running_transaction); @@ -868,28 +865,27 @@ restart_loop: __journal_insert_checkpoint(jh, commit_transaction); if (is_journal_aborted(journal)) clear_buffer_jbddirty(bh); - JBUFFER_TRACE(jh, "refile for checkpoint writeback"); - __journal_refile_buffer(jh); - jbd_unlock_bh_state(bh); } else { J_ASSERT_BH(bh, !buffer_dirty(bh)); - /* The buffer on BJ_Forget list and not jbddirty means + /* + * The buffer on BJ_Forget list and not jbddirty means * it has been freed by this transaction and hence it * could not have been reallocated until this * transaction has committed. *BUT* it could be * reallocated once we have written all the data to * disk and before we process the buffer on BJ_Forget - * list. */ - JBUFFER_TRACE(jh, "refile or unfile freed buffer"); - __journal_refile_buffer(jh); - if (!jh->b_transaction) { - jbd_unlock_bh_state(bh); - /* needs a brelse */ - journal_remove_journal_head(bh); - release_buffer_page(bh); - } else - jbd_unlock_bh_state(bh); + * list. 
+ */ + if (!jh->b_next_transaction) + try_to_free = 1; } + JBUFFER_TRACE(jh, "refile or unfile freed buffer"); + __journal_refile_buffer(jh); + jbd_unlock_bh_state(bh); + if (try_to_free) + release_buffer_page(bh); + else + __brelse(bh); cond_resched_lock(&journal->j_list_lock); } spin_unlock(&journal->j_list_lock); diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index ab019ee77888..9fe061fb8779 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -1803,10 +1803,9 @@ static void journal_free_journal_head(struct journal_head *jh) * When a buffer has its BH_JBD bit set it is immune from being released by * core kernel code, mainly via ->b_count. * - * A journal_head may be detached from its buffer_head when the journal_head's - * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL. - * Various places in JBD call journal_remove_journal_head() to indicate that the - * journal_head can be dropped if needed. + * A journal_head is detached from its buffer_head when the journal_head's + * b_jcount reaches zero. Running transaction (b_transaction) and checkpoint + * transaction (b_cp_transaction) hold their references to b_jcount. * * Various places in the kernel want to attach a journal_head to a buffer_head * _before_ attaching the journal_head to a transaction. To protect the @@ -1819,17 +1818,16 @@ static void journal_free_journal_head(struct journal_head *jh) * (Attach a journal_head if needed. Increments b_jcount) * struct journal_head *jh = journal_add_journal_head(bh); * ... - * jh->b_transaction = xxx; - * journal_put_journal_head(jh); - * - * Now, the journal_head's b_jcount is zero, but it is safe from being released - * because it has a non-zero b_transaction. + * (Get another reference for transaction) + * journal_grab_journal_head(bh); + * jh->b_transaction = xxx; + * (Put original reference) + * journal_put_journal_head(jh); */ /* * Give a buffer_head a journal_head. * - * Doesn't need the journal lock. * May sleep. 
*/ struct journal_head *journal_add_journal_head(struct buffer_head *bh) @@ -1893,61 +1891,29 @@ static void __journal_remove_journal_head(struct buffer_head *bh) struct journal_head *jh = bh2jh(bh); J_ASSERT_JH(jh, jh->b_jcount >= 0); - - get_bh(bh); - if (jh->b_jcount == 0) { - if (jh->b_transaction == NULL && - jh->b_next_transaction == NULL && - jh->b_cp_transaction == NULL) { - J_ASSERT_JH(jh, jh->b_jlist == BJ_None); - J_ASSERT_BH(bh, buffer_jbd(bh)); - J_ASSERT_BH(bh, jh2bh(jh) == bh); - BUFFER_TRACE(bh, "remove journal_head"); - if (jh->b_frozen_data) { - printk(KERN_WARNING "%s: freeing " - "b_frozen_data\n", - __func__); - jbd_free(jh->b_frozen_data, bh->b_size); - } - if (jh->b_committed_data) { - printk(KERN_WARNING "%s: freeing " - "b_committed_data\n", - __func__); - jbd_free(jh->b_committed_data, bh->b_size); - } - bh->b_private = NULL; - jh->b_bh = NULL; /* debug, really */ - clear_buffer_jbd(bh); - __brelse(bh); - journal_free_journal_head(jh); - } else { - BUFFER_TRACE(bh, "journal_head was locked"); - } + J_ASSERT_JH(jh, jh->b_transaction == NULL); + J_ASSERT_JH(jh, jh->b_next_transaction == NULL); + J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); + J_ASSERT_JH(jh, jh->b_jlist == BJ_None); + J_ASSERT_BH(bh, buffer_jbd(bh)); + J_ASSERT_BH(bh, jh2bh(jh) == bh); + BUFFER_TRACE(bh, "remove journal_head"); + if (jh->b_frozen_data) { + printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__); + jbd_free(jh->b_frozen_data, bh->b_size); } + if (jh->b_committed_data) { + printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__); + jbd_free(jh->b_committed_data, bh->b_size); + } + bh->b_private = NULL; + jh->b_bh = NULL; /* debug, really */ + clear_buffer_jbd(bh); + journal_free_journal_head(jh); } /* - * journal_remove_journal_head(): if the buffer isn't attached to a transaction - * and has a zero b_jcount then remove and release its journal_head. If we did - * see that the buffer is not used by any transaction we also "logically" - * decrement ->b_count. - * - * We in fact take an additional increment on ->b_count as a convenience, - * because the caller usually wants to do additional things with the bh - * after calling here. - * The caller of journal_remove_journal_head() *must* run __brelse(bh) at some - * time. Once the caller has run __brelse(), the buffer is eligible for - * reaping by try_to_free_buffers(). - */ -void journal_remove_journal_head(struct buffer_head *bh) -{ - jbd_lock_bh_journal_head(bh); - __journal_remove_journal_head(bh); - jbd_unlock_bh_journal_head(bh); -} - -/* - * Drop a reference on the passed journal_head. If it fell to zero then try to + * Drop a reference on the passed journal_head. If it fell to zero then * release the journal_head from the buffer_head. 
*/ void journal_put_journal_head(struct journal_head *jh) @@ -1957,11 +1923,12 @@ void journal_put_journal_head(struct journal_head *jh) jbd_lock_bh_journal_head(bh); J_ASSERT_JH(jh, jh->b_jcount > 0); --jh->b_jcount; - if (!jh->b_jcount && !jh->b_transaction) { + if (!jh->b_jcount) { __journal_remove_journal_head(bh); + jbd_unlock_bh_journal_head(bh); __brelse(bh); - } - jbd_unlock_bh_journal_head(bh); + } else + jbd_unlock_bh_journal_head(bh); } /* diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index dc39efd05d54..7e59c6e66f9b 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -696,7 +696,6 @@ repeat: if (!jh->b_transaction) { JBUFFER_TRACE(jh, "no transaction"); J_ASSERT_JH(jh, !jh->b_next_transaction); - jh->b_transaction = transaction; JBUFFER_TRACE(jh, "file as BJ_Reserved"); spin_lock(&journal->j_list_lock); __journal_file_buffer(jh, transaction, BJ_Reserved); @@ -818,7 +817,6 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh) * committed and so it's safe to clear the dirty bit. */ clear_buffer_dirty(jh2bh(jh)); - jh->b_transaction = transaction; /* first access by this transaction */ jh->b_modified = 0; @@ -1069,8 +1067,9 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh) ret = -EIO; goto no_journal; } - - if (jh->b_transaction != NULL) { + /* We might have slept so buffer could be refiled now */ + if (jh->b_transaction != NULL && + jh->b_transaction != handle->h_transaction) { JBUFFER_TRACE(jh, "unfile from commit"); __journal_temp_unlink_buffer(jh); /* It still points to the committing @@ -1091,8 +1090,6 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh) if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) { JBUFFER_TRACE(jh, "not on correct data list: unfile"); J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow); - __journal_temp_unlink_buffer(jh); - jh->b_transaction = handle->h_transaction; JBUFFER_TRACE(jh, "file as data"); __journal_file_buffer(jh, handle->h_transaction, BJ_SyncData); @@ -1300,8 +1297,6 @@ int journal_forget (handle_t *handle, struct buffer_head *bh) __journal_file_buffer(jh, transaction, BJ_Forget); } else { __journal_unfile_buffer(jh); - journal_remove_journal_head(bh); - __brelse(bh); if (!buffer_jbd(bh)) { spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); @@ -1622,19 +1617,32 @@ static void __journal_temp_unlink_buffer(struct journal_head *jh) mark_buffer_dirty(bh); /* Expose it to the VM */ } +/* + * Remove buffer from all transactions. + * + * Called with bh_state lock and j_list_lock + * + * jh and bh may be already freed when this function returns. 
+ */ void __journal_unfile_buffer(struct journal_head *jh) { __journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; + journal_put_journal_head(jh); } void journal_unfile_buffer(journal_t *journal, struct journal_head *jh) { - jbd_lock_bh_state(jh2bh(jh)); + struct buffer_head *bh = jh2bh(jh); + + /* Get reference so that buffer cannot be freed before we unlock it */ + get_bh(bh); + jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); __journal_unfile_buffer(jh); spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(jh2bh(jh)); + jbd_unlock_bh_state(bh); + __brelse(bh); } /* @@ -1661,16 +1669,12 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) /* A written-back ordered data buffer */ JBUFFER_TRACE(jh, "release data"); __journal_unfile_buffer(jh); - journal_remove_journal_head(bh); - __brelse(bh); } } else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) { /* written-back checkpointed metadata buffer */ if (jh->b_jlist == BJ_None) { JBUFFER_TRACE(jh, "remove from checkpoint list"); __journal_remove_checkpoint(jh); - journal_remove_journal_head(bh); - __brelse(bh); } } spin_unlock(&journal->j_list_lock); @@ -1733,7 +1737,7 @@ int journal_try_to_free_buffers(journal_t *journal, /* * We take our own ref against the journal_head here to avoid * having to add tons of locking around each instance of - * journal_remove_journal_head() and journal_put_journal_head(). + * journal_put_journal_head(). */ jh = journal_grab_journal_head(bh); if (!jh) @@ -1770,10 +1774,9 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) int may_free = 1; struct buffer_head *bh = jh2bh(jh); - __journal_unfile_buffer(jh); - if (jh->b_cp_transaction) { JBUFFER_TRACE(jh, "on running+cp transaction"); + __journal_temp_unlink_buffer(jh); /* * We don't want to write the buffer anymore, clear the * bit so that we don't confuse checks in @@ -1784,8 +1787,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) may_free = 0; } else { JBUFFER_TRACE(jh, "on running transaction"); - journal_remove_journal_head(bh); - __brelse(bh); + __journal_unfile_buffer(jh); } return may_free; } @@ -2070,6 +2072,8 @@ void __journal_file_buffer(struct journal_head *jh, if (jh->b_transaction) __journal_temp_unlink_buffer(jh); + else + journal_grab_journal_head(bh); jh->b_transaction = transaction; switch (jlist) { @@ -2127,9 +2131,10 @@ void journal_file_buffer(struct journal_head *jh, * already started to be used by a subsequent transaction, refile the * buffer on that transaction's metadata list. * - * Called under journal->j_list_lock - * + * Called under j_list_lock * Called under jbd_lock_bh_state(jh2bh(jh)) + * + * jh and bh may be already free when this function returns */ void __journal_refile_buffer(struct journal_head *jh) { @@ -2153,6 +2158,11 @@ void __journal_refile_buffer(struct journal_head *jh) was_dirty = test_clear_buffer_jbddirty(bh); __journal_temp_unlink_buffer(jh); + /* + * We set b_transaction here because b_next_transaction will inherit + * our jh reference and thus __journal_file_buffer() must not take a + * new one. + */ jh->b_transaction = jh->b_next_transaction; jh->b_next_transaction = NULL; if (buffer_freed(bh)) @@ -2169,30 +2179,21 @@ void __journal_refile_buffer(struct journal_head *jh) } /* - * For the unlocked version of this call, also make sure that any - * hanging journal_head is cleaned up if necessary. 
- * - * __journal_refile_buffer is usually called as part of a single locked - * operation on a buffer_head, in which the caller is probably going to - * be hooking the journal_head onto other lists. In that case it is up - * to the caller to remove the journal_head if necessary. For the - * unlocked journal_refile_buffer call, the caller isn't going to be - * doing anything else to the buffer so we need to do the cleanup - * ourselves to avoid a jh leak. - * - * *** The journal_head may be freed by this call! *** + * __journal_refile_buffer() with necessary locking added. We take our bh + * reference so that we can safely unlock bh. + * + * The jh and bh may be freed by this call. */ void journal_refile_buffer(journal_t *journal, struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); + /* Get reference so that buffer cannot be freed before we unlock it */ + get_bh(bh); jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); - __journal_refile_buffer(jh); jbd_unlock_bh_state(bh); - journal_remove_journal_head(bh); - spin_unlock(&journal->j_list_lock); __brelse(bh); } diff --git a/include/linux/jbd.h b/include/linux/jbd.h index e06965081ba5..e6a5e34bed4f 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -940,7 +940,6 @@ extern int journal_force_commit(journal_t *); */ struct journal_head *journal_add_journal_head(struct buffer_head *bh); struct journal_head *journal_grab_journal_head(struct buffer_head *bh); -void journal_remove_journal_head(struct buffer_head *bh); void journal_put_journal_head(struct journal_head *jh); /* -- cgit v1.2.3 From f91c2c5cfa2950a20265b45bcc13e49ed9e49aac Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Mon, 27 Jun 2011 13:45:39 +0200 Subject: encrypted_keys: avoid dumping the master key if the request fails Do not dump the master key if an error is encountered during the request. Signed-off-by: Roberto Sassu Acked-by: Gianluca Ramunno Signed-off-by: Mimi Zohar --- security/keys/encrypted.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/security/keys/encrypted.c b/security/keys/encrypted.c index b1cba5bf0a5e..37cd913f18ae 100644 --- a/security/keys/encrypted.c +++ b/security/keys/encrypted.c @@ -378,11 +378,13 @@ static struct key *request_master_key(struct encrypted_key_payload *epayload, } else goto out; - if (IS_ERR(mkey)) + if (IS_ERR(mkey)) { pr_info("encrypted_key: key %s not found", epayload->master_desc); - if (mkey) - dump_master_key(*master_key, *master_keylen); + goto out; + } + + dump_master_key(*master_key, *master_keylen); out: return mkey; } -- cgit v1.2.3 From 08fa2aa54e72ddde8076cc77126bace8d4780e0f Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Mon, 27 Jun 2011 13:45:40 +0200 Subject: encrypted-keys: fixed valid_master_desc() function description Valid key type prefixes for the parameter 'key-type' are: 'trusted' and 'user'. Signed-off-by: Roberto Sassu Acked-by: Gianluca Ramunno Acked-by: David Howells Signed-off-by: Mimi Zohar --- security/keys/encrypted.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/keys/encrypted.c b/security/keys/encrypted.c index 37cd913f18ae..3ff2f72dad94 100644 --- a/security/keys/encrypted.c +++ b/security/keys/encrypted.c @@ -84,7 +84,7 @@ static int aes_get_sizes(void) /* * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key * - * key-type:= "trusted:" | "encrypted:" + * key-type:= "trusted:" | "user:" * desc:= master-key description * * Verify that 'key-type' is valid and that 'desc' exists. 
On key update, -- cgit v1.2.3 From 7103dff0e598cd634767f17a2958302c515700ca Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Mon, 27 Jun 2011 13:45:41 +0200 Subject: encrypted-keys: added additional debug messages Some debug messages have been added in the function datablob_parse() in order to better identify errors returned when dealing with 'encrypted' keys. Changelog from version v4: - made the debug messages more understandable Signed-off-by: Roberto Sassu Acked-by: Gianluca Ramunno Signed-off-by: Mimi Zohar --- security/keys/encrypted.c | 45 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/security/keys/encrypted.c b/security/keys/encrypted.c index 3ff2f72dad94..f36a105de791 100644 --- a/security/keys/encrypted.c +++ b/security/keys/encrypted.c @@ -133,46 +133,69 @@ static int datablob_parse(char *datablob, char **master_desc, substring_t args[MAX_OPT_ARGS]; int ret = -EINVAL; int key_cmd; - char *p; + char *keyword; - p = strsep(&datablob, " \t"); - if (!p) + keyword = strsep(&datablob, " \t"); + if (!keyword) { + pr_info("encrypted_key: insufficient parameters specified\n"); return ret; - key_cmd = match_token(p, key_tokens, args); + } + key_cmd = match_token(keyword, key_tokens, args); *master_desc = strsep(&datablob, " \t"); - if (!*master_desc) + if (!*master_desc) { + pr_info("encrypted_key: master key parameter is missing\n"); goto out; + } - if (valid_master_desc(*master_desc, NULL) < 0) + if (valid_master_desc(*master_desc, NULL) < 0) { + pr_info("encrypted_key: master key parameter \'%s\' " + "is invalid\n", *master_desc); goto out; + } if (decrypted_datalen) { *decrypted_datalen = strsep(&datablob, " \t"); - if (!*decrypted_datalen) + if (!*decrypted_datalen) { + pr_info("encrypted_key: keylen parameter is missing\n"); goto out; + } } switch (key_cmd) { case Opt_new: - if (!decrypted_datalen) + if (!decrypted_datalen) { + pr_info("encrypted_key: keyword \'%s\' not allowed " + "when called from .update method\n", keyword); break; + } ret = 0; break; case Opt_load: - if (!decrypted_datalen) + if (!decrypted_datalen) { + pr_info("encrypted_key: keyword \'%s\' not allowed " + "when called from .update method\n", keyword); break; + } *hex_encoded_iv = strsep(&datablob, " \t"); - if (!*hex_encoded_iv) + if (!*hex_encoded_iv) { + pr_info("encrypted_key: hex blob is missing\n"); break; + } ret = 0; break; case Opt_update: - if (decrypted_datalen) + if (decrypted_datalen) { + pr_info("encrypted_key: keyword \'%s\' not allowed " + "when called from .instantiate method\n", + keyword); break; + } ret = 0; break; case Opt_err: + pr_info("encrypted_key: keyword \'%s\' not recognized\n", + keyword); break; } out: -- cgit v1.2.3 From 4e561d388feff18e4b798cef6a1a84a2cc7f20c2 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Mon, 27 Jun 2011 13:45:42 +0200 Subject: encrypted-keys: add key format support This patch introduces a new parameter, called 'format', that defines the format of data stored by encrypted keys. The 'default' format identifies encrypted keys containing only the symmetric key, while other formats can be defined to support additional information. The 'format' parameter is written in the datablob produced by commands 'keyctl print' or 'keyctl pipe' and is integrity protected by the HMAC. 
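For key owners the change is backwards compatible: the 'format' token may be omitted, in which case 'default' is assumed, or spelled out explicitly, as in the documentation examples added below:

	$ keyctl add encrypted evm "new trusted:kmk 32" @u          # format omitted
	$ keyctl add encrypted evm "new default trusted:kmk 32" @u  # explicit 'default'
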
Signed-off-by: Roberto Sassu Acked-by: Gianluca Ramunno Acked-by: David Howells Signed-off-by: Mimi Zohar --- Documentation/security/keys-trusted-encrypted.txt | 48 +++++--- include/keys/encrypted-type.h | 13 +- security/keys/encrypted.c | 141 ++++++++++++++++------ 3 files changed, 142 insertions(+), 60 deletions(-) diff --git a/Documentation/security/keys-trusted-encrypted.txt b/Documentation/security/keys-trusted-encrypted.txt index 8fb79bc1ac4b..0afcb5023c75 100644 --- a/Documentation/security/keys-trusted-encrypted.txt +++ b/Documentation/security/keys-trusted-encrypted.txt @@ -53,12 +53,19 @@ they are only as secure as the user key encrypting them. The master user key should therefore be loaded in as secure a way as possible, preferably early in boot. +The decrypted portion of encrypted keys can contain either a simple symmetric +key or a more complex structure. The format of the more complex structure is +application specific, which is identified by 'format'. + Usage: - keyctl add encrypted name "new key-type:master-key-name keylen" ring - keyctl add encrypted name "load hex_blob" ring - keyctl update keyid "update key-type:master-key-name" + keyctl add encrypted name "new [format] key-type:master-key-name keylen" + ring + keyctl add encrypted name "load hex_blob" ring + keyctl update keyid "update key-type:master-key-name" + +format:= 'default' +key-type:= 'trusted' | 'user' -where 'key-type' is either 'trusted' or 'user'. Examples of trusted and encrypted key usage: @@ -114,15 +121,25 @@ Reseal a trusted key under new pcr values: 7ef6a24defe4846104209bf0c3eced7fa1a672ed5b125fc9d8cd88b476a658a4434644ef df8ae9a178e9f83ba9f08d10fa47e4226b98b0702f06b3b8 -Create and save an encrypted key "evm" using the above trusted key "kmk": +The initial consumer of trusted keys is EVM, which at boot time needs a high +quality symmetric key for HMAC protection of file metadata. The use of a +trusted key provides strong guarantees that the EVM key has not been +compromised by a user level problem, and when sealed to specific boot PCR +values, protects against boot and offline attacks. Create and save an +encrypted key "evm" using the above trusted key "kmk": +option 1: omitting 'format' $ keyctl add encrypted evm "new trusted:kmk 32" @u 159771175 +option 2: explicitly defining 'format' as 'default' + $ keyctl add encrypted evm "new default trusted:kmk 32" @u + 159771175 + $ keyctl print 159771175 - trusted:kmk 32 2375725ad57798846a9bbd240de8906f006e66c03af53b1b382dbbc55 - be2a44616e4959430436dc4f2a7a9659aa60bb4652aeb2120f149ed197c564e024717c64 - 5972dcb82ab2dde83376d82b2e3c09ffc + default trusted:kmk 32 2375725ad57798846a9bbd240de8906f006e66c03af53b1b3 + 82dbbc55be2a44616e4959430436dc4f2a7a9659aa60bb4652aeb2120f149ed197c564e0 + 24717c64 5972dcb82ab2dde83376d82b2e3c09ffc $ keyctl pipe 159771175 > evm.blob @@ -132,14 +149,9 @@ Load an encrypted key "evm" from saved blob: 831684262 $ keyctl print 831684262 - trusted:kmk 32 2375725ad57798846a9bbd240de8906f006e66c03af53b1b382dbbc55 - be2a44616e4959430436dc4f2a7a9659aa60bb4652aeb2120f149ed197c564e024717c64 - 5972dcb82ab2dde83376d82b2e3c09ffc - + default trusted:kmk 32 2375725ad57798846a9bbd240de8906f006e66c03af53b1b3 + 82dbbc55be2a44616e4959430436dc4f2a7a9659aa60bb4652aeb2120f149ed197c564e0 + 24717c64 5972dcb82ab2dde83376d82b2e3c09ffc -The initial consumer of trusted keys is EVM, which at boot time needs a high -quality symmetric key for HMAC protection of file metadata. 
The use of a -trusted key provides strong guarantees that the EVM key has not been -compromised by a user level problem, and when sealed to specific boot PCR -values, protects against boot and offline attacks. Other uses for trusted and -encrypted keys, such as for disk and file encryption are anticipated. +Other uses for trusted and encrypted keys, such as for disk and file encryption +are anticipated. diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h index 95855017a32b..1d4541370a64 100644 --- a/include/keys/encrypted-type.h +++ b/include/keys/encrypted-type.h @@ -1,6 +1,11 @@ /* * Copyright (C) 2010 IBM Corporation - * Author: Mimi Zohar + * Copyright (C) 2010 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Authors: + * Mimi Zohar + * Roberto Sassu * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,13 +20,17 @@ struct encrypted_key_payload { struct rcu_head rcu; + char *format; /* datablob: format */ char *master_desc; /* datablob: master key name */ char *datalen; /* datablob: decrypted key length */ u8 *iv; /* datablob: iv */ u8 *encrypted_data; /* datablob: encrypted data */ unsigned short datablob_len; /* length of datablob */ unsigned short decrypted_datalen; /* decrypted data length */ - u8 decrypted_data[0]; /* decrypted data + datablob + hmac */ + unsigned short payload_datalen; /* payload data length */ + unsigned short encrypted_key_format; /* encrypted key format */ + u8 *decrypted_data; /* decrypted data */ + u8 payload_data[0]; /* payload data + datablob + hmac */ }; extern struct key_type key_type_encrypted; diff --git a/security/keys/encrypted.c b/security/keys/encrypted.c index f36a105de791..89981c987ba7 100644 --- a/security/keys/encrypted.c +++ b/security/keys/encrypted.c @@ -1,8 +1,11 @@ /* * Copyright (C) 2010 IBM Corporation + * Copyright (C) 2010 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it * - * Author: + * Authors: * Mimi Zohar + * Roberto Sassu * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -37,6 +40,7 @@ static const char KEY_USER_PREFIX[] = "user:"; static const char hash_alg[] = "sha256"; static const char hmac_alg[] = "hmac(sha256)"; static const char blkcipher_alg[] = "cbc(aes)"; +static const char key_format_default[] = "default"; static unsigned int ivsize; static int blksize; @@ -58,6 +62,15 @@ enum { Opt_err = -1, Opt_new, Opt_load, Opt_update }; +enum { + Opt_error = -1, Opt_default +}; + +static const match_table_t key_format_tokens = { + {Opt_default, "default"}, + {Opt_error, NULL} +}; + static const match_table_t key_tokens = { {Opt_new, "new"}, {Opt_load, "load"}, @@ -118,8 +131,9 @@ out: * datablob_parse - parse the keyctl data * * datablob format: - * new - * load + * new [] + * load [] + * * update * * Tokenizes a copy of the keyctl data, returning a pointer to each token, @@ -127,13 +141,15 @@ out: * * On success returns 0, otherwise -EINVAL. 
*/ -static int datablob_parse(char *datablob, char **master_desc, - char **decrypted_datalen, char **hex_encoded_iv) +static int datablob_parse(char *datablob, const char **format, + char **master_desc, char **decrypted_datalen, + char **hex_encoded_iv) { substring_t args[MAX_OPT_ARGS]; int ret = -EINVAL; int key_cmd; - char *keyword; + int key_format; + char *p, *keyword; keyword = strsep(&datablob, " \t"); if (!keyword) { @@ -142,7 +158,24 @@ static int datablob_parse(char *datablob, char **master_desc, } key_cmd = match_token(keyword, key_tokens, args); - *master_desc = strsep(&datablob, " \t"); + /* Get optional format: default */ + p = strsep(&datablob, " \t"); + if (!p) { + pr_err("encrypted_key: insufficient parameters specified\n"); + return ret; + } + + key_format = match_token(p, key_format_tokens, args); + switch (key_format) { + case Opt_default: + *format = p; + *master_desc = strsep(&datablob, " \t"); + break; + case Opt_error: + *master_desc = p; + break; + } + if (!*master_desc) { pr_info("encrypted_key: master key parameter is missing\n"); goto out; @@ -220,8 +253,8 @@ static char *datablob_format(struct encrypted_key_payload *epayload, ascii_buf[asciiblob_len] = '\0'; /* copy datablob master_desc and datalen strings */ - len = sprintf(ascii_buf, "%s %s ", epayload->master_desc, - epayload->datalen); + len = sprintf(ascii_buf, "%s %s %s ", epayload->format, + epayload->master_desc, epayload->datalen); /* convert the hex encoded iv, encrypted-data and HMAC to ascii */ bufp = &ascii_buf[len]; @@ -464,9 +497,9 @@ static int datablob_hmac_append(struct encrypted_key_payload *epayload, if (ret < 0) goto out; - digest = epayload->master_desc + epayload->datablob_len; + digest = epayload->format + epayload->datablob_len; ret = calc_hmac(digest, derived_key, sizeof derived_key, - epayload->master_desc, epayload->datablob_len); + epayload->format, epayload->datablob_len); if (!ret) dump_hmac(NULL, digest, HASH_SIZE); out: @@ -475,26 +508,35 @@ out: /* verify HMAC before decrypting encrypted key */ static int datablob_hmac_verify(struct encrypted_key_payload *epayload, - const u8 *master_key, size_t master_keylen) + const u8 *format, const u8 *master_key, + size_t master_keylen) { u8 derived_key[HASH_SIZE]; u8 digest[HASH_SIZE]; int ret; + char *p; + unsigned short len; ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen); if (ret < 0) goto out; - ret = calc_hmac(digest, derived_key, sizeof derived_key, - epayload->master_desc, epayload->datablob_len); + len = epayload->datablob_len; + if (!format) { + p = epayload->master_desc; + len -= strlen(epayload->format) + 1; + } else + p = epayload->format; + + ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len); if (ret < 0) goto out; - ret = memcmp(digest, epayload->master_desc + epayload->datablob_len, + ret = memcmp(digest, epayload->format + epayload->datablob_len, sizeof digest); if (ret) { ret = -EINVAL; dump_hmac("datablob", - epayload->master_desc + epayload->datablob_len, + epayload->format + epayload->datablob_len, HASH_SIZE); dump_hmac("calc", digest, HASH_SIZE); } @@ -539,13 +581,16 @@ out: /* Allocate memory for decrypted key and datablob. 
*/ static struct encrypted_key_payload *encrypted_key_alloc(struct key *key, + const char *format, const char *master_desc, const char *datalen) { struct encrypted_key_payload *epayload = NULL; unsigned short datablob_len; unsigned short decrypted_datalen; + unsigned short payload_datalen; unsigned int encrypted_datalen; + unsigned int format_len; long dlen; int ret; @@ -553,29 +598,32 @@ static struct encrypted_key_payload *encrypted_key_alloc(struct key *key, if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE) return ERR_PTR(-EINVAL); + format_len = (!format) ? strlen(key_format_default) : strlen(format); decrypted_datalen = dlen; + payload_datalen = decrypted_datalen; encrypted_datalen = roundup(decrypted_datalen, blksize); - datablob_len = strlen(master_desc) + 1 + strlen(datalen) + 1 - + ivsize + 1 + encrypted_datalen; + datablob_len = format_len + 1 + strlen(master_desc) + 1 + + strlen(datalen) + 1 + ivsize + 1 + encrypted_datalen; - ret = key_payload_reserve(key, decrypted_datalen + datablob_len + ret = key_payload_reserve(key, payload_datalen + datablob_len + HASH_SIZE + 1); if (ret < 0) return ERR_PTR(ret); - epayload = kzalloc(sizeof(*epayload) + decrypted_datalen + + epayload = kzalloc(sizeof(*epayload) + payload_datalen + datablob_len + HASH_SIZE + 1, GFP_KERNEL); if (!epayload) return ERR_PTR(-ENOMEM); + epayload->payload_datalen = payload_datalen; epayload->decrypted_datalen = decrypted_datalen; epayload->datablob_len = datablob_len; return epayload; } static int encrypted_key_decrypt(struct encrypted_key_payload *epayload, - const char *hex_encoded_iv) + const char *format, const char *hex_encoded_iv) { struct key *mkey; u8 derived_key[HASH_SIZE]; @@ -596,14 +644,14 @@ static int encrypted_key_decrypt(struct encrypted_key_payload *epayload, hex2bin(epayload->iv, hex_encoded_iv, ivsize); hex2bin(epayload->encrypted_data, hex_encoded_data, encrypted_datalen); - hmac = epayload->master_desc + epayload->datablob_len; + hmac = epayload->format + epayload->datablob_len; hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2), HASH_SIZE); mkey = request_master_key(epayload, &master_key, &master_keylen); if (IS_ERR(mkey)) return PTR_ERR(mkey); - ret = datablob_hmac_verify(epayload, master_key, master_keylen); + ret = datablob_hmac_verify(epayload, format, master_key, master_keylen); if (ret < 0) { pr_err("encrypted_key: bad hmac (%d)\n", ret); goto out; @@ -623,14 +671,23 @@ out: } static void __ekey_init(struct encrypted_key_payload *epayload, - const char *master_desc, const char *datalen) + const char *format, const char *master_desc, + const char *datalen) { - epayload->master_desc = epayload->decrypted_data - + epayload->decrypted_datalen; + unsigned int format_len; + + format_len = (!format) ? strlen(key_format_default) : strlen(format); + epayload->format = epayload->payload_data + epayload->payload_datalen; + epayload->master_desc = epayload->format + format_len + 1; epayload->datalen = epayload->master_desc + strlen(master_desc) + 1; epayload->iv = epayload->datalen + strlen(datalen) + 1; epayload->encrypted_data = epayload->iv + ivsize + 1; + epayload->decrypted_data = epayload->payload_data; + if (!format) + memcpy(epayload->format, key_format_default, format_len); + else + memcpy(epayload->format, format, format_len); memcpy(epayload->master_desc, master_desc, strlen(master_desc)); memcpy(epayload->datalen, datalen, strlen(datalen)); } @@ -642,19 +699,19 @@ static void __ekey_init(struct encrypted_key_payload *epayload, * itself. 
For an old key, decrypt the hex encoded data. */ static int encrypted_init(struct encrypted_key_payload *epayload, - const char *master_desc, const char *datalen, - const char *hex_encoded_iv) + const char *format, const char *master_desc, + const char *datalen, const char *hex_encoded_iv) { int ret = 0; - __ekey_init(epayload, master_desc, datalen); + __ekey_init(epayload, format, master_desc, datalen); if (!hex_encoded_iv) { get_random_bytes(epayload->iv, ivsize); get_random_bytes(epayload->decrypted_data, epayload->decrypted_datalen); } else - ret = encrypted_key_decrypt(epayload, hex_encoded_iv); + ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv); return ret; } @@ -671,6 +728,7 @@ static int encrypted_instantiate(struct key *key, const void *data, { struct encrypted_key_payload *epayload = NULL; char *datablob = NULL; + const char *format = NULL; char *master_desc = NULL; char *decrypted_datalen = NULL; char *hex_encoded_iv = NULL; @@ -684,17 +742,18 @@ static int encrypted_instantiate(struct key *key, const void *data, return -ENOMEM; datablob[datalen] = 0; memcpy(datablob, data, datalen); - ret = datablob_parse(datablob, &master_desc, &decrypted_datalen, - &hex_encoded_iv); + ret = datablob_parse(datablob, &format, &master_desc, + &decrypted_datalen, &hex_encoded_iv); if (ret < 0) goto out; - epayload = encrypted_key_alloc(key, master_desc, decrypted_datalen); + epayload = encrypted_key_alloc(key, format, master_desc, + decrypted_datalen); if (IS_ERR(epayload)) { ret = PTR_ERR(epayload); goto out; } - ret = encrypted_init(epayload, master_desc, decrypted_datalen, + ret = encrypted_init(epayload, format, master_desc, decrypted_datalen, hex_encoded_iv); if (ret < 0) { kfree(epayload); @@ -731,6 +790,7 @@ static int encrypted_update(struct key *key, const void *data, size_t datalen) struct encrypted_key_payload *new_epayload; char *buf; char *new_master_desc = NULL; + const char *format = NULL; int ret = 0; if (datalen <= 0 || datalen > 32767 || !data) @@ -742,7 +802,7 @@ static int encrypted_update(struct key *key, const void *data, size_t datalen) buf[datalen] = 0; memcpy(buf, data, datalen); - ret = datablob_parse(buf, &new_master_desc, NULL, NULL); + ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL); if (ret < 0) goto out; @@ -750,18 +810,19 @@ static int encrypted_update(struct key *key, const void *data, size_t datalen) if (ret < 0) goto out; - new_epayload = encrypted_key_alloc(key, new_master_desc, - epayload->datalen); + new_epayload = encrypted_key_alloc(key, epayload->format, + new_master_desc, epayload->datalen); if (IS_ERR(new_epayload)) { ret = PTR_ERR(new_epayload); goto out; } - __ekey_init(new_epayload, new_master_desc, epayload->datalen); + __ekey_init(new_epayload, epayload->format, new_master_desc, + epayload->datalen); memcpy(new_epayload->iv, epayload->iv, ivsize); - memcpy(new_epayload->decrypted_data, epayload->decrypted_data, - epayload->decrypted_datalen); + memcpy(new_epayload->payload_data, epayload->payload_data, + epayload->payload_datalen); rcu_assign_pointer(key->payload.data, new_epayload); call_rcu(&epayload->rcu, encrypted_rcu_free); -- cgit v1.2.3 From f8f8527103a264b5e4ab2ce5c1743b28f3219d90 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Mon, 27 Jun 2011 13:45:43 +0200 Subject: eCryptfs: export global eCryptfs definitions to include/linux/ecryptfs.h Some eCryptfs specific definitions, such as the current version and the authentication token structure, are moved to the new include file 'include/linux/ecryptfs.h', in 
order to be available for all kernel subsystems. Signed-off-by: Roberto Sassu Acked-by: Gianluca Ramunno Acked-by: Tyler Hicks Acked-by: David Howells Signed-off-by: Mimi Zohar --- fs/ecryptfs/ecryptfs_kernel.h | 109 +--------------------------------------- include/linux/ecryptfs.h | 113 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 114 insertions(+), 108 deletions(-) create mode 100644 include/linux/ecryptfs.h diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 43c7c43b06f5..bb8ec5d4301c 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -36,125 +36,18 @@ #include #include #include +#include -/* Version verification for shared data structures w/ userspace */ -#define ECRYPTFS_VERSION_MAJOR 0x00 -#define ECRYPTFS_VERSION_MINOR 0x04 -#define ECRYPTFS_SUPPORTED_FILE_VERSION 0x03 -/* These flags indicate which features are supported by the kernel - * module; userspace tools such as the mount helper read - * ECRYPTFS_VERSIONING_MASK from a sysfs handle in order to determine - * how to behave. */ -#define ECRYPTFS_VERSIONING_PASSPHRASE 0x00000001 -#define ECRYPTFS_VERSIONING_PUBKEY 0x00000002 -#define ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH 0x00000004 -#define ECRYPTFS_VERSIONING_POLICY 0x00000008 -#define ECRYPTFS_VERSIONING_XATTR 0x00000010 -#define ECRYPTFS_VERSIONING_MULTKEY 0x00000020 -#define ECRYPTFS_VERSIONING_DEVMISC 0x00000040 -#define ECRYPTFS_VERSIONING_HMAC 0x00000080 -#define ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION 0x00000100 -#define ECRYPTFS_VERSIONING_GCM 0x00000200 -#define ECRYPTFS_VERSIONING_MASK (ECRYPTFS_VERSIONING_PASSPHRASE \ - | ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH \ - | ECRYPTFS_VERSIONING_PUBKEY \ - | ECRYPTFS_VERSIONING_XATTR \ - | ECRYPTFS_VERSIONING_MULTKEY \ - | ECRYPTFS_VERSIONING_DEVMISC \ - | ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION) -#define ECRYPTFS_MAX_PASSWORD_LENGTH 64 -#define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH -#define ECRYPTFS_SALT_SIZE 8 -#define ECRYPTFS_SALT_SIZE_HEX (ECRYPTFS_SALT_SIZE*2) -/* The original signature size is only for what is stored on disk; all - * in-memory representations are expanded hex, so it better adapted to - * be passed around or referenced on the command line */ -#define ECRYPTFS_SIG_SIZE 8 -#define ECRYPTFS_SIG_SIZE_HEX (ECRYPTFS_SIG_SIZE*2) -#define ECRYPTFS_PASSWORD_SIG_SIZE ECRYPTFS_SIG_SIZE_HEX -#define ECRYPTFS_MAX_KEY_BYTES 64 -#define ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES 512 #define ECRYPTFS_DEFAULT_IV_BYTES 16 -#define ECRYPTFS_FILE_VERSION 0x03 #define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096 #define ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE 8192 #define ECRYPTFS_DEFAULT_MSG_CTX_ELEMS 32 #define ECRYPTFS_DEFAULT_SEND_TIMEOUT HZ #define ECRYPTFS_MAX_MSG_CTX_TTL (HZ*3) -#define ECRYPTFS_MAX_PKI_NAME_BYTES 16 #define ECRYPTFS_DEFAULT_NUM_USERS 4 #define ECRYPTFS_MAX_NUM_USERS 32768 #define ECRYPTFS_XATTR_NAME "user.ecryptfs" -#define RFC2440_CIPHER_DES3_EDE 0x02 -#define RFC2440_CIPHER_CAST_5 0x03 -#define RFC2440_CIPHER_BLOWFISH 0x04 -#define RFC2440_CIPHER_AES_128 0x07 -#define RFC2440_CIPHER_AES_192 0x08 -#define RFC2440_CIPHER_AES_256 0x09 -#define RFC2440_CIPHER_TWOFISH 0x0a -#define RFC2440_CIPHER_CAST_6 0x0b - -#define RFC2440_CIPHER_RSA 0x01 - -/** - * For convenience, we may need to pass around the encrypted session - * key between kernel and userspace because the authentication token - * may not be extractable. 
For example, the TPM may not release the - * private key, instead requiring the encrypted data and returning the - * decrypted data. - */ -struct ecryptfs_session_key { -#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT 0x00000001 -#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT 0x00000002 -#define ECRYPTFS_CONTAINS_DECRYPTED_KEY 0x00000004 -#define ECRYPTFS_CONTAINS_ENCRYPTED_KEY 0x00000008 - u32 flags; - u32 encrypted_key_size; - u32 decrypted_key_size; - u8 encrypted_key[ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES]; - u8 decrypted_key[ECRYPTFS_MAX_KEY_BYTES]; -}; - -struct ecryptfs_password { - u32 password_bytes; - s32 hash_algo; - u32 hash_iterations; - u32 session_key_encryption_key_bytes; -#define ECRYPTFS_PERSISTENT_PASSWORD 0x01 -#define ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET 0x02 - u32 flags; - /* Iterated-hash concatenation of salt and passphrase */ - u8 session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES]; - u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1]; - /* Always in expanded hex */ - u8 salt[ECRYPTFS_SALT_SIZE]; -}; - -enum ecryptfs_token_types {ECRYPTFS_PASSWORD, ECRYPTFS_PRIVATE_KEY}; - -struct ecryptfs_private_key { - u32 key_size; - u32 data_len; - u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1]; - char pki_type[ECRYPTFS_MAX_PKI_NAME_BYTES + 1]; - u8 data[]; -}; - -/* May be a password or a private key */ -struct ecryptfs_auth_tok { - u16 version; /* 8-bit major and 8-bit minor */ - u16 token_type; -#define ECRYPTFS_ENCRYPT_ONLY 0x00000001 - u32 flags; - struct ecryptfs_session_key session_key; - u8 reserved[32]; - union { - struct ecryptfs_password password; - struct ecryptfs_private_key private_key; - } token; -} __attribute__ ((packed)); - void ecryptfs_dump_auth_tok(struct ecryptfs_auth_tok *auth_tok); extern void ecryptfs_to_hex(char *dst, char *src, size_t src_size); extern void ecryptfs_from_hex(char *dst, char *src, int dst_size); diff --git a/include/linux/ecryptfs.h b/include/linux/ecryptfs.h new file mode 100644 index 000000000000..2224a8c0cb64 --- /dev/null +++ b/include/linux/ecryptfs.h @@ -0,0 +1,113 @@ +#ifndef _LINUX_ECRYPTFS_H +#define _LINUX_ECRYPTFS_H + +/* Version verification for shared data structures w/ userspace */ +#define ECRYPTFS_VERSION_MAJOR 0x00 +#define ECRYPTFS_VERSION_MINOR 0x04 +#define ECRYPTFS_SUPPORTED_FILE_VERSION 0x03 +/* These flags indicate which features are supported by the kernel + * module; userspace tools such as the mount helper read + * ECRYPTFS_VERSIONING_MASK from a sysfs handle in order to determine + * how to behave. 
*/ +#define ECRYPTFS_VERSIONING_PASSPHRASE 0x00000001 +#define ECRYPTFS_VERSIONING_PUBKEY 0x00000002 +#define ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH 0x00000004 +#define ECRYPTFS_VERSIONING_POLICY 0x00000008 +#define ECRYPTFS_VERSIONING_XATTR 0x00000010 +#define ECRYPTFS_VERSIONING_MULTKEY 0x00000020 +#define ECRYPTFS_VERSIONING_DEVMISC 0x00000040 +#define ECRYPTFS_VERSIONING_HMAC 0x00000080 +#define ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION 0x00000100 +#define ECRYPTFS_VERSIONING_GCM 0x00000200 +#define ECRYPTFS_VERSIONING_MASK (ECRYPTFS_VERSIONING_PASSPHRASE \ + | ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH \ + | ECRYPTFS_VERSIONING_PUBKEY \ + | ECRYPTFS_VERSIONING_XATTR \ + | ECRYPTFS_VERSIONING_MULTKEY \ + | ECRYPTFS_VERSIONING_DEVMISC \ + | ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION) +#define ECRYPTFS_MAX_PASSWORD_LENGTH 64 +#define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH +#define ECRYPTFS_SALT_SIZE 8 +#define ECRYPTFS_SALT_SIZE_HEX (ECRYPTFS_SALT_SIZE*2) +/* The original signature size is only for what is stored on disk; all + * in-memory representations are expanded hex, so it better adapted to + * be passed around or referenced on the command line */ +#define ECRYPTFS_SIG_SIZE 8 +#define ECRYPTFS_SIG_SIZE_HEX (ECRYPTFS_SIG_SIZE*2) +#define ECRYPTFS_PASSWORD_SIG_SIZE ECRYPTFS_SIG_SIZE_HEX +#define ECRYPTFS_MAX_KEY_BYTES 64 +#define ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES 512 +#define ECRYPTFS_FILE_VERSION 0x03 +#define ECRYPTFS_MAX_PKI_NAME_BYTES 16 + +#define RFC2440_CIPHER_DES3_EDE 0x02 +#define RFC2440_CIPHER_CAST_5 0x03 +#define RFC2440_CIPHER_BLOWFISH 0x04 +#define RFC2440_CIPHER_AES_128 0x07 +#define RFC2440_CIPHER_AES_192 0x08 +#define RFC2440_CIPHER_AES_256 0x09 +#define RFC2440_CIPHER_TWOFISH 0x0a +#define RFC2440_CIPHER_CAST_6 0x0b + +#define RFC2440_CIPHER_RSA 0x01 + +/** + * For convenience, we may need to pass around the encrypted session + * key between kernel and userspace because the authentication token + * may not be extractable. For example, the TPM may not release the + * private key, instead requiring the encrypted data and returning the + * decrypted data. 
+ */ +struct ecryptfs_session_key { +#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT 0x00000001 +#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT 0x00000002 +#define ECRYPTFS_CONTAINS_DECRYPTED_KEY 0x00000004 +#define ECRYPTFS_CONTAINS_ENCRYPTED_KEY 0x00000008 + u32 flags; + u32 encrypted_key_size; + u32 decrypted_key_size; + u8 encrypted_key[ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES]; + u8 decrypted_key[ECRYPTFS_MAX_KEY_BYTES]; +}; + +struct ecryptfs_password { + u32 password_bytes; + s32 hash_algo; + u32 hash_iterations; + u32 session_key_encryption_key_bytes; +#define ECRYPTFS_PERSISTENT_PASSWORD 0x01 +#define ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET 0x02 + u32 flags; + /* Iterated-hash concatenation of salt and passphrase */ + u8 session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES]; + u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1]; + /* Always in expanded hex */ + u8 salt[ECRYPTFS_SALT_SIZE]; +}; + +enum ecryptfs_token_types {ECRYPTFS_PASSWORD, ECRYPTFS_PRIVATE_KEY}; + +struct ecryptfs_private_key { + u32 key_size; + u32 data_len; + u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1]; + char pki_type[ECRYPTFS_MAX_PKI_NAME_BYTES + 1]; + u8 data[]; +}; + +/* May be a password or a private key */ +struct ecryptfs_auth_tok { + u16 version; /* 8-bit major and 8-bit minor */ + u16 token_type; +#define ECRYPTFS_ENCRYPT_ONLY 0x00000001 + u32 flags; + struct ecryptfs_session_key session_key; + u8 reserved[32]; + union { + struct ecryptfs_password password; + struct ecryptfs_private_key private_key; + } token; +} __attribute__ ((packed)); + +#endif /* _LINUX_ECRYPTFS_H */ -- cgit v1.2.3 From 79a73d188726b473ca3bf483244bc96096831905 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Mon, 27 Jun 2011 13:45:44 +0200 Subject: encrypted-keys: add ecryptfs format support The 'encrypted' key type defines its own payload format which contains a symmetric key randomly generated that cannot be used directly to mount an eCryptfs filesystem, because it expects an authentication token structure. This patch introduces the new format 'ecryptfs' that allows to store an authentication token structure inside the encrypted key payload containing a randomly generated symmetric key, as the same for the format 'default'. More details about the usage of encrypted keys with the eCryptfs filesystem can be found in the file 'Documentation/keys-ecryptfs.txt'. Signed-off-by: Roberto Sassu Acked-by: Gianluca Ramunno Acked-by: Tyler Hicks Signed-off-by: Mimi Zohar --- Documentation/keys-ecryptfs.txt | 68 +++++++++++++++++++ Documentation/security/keys-trusted-encrypted.txt | 6 +- security/keys/Makefile | 2 +- security/keys/ecryptfs_format.c | 81 +++++++++++++++++++++++ security/keys/ecryptfs_format.h | 30 +++++++++ security/keys/encrypted.c | 75 +++++++++++++++++++-- 6 files changed, 252 insertions(+), 10 deletions(-) create mode 100644 Documentation/keys-ecryptfs.txt create mode 100644 security/keys/ecryptfs_format.c create mode 100644 security/keys/ecryptfs_format.h diff --git a/Documentation/keys-ecryptfs.txt b/Documentation/keys-ecryptfs.txt new file mode 100644 index 000000000000..c3bbeba63562 --- /dev/null +++ b/Documentation/keys-ecryptfs.txt @@ -0,0 +1,68 @@ + Encrypted keys for the eCryptfs filesystem + +ECryptfs is a stacked filesystem which transparently encrypts and decrypts each +file using a randomly generated File Encryption Key (FEK). + +Each FEK is in turn encrypted with a File Encryption Key Encryption Key (FEFEK) +either in kernel space or in user space with a daemon called 'ecryptfsd'. 
In +the former case the operation is performed directly by the kernel CryptoAPI +using a key, the FEFEK, derived from a user prompted passphrase; in the latter +the FEK is encrypted by 'ecryptfsd' with the help of external libraries in order +to support other mechanisms like public key cryptography, PKCS#11 and TPM based +operations. + +The data structure defined by eCryptfs to contain information required for the +FEK decryption is called authentication token and, currently, can be stored in a +kernel key of the 'user' type, inserted in the user's session specific keyring +by the userspace utility 'mount.ecryptfs' shipped with the package +'ecryptfs-utils'. + +The 'encrypted' key type has been extended with the introduction of the new +format 'ecryptfs' in order to be used in conjunction with the eCryptfs +filesystem. Encrypted keys of the newly introduced format store an +authentication token in its payload with a FEFEK randomly generated by the +kernel and protected by the parent master key. + +In order to avoid known-plaintext attacks, the datablob obtained through +commands 'keyctl print' or 'keyctl pipe' does not contain the overall +authentication token, which content is well known, but only the FEFEK in +encrypted form. + +The eCryptfs filesystem may really benefit from using encrypted keys in that the +required key can be securely generated by an Administrator and provided at boot +time after the unsealing of a 'trusted' key in order to perform the mount in a +controlled environment. Another advantage is that the key is not exposed to +threats of malicious software, because it is available in clear form only at +kernel level. + +Usage: + keyctl add encrypted name "new ecryptfs key-type:master-key-name keylen" ring + keyctl add encrypted name "load hex_blob" ring + keyctl update keyid "update key-type:master-key-name" + +name:= '<16 hexadecimal characters>' +key-type:= 'trusted' | 'user' +keylen:= 64 + + +Example of encrypted key usage with the eCryptfs filesystem: + +Create an encrypted key "1000100010001000" of length 64 bytes with format +'ecryptfs' and save it using a previously loaded user key "test": + + $ keyctl add encrypted 1000100010001000 "new ecryptfs user:test 64" @u + 19184530 + + $ keyctl print 19184530 + ecryptfs user:test 64 490045d4bfe48c99f0d465fbbbb79e7500da954178e2de0697 + dd85091f5450a0511219e9f7cd70dcd498038181466f78ac8d4c19504fcc72402bfc41c2 + f253a41b7507ccaa4b2b03fff19a69d1cc0b16e71746473f023a95488b6edfd86f7fdd40 + 9d292e4bacded1258880122dd553a661 + + $ keyctl pipe 19184530 > ecryptfs.blob + +Mount an eCryptfs filesystem using the created encrypted key "1000100010001000" +into the '/secret' directory: + + $ mount -i -t ecryptfs -oecryptfs_sig=1000100010001000,\ + ecryptfs_cipher=aes,ecryptfs_key_bytes=32 /secret /secret diff --git a/Documentation/security/keys-trusted-encrypted.txt b/Documentation/security/keys-trusted-encrypted.txt index 0afcb5023c75..5f50ccabfc8a 100644 --- a/Documentation/security/keys-trusted-encrypted.txt +++ b/Documentation/security/keys-trusted-encrypted.txt @@ -63,7 +63,7 @@ Usage: keyctl add encrypted name "load hex_blob" ring keyctl update keyid "update key-type:master-key-name" -format:= 'default' +format:= 'default | ecryptfs' key-type:= 'trusted' | 'user' @@ -154,4 +154,6 @@ Load an encrypted key "evm" from saved blob: 24717c64 5972dcb82ab2dde83376d82b2e3c09ffc Other uses for trusted and encrypted keys, such as for disk and file encryption -are anticipated. +are anticipated. 
In particular the new format 'ecryptfs' has been defined in +in order to use encrypted keys to mount an eCryptfs filesystem. More details +about the usage can be found in the file 'Documentation/keys-ecryptfs.txt'. diff --git a/security/keys/Makefile b/security/keys/Makefile index 1bf090a885fe..b34cc6ee6900 100644 --- a/security/keys/Makefile +++ b/security/keys/Makefile @@ -14,7 +14,7 @@ obj-y := \ user_defined.o obj-$(CONFIG_TRUSTED_KEYS) += trusted.o -obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o +obj-$(CONFIG_ENCRYPTED_KEYS) += ecryptfs_format.o encrypted.o obj-$(CONFIG_KEYS_COMPAT) += compat.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_SYSCTL) += sysctl.o diff --git a/security/keys/ecryptfs_format.c b/security/keys/ecryptfs_format.c new file mode 100644 index 000000000000..6daa3b6ff9ed --- /dev/null +++ b/security/keys/ecryptfs_format.c @@ -0,0 +1,81 @@ +/* + * ecryptfs_format.c: helper functions for the encrypted key type + * + * Copyright (C) 2006 International Business Machines Corp. + * Copyright (C) 2010 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Authors: + * Michael A. Halcrow + * Tyler Hicks + * Roberto Sassu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + */ + +#include +#include "ecryptfs_format.h" + +u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok) +{ + return auth_tok->token.password.session_key_encryption_key; +} +EXPORT_SYMBOL(ecryptfs_get_auth_tok_key); + +/* + * ecryptfs_get_versions() + * + * Source code taken from the software 'ecryptfs-utils' version 83. + * + */ +void ecryptfs_get_versions(int *major, int *minor, int *file_version) +{ + *major = ECRYPTFS_VERSION_MAJOR; + *minor = ECRYPTFS_VERSION_MINOR; + if (file_version) + *file_version = ECRYPTFS_SUPPORTED_FILE_VERSION; +} +EXPORT_SYMBOL(ecryptfs_get_versions); + +/* + * ecryptfs_fill_auth_tok - fill the ecryptfs_auth_tok structure + * + * Fill the ecryptfs_auth_tok structure with required ecryptfs data. + * The source code is inspired to the original function generate_payload() + * shipped with the software 'ecryptfs-utils' version 83. + * + */ +int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok, + const char *key_desc) +{ + int major, minor; + + ecryptfs_get_versions(&major, &minor, NULL); + auth_tok->version = (((uint16_t)(major << 8) & 0xFF00) + | ((uint16_t)minor & 0x00FF)); + auth_tok->token_type = ECRYPTFS_PASSWORD; + strncpy((char *)auth_tok->token.password.signature, key_desc, + ECRYPTFS_PASSWORD_SIG_SIZE); + auth_tok->token.password.session_key_encryption_key_bytes = + ECRYPTFS_MAX_KEY_BYTES; + /* + * Removed auth_tok->token.password.salt and + * auth_tok->token.password.session_key_encryption_key + * initialization from the original code + */ + /* TODO: Make the hash parameterizable via policy */ + auth_tok->token.password.flags |= + ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET; + /* The kernel code will encrypt the session key. 
*/ + auth_tok->session_key.encrypted_key[0] = 0; + auth_tok->session_key.encrypted_key_size = 0; + /* Default; subject to change by kernel eCryptfs */ + auth_tok->token.password.hash_algo = PGP_DIGEST_ALGO_SHA512; + auth_tok->token.password.flags &= ~(ECRYPTFS_PERSISTENT_PASSWORD); + return 0; +} +EXPORT_SYMBOL(ecryptfs_fill_auth_tok); + +MODULE_LICENSE("GPL"); diff --git a/security/keys/ecryptfs_format.h b/security/keys/ecryptfs_format.h new file mode 100644 index 000000000000..40294de238bb --- /dev/null +++ b/security/keys/ecryptfs_format.h @@ -0,0 +1,30 @@ +/* + * ecryptfs_format.h: helper functions for the encrypted key type + * + * Copyright (C) 2006 International Business Machines Corp. + * Copyright (C) 2010 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Authors: + * Michael A. Halcrow + * Tyler Hicks + * Roberto Sassu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + */ + +#ifndef __KEYS_ECRYPTFS_H +#define __KEYS_ECRYPTFS_H + +#include + +#define PGP_DIGEST_ALGO_SHA512 10 + +u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok); +void ecryptfs_get_versions(int *major, int *minor, int *file_version); +int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok, + const char *key_desc); + +#endif /* __KEYS_ECRYPTFS_H */ diff --git a/security/keys/encrypted.c b/security/keys/encrypted.c index 89981c987ba7..e7eca9ec4c65 100644 --- a/security/keys/encrypted.c +++ b/security/keys/encrypted.c @@ -29,11 +29,13 @@ #include #include #include +#include #include #include #include #include "encrypted.h" +#include "ecryptfs_format.h" static const char KEY_TRUSTED_PREFIX[] = "trusted:"; static const char KEY_USER_PREFIX[] = "user:"; @@ -41,11 +43,13 @@ static const char hash_alg[] = "sha256"; static const char hmac_alg[] = "hmac(sha256)"; static const char blkcipher_alg[] = "cbc(aes)"; static const char key_format_default[] = "default"; +static const char key_format_ecryptfs[] = "ecryptfs"; static unsigned int ivsize; static int blksize; #define KEY_TRUSTED_PREFIX_LEN (sizeof (KEY_TRUSTED_PREFIX) - 1) #define KEY_USER_PREFIX_LEN (sizeof (KEY_USER_PREFIX) - 1) +#define KEY_ECRYPTFS_DESC_LEN 16 #define HASH_SIZE SHA256_DIGEST_SIZE #define MAX_DATA_SIZE 4096 #define MIN_DATA_SIZE 20 @@ -63,11 +67,12 @@ enum { }; enum { - Opt_error = -1, Opt_default + Opt_error = -1, Opt_default, Opt_ecryptfs }; static const match_table_t key_format_tokens = { {Opt_default, "default"}, + {Opt_ecryptfs, "ecryptfs"}, {Opt_error, NULL} }; @@ -94,6 +99,34 @@ static int aes_get_sizes(void) return 0; } +/* + * valid_ecryptfs_desc - verify the description of a new/loaded encrypted key + * + * The description of a encrypted key with format 'ecryptfs' must contain + * exactly 16 hexadecimal characters. 
+ * + */ +static int valid_ecryptfs_desc(const char *ecryptfs_desc) +{ + int i; + + if (strlen(ecryptfs_desc) != KEY_ECRYPTFS_DESC_LEN) { + pr_err("encrypted_key: key description must be %d hexadecimal " + "characters long\n", KEY_ECRYPTFS_DESC_LEN); + return -EINVAL; + } + + for (i = 0; i < KEY_ECRYPTFS_DESC_LEN; i++) { + if (!isxdigit(ecryptfs_desc[i])) { + pr_err("encrypted_key: key description must contain " + "only hexadecimal characters\n"); + return -EINVAL; + } + } + + return 0; +} + /* * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key * @@ -158,7 +191,7 @@ static int datablob_parse(char *datablob, const char **format, } key_cmd = match_token(keyword, key_tokens, args); - /* Get optional format: default */ + /* Get optional format: default | ecryptfs */ p = strsep(&datablob, " \t"); if (!p) { pr_err("encrypted_key: insufficient parameters specified\n"); @@ -167,6 +200,7 @@ static int datablob_parse(char *datablob, const char **format, key_format = match_token(p, key_format_tokens, args); switch (key_format) { + case Opt_ecryptfs: case Opt_default: *format = p; *master_desc = strsep(&datablob, " \t"); @@ -601,6 +635,17 @@ static struct encrypted_key_payload *encrypted_key_alloc(struct key *key, format_len = (!format) ? strlen(key_format_default) : strlen(format); decrypted_datalen = dlen; payload_datalen = decrypted_datalen; + if (format && !strcmp(format, key_format_ecryptfs)) { + if (dlen != ECRYPTFS_MAX_KEY_BYTES) { + pr_err("encrypted_key: keylen for the ecryptfs format " + "must be equal to %d bytes\n", + ECRYPTFS_MAX_KEY_BYTES); + return ERR_PTR(-EINVAL); + } + decrypted_datalen = ECRYPTFS_MAX_KEY_BYTES; + payload_datalen = sizeof(struct ecryptfs_auth_tok); + } + encrypted_datalen = roundup(decrypted_datalen, blksize); datablob_len = format_len + 1 + strlen(master_desc) + 1 @@ -686,8 +731,14 @@ static void __ekey_init(struct encrypted_key_payload *epayload, if (!format) memcpy(epayload->format, key_format_default, format_len); - else + else { + if (!strcmp(format, key_format_ecryptfs)) + epayload->decrypted_data = + ecryptfs_get_auth_tok_key((struct ecryptfs_auth_tok *)epayload->payload_data); + memcpy(epayload->format, format, format_len); + } + memcpy(epayload->master_desc, master_desc, strlen(master_desc)); memcpy(epayload->datalen, datalen, strlen(datalen)); } @@ -699,11 +750,21 @@ static void __ekey_init(struct encrypted_key_payload *epayload, * itself. For an old key, decrypt the hex encoded data. 
*/ static int encrypted_init(struct encrypted_key_payload *epayload, - const char *format, const char *master_desc, - const char *datalen, const char *hex_encoded_iv) + const char *key_desc, const char *format, + const char *master_desc, const char *datalen, + const char *hex_encoded_iv) { int ret = 0; + if (format && !strcmp(format, key_format_ecryptfs)) { + ret = valid_ecryptfs_desc(key_desc); + if (ret < 0) + return ret; + + ecryptfs_fill_auth_tok((struct ecryptfs_auth_tok *)epayload->payload_data, + key_desc); + } + __ekey_init(epayload, format, master_desc, datalen); if (!hex_encoded_iv) { get_random_bytes(epayload->iv, ivsize); @@ -753,8 +814,8 @@ static int encrypted_instantiate(struct key *key, const void *data, ret = PTR_ERR(epayload); goto out; } - ret = encrypted_init(epayload, format, master_desc, decrypted_datalen, - hex_encoded_iv); + ret = encrypted_init(epayload, key->description, format, master_desc, + decrypted_datalen, hex_encoded_iv); if (ret < 0) { kfree(epayload); goto out; -- cgit v1.2.3 From 1252cc3b232e582e887623dc5f70979418caaaa2 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Mon, 27 Jun 2011 13:45:45 +0200 Subject: eCryptfs: added support for the encrypted key type The function ecryptfs_keyring_auth_tok_for_sig() has been modified in order to search keys of both 'user' and 'encrypted' types. Signed-off-by: Roberto Sassu Acked-by: Gianluca Ramunno Acked-by: Tyler Hicks Signed-off-by: Mimi Zohar --- fs/ecryptfs/ecryptfs_kernel.h | 41 +++++++++++++++++++++++++++++++++++++++-- fs/ecryptfs/keystore.c | 13 ++++++++----- 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index bb8ec5d4301c..b36c5572b3f3 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -29,6 +29,7 @@ #define ECRYPTFS_KERNEL_H #include +#include #include #include #include @@ -78,11 +79,47 @@ struct ecryptfs_page_crypt_context { } param; }; +#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE) +static inline struct ecryptfs_auth_tok * +ecryptfs_get_encrypted_key_payload_data(struct key *key) +{ + if (key->type == &key_type_encrypted) + return (struct ecryptfs_auth_tok *) + (&((struct encrypted_key_payload *)key->payload.data)->payload_data); + else + return NULL; +} + +static inline struct key *ecryptfs_get_encrypted_key(char *sig) +{ + return request_key(&key_type_encrypted, sig, NULL); +} + +#else +static inline struct ecryptfs_auth_tok * +ecryptfs_get_encrypted_key_payload_data(struct key *key) +{ + return NULL; +} + +static inline struct key *ecryptfs_get_encrypted_key(char *sig) +{ + return ERR_PTR(-ENOKEY); +} + +#endif /* CONFIG_ENCRYPTED_KEYS */ + static inline struct ecryptfs_auth_tok * ecryptfs_get_key_payload_data(struct key *key) { - return (struct ecryptfs_auth_tok *) - (((struct user_key_payload*)key->payload.data)->data); + struct ecryptfs_auth_tok *auth_tok; + + auth_tok = ecryptfs_get_encrypted_key_payload_data(key); + if (!auth_tok) + return (struct ecryptfs_auth_tok *) + (((struct user_key_payload *)key->payload.data)->data); + else + return auth_tok; } #define ECRYPTFS_MAX_KEYSET_SIZE 1024 diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 27a7fefb83eb..2cff13ac8937 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -1635,11 +1635,14 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, (*auth_tok_key) = request_key(&key_type_user, sig, NULL); if (!(*auth_tok_key) || IS_ERR(*auth_tok_key)) { - printk(KERN_ERR 
"Could not find key with description: [%s]\n", - sig); - rc = process_request_key_err(PTR_ERR(*auth_tok_key)); - (*auth_tok_key) = NULL; - goto out; + (*auth_tok_key) = ecryptfs_get_encrypted_key(sig); + if (!(*auth_tok_key) || IS_ERR(*auth_tok_key)) { + printk(KERN_ERR "Could not find key with description: [%s]\n", + sig); + rc = process_request_key_err(PTR_ERR(*auth_tok_key)); + (*auth_tok_key) = NULL; + goto out; + } } down_write(&(*auth_tok_key)->sem); rc = ecryptfs_verify_auth_tok_from_key(*auth_tok_key, auth_tok); -- cgit v1.2.3 From d3ad8434aa83ef7c88bc91edcfe012cdcbab9f3e Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Mon, 27 Jun 2011 12:36:29 -0400 Subject: jbd2: use WRITE_SYNC in journal checkpoint In journal checkpoint, we write the buffer and wait for its finish. But in cfq, the async queue has a very low priority, and in our test, if there are too many sync queues and every queue is filled up with requests, the write request will be delayed for quite a long time and all the tasks which are waiting for journal space will end with errors like: INFO: task attr_set:3816 blocked for more than 120 seconds. "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. attr_set D ffff880028393480 0 3816 1 0x00000000 ffff8802073fbae8 0000000000000086 ffff8802140847c8 ffff8800283934e8 ffff8802073fb9d8 ffffffff8103e456 ffff8802140847b8 ffff8801ed728080 ffff8801db4bc080 ffff8801ed728450 ffff880028393480 0000000000000002 Call Trace: [] ? __dequeue_entity+0x33/0x38 [] ? need_resched+0x23/0x2d [] ? thread_return+0xa2/0xbc [] ? jbd2_journal_dirty_metadata+0x116/0x126 [jbd2] [] ? jbd2_journal_dirty_metadata+0x116/0x126 [jbd2] [] __mutex_lock_common+0x14e/0x1a9 [] ? brelse+0x13/0x15 [ext4] [] __mutex_lock_slowpath+0x19/0x1b [] mutex_lock+0x1b/0x32 [] __jbd2_journal_insert_checkpoint+0xe3/0x20c [jbd2] [] start_this_handle+0x438/0x527 [jbd2] [] ? autoremove_wake_function+0x0/0x3e [] jbd2_journal_start+0xa1/0xcc [jbd2] [] ext4_journal_start_sb+0x57/0x81 [ext4] [] ext4_xattr_set+0x6c/0xe3 [ext4] [] ext4_xattr_user_set+0x42/0x4b [ext4] [] generic_setxattr+0x6b/0x76 [] __vfs_setxattr_noperm+0x47/0xc0 [] vfs_setxattr+0x7f/0x9a [] setxattr+0xb5/0xe8 [] ? do_filp_open+0x571/0xa6e [] sys_fsetxattr+0x6b/0x91 [] system_call_fastpath+0x16/0x1b So this patch tries to use WRITE_SYNC in __flush_batch so that the request will be moved into sync queue and handled by cfq timely. We also use the new plug, sot that all the WRITE_SYNC requests can be given as a whole when we unplug it. Signed-off-by: Tao Ma Signed-off-by: "Theodore Ts'o" Cc: Jan Kara Reported-by: Robin Dong --- fs/jbd2/checkpoint.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 2c62c5aae82f..16a698bd906d 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -257,9 +257,12 @@ static void __flush_batch(journal_t *journal, int *batch_count) { int i; + struct blk_plug plug; + blk_start_plug(&plug); for (i = 0; i < *batch_count; i++) - write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE); + write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC); + blk_finish_plug(&plug); for (i = 0; i < *batch_count; i++) { struct buffer_head *bh = journal->j_chkpt_bhs[i]; -- cgit v1.2.3 From fc5070b527b8243e9f4369e4e79ab431a5a1cb79 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 27 Jun 2011 18:19:30 +0200 Subject: mach-ux500: fix USB build error The mach-ux500/usb.c was referencing DMA macros, but not including so it didn't compile. Fixed by a proper #include. 
Cc: Arnd Bergmann Cc: Mian Yousaf Kaukab Signed-off-by: Linus Walleij Signed-off-by: Arnd Bergmann --- arch/arm/mach-ux500/usb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/mach-ux500/usb.c b/arch/arm/mach-ux500/usb.c index 82e535953fd9..0a01cbdfe063 100644 --- a/arch/arm/mach-ux500/usb.c +++ b/arch/arm/mach-ux500/usb.c @@ -6,6 +6,7 @@ */ #include #include +#include #include #include #include -- cgit v1.2.3 From ed7a7e16724a4123fce1fc0ff1f5131a0596f189 Mon Sep 17 00:00:00 2001 From: Robin Dong Date: Mon, 27 Jun 2011 15:35:53 -0400 Subject: ext4: fix incorrect error msg in ext4_ext_insert_index In function ext4_ext_insert_index, when eh_entries of curp is bigger than eh_max, error messages will be printed out, but the content is about logical and ei_block, which is incorrect. Signed-off-by: Robin Dong Signed-off-by: "Theodore Ts'o" --- fs/ext4/extents.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index f815cc81e7a2..eb63c7b8dfd2 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -808,8 +808,9 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) > le16_to_cpu(curp->p_hdr->eh_max))) { EXT4_ERROR_INODE(inode, - "logical %d == ei_block %d!", - logical, le32_to_cpu(curp->p_idx->ei_block)); + "eh_entries %d > eh_max %d!", + le16_to_cpu(curp->p_hdr->eh_entries), + le16_to_cpu(curp->p_hdr->eh_max)); return -EIO; } if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { -- cgit v1.2.3 From 5993548725ba3f3deb2b90a681a62dbb7bd17961 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Mon, 27 Jun 2011 11:59:43 -0700 Subject: Input: remove unneeded version.h includes It was pointed out by 'make versioncheck' that some includes of linux/version.h are not needed in drivers/input/. This patch removes them.
Signed-off-by: Jesper Juhl Acked-by: Mike Frysinger Acked-by: Michael Hennerich Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/adp5588-keys.c | 1 - drivers/input/keyboard/adp5589-keys.c | 1 - drivers/input/misc/bfin_rotary.c | 1 - drivers/input/mouse/pxa930_trkball.c | 1 - drivers/input/mouse/sentelic.c | 1 - 5 files changed, 5 deletions(-) diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index af45d275f686..7b404e5443ed 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -9,7 +9,6 @@ */ #include -#include #include #include #include diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c index 631598663aab..c7708263051b 100644 --- a/drivers/input/keyboard/adp5589-keys.c +++ b/drivers/input/keyboard/adp5589-keys.c @@ -8,7 +8,6 @@ */ #include -#include #include #include #include diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c index 4f72bdd69410..d00edc9f39d1 100644 --- a/drivers/input/misc/bfin_rotary.c +++ b/drivers/input/misc/bfin_rotary.c @@ -6,7 +6,6 @@ */ #include -#include #include #include #include diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c index 943cfec15665..6c5d84fcdea1 100644 --- a/drivers/input/mouse/pxa930_trkball.c +++ b/drivers/input/mouse/pxa930_trkball.c @@ -12,7 +12,6 @@ #include #include -#include #include #include #include diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c index 1242775fee19..2fc887a51066 100644 --- a/drivers/input/mouse/sentelic.c +++ b/drivers/input/mouse/sentelic.c @@ -20,7 +20,6 @@ */ #include -#include #include #include #include -- cgit v1.2.3 From 51a3db41e18254b938279cb4b160310e11a13d4f Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Mon, 27 Jun 2011 12:38:43 -0700 Subject: Input: tnetv107x-keypad - fix MODULE_ALIAS Remove the space between "platform:" prefix and the driver name. Signed-off-by: Axel Lin Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/tnetv107x-keypad.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c index c8f097a15d89..1c58681de81f 100644 --- a/drivers/input/keyboard/tnetv107x-keypad.c +++ b/drivers/input/keyboard/tnetv107x-keypad.c @@ -337,5 +337,5 @@ module_exit(keypad_exit); MODULE_AUTHOR("Cyril Chemparathy"); MODULE_DESCRIPTION("TNETV107X Keypad Driver"); -MODULE_ALIAS("platform: tnetv107x-keypad"); +MODULE_ALIAS("platform:tnetv107x-keypad"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 6a592a7f4514cd744e154a146db1ab7af06a7ee7 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Mon, 27 Jun 2011 12:42:12 -0700 Subject: Input: tnetv107x-ts - fix MODULE_ALIAS Remove the space between "platform:" prefix and the driver name. 
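The space matters because platform-bus module autoloading compares the MODALIAS uevent string against each module's alias table byte for byte; the same problem is fixed for tnetv107x-ts just below. A minimal sketch of the convention follows, assuming only the driver name used in this patch:

#include <linux/module.h>
#include <linux/platform_device.h>

/* For a device registered as, for example,
 *     platform_device_register_simple("tnetv107x-keypad", -1, NULL, 0);
 * the platform core emits a uevent containing
 *     MODALIAS=platform:tnetv107x-keypad
 * and modprobe loads whichever module declares exactly that alias, i.e.
 * the "platform:" prefix immediately followed by the driver name. */
MODULE_ALIAS("platform:tnetv107x-keypad");
/* With the stray space, "platform: tnetv107x-keypad" never matches the
 * uevent string, so the module is not loaded automatically. */
MODULE_LICENSE("GPL");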
Signed-off-by: Axel Lin Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/tnetv107x-ts.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c index 22a3411e93c5..089b0a0f3d8c 100644 --- a/drivers/input/touchscreen/tnetv107x-ts.c +++ b/drivers/input/touchscreen/tnetv107x-ts.c @@ -393,5 +393,5 @@ module_exit(tsc_exit); MODULE_AUTHOR("Cyril Chemparathy"); MODULE_DESCRIPTION("TNETV107X Touchscreen Driver"); -MODULE_ALIAS("platform: tnetv107x-ts"); +MODULE_ALIAS("platform:tnetv107x-ts"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From b23302052d96a3945e4c72aca77b5fd28884c353 Mon Sep 17 00:00:00 2001 From: David Jander Date: Thu, 23 Jun 2011 01:30:09 -0700 Subject: Input: gpio_keys - move to late_initcall Initialize gpio_keys driver at late_initcall level, to give it a chance to work with GPIO expanders that might not be ready yet if we initialize the driver at module_init time. This is strictly a band-aid until there is a better way to specify inter-device dependencies. Signed-off-by: David Jander Acked-by: Grant Likely Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/gpio_keys.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 6d0e2f64122b..320b59ab8902 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -644,7 +644,7 @@ static void __exit gpio_keys_exit(void) platform_driver_unregister(&gpio_keys_device_driver); } -module_init(gpio_keys_init); +late_initcall(gpio_keys_init); module_exit(gpio_keys_exit); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From a001a8f3cedb0e3cb92ff3abdb3170df7da92d47 Mon Sep 17 00:00:00 2001 From: Ping Cheng Date: Mon, 27 Jun 2011 12:57:58 -0700 Subject: Input: wacom - Wacom Bamboo Pen D4 has 1024 pressure levels D4 has 1024, not 512, pressure levels. 
Reported-by: David Foley Signed-off-by: Ping Cheng Signed-off-by: Dmitry Torokhov --- drivers/input/tablet/wacom_wac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 2ea0d2e55a4e..1cbb9a89bff4 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1462,7 +1462,7 @@ static const struct wacom_features wacom_features_0xD3 = { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD4 = - { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 255, + { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD6 = { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, -- cgit v1.2.3 From 11d0cf8859451d6336959204b2d4cc173dd1aa4e Mon Sep 17 00:00:00 2001 From: Ping Cheng Date: Mon, 27 Jun 2011 12:57:58 -0700 Subject: Input: wacom - add 3 new models - 6A, 6B, and 97 Tested-by: Alex Tervoort for 6A Signed-off-by: David Foley Signed-off-by: Ping Cheng Signed-off-by: Dmitry Torokhov --- drivers/input/tablet/wacom_wac.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 1cbb9a89bff4..0c302c925ecf 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1299,6 +1299,12 @@ static const struct wacom_features wacom_features_0x65 = static const struct wacom_features wacom_features_0x69 = { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES }; +static const struct wacom_features wacom_features_0x6A = + { "Wacom Bamboo1 4x6", WACOM_PKGLEN_GRAPHIRE, 14760, 9225, 1023, + 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x6B = + { "Wacom Bamboo1 5x8", WACOM_PKGLEN_GRAPHIRE, 21648, 13530, 1023, + 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x20 = { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -1431,6 +1437,9 @@ static const struct wacom_features wacom_features_0x90 = static const struct wacom_features wacom_features_0x93 = { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x97 = + { "Wacom ISDv4 97", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 511, + 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x9A = { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -1515,6 +1524,8 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x64) }, { USB_DEVICE_WACOM(0x65) }, { USB_DEVICE_WACOM(0x69) }, + { USB_DEVICE_WACOM(0x6A) }, + { USB_DEVICE_WACOM(0x6B) }, { USB_DEVICE_WACOM(0x20) }, { USB_DEVICE_WACOM(0x21) }, { USB_DEVICE_WACOM(0x22) }, @@ -1575,6 +1586,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xCC) }, { USB_DEVICE_WACOM(0x90) }, { USB_DEVICE_WACOM(0x93) }, + { USB_DEVICE_WACOM(0x97) }, { USB_DEVICE_WACOM(0x9A) }, { USB_DEVICE_WACOM(0x9F) }, { USB_DEVICE_WACOM(0xE2) }, -- cgit v1.2.3 From 58c244009ef6ca450f0d787828a7f2f27651db5b Mon Sep 17 00:00:00 2001 From: 
Igor Grinberg Date: Mon, 27 Jun 2011 13:06:27 -0700 Subject: Input: ads7846 - cleanup GPIO initialization Use gpio_request_one() instead of multiple gpiolib calls. This also simplifies error handling a bit. Signed-off-by: Igor Grinberg Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/ads7846.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 5196861b86ef..d507b9b67806 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -967,17 +967,12 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784 ts->get_pendown_state = pdata->get_pendown_state; } else if (gpio_is_valid(pdata->gpio_pendown)) { - err = gpio_request(pdata->gpio_pendown, "ads7846_pendown"); + err = gpio_request_one(pdata->gpio_pendown, GPIOF_IN, + "ads7846_pendown"); if (err) { - dev_err(&spi->dev, "failed to request pendown GPIO%d\n", - pdata->gpio_pendown); - return err; - } - err = gpio_direction_input(pdata->gpio_pendown); - if (err) { - dev_err(&spi->dev, "failed to setup pendown GPIO%d\n", - pdata->gpio_pendown); - gpio_free(pdata->gpio_pendown); + dev_err(&spi->dev, + "failed to request/setup pendown GPIO%d: %d\n", + pdata->gpio_pendown, err); return err; } -- cgit v1.2.3 From ff9893dc8aa622a4f122293a6861566a284edea5 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Mon, 27 Jun 2011 16:36:31 -0400 Subject: ext4: split ext4_ind_truncate from ext4_truncate We are about to move all indirect inode functions to a new file. Before we do that, let's split ext4_ind_truncate() out of ext4_truncate() leaving only generic code in the latter, so we will be able to move ext4_ind_truncate() to the new file. Signed-off-by: Amir Goldstein Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 2 ++ fs/ext4/inode.c | 36 ++++++++++++++++++++---------------- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 1921392cd708..8532dd43d320 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1834,6 +1834,8 @@ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); extern qsize_t *ext4_get_reserved_space(struct inode *inode); extern void ext4_da_update_reserve_space(struct inode *inode, int used, int quota_claim); +extern void ext4_ind_truncate(struct inode *inode); + /* ioctl.c */ extern long ext4_ioctl(struct file *, unsigned int, unsigned long); extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index e3126c051006..a8f310b77f56 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4470,6 +4470,26 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) * ext4_truncate() run will find them and release them. 
*/ void ext4_truncate(struct inode *inode) +{ + trace_ext4_truncate_enter(inode); + + if (!ext4_can_truncate(inode)) + return; + + ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); + + if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) + ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); + + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) + ext4_ext_truncate(inode); + else + ext4_ind_truncate(inode); + + trace_ext4_truncate_exit(inode); +} + +void ext4_ind_truncate(struct inode *inode) { handle_t *handle; struct ext4_inode_info *ei = EXT4_I(inode); @@ -4484,22 +4504,6 @@ void ext4_truncate(struct inode *inode) ext4_lblk_t last_block, max_block; unsigned blocksize = inode->i_sb->s_blocksize; - trace_ext4_truncate_enter(inode); - - if (!ext4_can_truncate(inode)) - return; - - ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); - - if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) - ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); - - if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { - ext4_ext_truncate(inode); - trace_ext4_truncate_exit(inode); - return; - } - handle = start_transaction(inode); if (IS_ERR(handle)) return; /* AKPM: return what? */ -- cgit v1.2.3 From 8bb2b247124ba6093455d4aef26743b1bef27bc5 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Mon, 27 Jun 2011 17:10:28 -0400 Subject: ext4: rename ext4_indirect_* funcs to ext4_ind_* We are going to move all ext4_ind_* functions to indirect.c. Before we do that, let's rename 2 functions called ext4_indirect_* to ext4_ind_*, to keep to the naming convention. Signed-off-by: Amir Goldstein Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index a8f310b77f56..6c1d28e37235 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1075,8 +1075,7 @@ qsize_t *ext4_get_reserved_space(struct inode *inode) * Calculate the number of metadata blocks need to reserve * to allocate a new block at @lblocks for non extent file based file */ -static int ext4_indirect_calc_metadata_amount(struct inode *inode, - sector_t lblock) +static int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock) { struct ext4_inode_info *ei = EXT4_I(inode); sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1); @@ -1107,7 +1106,7 @@ static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) return ext4_ext_calc_metadata_amount(inode, lblock); - return ext4_indirect_calc_metadata_amount(inode, lblock); + return ext4_ind_calc_metadata_amount(inode, lblock); } /* @@ -5456,8 +5455,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, return 0; } -static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, - int chunk) +static int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk) { int indirects; @@ -5483,7 +5481,7 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) { if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - return ext4_indirect_trans_blocks(inode, nrblocks, chunk); + return ext4_ind_trans_blocks(inode, nrblocks, chunk); return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); } -- cgit v1.2.3 From a212d1a71deea10ba4f6de2aaac0221be34ddb29 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Tue, 7 Jun 2011 11:56:50 +0800 Subject: jbd: Use WRITE_SYNC 
in journal checkpoint. In journal checkpoint, we write the buffers and wait for them to finish. But in cfq, the async queue has a very low priority; in our test, if there are too many sync queues and every queue is filled up with requests, the checkpointing process will hang waiting for log space. So this patch uses WRITE_SYNC in __flush_batch so that the requests are moved into the sync queue and handled by cfq in a timely manner. We also use the new plugging API, so that all the WRITE_SYNC requests can be submitted as a whole when we unplug. Reported-by: Robin Dong Signed-off-by: Tao Ma Signed-off-by: Jan Kara --- fs/jbd/checkpoint.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 61655a37c731..f94fc48ff3a0 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -22,6 +22,7 @@ #include #include #include +#include #include /* @@ -257,9 +258,12 @@ static void __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count) { int i; + struct blk_plug plug; + blk_start_plug(&plug); for (i = 0; i < *batch_count; i++) - write_dirty_buffer(bhs[i], WRITE); + write_dirty_buffer(bhs[i], WRITE_SYNC); + blk_finish_plug(&plug); for (i = 0; i < *batch_count; i++) { struct buffer_head *bh = bhs[i]; -- cgit v1.2.3 From 1f7d1e77419050831a905353683807fa69a26625 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 27 Jun 2011 19:16:02 -0400 Subject: ext4: move __ext4_check_blockref to block_validity.c In preparation for moving the indirect functions to a separate file, move __ext4_check_blockref() to block_validity.c and rename it to ext4_check_blockref(), which is exported as a globally visible function. Also, rename the cpp macro ext4_check_inode_blockref() to ext4_ind_check_inode(), to make it clear that it is only valid for use with non-extent mapped inodes.
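The diff that follows shows the mechanics: the cpp macros stay in the header and merely capture the call site (__func__, __LINE__) before forwarding to the now-global checker. A generic sketch of that idiom, with made-up names, purely for illustration:

/*
 * Sketch only: a header-style macro records the caller's function and line
 * and passes them to an out-of-line checker, so diagnostics point at the
 * call site rather than at the checker itself.
 */
#include <stdio.h>

static int check_range(const char *function, unsigned int line,
		       unsigned long blk, unsigned long max)
{
	if (blk >= max) {
		fprintf(stderr, "%s:%u: invalid block %lu (max %lu)\n",
			function, line, blk, max);
		return -1;
	}
	return 0;
}

#define CHECK_RANGE(blk, max) \
	check_range(__func__, __LINE__, (blk), (max))

int main(void)
{
	return CHECK_RANGE(42UL, 1024UL) ? 1 : 0;
}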
Signed-off-by: "Theodore Ts'o" --- fs/ext4/block_validity.c | 20 ++++++++++++++++++++ fs/ext4/ext4.h | 15 +++++++++++++++ fs/ext4/inode.c | 35 +---------------------------------- 3 files changed, 36 insertions(+), 34 deletions(-) diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index fac90f3fba80..af103be491b0 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -246,3 +246,23 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, return 1; } +int ext4_check_blockref(const char *function, unsigned int line, + struct inode *inode, __le32 *p, unsigned int max) +{ + struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; + __le32 *bref = p; + unsigned int blk; + + while (bref < p+max) { + blk = le32_to_cpu(*bref++); + if (blk && + unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), + blk, 1))) { + es->s_last_error_block = cpu_to_le64(blk); + ext4_error_inode(inode, function, line, blk, + "invalid block"); + return -EIO; + } + } + return 0; +} diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 8532dd43d320..82ba7eb7c4a5 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2124,6 +2124,19 @@ static inline void ext4_mark_super_dirty(struct super_block *sb) sb->s_dirt =1; } +/* + * Block validity checking + */ +#define ext4_check_indirect_blockref(inode, bh) \ + ext4_check_blockref(__func__, __LINE__, inode, \ + (__le32 *)(bh)->b_data, \ + EXT4_ADDR_PER_BLOCK((inode)->i_sb)) + +#define ext4_ind_check_inode(inode) \ + ext4_check_blockref(__func__, __LINE__, inode, \ + EXT4_I(inode)->i_data, \ + EXT4_NDIR_BLOCKS) + /* * Inodes and files operations */ @@ -2153,6 +2166,8 @@ extern void ext4_exit_system_zone(void); extern int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, unsigned int count); +extern int ext4_check_blockref(const char *, unsigned int, + struct inode *, __le32 *, unsigned int); /* extents.c */ extern int ext4_ext_tree_init(handle_t *handle, struct inode *); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 6c1d28e37235..3dca5264ccff 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -360,39 +360,6 @@ static int ext4_block_to_path(struct inode *inode, return n; } -static int __ext4_check_blockref(const char *function, unsigned int line, - struct inode *inode, - __le32 *p, unsigned int max) -{ - struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; - __le32 *bref = p; - unsigned int blk; - - while (bref < p+max) { - blk = le32_to_cpu(*bref++); - if (blk && - unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), - blk, 1))) { - es->s_last_error_block = cpu_to_le64(blk); - ext4_error_inode(inode, function, line, blk, - "invalid block"); - return -EIO; - } - } - return 0; -} - - -#define ext4_check_indirect_blockref(inode, bh) \ - __ext4_check_blockref(__func__, __LINE__, inode, \ - (__le32 *)(bh)->b_data, \ - EXT4_ADDR_PER_BLOCK((inode)->i_sb)) - -#define ext4_check_inode_blockref(inode) \ - __ext4_check_blockref(__func__, __LINE__, inode, \ - EXT4_I(inode)->i_data, \ - EXT4_NDIR_BLOCKS) - /** * ext4_get_branch - read the chain of indirect blocks leading to data * @inode: inode in question @@ -5010,7 +4977,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) (S_ISLNK(inode->i_mode) && !ext4_inode_is_fast_symlink(inode))) { /* Validate block references which are part of inode */ - ret = ext4_check_inode_blockref(inode); + ret = ext4_ind_check_inode(inode); } if (ret) goto bad_inode; -- cgit v1.2.3 From 9f125d641beb898f5bf2fe69583192c18043517a Mon Sep 17 00:00:00 2001 From: 
Theodore Ts'o Date: Mon, 27 Jun 2011 19:16:04 -0400 Subject: ext4: move common truncate functions to header file Move two functions that are needed both by the indirect functions being moved to indirect.c and by inode.c into truncate.h as inline functions, so that we can avoid having duplicate copies of the functions (which can be a maintenance problem) without having to expose them as globally visible functions. Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 48 ++++++------------------------------------------ fs/ext4/truncate.h | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 42 deletions(-) create mode 100644 fs/ext4/truncate.h diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 3dca5264ccff..9b82ac7b0f55 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -47,6 +47,7 @@ #include "xattr.h" #include "acl.h" #include "ext4_extents.h" +#include "truncate.h" #include @@ -88,33 +89,6 @@ static int ext4_inode_is_fast_symlink(struct inode *inode) return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); } -/* - * Work out how many blocks we need to proceed with the next chunk of a - * truncate transaction. - */ -static unsigned long blocks_for_truncate(struct inode *inode) -{ - ext4_lblk_t needed; - - needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9); - - /* Give ourselves just enough room to cope with inodes in which - * i_blocks is corrupt: we've seen disk corruptions in the past - * which resulted in random data in an inode which looked enough - * like a regular file for ext4 to try to delete it. Things - * will go a bit crazy if that happens, but at least we should - * try not to panic the whole kernel. */ - if (needed < 2) - needed = 2; - - /* But we need to bound the transaction so we don't overflow the - * journal. */ - if (needed > EXT4_MAX_TRANS_DATA) - needed = EXT4_MAX_TRANS_DATA; - - return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed; -} - /* * Truncate transactions can be complex and absolutely huge. So we need to * be able to restart the transaction at a conventient checkpoint to make @@ -129,7 +103,7 @@ static handle_t *start_transaction(struct inode *inode) { handle_t *result; - result = ext4_journal_start(inode, blocks_for_truncate(inode)); + result = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)); if (!IS_ERR(result)) return result; @@ -149,7 +123,7 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode) return 0; if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1)) return 0; - if (!ext4_journal_extend(handle, blocks_for_truncate(inode))) + if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode))) return 0; return 1; } @@ -204,7 +178,7 @@ void ext4_evict_inode(struct inode *inode) if (is_bad_inode(inode)) goto no_delete; - handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3); + handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3); if (IS_ERR(handle)) { ext4_std_error(inode->i_sb, PTR_ERR(handle)); /* @@ -1555,16 +1529,6 @@ static int do_journal_get_write_access(handle_t *handle, return ret; } -/* - * Truncate blocks that were not used by write. We have to truncate the - * pagecache as well so that corresponding buffers get properly unmapped.
- */ -static void ext4_truncate_failed_write(struct inode *inode) -{ - truncate_inode_pages(inode->i_mapping, inode->i_size); - ext4_truncate(inode); -} - static int ext4_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); static int ext4_write_begin(struct file *file, struct address_space *mapping, @@ -4134,7 +4098,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, if (unlikely(err)) goto out_err; err = ext4_truncate_restart_trans(handle, inode, - blocks_for_truncate(inode)); + ext4_blocks_for_truncate(inode)); if (unlikely(err)) goto out_err; if (bh) { @@ -4329,7 +4293,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, if (try_to_extend_transaction(handle, inode)) { ext4_mark_inode_dirty(handle, inode); ext4_truncate_restart_trans(handle, inode, - blocks_for_truncate(inode)); + ext4_blocks_for_truncate(inode)); } /* diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h new file mode 100644 index 000000000000..011ba6670d99 --- /dev/null +++ b/fs/ext4/truncate.h @@ -0,0 +1,43 @@ +/* + * linux/fs/ext4/truncate.h + * + * Common inline functions needed for truncate support + */ + +/* + * Truncate blocks that were not used by write. We have to truncate the + * pagecache as well so that corresponding buffers get properly unmapped. + */ +static inline void ext4_truncate_failed_write(struct inode *inode) +{ + truncate_inode_pages(inode->i_mapping, inode->i_size); + ext4_truncate(inode); +} + +/* + * Work out how many blocks we need to proceed with the next chunk of a + * truncate transaction. + */ +static inline unsigned long ext4_blocks_for_truncate(struct inode *inode) +{ + ext4_lblk_t needed; + + needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9); + + /* Give ourselves just enough room to cope with inodes in which + * i_blocks is corrupt: we've seen disk corruptions in the past + * which resulted in random data in an inode which looked enough + * like a regular file for ext4 to try to delete it. Things + * will go a bit crazy if that happens, but at least we should + * try not to panic the whole kernel. */ + if (needed < 2) + needed = 2; + + /* But we need to bound the transaction so we don't overflow the + * journal. */ + if (needed > EXT4_MAX_TRANS_DATA) + needed = EXT4_MAX_TRANS_DATA; + + return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed; +} + -- cgit v1.2.3 From dae1e52cb1267bf8f52e5e47a80fab566d7e8aa4 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Mon, 27 Jun 2011 19:40:50 -0400 Subject: ext4: move ext4_ind_* functions from inode.c to indirect.c This patch moves functions from inode.c to indirect.c. The moved functions are ext4_ind_* functions and their helpers. Functions called from inode.c are declared extern. 
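Most of what moves into the new file below implements the classic direct/indirect block tree, so as a back-of-the-envelope reminder of what each level of that tree can address, here is a small standalone calculation; it assumes 4096-byte blocks and 4-byte block numbers, and ignores other limits that may cap the real maximum file size:

/* Standalone sketch: addressable blocks per level of the indirect tree. */
#include <stdio.h>

int main(void)
{
	unsigned long long blocksize = 4096;		/* assumed block size */
	unsigned long long ptrs = blocksize / 4;	/* 32-bit block numbers per block */
	unsigned long long direct = 12;			/* EXT4_NDIR_BLOCKS */
	unsigned long long total = direct + ptrs + ptrs * ptrs + ptrs * ptrs * ptrs;

	printf("direct blocks:     %llu\n", direct);
	printf("single indirect:   %llu\n", ptrs);
	printf("double indirect:   %llu\n", ptrs * ptrs);
	printf("triple indirect:   %llu\n", ptrs * ptrs * ptrs);
	printf("total addressable: %llu blocks (~%llu GiB)\n",
	       total, total * blocksize >> 30);
	return 0;
}

With these numbers the tree tops out around 4 TiB of addressable data, which is why the block-to-path lookup in the moved code never needs more than four offsets.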
Signed-off-by: Amir Goldstein Signed-off-by: "Theodore Ts'o" --- fs/ext4/Makefile | 2 +- fs/ext4/block_validity.c | 1 + fs/ext4/ext4.h | 9 + fs/ext4/indirect.c | 1510 ++++++++++++++++++++++++++++++++++++++++++++++ fs/ext4/inode.c | 1486 --------------------------------------------- 5 files changed, 1521 insertions(+), 1487 deletions(-) create mode 100644 fs/ext4/indirect.c diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile index 04109460ba9e..56fd8f865930 100644 --- a/fs/ext4/Makefile +++ b/fs/ext4/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \ ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \ ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \ - mmp.o + mmp.o indirect.o ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index af103be491b0..8efb2f0a3447 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -266,3 +266,4 @@ int ext4_check_blockref(const char *function, unsigned int line, } return 0; } + diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 82ba7eb7c4a5..ddaf5043fb38 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1834,6 +1834,15 @@ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); extern qsize_t *ext4_get_reserved_space(struct inode *inode); extern void ext4_da_update_reserve_space(struct inode *inode, int used, int quota_claim); + +/* indirect.c */ +extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, + struct ext4_map_blocks *map, int flags); +extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, + const struct iovec *iov, loff_t offset, + unsigned long nr_segs); +extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock); +extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk); extern void ext4_ind_truncate(struct inode *inode); /* ioctl.c */ diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c new file mode 100644 index 000000000000..c3e85a86e821 --- /dev/null +++ b/fs/ext4/indirect.c @@ -0,0 +1,1510 @@ +/* + * linux/fs/ext4/indirect.c + * + * from + * + * linux/fs/ext4/inode.c + * + * Copyright (C) 1992, 1993, 1994, 1995 + * Remy Card (card@masi.ibp.fr) + * Laboratoire MASI - Institut Blaise Pascal + * Universite Pierre et Marie Curie (Paris VI) + * + * from + * + * linux/fs/minix/inode.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * Goal-directed block allocation by Stephen Tweedie + * (sct@redhat.com), 1993, 1998 + */ + +#include +#include "ext4_jbd2.h" +#include "truncate.h" + +#include + +typedef struct { + __le32 *p; + __le32 key; + struct buffer_head *bh; +} Indirect; + +static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) +{ + p->key = *(p->p = v); + p->bh = bh; +} + +/** + * ext4_block_to_path - parse the block number into array of offsets + * @inode: inode in question (we are only interested in its superblock) + * @i_block: block number to be parsed + * @offsets: array to store the offsets in + * @boundary: set this non-zero if the referred-to block is likely to be + * followed (on disk) by an indirect block. + * + * To store the locations of file's data ext4 uses a data structure common + * for UNIX filesystems - tree of pointers anchored in the inode, with + * data blocks at leaves and indirect blocks in intermediate nodes. 
+ * This function translates the block number into path in that tree - + * return value is the path length and @offsets[n] is the offset of + * pointer to (n+1)th node in the nth one. If @block is out of range + * (negative or too large) warning is printed and zero returned. + * + * Note: function doesn't find node addresses, so no IO is needed. All + * we need to know is the capacity of indirect blocks (taken from the + * inode->i_sb). + */ + +/* + * Portability note: the last comparison (check that we fit into triple + * indirect block) is spelled differently, because otherwise on an + * architecture with 32-bit longs and 8Kb pages we might get into trouble + * if our filesystem had 8Kb blocks. We might use long long, but that would + * kill us on x86. Oh, well, at least the sign propagation does not matter - + * i_block would have to be negative in the very beginning, so we would not + * get there at all. + */ + +static int ext4_block_to_path(struct inode *inode, + ext4_lblk_t i_block, + ext4_lblk_t offsets[4], int *boundary) +{ + int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb); + int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb); + const long direct_blocks = EXT4_NDIR_BLOCKS, + indirect_blocks = ptrs, + double_blocks = (1 << (ptrs_bits * 2)); + int n = 0; + int final = 0; + + if (i_block < direct_blocks) { + offsets[n++] = i_block; + final = direct_blocks; + } else if ((i_block -= direct_blocks) < indirect_blocks) { + offsets[n++] = EXT4_IND_BLOCK; + offsets[n++] = i_block; + final = ptrs; + } else if ((i_block -= indirect_blocks) < double_blocks) { + offsets[n++] = EXT4_DIND_BLOCK; + offsets[n++] = i_block >> ptrs_bits; + offsets[n++] = i_block & (ptrs - 1); + final = ptrs; + } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { + offsets[n++] = EXT4_TIND_BLOCK; + offsets[n++] = i_block >> (ptrs_bits * 2); + offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); + offsets[n++] = i_block & (ptrs - 1); + final = ptrs; + } else { + ext4_warning(inode->i_sb, "block %lu > max in inode %lu", + i_block + direct_blocks + + indirect_blocks + double_blocks, inode->i_ino); + } + if (boundary) + *boundary = final - 1 - (i_block & (ptrs - 1)); + return n; +} + +/** + * ext4_get_branch - read the chain of indirect blocks leading to data + * @inode: inode in question + * @depth: depth of the chain (1 - direct pointer, etc.) + * @offsets: offsets of pointers in inode/indirect blocks + * @chain: place to store the result + * @err: here we store the error value + * + * Function fills the array of triples and returns %NULL + * if everything went OK or the pointer to the last filled triple + * (incomplete one) otherwise. Upon the return chain[i].key contains + * the number of (i+1)-th block in the chain (as it is stored in memory, + * i.e. little-endian 32-bit), chain[i].p contains the address of that + * number (it points into struct inode for i==0 and into the bh->b_data + * for i>0) and chain[i].bh points to the buffer_head of i-th indirect + * block for i>0 and NULL for i==0. In other words, it holds the block + * numbers of the chain, addresses they were taken from (and where we can + * verify that chain did not change) and buffer_heads hosting these + * numbers. 
+ * + * Function stops when it stumbles upon zero pointer (absent block) + * (pointer to last triple returned, *@err == 0) + * or when it gets an IO error reading an indirect block + * (ditto, *@err == -EIO) + * or when it reads all @depth-1 indirect blocks successfully and finds + * the whole chain, all way to the data (returns %NULL, *err == 0). + * + * Need to be called with + * down_read(&EXT4_I(inode)->i_data_sem) + */ +static Indirect *ext4_get_branch(struct inode *inode, int depth, + ext4_lblk_t *offsets, + Indirect chain[4], int *err) +{ + struct super_block *sb = inode->i_sb; + Indirect *p = chain; + struct buffer_head *bh; + + *err = 0; + /* i_data is not going away, no lock needed */ + add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets); + if (!p->key) + goto no_block; + while (--depth) { + bh = sb_getblk(sb, le32_to_cpu(p->key)); + if (unlikely(!bh)) + goto failure; + + if (!bh_uptodate_or_lock(bh)) { + if (bh_submit_read(bh) < 0) { + put_bh(bh); + goto failure; + } + /* validate block references */ + if (ext4_check_indirect_blockref(inode, bh)) { + put_bh(bh); + goto failure; + } + } + + add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets); + /* Reader: end */ + if (!p->key) + goto no_block; + } + return NULL; + +failure: + *err = -EIO; +no_block: + return p; +} + +/** + * ext4_find_near - find a place for allocation with sufficient locality + * @inode: owner + * @ind: descriptor of indirect block. + * + * This function returns the preferred place for block allocation. + * It is used when heuristic for sequential allocation fails. + * Rules are: + * + if there is a block to the left of our position - allocate near it. + * + if pointer will live in indirect block - allocate near that block. + * + if pointer will live in inode - allocate in the same + * cylinder group. + * + * In the latter case we colour the starting block by the callers PID to + * prevent it from clashing with concurrent allocations for a different inode + * in the same block group. The PID is used here so that functionally related + * files will be close-by on-disk. + * + * Caller must make sure that @ind is valid and will stay that way. + */ +static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data; + __le32 *p; + ext4_fsblk_t bg_start; + ext4_fsblk_t last_block; + ext4_grpblk_t colour; + ext4_group_t block_group; + int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); + + /* Try to find previous block */ + for (p = ind->p - 1; p >= start; p--) { + if (*p) + return le32_to_cpu(*p); + } + + /* No such thing, so let's try location of indirect block */ + if (ind->bh) + return ind->bh->b_blocknr; + + /* + * It is going to be referred to from the inode itself? OK, just put it + * into the same cylinder group then. + */ + block_group = ei->i_block_group; + if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { + block_group &= ~(flex_size-1); + if (S_ISREG(inode->i_mode)) + block_group++; + } + bg_start = ext4_group_first_block_no(inode->i_sb, block_group); + last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; + + /* + * If we are doing delayed allocation, we don't need take + * colour into account. 
+ */ + if (test_opt(inode->i_sb, DELALLOC)) + return bg_start; + + if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) + colour = (current->pid % 16) * + (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); + else + colour = (current->pid % 16) * ((last_block - bg_start) / 16); + return bg_start + colour; +} + +/** + * ext4_find_goal - find a preferred place for allocation. + * @inode: owner + * @block: block we want + * @partial: pointer to the last triple within a chain + * + * Normally this function find the preferred place for block allocation, + * returns it. + * Because this is only used for non-extent files, we limit the block nr + * to 32 bits. + */ +static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, + Indirect *partial) +{ + ext4_fsblk_t goal; + + /* + * XXX need to get goal block from mballoc's data structures + */ + + goal = ext4_find_near(inode, partial); + goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; + return goal; +} + +/** + * ext4_blks_to_allocate - Look up the block map and count the number + * of direct blocks need to be allocated for the given branch. + * + * @branch: chain of indirect blocks + * @k: number of blocks need for indirect blocks + * @blks: number of data blocks to be mapped. + * @blocks_to_boundary: the offset in the indirect block + * + * return the total number of blocks to be allocate, including the + * direct and indirect blocks. + */ +static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks, + int blocks_to_boundary) +{ + unsigned int count = 0; + + /* + * Simple case, [t,d]Indirect block(s) has not allocated yet + * then it's clear blocks on that path have not allocated + */ + if (k > 0) { + /* right now we don't handle cross boundary allocation */ + if (blks < blocks_to_boundary + 1) + count += blks; + else + count += blocks_to_boundary + 1; + return count; + } + + count++; + while (count < blks && count <= blocks_to_boundary && + le32_to_cpu(*(branch[0].p + count)) == 0) { + count++; + } + return count; +} + +/** + * ext4_alloc_blocks: multiple allocate blocks needed for a branch + * @handle: handle for this transaction + * @inode: inode which needs allocated blocks + * @iblock: the logical block to start allocated at + * @goal: preferred physical block of allocation + * @indirect_blks: the number of blocks need to allocate for indirect + * blocks + * @blks: number of desired blocks + * @new_blocks: on return it will store the new block numbers for + * the indirect blocks(if needed) and the first direct block, + * @err: on return it will store the error code + * + * This function will return the number of blocks allocated as + * requested by the passed-in parameters. + */ +static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, + ext4_lblk_t iblock, ext4_fsblk_t goal, + int indirect_blks, int blks, + ext4_fsblk_t new_blocks[4], int *err) +{ + struct ext4_allocation_request ar; + int target, i; + unsigned long count = 0, blk_allocated = 0; + int index = 0; + ext4_fsblk_t current_block = 0; + int ret = 0; + + /* + * Here we try to allocate the requested multiple blocks at once, + * on a best-effort basis. + * To build a branch, we should allocate blocks for + * the indirect blocks(if not allocated yet), and at least + * the first direct block of this branch. 
That's the + * minimum number of blocks need to allocate(required) + */ + /* first we try to allocate the indirect blocks */ + target = indirect_blks; + while (target > 0) { + count = target; + /* allocating blocks for indirect blocks and direct blocks */ + current_block = ext4_new_meta_blocks(handle, inode, goal, + 0, &count, err); + if (*err) + goto failed_out; + + if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) { + EXT4_ERROR_INODE(inode, + "current_block %llu + count %lu > %d!", + current_block, count, + EXT4_MAX_BLOCK_FILE_PHYS); + *err = -EIO; + goto failed_out; + } + + target -= count; + /* allocate blocks for indirect blocks */ + while (index < indirect_blks && count) { + new_blocks[index++] = current_block++; + count--; + } + if (count > 0) { + /* + * save the new block number + * for the first direct block + */ + new_blocks[index] = current_block; + printk(KERN_INFO "%s returned more blocks than " + "requested\n", __func__); + WARN_ON(1); + break; + } + } + + target = blks - count ; + blk_allocated = count; + if (!target) + goto allocated; + /* Now allocate data blocks */ + memset(&ar, 0, sizeof(ar)); + ar.inode = inode; + ar.goal = goal; + ar.len = target; + ar.logical = iblock; + if (S_ISREG(inode->i_mode)) + /* enable in-core preallocation only for regular files */ + ar.flags = EXT4_MB_HINT_DATA; + + current_block = ext4_mb_new_blocks(handle, &ar, err); + if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) { + EXT4_ERROR_INODE(inode, + "current_block %llu + ar.len %d > %d!", + current_block, ar.len, + EXT4_MAX_BLOCK_FILE_PHYS); + *err = -EIO; + goto failed_out; + } + + if (*err && (target == blks)) { + /* + * if the allocation failed and we didn't allocate + * any blocks before + */ + goto failed_out; + } + if (!*err) { + if (target == blks) { + /* + * save the new block number + * for the first direct block + */ + new_blocks[index] = current_block; + } + blk_allocated += ar.len; + } +allocated: + /* total number of blocks allocated for direct blocks */ + ret = blk_allocated; + *err = 0; + return ret; +failed_out: + for (i = 0; i < index; i++) + ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0); + return ret; +} + +/** + * ext4_alloc_branch - allocate and set up a chain of blocks. + * @handle: handle for this transaction + * @inode: owner + * @indirect_blks: number of allocated indirect blocks + * @blks: number of allocated direct blocks + * @goal: preferred place for allocation + * @offsets: offsets (in the blocks) to store the pointers to next. + * @branch: place to store the chain in. + * + * This function allocates blocks, zeroes out all but the last one, + * links them into chain and (if we are synchronous) writes them to disk. + * In other words, it prepares a branch that can be spliced onto the + * inode. It stores the information about that chain in the branch[], in + * the same format as ext4_get_branch() would do. We are calling it after + * we had read the existing part of chain and partial points to the last + * triple of that (one with zero ->key). Upon the exit we have the same + * picture as after the successful ext4_get_block(), except that in one + * place chain is disconnected - *branch->p is still zero (we did not + * set the last link), but branch->key contains the number that should + * be placed into *branch->p to fill that gap. + * + * If allocation fails we free all blocks we've allocated (and forget + * their buffer_heads) and return the error value the from failed + * ext4_alloc_block() (normally -ENOSPC). 
Otherwise we set the chain + * as described above and return 0. + */ +static int ext4_alloc_branch(handle_t *handle, struct inode *inode, + ext4_lblk_t iblock, int indirect_blks, + int *blks, ext4_fsblk_t goal, + ext4_lblk_t *offsets, Indirect *branch) +{ + int blocksize = inode->i_sb->s_blocksize; + int i, n = 0; + int err = 0; + struct buffer_head *bh; + int num; + ext4_fsblk_t new_blocks[4]; + ext4_fsblk_t current_block; + + num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks, + *blks, new_blocks, &err); + if (err) + return err; + + branch[0].key = cpu_to_le32(new_blocks[0]); + /* + * metadata blocks and data blocks are allocated. + */ + for (n = 1; n <= indirect_blks; n++) { + /* + * Get buffer_head for parent block, zero it out + * and set the pointer to new one, then send + * parent to disk. + */ + bh = sb_getblk(inode->i_sb, new_blocks[n-1]); + if (unlikely(!bh)) { + err = -EIO; + goto failed; + } + + branch[n].bh = bh; + lock_buffer(bh); + BUFFER_TRACE(bh, "call get_create_access"); + err = ext4_journal_get_create_access(handle, bh); + if (err) { + /* Don't brelse(bh) here; it's done in + * ext4_journal_forget() below */ + unlock_buffer(bh); + goto failed; + } + + memset(bh->b_data, 0, blocksize); + branch[n].p = (__le32 *) bh->b_data + offsets[n]; + branch[n].key = cpu_to_le32(new_blocks[n]); + *branch[n].p = branch[n].key; + if (n == indirect_blks) { + current_block = new_blocks[n]; + /* + * End of chain, update the last new metablock of + * the chain to point to the new allocated + * data blocks numbers + */ + for (i = 1; i < num; i++) + *(branch[n].p + i) = cpu_to_le32(++current_block); + } + BUFFER_TRACE(bh, "marking uptodate"); + set_buffer_uptodate(bh); + unlock_buffer(bh); + + BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); + err = ext4_handle_dirty_metadata(handle, inode, bh); + if (err) + goto failed; + } + *blks = num; + return err; +failed: + /* Allocation failed, free what we already allocated */ + ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0); + for (i = 1; i <= n ; i++) { + /* + * branch[i].bh is newly allocated, so there is no + * need to revoke the block, which is why we don't + * need to set EXT4_FREE_BLOCKS_METADATA. + */ + ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, + EXT4_FREE_BLOCKS_FORGET); + } + for (i = n+1; i < indirect_blks; i++) + ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0); + + ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0); + + return err; +} + +/** + * ext4_splice_branch - splice the allocated branch onto inode. + * @handle: handle for this transaction + * @inode: owner + * @block: (logical) number of block we are adding + * @chain: chain of indirect blocks (with a missing link - see + * ext4_alloc_branch) + * @where: location of missing link + * @num: number of indirect blocks we are adding + * @blks: number of direct blocks we are adding + * + * This function fills the missing link and does all housekeeping needed in + * inode (->i_blocks, etc.). In case of success we end up with the full + * chain to new block and return 0. + */ +static int ext4_splice_branch(handle_t *handle, struct inode *inode, + ext4_lblk_t block, Indirect *where, int num, + int blks) +{ + int i; + int err = 0; + ext4_fsblk_t current_block; + + /* + * If we're splicing into a [td]indirect block (as opposed to the + * inode) then we need to get write access to the [td]indirect block + * before the splice. 
+ */ + if (where->bh) { + BUFFER_TRACE(where->bh, "get_write_access"); + err = ext4_journal_get_write_access(handle, where->bh); + if (err) + goto err_out; + } + /* That's it */ + + *where->p = where->key; + + /* + * Update the host buffer_head or inode to point to more just allocated + * direct blocks blocks + */ + if (num == 0 && blks > 1) { + current_block = le32_to_cpu(where->key) + 1; + for (i = 1; i < blks; i++) + *(where->p + i) = cpu_to_le32(current_block++); + } + + /* We are done with atomic stuff, now do the rest of housekeeping */ + /* had we spliced it onto indirect block? */ + if (where->bh) { + /* + * If we spliced it onto an indirect block, we haven't + * altered the inode. Note however that if it is being spliced + * onto an indirect block at the very end of the file (the + * file is growing) then we *will* alter the inode to reflect + * the new i_size. But that is not done here - it is done in + * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. + */ + jbd_debug(5, "splicing indirect only\n"); + BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); + err = ext4_handle_dirty_metadata(handle, inode, where->bh); + if (err) + goto err_out; + } else { + /* + * OK, we spliced it into the inode itself on a direct block. + */ + ext4_mark_inode_dirty(handle, inode); + jbd_debug(5, "splicing direct\n"); + } + return err; + +err_out: + for (i = 1; i <= num; i++) { + /* + * branch[i].bh is newly allocated, so there is no + * need to revoke the block, which is why we don't + * need to set EXT4_FREE_BLOCKS_METADATA. + */ + ext4_free_blocks(handle, inode, where[i].bh, 0, 1, + EXT4_FREE_BLOCKS_FORGET); + } + ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key), + blks, 0); + + return err; +} + +/* + * The ext4_ind_map_blocks() function handles non-extents inodes + * (i.e., using the traditional indirect/double-indirect i_blocks + * scheme) for ext4_map_blocks(). + * + * Allocation strategy is simple: if we have to allocate something, we will + * have to go the whole way to leaf. So let's do it before attaching anything + * to tree, set linkage between the newborn blocks, write them if sync is + * required, recheck the path, free and repeat if check fails, otherwise + * set the last missing link (that will protect us from any truncate-generated + * removals - all blocks on the path are immune now) and possibly force the + * write on the parent block. + * That has a nice additional property: no special recovery from the failed + * allocations is needed - we simply release blocks and do not touch anything + * reachable from inode. + * + * `handle' can be NULL if create == 0. + * + * return > 0, # of blocks mapped or allocated. + * return = 0, if plain lookup failed. + * return < 0, error case. + * + * The ext4_ind_get_blocks() function should be called with + * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem + * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or + * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system + * blocks. 
+ */ +int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, + struct ext4_map_blocks *map, + int flags) +{ + int err = -EIO; + ext4_lblk_t offsets[4]; + Indirect chain[4]; + Indirect *partial; + ext4_fsblk_t goal; + int indirect_blks; + int blocks_to_boundary = 0; + int depth; + int count = 0; + ext4_fsblk_t first_block = 0; + + trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); + J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))); + J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); + depth = ext4_block_to_path(inode, map->m_lblk, offsets, + &blocks_to_boundary); + + if (depth == 0) + goto out; + + partial = ext4_get_branch(inode, depth, offsets, chain, &err); + + /* Simplest case - block found, no allocation needed */ + if (!partial) { + first_block = le32_to_cpu(chain[depth - 1].key); + count++; + /*map more blocks*/ + while (count < map->m_len && count <= blocks_to_boundary) { + ext4_fsblk_t blk; + + blk = le32_to_cpu(*(chain[depth-1].p + count)); + + if (blk == first_block + count) + count++; + else + break; + } + goto got_it; + } + + /* Next simple case - plain lookup or failed read of indirect block */ + if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO) + goto cleanup; + + /* + * Okay, we need to do block allocation. + */ + goal = ext4_find_goal(inode, map->m_lblk, partial); + + /* the number of blocks need to allocate for [d,t]indirect blocks */ + indirect_blks = (chain + depth) - partial - 1; + + /* + * Next look up the indirect map to count the totoal number of + * direct blocks to allocate for this branch. + */ + count = ext4_blks_to_allocate(partial, indirect_blks, + map->m_len, blocks_to_boundary); + /* + * Block out ext4_truncate while we alter the tree + */ + err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks, + &count, goal, + offsets + (partial - chain), partial); + + /* + * The ext4_splice_branch call will free and forget any buffers + * on the new chain if there is a failure, but that risks using + * up transaction credits, especially for bitmaps where the + * credits cannot be returned. Can we handle this somehow? We + * may need to return -EAGAIN upwards in the worst case. --sct + */ + if (!err) + err = ext4_splice_branch(handle, inode, map->m_lblk, + partial, indirect_blks, count); + if (err) + goto cleanup; + + map->m_flags |= EXT4_MAP_NEW; + + ext4_update_inode_fsync_trans(handle, inode, 1); +got_it: + map->m_flags |= EXT4_MAP_MAPPED; + map->m_pblk = le32_to_cpu(chain[depth-1].key); + map->m_len = count; + if (count > blocks_to_boundary) + map->m_flags |= EXT4_MAP_BOUNDARY; + err = count; + /* Clean up and exit */ + partial = chain + depth - 1; /* the whole chain */ +cleanup: + while (partial > chain) { + BUFFER_TRACE(partial->bh, "call brelse"); + brelse(partial->bh); + partial--; + } +out: + trace_ext4_ind_map_blocks_exit(inode, map->m_lblk, + map->m_pblk, map->m_len, err); + return err; +} + +/* + * O_DIRECT for ext3 (or indirect map) based files + * + * If the O_DIRECT write will extend the file then add this inode to the + * orphan list. So recovery will truncate it back to the original size + * if the machine crashes during the write. + * + * If the O_DIRECT write is intantiating holes inside i_size and the machine + * crashes then stale disk data _may_ be exposed inside the file. But current + * VFS code falls back into buffered path in that case so we are safe. 
+ */ +ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, + const struct iovec *iov, loff_t offset, + unsigned long nr_segs) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = file->f_mapping->host; + struct ext4_inode_info *ei = EXT4_I(inode); + handle_t *handle; + ssize_t ret; + int orphan = 0; + size_t count = iov_length(iov, nr_segs); + int retries = 0; + + if (rw == WRITE) { + loff_t final_size = offset + count; + + if (final_size > inode->i_size) { + /* Credits for sb + inode write */ + handle = ext4_journal_start(inode, 2); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out; + } + ret = ext4_orphan_add(handle, inode); + if (ret) { + ext4_journal_stop(handle); + goto out; + } + orphan = 1; + ei->i_disksize = inode->i_size; + ext4_journal_stop(handle); + } + } + +retry: + if (rw == READ && ext4_should_dioread_nolock(inode)) + ret = __blockdev_direct_IO(rw, iocb, inode, + inode->i_sb->s_bdev, iov, + offset, nr_segs, + ext4_get_block, NULL, NULL, 0); + else { + ret = blockdev_direct_IO(rw, iocb, inode, + inode->i_sb->s_bdev, iov, + offset, nr_segs, + ext4_get_block, NULL); + + if (unlikely((rw & WRITE) && ret < 0)) { + loff_t isize = i_size_read(inode); + loff_t end = offset + iov_length(iov, nr_segs); + + if (end > isize) + ext4_truncate_failed_write(inode); + } + } + if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) + goto retry; + + if (orphan) { + int err; + + /* Credits for sb + inode write */ + handle = ext4_journal_start(inode, 2); + if (IS_ERR(handle)) { + /* This is really bad luck. We've written the data + * but cannot extend i_size. Bail out and pretend + * the write failed... */ + ret = PTR_ERR(handle); + if (inode->i_nlink) + ext4_orphan_del(NULL, inode); + + goto out; + } + if (inode->i_nlink) + ext4_orphan_del(handle, inode); + if (ret > 0) { + loff_t end = offset + ret; + if (end > inode->i_size) { + ei->i_disksize = end; + i_size_write(inode, end); + /* + * We're going to return a positive `ret' + * here due to non-zero-length I/O, so there's + * no way of reporting error returns from + * ext4_mark_inode_dirty() to userspace. So + * ignore it. 
+ */ + ext4_mark_inode_dirty(handle, inode); + } + } + err = ext4_journal_stop(handle); + if (ret == 0) + ret = err; + } +out: + return ret; +} + +/* + * Calculate the number of metadata blocks need to reserve + * to allocate a new block at @lblocks for non extent file based file + */ +int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1); + int blk_bits; + + if (lblock < EXT4_NDIR_BLOCKS) + return 0; + + lblock -= EXT4_NDIR_BLOCKS; + + if (ei->i_da_metadata_calc_len && + (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) { + ei->i_da_metadata_calc_len++; + return 0; + } + ei->i_da_metadata_calc_last_lblock = lblock & dind_mask; + ei->i_da_metadata_calc_len = 1; + blk_bits = order_base_2(lblock); + return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1; +} + +int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk) +{ + int indirects; + + /* if nrblocks are contiguous */ + if (chunk) { + /* + * With N contiguous data blocks, we need at most + * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, + * 2 dindirect blocks, and 1 tindirect block + */ + return DIV_ROUND_UP(nrblocks, + EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; + } + /* + * if nrblocks are not contiguous, worse case, each block touch + * a indirect block, and each indirect block touch a double indirect + * block, plus a triple indirect block + */ + indirects = nrblocks * 2 + 1; + return indirects; +} + +/* + * Truncate transactions can be complex and absolutely huge. So we need to + * be able to restart the transaction at a conventient checkpoint to make + * sure we don't overflow the journal. + * + * start_transaction gets us a new handle for a truncate transaction, + * and extend_transaction tries to extend the existing one a bit. If + * extend fails, we need to propagate the failure up and restart the + * transaction in the top-level truncate loop. --sct + */ +static handle_t *start_transaction(struct inode *inode) +{ + handle_t *result; + + result = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)); + if (!IS_ERR(result)) + return result; + + ext4_std_error(inode->i_sb, PTR_ERR(result)); + return result; +} + +/* + * Try to extend this transaction for the purposes of truncation. + * + * Returns 0 if we managed to create more room. If we can't create more + * room, and the transaction must be restarted we return 1. + */ +static int try_to_extend_transaction(handle_t *handle, struct inode *inode) +{ + if (!ext4_handle_valid(handle)) + return 0; + if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1)) + return 0; + if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode))) + return 0; + return 1; +} + +/* + * Probably it should be a library function... search for first non-zero word + * or memcmp with zero_page, whatever is better for particular architecture. + * Linus? + */ +static inline int all_zeroes(__le32 *p, __le32 *q) +{ + while (p < q) + if (*p++) + return 0; + return 1; +} + +/** + * ext4_find_shared - find the indirect blocks for partial truncation. + * @inode: inode in question + * @depth: depth of the affected branch + * @offsets: offsets of pointers in that branch (see ext4_block_to_path) + * @chain: place to store the pointers to partial indirect blocks + * @top: place to the (detached) top of branch + * + * This is a helper function used by ext4_truncate(). 
+ * + * When we do truncate() we may have to clean the ends of several + * indirect blocks but leave the blocks themselves alive. Block is + * partially truncated if some data below the new i_size is referred + * from it (and it is on the path to the first completely truncated + * data block, indeed). We have to free the top of that path along + * with everything to the right of the path. Since no allocation + * past the truncation point is possible until ext4_truncate() + * finishes, we may safely do the latter, but top of branch may + * require special attention - pageout below the truncation point + * might try to populate it. + * + * We atomically detach the top of branch from the tree, store the + * block number of its root in *@top, pointers to buffer_heads of + * partially truncated blocks - in @chain[].bh and pointers to + * their last elements that should not be removed - in + * @chain[].p. Return value is the pointer to last filled element + * of @chain. + * + * The work left to caller to do the actual freeing of subtrees: + * a) free the subtree starting from *@top + * b) free the subtrees whose roots are stored in + * (@chain[i].p+1 .. end of @chain[i].bh->b_data) + * c) free the subtrees growing from the inode past the @chain[0]. + * (no partially truncated stuff there). */ + +static Indirect *ext4_find_shared(struct inode *inode, int depth, + ext4_lblk_t offsets[4], Indirect chain[4], + __le32 *top) +{ + Indirect *partial, *p; + int k, err; + + *top = 0; + /* Make k index the deepest non-null offset + 1 */ + for (k = depth; k > 1 && !offsets[k-1]; k--) + ; + partial = ext4_get_branch(inode, k, offsets, chain, &err); + /* Writer: pointers */ + if (!partial) + partial = chain + k-1; + /* + * If the branch acquired continuation since we've looked at it - + * fine, it should all survive and (new) top doesn't belong to us. + */ + if (!partial->key && *partial->p) + /* Writer: end */ + goto no_top; + for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--) + ; + /* + * OK, we've found the last block that must survive. The rest of our + * branch should be detached before unlocking. However, if that rest + * of branch is all ours and does not grow immediately from the inode + * it's easier to cheat and just decrement partial->p. + */ + if (p == chain + k - 1 && p > chain) { + p->p--; + } else { + *top = *p->p; + /* Nope, don't do this in ext4. Must leave the tree intact */ +#if 0 + *p->p = 0; +#endif + } + /* Writer: end */ + + while (partial > p) { + brelse(partial->bh); + partial--; + } +no_top: + return partial; +} + +/* + * Zero a number of block pointers in either an inode or an indirect block. + * If we restart the transaction we must again get write access to the + * indirect block for further modification. + * + * We release `count' blocks on disk, but (last - first) may be greater + * than `count' because there can be holes in there. + * + * Return 0 on success, 1 on invalid block range + * and < 0 on fatal error. 
+ */ +static int ext4_clear_blocks(handle_t *handle, struct inode *inode, + struct buffer_head *bh, + ext4_fsblk_t block_to_free, + unsigned long count, __le32 *first, + __le32 *last) +{ + __le32 *p; + int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED; + int err; + + if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) + flags |= EXT4_FREE_BLOCKS_METADATA; + + if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, + count)) { + EXT4_ERROR_INODE(inode, "attempt to clear invalid " + "blocks %llu len %lu", + (unsigned long long) block_to_free, count); + return 1; + } + + if (try_to_extend_transaction(handle, inode)) { + if (bh) { + BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); + err = ext4_handle_dirty_metadata(handle, inode, bh); + if (unlikely(err)) + goto out_err; + } + err = ext4_mark_inode_dirty(handle, inode); + if (unlikely(err)) + goto out_err; + err = ext4_truncate_restart_trans(handle, inode, + ext4_blocks_for_truncate(inode)); + if (unlikely(err)) + goto out_err; + if (bh) { + BUFFER_TRACE(bh, "retaking write access"); + err = ext4_journal_get_write_access(handle, bh); + if (unlikely(err)) + goto out_err; + } + } + + for (p = first; p < last; p++) + *p = 0; + + ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags); + return 0; +out_err: + ext4_std_error(inode->i_sb, err); + return err; +} + +/** + * ext4_free_data - free a list of data blocks + * @handle: handle for this transaction + * @inode: inode we are dealing with + * @this_bh: indirect buffer_head which contains *@first and *@last + * @first: array of block numbers + * @last: points immediately past the end of array + * + * We are freeing all blocks referred from that array (numbers are stored as + * little-endian 32-bit) and updating @inode->i_blocks appropriately. + * + * We accumulate contiguous runs of blocks to free. Conveniently, if these + * blocks are contiguous then releasing them at one time will only affect one + * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't + * actually use a lot of journal space. + * + * @this_bh will be %NULL if @first and @last point into the inode's direct + * block pointers. + */ +static void ext4_free_data(handle_t *handle, struct inode *inode, + struct buffer_head *this_bh, + __le32 *first, __le32 *last) +{ + ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ + unsigned long count = 0; /* Number of blocks in the run */ + __le32 *block_to_free_p = NULL; /* Pointer into inode/ind + corresponding to + block_to_free */ + ext4_fsblk_t nr; /* Current block # */ + __le32 *p; /* Pointer into inode/ind + for current block */ + int err = 0; + + if (this_bh) { /* For indirect block */ + BUFFER_TRACE(this_bh, "get_write_access"); + err = ext4_journal_get_write_access(handle, this_bh); + /* Important: if we can't update the indirect pointers + * to the blocks, we can't free them. 
*/ + if (err) + return; + } + + for (p = first; p < last; p++) { + nr = le32_to_cpu(*p); + if (nr) { + /* accumulate blocks to free if they're contiguous */ + if (count == 0) { + block_to_free = nr; + block_to_free_p = p; + count = 1; + } else if (nr == block_to_free + count) { + count++; + } else { + err = ext4_clear_blocks(handle, inode, this_bh, + block_to_free, count, + block_to_free_p, p); + if (err) + break; + block_to_free = nr; + block_to_free_p = p; + count = 1; + } + } + } + + if (!err && count > 0) + err = ext4_clear_blocks(handle, inode, this_bh, block_to_free, + count, block_to_free_p, p); + if (err < 0) + /* fatal error */ + return; + + if (this_bh) { + BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); + + /* + * The buffer head should have an attached journal head at this + * point. However, if the data is corrupted and an indirect + * block pointed to itself, it would have been detached when + * the block was cleared. Check for this instead of OOPSing. + */ + if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) + ext4_handle_dirty_metadata(handle, inode, this_bh); + else + EXT4_ERROR_INODE(inode, + "circular indirect block detected at " + "block %llu", + (unsigned long long) this_bh->b_blocknr); + } +} + +/** + * ext4_free_branches - free an array of branches + * @handle: JBD handle for this transaction + * @inode: inode we are dealing with + * @parent_bh: the buffer_head which contains *@first and *@last + * @first: array of block numbers + * @last: pointer immediately past the end of array + * @depth: depth of the branches to free + * + * We are freeing all blocks referred from these branches (numbers are + * stored as little-endian 32-bit) and updating @inode->i_blocks + * appropriately. + */ +static void ext4_free_branches(handle_t *handle, struct inode *inode, + struct buffer_head *parent_bh, + __le32 *first, __le32 *last, int depth) +{ + ext4_fsblk_t nr; + __le32 *p; + + if (ext4_handle_is_aborted(handle)) + return; + + if (depth--) { + struct buffer_head *bh; + int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); + p = last; + while (--p >= first) { + nr = le32_to_cpu(*p); + if (!nr) + continue; /* A hole */ + + if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), + nr, 1)) { + EXT4_ERROR_INODE(inode, + "invalid indirect mapped " + "block %lu (level %d)", + (unsigned long) nr, depth); + break; + } + + /* Go read the buffer for the next level down */ + bh = sb_bread(inode->i_sb, nr); + + /* + * A read failure? Report error and clear slot + * (should be rare). + */ + if (!bh) { + EXT4_ERROR_INODE_BLOCK(inode, nr, + "Read failure"); + continue; + } + + /* This zaps the entire block. Bottom up. */ + BUFFER_TRACE(bh, "free child branches"); + ext4_free_branches(handle, inode, bh, + (__le32 *) bh->b_data, + (__le32 *) bh->b_data + addr_per_block, + depth); + brelse(bh); + + /* + * Everything below this this pointer has been + * released. Now let this top-of-subtree go. + * + * We want the freeing of this indirect block to be + * atomic in the journal with the updating of the + * bitmap block which owns it. So make some room in + * the journal. + * + * We zero the parent pointer *after* freeing its + * pointee in the bitmaps, so if extend_transaction() + * for some reason fails to put the bitmap changes and + * the release into the same transaction, recovery + * will merely complain about releasing a free block, + * rather than leaking blocks. 
+ */ + if (ext4_handle_is_aborted(handle)) + return; + if (try_to_extend_transaction(handle, inode)) { + ext4_mark_inode_dirty(handle, inode); + ext4_truncate_restart_trans(handle, inode, + ext4_blocks_for_truncate(inode)); + } + + /* + * The forget flag here is critical because if + * we are journaling (and not doing data + * journaling), we have to make sure a revoke + * record is written to prevent the journal + * replay from overwriting the (former) + * indirect block if it gets reallocated as a + * data block. This must happen in the same + * transaction where the data blocks are + * actually freed. + */ + ext4_free_blocks(handle, inode, NULL, nr, 1, + EXT4_FREE_BLOCKS_METADATA| + EXT4_FREE_BLOCKS_FORGET); + + if (parent_bh) { + /* + * The block which we have just freed is + * pointed to by an indirect block: journal it + */ + BUFFER_TRACE(parent_bh, "get_write_access"); + if (!ext4_journal_get_write_access(handle, + parent_bh)){ + *p = 0; + BUFFER_TRACE(parent_bh, + "call ext4_handle_dirty_metadata"); + ext4_handle_dirty_metadata(handle, + inode, + parent_bh); + } + } + } + } else { + /* We have reached the bottom of the tree. */ + BUFFER_TRACE(parent_bh, "free data blocks"); + ext4_free_data(handle, inode, parent_bh, first, last); + } +} + +void ext4_ind_truncate(struct inode *inode) +{ + handle_t *handle; + struct ext4_inode_info *ei = EXT4_I(inode); + __le32 *i_data = ei->i_data; + int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); + struct address_space *mapping = inode->i_mapping; + ext4_lblk_t offsets[4]; + Indirect chain[4]; + Indirect *partial; + __le32 nr = 0; + int n = 0; + ext4_lblk_t last_block, max_block; + unsigned blocksize = inode->i_sb->s_blocksize; + + handle = start_transaction(inode); + if (IS_ERR(handle)) + return; /* AKPM: return what? */ + + last_block = (inode->i_size + blocksize-1) + >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); + max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1) + >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); + + if (inode->i_size & (blocksize - 1)) + if (ext4_block_truncate_page(handle, mapping, inode->i_size)) + goto out_stop; + + if (last_block != max_block) { + n = ext4_block_to_path(inode, last_block, offsets, NULL); + if (n == 0) + goto out_stop; /* error */ + } + + /* + * OK. This truncate is going to happen. We add the inode to the + * orphan list, so that if this truncate spans multiple transactions, + * and we crash, we will resume the truncate when the filesystem + * recovers. It also marks the inode dirty, to catch the new size. + * + * Implication: the file must always be in a sane, consistent + * truncatable state while each transaction commits. + */ + if (ext4_orphan_add(handle, inode)) + goto out_stop; + + /* + * From here we block out all ext4_get_block() callers who want to + * modify the block allocation tree. + */ + down_write(&ei->i_data_sem); + + ext4_discard_preallocations(inode); + + /* + * The orphan list entry will now protect us from any crash which + * occurs before the truncate completes, so it is now safe to propagate + * the new, shorter inode size (held for now in i_size) into the + * on-disk inode. We do this via i_disksize, which is the value which + * ext4 *really* writes onto the disk inode. + */ + ei->i_disksize = inode->i_size; + + if (last_block == max_block) { + /* + * It is unnecessary to free any data blocks if last_block is + * equal to the indirect block limit. 
+ */ + goto out_unlock; + } else if (n == 1) { /* direct blocks */ + ext4_free_data(handle, inode, NULL, i_data+offsets[0], + i_data + EXT4_NDIR_BLOCKS); + goto do_indirects; + } + + partial = ext4_find_shared(inode, n, offsets, chain, &nr); + /* Kill the top of shared branch (not detached) */ + if (nr) { + if (partial == chain) { + /* Shared branch grows from the inode */ + ext4_free_branches(handle, inode, NULL, + &nr, &nr+1, (chain+n-1) - partial); + *partial->p = 0; + /* + * We mark the inode dirty prior to restart, + * and prior to stop. No need for it here. + */ + } else { + /* Shared branch grows from an indirect block */ + BUFFER_TRACE(partial->bh, "get_write_access"); + ext4_free_branches(handle, inode, partial->bh, + partial->p, + partial->p+1, (chain+n-1) - partial); + } + } + /* Clear the ends of indirect blocks on the shared branch */ + while (partial > chain) { + ext4_free_branches(handle, inode, partial->bh, partial->p + 1, + (__le32*)partial->bh->b_data+addr_per_block, + (chain+n-1) - partial); + BUFFER_TRACE(partial->bh, "call brelse"); + brelse(partial->bh); + partial--; + } +do_indirects: + /* Kill the remaining (whole) subtrees */ + switch (offsets[0]) { + default: + nr = i_data[EXT4_IND_BLOCK]; + if (nr) { + ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); + i_data[EXT4_IND_BLOCK] = 0; + } + case EXT4_IND_BLOCK: + nr = i_data[EXT4_DIND_BLOCK]; + if (nr) { + ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); + i_data[EXT4_DIND_BLOCK] = 0; + } + case EXT4_DIND_BLOCK: + nr = i_data[EXT4_TIND_BLOCK]; + if (nr) { + ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); + i_data[EXT4_TIND_BLOCK] = 0; + } + case EXT4_TIND_BLOCK: + ; + } + +out_unlock: + up_write(&ei->i_data_sem); + inode->i_mtime = inode->i_ctime = ext4_current_time(inode); + ext4_mark_inode_dirty(handle, inode); + + /* + * In a multi-transaction truncate, we only make the final transaction + * synchronous + */ + if (IS_SYNC(inode)) + ext4_handle_sync(handle); +out_stop: + /* + * If this was a simple ftruncate(), and the file will remain alive + * then we need to clear up the orphan record which we created above. + * However, if this was a real unlink then we were called by + * ext4_delete_inode(), and we allow that function to clean up the + * orphan info for us. + */ + if (inode->i_nlink) + ext4_orphan_del(handle, inode); + + ext4_journal_stop(handle); + trace_ext4_truncate_exit(inode); +} + diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9b82ac7b0f55..de50b16a8f67 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -12,10 +12,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * - * Goal-directed block allocation by Stephen Tweedie - * (sct@redhat.com), 1993, 1998 - * Big-endian to little-endian byte-swapping/bitmaps by - * David S. Miller (davem@caip.rutgers.edu), 1995 * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) * @@ -89,45 +85,6 @@ static int ext4_inode_is_fast_symlink(struct inode *inode) return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); } -/* - * Truncate transactions can be complex and absolutely huge. So we need to - * be able to restart the transaction at a conventient checkpoint to make - * sure we don't overflow the journal. - * - * start_transaction gets us a new handle for a truncate transaction, - * and extend_transaction tries to extend the existing one a bit. If - * extend fails, we need to propagate the failure up and restart the - * transaction in the top-level truncate loop. 
--sct - */ -static handle_t *start_transaction(struct inode *inode) -{ - handle_t *result; - - result = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)); - if (!IS_ERR(result)) - return result; - - ext4_std_error(inode->i_sb, PTR_ERR(result)); - return result; -} - -/* - * Try to extend this transaction for the purposes of truncation. - * - * Returns 0 if we managed to create more room. If we can't create more - * room, and the transaction must be restarted we return 1. - */ -static int try_to_extend_transaction(handle_t *handle, struct inode *inode) -{ - if (!ext4_handle_valid(handle)) - return 0; - if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1)) - return 0; - if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode))) - return 0; - return 1; -} - /* * Restart the transaction associated with *handle. This does a commit, * so before we call here everything must be consistently dirtied against @@ -251,760 +208,6 @@ no_delete: ext4_clear_inode(inode); /* We must guarantee clearing of inode... */ } -typedef struct { - __le32 *p; - __le32 key; - struct buffer_head *bh; -} Indirect; - -static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) -{ - p->key = *(p->p = v); - p->bh = bh; -} - -/** - * ext4_block_to_path - parse the block number into array of offsets - * @inode: inode in question (we are only interested in its superblock) - * @i_block: block number to be parsed - * @offsets: array to store the offsets in - * @boundary: set this non-zero if the referred-to block is likely to be - * followed (on disk) by an indirect block. - * - * To store the locations of file's data ext4 uses a data structure common - * for UNIX filesystems - tree of pointers anchored in the inode, with - * data blocks at leaves and indirect blocks in intermediate nodes. - * This function translates the block number into path in that tree - - * return value is the path length and @offsets[n] is the offset of - * pointer to (n+1)th node in the nth one. If @block is out of range - * (negative or too large) warning is printed and zero returned. - * - * Note: function doesn't find node addresses, so no IO is needed. All - * we need to know is the capacity of indirect blocks (taken from the - * inode->i_sb). - */ - -/* - * Portability note: the last comparison (check that we fit into triple - * indirect block) is spelled differently, because otherwise on an - * architecture with 32-bit longs and 8Kb pages we might get into trouble - * if our filesystem had 8Kb blocks. We might use long long, but that would - * kill us on x86. Oh, well, at least the sign propagation does not matter - - * i_block would have to be negative in the very beginning, so we would not - * get there at all. 
- */ - -static int ext4_block_to_path(struct inode *inode, - ext4_lblk_t i_block, - ext4_lblk_t offsets[4], int *boundary) -{ - int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb); - int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb); - const long direct_blocks = EXT4_NDIR_BLOCKS, - indirect_blocks = ptrs, - double_blocks = (1 << (ptrs_bits * 2)); - int n = 0; - int final = 0; - - if (i_block < direct_blocks) { - offsets[n++] = i_block; - final = direct_blocks; - } else if ((i_block -= direct_blocks) < indirect_blocks) { - offsets[n++] = EXT4_IND_BLOCK; - offsets[n++] = i_block; - final = ptrs; - } else if ((i_block -= indirect_blocks) < double_blocks) { - offsets[n++] = EXT4_DIND_BLOCK; - offsets[n++] = i_block >> ptrs_bits; - offsets[n++] = i_block & (ptrs - 1); - final = ptrs; - } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { - offsets[n++] = EXT4_TIND_BLOCK; - offsets[n++] = i_block >> (ptrs_bits * 2); - offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); - offsets[n++] = i_block & (ptrs - 1); - final = ptrs; - } else { - ext4_warning(inode->i_sb, "block %lu > max in inode %lu", - i_block + direct_blocks + - indirect_blocks + double_blocks, inode->i_ino); - } - if (boundary) - *boundary = final - 1 - (i_block & (ptrs - 1)); - return n; -} - -/** - * ext4_get_branch - read the chain of indirect blocks leading to data - * @inode: inode in question - * @depth: depth of the chain (1 - direct pointer, etc.) - * @offsets: offsets of pointers in inode/indirect blocks - * @chain: place to store the result - * @err: here we store the error value - * - * Function fills the array of triples and returns %NULL - * if everything went OK or the pointer to the last filled triple - * (incomplete one) otherwise. Upon the return chain[i].key contains - * the number of (i+1)-th block in the chain (as it is stored in memory, - * i.e. little-endian 32-bit), chain[i].p contains the address of that - * number (it points into struct inode for i==0 and into the bh->b_data - * for i>0) and chain[i].bh points to the buffer_head of i-th indirect - * block for i>0 and NULL for i==0. In other words, it holds the block - * numbers of the chain, addresses they were taken from (and where we can - * verify that chain did not change) and buffer_heads hosting these - * numbers. - * - * Function stops when it stumbles upon zero pointer (absent block) - * (pointer to last triple returned, *@err == 0) - * or when it gets an IO error reading an indirect block - * (ditto, *@err == -EIO) - * or when it reads all @depth-1 indirect blocks successfully and finds - * the whole chain, all way to the data (returns %NULL, *err == 0). 
- * - * Need to be called with - * down_read(&EXT4_I(inode)->i_data_sem) - */ -static Indirect *ext4_get_branch(struct inode *inode, int depth, - ext4_lblk_t *offsets, - Indirect chain[4], int *err) -{ - struct super_block *sb = inode->i_sb; - Indirect *p = chain; - struct buffer_head *bh; - - *err = 0; - /* i_data is not going away, no lock needed */ - add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets); - if (!p->key) - goto no_block; - while (--depth) { - bh = sb_getblk(sb, le32_to_cpu(p->key)); - if (unlikely(!bh)) - goto failure; - - if (!bh_uptodate_or_lock(bh)) { - if (bh_submit_read(bh) < 0) { - put_bh(bh); - goto failure; - } - /* validate block references */ - if (ext4_check_indirect_blockref(inode, bh)) { - put_bh(bh); - goto failure; - } - } - - add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets); - /* Reader: end */ - if (!p->key) - goto no_block; - } - return NULL; - -failure: - *err = -EIO; -no_block: - return p; -} - -/** - * ext4_find_near - find a place for allocation with sufficient locality - * @inode: owner - * @ind: descriptor of indirect block. - * - * This function returns the preferred place for block allocation. - * It is used when heuristic for sequential allocation fails. - * Rules are: - * + if there is a block to the left of our position - allocate near it. - * + if pointer will live in indirect block - allocate near that block. - * + if pointer will live in inode - allocate in the same - * cylinder group. - * - * In the latter case we colour the starting block by the callers PID to - * prevent it from clashing with concurrent allocations for a different inode - * in the same block group. The PID is used here so that functionally related - * files will be close-by on-disk. - * - * Caller must make sure that @ind is valid and will stay that way. - */ -static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) -{ - struct ext4_inode_info *ei = EXT4_I(inode); - __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data; - __le32 *p; - ext4_fsblk_t bg_start; - ext4_fsblk_t last_block; - ext4_grpblk_t colour; - ext4_group_t block_group; - int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); - - /* Try to find previous block */ - for (p = ind->p - 1; p >= start; p--) { - if (*p) - return le32_to_cpu(*p); - } - - /* No such thing, so let's try location of indirect block */ - if (ind->bh) - return ind->bh->b_blocknr; - - /* - * It is going to be referred to from the inode itself? OK, just put it - * into the same cylinder group then. - */ - block_group = ei->i_block_group; - if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { - block_group &= ~(flex_size-1); - if (S_ISREG(inode->i_mode)) - block_group++; - } - bg_start = ext4_group_first_block_no(inode->i_sb, block_group); - last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; - - /* - * If we are doing delayed allocation, we don't need take - * colour into account. - */ - if (test_opt(inode->i_sb, DELALLOC)) - return bg_start; - - if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) - colour = (current->pid % 16) * - (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); - else - colour = (current->pid % 16) * ((last_block - bg_start) / 16); - return bg_start + colour; -} - -/** - * ext4_find_goal - find a preferred place for allocation. - * @inode: owner - * @block: block we want - * @partial: pointer to the last triple within a chain - * - * Normally this function find the preferred place for block allocation, - * returns it. 
- * Because this is only used for non-extent files, we limit the block nr - * to 32 bits. - */ -static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, - Indirect *partial) -{ - ext4_fsblk_t goal; - - /* - * XXX need to get goal block from mballoc's data structures - */ - - goal = ext4_find_near(inode, partial); - goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; - return goal; -} - -/** - * ext4_blks_to_allocate - Look up the block map and count the number - * of direct blocks need to be allocated for the given branch. - * - * @branch: chain of indirect blocks - * @k: number of blocks need for indirect blocks - * @blks: number of data blocks to be mapped. - * @blocks_to_boundary: the offset in the indirect block - * - * return the total number of blocks to be allocate, including the - * direct and indirect blocks. - */ -static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks, - int blocks_to_boundary) -{ - unsigned int count = 0; - - /* - * Simple case, [t,d]Indirect block(s) has not allocated yet - * then it's clear blocks on that path have not allocated - */ - if (k > 0) { - /* right now we don't handle cross boundary allocation */ - if (blks < blocks_to_boundary + 1) - count += blks; - else - count += blocks_to_boundary + 1; - return count; - } - - count++; - while (count < blks && count <= blocks_to_boundary && - le32_to_cpu(*(branch[0].p + count)) == 0) { - count++; - } - return count; -} - -/** - * ext4_alloc_blocks: multiple allocate blocks needed for a branch - * @handle: handle for this transaction - * @inode: inode which needs allocated blocks - * @iblock: the logical block to start allocated at - * @goal: preferred physical block of allocation - * @indirect_blks: the number of blocks need to allocate for indirect - * blocks - * @blks: number of desired blocks - * @new_blocks: on return it will store the new block numbers for - * the indirect blocks(if needed) and the first direct block, - * @err: on return it will store the error code - * - * This function will return the number of blocks allocated as - * requested by the passed-in parameters. - */ -static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, - ext4_lblk_t iblock, ext4_fsblk_t goal, - int indirect_blks, int blks, - ext4_fsblk_t new_blocks[4], int *err) -{ - struct ext4_allocation_request ar; - int target, i; - unsigned long count = 0, blk_allocated = 0; - int index = 0; - ext4_fsblk_t current_block = 0; - int ret = 0; - - /* - * Here we try to allocate the requested multiple blocks at once, - * on a best-effort basis. - * To build a branch, we should allocate blocks for - * the indirect blocks(if not allocated yet), and at least - * the first direct block of this branch. 
That's the - * minimum number of blocks need to allocate(required) - */ - /* first we try to allocate the indirect blocks */ - target = indirect_blks; - while (target > 0) { - count = target; - /* allocating blocks for indirect blocks and direct blocks */ - current_block = ext4_new_meta_blocks(handle, inode, goal, - 0, &count, err); - if (*err) - goto failed_out; - - if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) { - EXT4_ERROR_INODE(inode, - "current_block %llu + count %lu > %d!", - current_block, count, - EXT4_MAX_BLOCK_FILE_PHYS); - *err = -EIO; - goto failed_out; - } - - target -= count; - /* allocate blocks for indirect blocks */ - while (index < indirect_blks && count) { - new_blocks[index++] = current_block++; - count--; - } - if (count > 0) { - /* - * save the new block number - * for the first direct block - */ - new_blocks[index] = current_block; - printk(KERN_INFO "%s returned more blocks than " - "requested\n", __func__); - WARN_ON(1); - break; - } - } - - target = blks - count ; - blk_allocated = count; - if (!target) - goto allocated; - /* Now allocate data blocks */ - memset(&ar, 0, sizeof(ar)); - ar.inode = inode; - ar.goal = goal; - ar.len = target; - ar.logical = iblock; - if (S_ISREG(inode->i_mode)) - /* enable in-core preallocation only for regular files */ - ar.flags = EXT4_MB_HINT_DATA; - - current_block = ext4_mb_new_blocks(handle, &ar, err); - if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) { - EXT4_ERROR_INODE(inode, - "current_block %llu + ar.len %d > %d!", - current_block, ar.len, - EXT4_MAX_BLOCK_FILE_PHYS); - *err = -EIO; - goto failed_out; - } - - if (*err && (target == blks)) { - /* - * if the allocation failed and we didn't allocate - * any blocks before - */ - goto failed_out; - } - if (!*err) { - if (target == blks) { - /* - * save the new block number - * for the first direct block - */ - new_blocks[index] = current_block; - } - blk_allocated += ar.len; - } -allocated: - /* total number of blocks allocated for direct blocks */ - ret = blk_allocated; - *err = 0; - return ret; -failed_out: - for (i = 0; i < index; i++) - ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0); - return ret; -} - -/** - * ext4_alloc_branch - allocate and set up a chain of blocks. - * @handle: handle for this transaction - * @inode: owner - * @indirect_blks: number of allocated indirect blocks - * @blks: number of allocated direct blocks - * @goal: preferred place for allocation - * @offsets: offsets (in the blocks) to store the pointers to next. - * @branch: place to store the chain in. - * - * This function allocates blocks, zeroes out all but the last one, - * links them into chain and (if we are synchronous) writes them to disk. - * In other words, it prepares a branch that can be spliced onto the - * inode. It stores the information about that chain in the branch[], in - * the same format as ext4_get_branch() would do. We are calling it after - * we had read the existing part of chain and partial points to the last - * triple of that (one with zero ->key). Upon the exit we have the same - * picture as after the successful ext4_get_block(), except that in one - * place chain is disconnected - *branch->p is still zero (we did not - * set the last link), but branch->key contains the number that should - * be placed into *branch->p to fill that gap. - * - * If allocation fails we free all blocks we've allocated (and forget - * their buffer_heads) and return the error value the from failed - * ext4_alloc_block() (normally -ENOSPC). 
Otherwise we set the chain - * as described above and return 0. - */ -static int ext4_alloc_branch(handle_t *handle, struct inode *inode, - ext4_lblk_t iblock, int indirect_blks, - int *blks, ext4_fsblk_t goal, - ext4_lblk_t *offsets, Indirect *branch) -{ - int blocksize = inode->i_sb->s_blocksize; - int i, n = 0; - int err = 0; - struct buffer_head *bh; - int num; - ext4_fsblk_t new_blocks[4]; - ext4_fsblk_t current_block; - - num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks, - *blks, new_blocks, &err); - if (err) - return err; - - branch[0].key = cpu_to_le32(new_blocks[0]); - /* - * metadata blocks and data blocks are allocated. - */ - for (n = 1; n <= indirect_blks; n++) { - /* - * Get buffer_head for parent block, zero it out - * and set the pointer to new one, then send - * parent to disk. - */ - bh = sb_getblk(inode->i_sb, new_blocks[n-1]); - if (unlikely(!bh)) { - err = -EIO; - goto failed; - } - - branch[n].bh = bh; - lock_buffer(bh); - BUFFER_TRACE(bh, "call get_create_access"); - err = ext4_journal_get_create_access(handle, bh); - if (err) { - /* Don't brelse(bh) here; it's done in - * ext4_journal_forget() below */ - unlock_buffer(bh); - goto failed; - } - - memset(bh->b_data, 0, blocksize); - branch[n].p = (__le32 *) bh->b_data + offsets[n]; - branch[n].key = cpu_to_le32(new_blocks[n]); - *branch[n].p = branch[n].key; - if (n == indirect_blks) { - current_block = new_blocks[n]; - /* - * End of chain, update the last new metablock of - * the chain to point to the new allocated - * data blocks numbers - */ - for (i = 1; i < num; i++) - *(branch[n].p + i) = cpu_to_le32(++current_block); - } - BUFFER_TRACE(bh, "marking uptodate"); - set_buffer_uptodate(bh); - unlock_buffer(bh); - - BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, inode, bh); - if (err) - goto failed; - } - *blks = num; - return err; -failed: - /* Allocation failed, free what we already allocated */ - ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0); - for (i = 1; i <= n ; i++) { - /* - * branch[i].bh is newly allocated, so there is no - * need to revoke the block, which is why we don't - * need to set EXT4_FREE_BLOCKS_METADATA. - */ - ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, - EXT4_FREE_BLOCKS_FORGET); - } - for (i = n+1; i < indirect_blks; i++) - ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0); - - ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0); - - return err; -} - -/** - * ext4_splice_branch - splice the allocated branch onto inode. - * @handle: handle for this transaction - * @inode: owner - * @block: (logical) number of block we are adding - * @chain: chain of indirect blocks (with a missing link - see - * ext4_alloc_branch) - * @where: location of missing link - * @num: number of indirect blocks we are adding - * @blks: number of direct blocks we are adding - * - * This function fills the missing link and does all housekeeping needed in - * inode (->i_blocks, etc.). In case of success we end up with the full - * chain to new block and return 0. - */ -static int ext4_splice_branch(handle_t *handle, struct inode *inode, - ext4_lblk_t block, Indirect *where, int num, - int blks) -{ - int i; - int err = 0; - ext4_fsblk_t current_block; - - /* - * If we're splicing into a [td]indirect block (as opposed to the - * inode) then we need to get write access to the [td]indirect block - * before the splice. 
- */ - if (where->bh) { - BUFFER_TRACE(where->bh, "get_write_access"); - err = ext4_journal_get_write_access(handle, where->bh); - if (err) - goto err_out; - } - /* That's it */ - - *where->p = where->key; - - /* - * Update the host buffer_head or inode to point to more just allocated - * direct blocks blocks - */ - if (num == 0 && blks > 1) { - current_block = le32_to_cpu(where->key) + 1; - for (i = 1; i < blks; i++) - *(where->p + i) = cpu_to_le32(current_block++); - } - - /* We are done with atomic stuff, now do the rest of housekeeping */ - /* had we spliced it onto indirect block? */ - if (where->bh) { - /* - * If we spliced it onto an indirect block, we haven't - * altered the inode. Note however that if it is being spliced - * onto an indirect block at the very end of the file (the - * file is growing) then we *will* alter the inode to reflect - * the new i_size. But that is not done here - it is done in - * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. - */ - jbd_debug(5, "splicing indirect only\n"); - BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, inode, where->bh); - if (err) - goto err_out; - } else { - /* - * OK, we spliced it into the inode itself on a direct block. - */ - ext4_mark_inode_dirty(handle, inode); - jbd_debug(5, "splicing direct\n"); - } - return err; - -err_out: - for (i = 1; i <= num; i++) { - /* - * branch[i].bh is newly allocated, so there is no - * need to revoke the block, which is why we don't - * need to set EXT4_FREE_BLOCKS_METADATA. - */ - ext4_free_blocks(handle, inode, where[i].bh, 0, 1, - EXT4_FREE_BLOCKS_FORGET); - } - ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key), - blks, 0); - - return err; -} - -/* - * The ext4_ind_map_blocks() function handles non-extents inodes - * (i.e., using the traditional indirect/double-indirect i_blocks - * scheme) for ext4_map_blocks(). - * - * Allocation strategy is simple: if we have to allocate something, we will - * have to go the whole way to leaf. So let's do it before attaching anything - * to tree, set linkage between the newborn blocks, write them if sync is - * required, recheck the path, free and repeat if check fails, otherwise - * set the last missing link (that will protect us from any truncate-generated - * removals - all blocks on the path are immune now) and possibly force the - * write on the parent block. - * That has a nice additional property: no special recovery from the failed - * allocations is needed - we simply release blocks and do not touch anything - * reachable from inode. - * - * `handle' can be NULL if create == 0. - * - * return > 0, # of blocks mapped or allocated. - * return = 0, if plain lookup failed. - * return < 0, error case. - * - * The ext4_ind_get_blocks() function should be called with - * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem - * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or - * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system - * blocks. 
- */ -static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, - struct ext4_map_blocks *map, - int flags) -{ - int err = -EIO; - ext4_lblk_t offsets[4]; - Indirect chain[4]; - Indirect *partial; - ext4_fsblk_t goal; - int indirect_blks; - int blocks_to_boundary = 0; - int depth; - int count = 0; - ext4_fsblk_t first_block = 0; - - trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); - J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))); - J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); - depth = ext4_block_to_path(inode, map->m_lblk, offsets, - &blocks_to_boundary); - - if (depth == 0) - goto out; - - partial = ext4_get_branch(inode, depth, offsets, chain, &err); - - /* Simplest case - block found, no allocation needed */ - if (!partial) { - first_block = le32_to_cpu(chain[depth - 1].key); - count++; - /*map more blocks*/ - while (count < map->m_len && count <= blocks_to_boundary) { - ext4_fsblk_t blk; - - blk = le32_to_cpu(*(chain[depth-1].p + count)); - - if (blk == first_block + count) - count++; - else - break; - } - goto got_it; - } - - /* Next simple case - plain lookup or failed read of indirect block */ - if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO) - goto cleanup; - - /* - * Okay, we need to do block allocation. - */ - goal = ext4_find_goal(inode, map->m_lblk, partial); - - /* the number of blocks need to allocate for [d,t]indirect blocks */ - indirect_blks = (chain + depth) - partial - 1; - - /* - * Next look up the indirect map to count the totoal number of - * direct blocks to allocate for this branch. - */ - count = ext4_blks_to_allocate(partial, indirect_blks, - map->m_len, blocks_to_boundary); - /* - * Block out ext4_truncate while we alter the tree - */ - err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks, - &count, goal, - offsets + (partial - chain), partial); - - /* - * The ext4_splice_branch call will free and forget any buffers - * on the new chain if there is a failure, but that risks using - * up transaction credits, especially for bitmaps where the - * credits cannot be returned. Can we handle this somehow? We - * may need to return -EAGAIN upwards in the worst case. 
--sct - */ - if (!err) - err = ext4_splice_branch(handle, inode, map->m_lblk, - partial, indirect_blks, count); - if (err) - goto cleanup; - - map->m_flags |= EXT4_MAP_NEW; - - ext4_update_inode_fsync_trans(handle, inode, 1); -got_it: - map->m_flags |= EXT4_MAP_MAPPED; - map->m_pblk = le32_to_cpu(chain[depth-1].key); - map->m_len = count; - if (count > blocks_to_boundary) - map->m_flags |= EXT4_MAP_BOUNDARY; - err = count; - /* Clean up and exit */ - partial = chain + depth - 1; /* the whole chain */ -cleanup: - while (partial > chain) { - BUFFER_TRACE(partial->bh, "call brelse"); - brelse(partial->bh); - partial--; - } -out: - trace_ext4_ind_map_blocks_exit(inode, map->m_lblk, - map->m_pblk, map->m_len, err); - return err; -} - #ifdef CONFIG_QUOTA qsize_t *ext4_get_reserved_space(struct inode *inode) { @@ -1012,32 +215,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode) } #endif -/* - * Calculate the number of metadata blocks need to reserve - * to allocate a new block at @lblocks for non extent file based file - */ -static int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock) -{ - struct ext4_inode_info *ei = EXT4_I(inode); - sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1); - int blk_bits; - - if (lblock < EXT4_NDIR_BLOCKS) - return 0; - - lblock -= EXT4_NDIR_BLOCKS; - - if (ei->i_da_metadata_calc_len && - (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) { - ei->i_da_metadata_calc_len++; - return 0; - } - ei->i_da_metadata_calc_last_lblock = lblock & dind_mask; - ei->i_da_metadata_calc_len = 1; - blk_bits = order_base_2(lblock); - return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1; -} - /* * Calculate the number of metadata blocks need to reserve * to allocate a block located at @lblock @@ -3379,114 +2556,6 @@ static int ext4_releasepage(struct page *page, gfp_t wait) return try_to_free_buffers(page); } -/* - * O_DIRECT for ext3 (or indirect map) based files - * - * If the O_DIRECT write will extend the file then add this inode to the - * orphan list. So recovery will truncate it back to the original size - * if the machine crashes during the write. - * - * If the O_DIRECT write is intantiating holes inside i_size and the machine - * crashes then stale disk data _may_ be exposed inside the file. But current - * VFS code falls back into buffered path in that case so we are safe. 
- */ -static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, - const struct iovec *iov, loff_t offset, - unsigned long nr_segs) -{ - struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; - struct ext4_inode_info *ei = EXT4_I(inode); - handle_t *handle; - ssize_t ret; - int orphan = 0; - size_t count = iov_length(iov, nr_segs); - int retries = 0; - - if (rw == WRITE) { - loff_t final_size = offset + count; - - if (final_size > inode->i_size) { - /* Credits for sb + inode write */ - handle = ext4_journal_start(inode, 2); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - goto out; - } - ret = ext4_orphan_add(handle, inode); - if (ret) { - ext4_journal_stop(handle); - goto out; - } - orphan = 1; - ei->i_disksize = inode->i_size; - ext4_journal_stop(handle); - } - } - -retry: - if (rw == READ && ext4_should_dioread_nolock(inode)) - ret = __blockdev_direct_IO(rw, iocb, inode, - inode->i_sb->s_bdev, iov, - offset, nr_segs, - ext4_get_block, NULL, NULL, 0); - else { - ret = blockdev_direct_IO(rw, iocb, inode, - inode->i_sb->s_bdev, iov, - offset, nr_segs, - ext4_get_block, NULL); - - if (unlikely((rw & WRITE) && ret < 0)) { - loff_t isize = i_size_read(inode); - loff_t end = offset + iov_length(iov, nr_segs); - - if (end > isize) - ext4_truncate_failed_write(inode); - } - } - if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) - goto retry; - - if (orphan) { - int err; - - /* Credits for sb + inode write */ - handle = ext4_journal_start(inode, 2); - if (IS_ERR(handle)) { - /* This is really bad luck. We've written the data - * but cannot extend i_size. Bail out and pretend - * the write failed... */ - ret = PTR_ERR(handle); - if (inode->i_nlink) - ext4_orphan_del(NULL, inode); - - goto out; - } - if (inode->i_nlink) - ext4_orphan_del(handle, inode); - if (ret > 0) { - loff_t end = offset + ret; - if (end > inode->i_size) { - ei->i_disksize = end; - i_size_write(inode, end); - /* - * We're going to return a positive `ret' - * here due to non-zero-length I/O, so there's - * no way of reporting error returns from - * ext4_mark_inode_dirty() to userspace. So - * ignore it. - */ - ext4_mark_inode_dirty(handle, inode); - } - } - err = ext4_journal_stop(handle); - if (ret == 0) - ret = err; - } -out: - return ret; -} - /* * ext4_get_block used when preparing for a DIO write or buffer write. * We allocate an uinitialized extent if blocks haven't been allocated. @@ -3958,383 +3027,6 @@ unlock: return err; } -/* - * Probably it should be a library function... search for first non-zero word - * or memcmp with zero_page, whatever is better for particular architecture. - * Linus? - */ -static inline int all_zeroes(__le32 *p, __le32 *q) -{ - while (p < q) - if (*p++) - return 0; - return 1; -} - -/** - * ext4_find_shared - find the indirect blocks for partial truncation. - * @inode: inode in question - * @depth: depth of the affected branch - * @offsets: offsets of pointers in that branch (see ext4_block_to_path) - * @chain: place to store the pointers to partial indirect blocks - * @top: place to the (detached) top of branch - * - * This is a helper function used by ext4_truncate(). - * - * When we do truncate() we may have to clean the ends of several - * indirect blocks but leave the blocks themselves alive. Block is - * partially truncated if some data below the new i_size is referred - * from it (and it is on the path to the first completely truncated - * data block, indeed). 
We have to free the top of that path along - * with everything to the right of the path. Since no allocation - * past the truncation point is possible until ext4_truncate() - * finishes, we may safely do the latter, but top of branch may - * require special attention - pageout below the truncation point - * might try to populate it. - * - * We atomically detach the top of branch from the tree, store the - * block number of its root in *@top, pointers to buffer_heads of - * partially truncated blocks - in @chain[].bh and pointers to - * their last elements that should not be removed - in - * @chain[].p. Return value is the pointer to last filled element - * of @chain. - * - * The work left to caller to do the actual freeing of subtrees: - * a) free the subtree starting from *@top - * b) free the subtrees whose roots are stored in - * (@chain[i].p+1 .. end of @chain[i].bh->b_data) - * c) free the subtrees growing from the inode past the @chain[0]. - * (no partially truncated stuff there). */ - -static Indirect *ext4_find_shared(struct inode *inode, int depth, - ext4_lblk_t offsets[4], Indirect chain[4], - __le32 *top) -{ - Indirect *partial, *p; - int k, err; - - *top = 0; - /* Make k index the deepest non-null offset + 1 */ - for (k = depth; k > 1 && !offsets[k-1]; k--) - ; - partial = ext4_get_branch(inode, k, offsets, chain, &err); - /* Writer: pointers */ - if (!partial) - partial = chain + k-1; - /* - * If the branch acquired continuation since we've looked at it - - * fine, it should all survive and (new) top doesn't belong to us. - */ - if (!partial->key && *partial->p) - /* Writer: end */ - goto no_top; - for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--) - ; - /* - * OK, we've found the last block that must survive. The rest of our - * branch should be detached before unlocking. However, if that rest - * of branch is all ours and does not grow immediately from the inode - * it's easier to cheat and just decrement partial->p. - */ - if (p == chain + k - 1 && p > chain) { - p->p--; - } else { - *top = *p->p; - /* Nope, don't do this in ext4. Must leave the tree intact */ -#if 0 - *p->p = 0; -#endif - } - /* Writer: end */ - - while (partial > p) { - brelse(partial->bh); - partial--; - } -no_top: - return partial; -} - -/* - * Zero a number of block pointers in either an inode or an indirect block. - * If we restart the transaction we must again get write access to the - * indirect block for further modification. - * - * We release `count' blocks on disk, but (last - first) may be greater - * than `count' because there can be holes in there. - * - * Return 0 on success, 1 on invalid block range - * and < 0 on fatal error. 
- */ -static int ext4_clear_blocks(handle_t *handle, struct inode *inode, - struct buffer_head *bh, - ext4_fsblk_t block_to_free, - unsigned long count, __le32 *first, - __le32 *last) -{ - __le32 *p; - int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED; - int err; - - if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - flags |= EXT4_FREE_BLOCKS_METADATA; - - if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, - count)) { - EXT4_ERROR_INODE(inode, "attempt to clear invalid " - "blocks %llu len %lu", - (unsigned long long) block_to_free, count); - return 1; - } - - if (try_to_extend_transaction(handle, inode)) { - if (bh) { - BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, inode, bh); - if (unlikely(err)) - goto out_err; - } - err = ext4_mark_inode_dirty(handle, inode); - if (unlikely(err)) - goto out_err; - err = ext4_truncate_restart_trans(handle, inode, - ext4_blocks_for_truncate(inode)); - if (unlikely(err)) - goto out_err; - if (bh) { - BUFFER_TRACE(bh, "retaking write access"); - err = ext4_journal_get_write_access(handle, bh); - if (unlikely(err)) - goto out_err; - } - } - - for (p = first; p < last; p++) - *p = 0; - - ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags); - return 0; -out_err: - ext4_std_error(inode->i_sb, err); - return err; -} - -/** - * ext4_free_data - free a list of data blocks - * @handle: handle for this transaction - * @inode: inode we are dealing with - * @this_bh: indirect buffer_head which contains *@first and *@last - * @first: array of block numbers - * @last: points immediately past the end of array - * - * We are freeing all blocks referred from that array (numbers are stored as - * little-endian 32-bit) and updating @inode->i_blocks appropriately. - * - * We accumulate contiguous runs of blocks to free. Conveniently, if these - * blocks are contiguous then releasing them at one time will only affect one - * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't - * actually use a lot of journal space. - * - * @this_bh will be %NULL if @first and @last point into the inode's direct - * block pointers. - */ -static void ext4_free_data(handle_t *handle, struct inode *inode, - struct buffer_head *this_bh, - __le32 *first, __le32 *last) -{ - ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ - unsigned long count = 0; /* Number of blocks in the run */ - __le32 *block_to_free_p = NULL; /* Pointer into inode/ind - corresponding to - block_to_free */ - ext4_fsblk_t nr; /* Current block # */ - __le32 *p; /* Pointer into inode/ind - for current block */ - int err = 0; - - if (this_bh) { /* For indirect block */ - BUFFER_TRACE(this_bh, "get_write_access"); - err = ext4_journal_get_write_access(handle, this_bh); - /* Important: if we can't update the indirect pointers - * to the blocks, we can't free them. 
*/ - if (err) - return; - } - - for (p = first; p < last; p++) { - nr = le32_to_cpu(*p); - if (nr) { - /* accumulate blocks to free if they're contiguous */ - if (count == 0) { - block_to_free = nr; - block_to_free_p = p; - count = 1; - } else if (nr == block_to_free + count) { - count++; - } else { - err = ext4_clear_blocks(handle, inode, this_bh, - block_to_free, count, - block_to_free_p, p); - if (err) - break; - block_to_free = nr; - block_to_free_p = p; - count = 1; - } - } - } - - if (!err && count > 0) - err = ext4_clear_blocks(handle, inode, this_bh, block_to_free, - count, block_to_free_p, p); - if (err < 0) - /* fatal error */ - return; - - if (this_bh) { - BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); - - /* - * The buffer head should have an attached journal head at this - * point. However, if the data is corrupted and an indirect - * block pointed to itself, it would have been detached when - * the block was cleared. Check for this instead of OOPSing. - */ - if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) - ext4_handle_dirty_metadata(handle, inode, this_bh); - else - EXT4_ERROR_INODE(inode, - "circular indirect block detected at " - "block %llu", - (unsigned long long) this_bh->b_blocknr); - } -} - -/** - * ext4_free_branches - free an array of branches - * @handle: JBD handle for this transaction - * @inode: inode we are dealing with - * @parent_bh: the buffer_head which contains *@first and *@last - * @first: array of block numbers - * @last: pointer immediately past the end of array - * @depth: depth of the branches to free - * - * We are freeing all blocks referred from these branches (numbers are - * stored as little-endian 32-bit) and updating @inode->i_blocks - * appropriately. - */ -static void ext4_free_branches(handle_t *handle, struct inode *inode, - struct buffer_head *parent_bh, - __le32 *first, __le32 *last, int depth) -{ - ext4_fsblk_t nr; - __le32 *p; - - if (ext4_handle_is_aborted(handle)) - return; - - if (depth--) { - struct buffer_head *bh; - int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); - p = last; - while (--p >= first) { - nr = le32_to_cpu(*p); - if (!nr) - continue; /* A hole */ - - if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), - nr, 1)) { - EXT4_ERROR_INODE(inode, - "invalid indirect mapped " - "block %lu (level %d)", - (unsigned long) nr, depth); - break; - } - - /* Go read the buffer for the next level down */ - bh = sb_bread(inode->i_sb, nr); - - /* - * A read failure? Report error and clear slot - * (should be rare). - */ - if (!bh) { - EXT4_ERROR_INODE_BLOCK(inode, nr, - "Read failure"); - continue; - } - - /* This zaps the entire block. Bottom up. */ - BUFFER_TRACE(bh, "free child branches"); - ext4_free_branches(handle, inode, bh, - (__le32 *) bh->b_data, - (__le32 *) bh->b_data + addr_per_block, - depth); - brelse(bh); - - /* - * Everything below this this pointer has been - * released. Now let this top-of-subtree go. - * - * We want the freeing of this indirect block to be - * atomic in the journal with the updating of the - * bitmap block which owns it. So make some room in - * the journal. - * - * We zero the parent pointer *after* freeing its - * pointee in the bitmaps, so if extend_transaction() - * for some reason fails to put the bitmap changes and - * the release into the same transaction, recovery - * will merely complain about releasing a free block, - * rather than leaking blocks. 
- */ - if (ext4_handle_is_aborted(handle)) - return; - if (try_to_extend_transaction(handle, inode)) { - ext4_mark_inode_dirty(handle, inode); - ext4_truncate_restart_trans(handle, inode, - ext4_blocks_for_truncate(inode)); - } - - /* - * The forget flag here is critical because if - * we are journaling (and not doing data - * journaling), we have to make sure a revoke - * record is written to prevent the journal - * replay from overwriting the (former) - * indirect block if it gets reallocated as a - * data block. This must happen in the same - * transaction where the data blocks are - * actually freed. - */ - ext4_free_blocks(handle, inode, NULL, nr, 1, - EXT4_FREE_BLOCKS_METADATA| - EXT4_FREE_BLOCKS_FORGET); - - if (parent_bh) { - /* - * The block which we have just freed is - * pointed to by an indirect block: journal it - */ - BUFFER_TRACE(parent_bh, "get_write_access"); - if (!ext4_journal_get_write_access(handle, - parent_bh)){ - *p = 0; - BUFFER_TRACE(parent_bh, - "call ext4_handle_dirty_metadata"); - ext4_handle_dirty_metadata(handle, - inode, - parent_bh); - } - } - } - } else { - /* We have reached the bottom of the tree. */ - BUFFER_TRACE(parent_bh, "free data blocks"); - ext4_free_data(handle, inode, parent_bh, first, last); - } -} - int ext4_can_truncate(struct inode *inode) { if (S_ISREG(inode->i_mode)) @@ -4419,161 +3111,6 @@ void ext4_truncate(struct inode *inode) trace_ext4_truncate_exit(inode); } -void ext4_ind_truncate(struct inode *inode) -{ - handle_t *handle; - struct ext4_inode_info *ei = EXT4_I(inode); - __le32 *i_data = ei->i_data; - int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); - struct address_space *mapping = inode->i_mapping; - ext4_lblk_t offsets[4]; - Indirect chain[4]; - Indirect *partial; - __le32 nr = 0; - int n = 0; - ext4_lblk_t last_block, max_block; - unsigned blocksize = inode->i_sb->s_blocksize; - - handle = start_transaction(inode); - if (IS_ERR(handle)) - return; /* AKPM: return what? */ - - last_block = (inode->i_size + blocksize-1) - >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); - max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1) - >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); - - if (inode->i_size & (blocksize - 1)) - if (ext4_block_truncate_page(handle, mapping, inode->i_size)) - goto out_stop; - - if (last_block != max_block) { - n = ext4_block_to_path(inode, last_block, offsets, NULL); - if (n == 0) - goto out_stop; /* error */ - } - - /* - * OK. This truncate is going to happen. We add the inode to the - * orphan list, so that if this truncate spans multiple transactions, - * and we crash, we will resume the truncate when the filesystem - * recovers. It also marks the inode dirty, to catch the new size. - * - * Implication: the file must always be in a sane, consistent - * truncatable state while each transaction commits. - */ - if (ext4_orphan_add(handle, inode)) - goto out_stop; - - /* - * From here we block out all ext4_get_block() callers who want to - * modify the block allocation tree. - */ - down_write(&ei->i_data_sem); - - ext4_discard_preallocations(inode); - - /* - * The orphan list entry will now protect us from any crash which - * occurs before the truncate completes, so it is now safe to propagate - * the new, shorter inode size (held for now in i_size) into the - * on-disk inode. We do this via i_disksize, which is the value which - * ext4 *really* writes onto the disk inode. 
- */ - ei->i_disksize = inode->i_size; - - if (last_block == max_block) { - /* - * It is unnecessary to free any data blocks if last_block is - * equal to the indirect block limit. - */ - goto out_unlock; - } else if (n == 1) { /* direct blocks */ - ext4_free_data(handle, inode, NULL, i_data+offsets[0], - i_data + EXT4_NDIR_BLOCKS); - goto do_indirects; - } - - partial = ext4_find_shared(inode, n, offsets, chain, &nr); - /* Kill the top of shared branch (not detached) */ - if (nr) { - if (partial == chain) { - /* Shared branch grows from the inode */ - ext4_free_branches(handle, inode, NULL, - &nr, &nr+1, (chain+n-1) - partial); - *partial->p = 0; - /* - * We mark the inode dirty prior to restart, - * and prior to stop. No need for it here. - */ - } else { - /* Shared branch grows from an indirect block */ - BUFFER_TRACE(partial->bh, "get_write_access"); - ext4_free_branches(handle, inode, partial->bh, - partial->p, - partial->p+1, (chain+n-1) - partial); - } - } - /* Clear the ends of indirect blocks on the shared branch */ - while (partial > chain) { - ext4_free_branches(handle, inode, partial->bh, partial->p + 1, - (__le32*)partial->bh->b_data+addr_per_block, - (chain+n-1) - partial); - BUFFER_TRACE(partial->bh, "call brelse"); - brelse(partial->bh); - partial--; - } -do_indirects: - /* Kill the remaining (whole) subtrees */ - switch (offsets[0]) { - default: - nr = i_data[EXT4_IND_BLOCK]; - if (nr) { - ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); - i_data[EXT4_IND_BLOCK] = 0; - } - case EXT4_IND_BLOCK: - nr = i_data[EXT4_DIND_BLOCK]; - if (nr) { - ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); - i_data[EXT4_DIND_BLOCK] = 0; - } - case EXT4_DIND_BLOCK: - nr = i_data[EXT4_TIND_BLOCK]; - if (nr) { - ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); - i_data[EXT4_TIND_BLOCK] = 0; - } - case EXT4_TIND_BLOCK: - ; - } - -out_unlock: - up_write(&ei->i_data_sem); - inode->i_mtime = inode->i_ctime = ext4_current_time(inode); - ext4_mark_inode_dirty(handle, inode); - - /* - * In a multi-transaction truncate, we only make the final transaction - * synchronous - */ - if (IS_SYNC(inode)) - ext4_handle_sync(handle); -out_stop: - /* - * If this was a simple ftruncate(), and the file will remain alive - * then we need to clear up the orphan record which we created above. - * However, if this was a real unlink then we were called by - * ext4_delete_inode(), and we allow that function to clean up the - * orphan info for us. - */ - if (inode->i_nlink) - ext4_orphan_del(handle, inode); - - ext4_journal_stop(handle); - trace_ext4_truncate_exit(inode); -} - /* * ext4_get_inode_loc returns with an extra refcount against the inode's * underlying buffer_head on success. 
If 'in_mem' is true, we have all @@ -5386,29 +3923,6 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, return 0; } -static int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk) -{ - int indirects; - - /* if nrblocks are contiguous */ - if (chunk) { - /* - * With N contiguous data blocks, we need at most - * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, - * 2 dindirect blocks, and 1 tindirect block - */ - return DIV_ROUND_UP(nrblocks, - EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; - } - /* - * if nrblocks are not contiguous, worse case, each block touch - * a indirect block, and each indirect block touch a double indirect - * block, plus a triple indirect block - */ - indirects = nrblocks * 2 + 1; - return indirects; -} - static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) { if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) -- cgit v1.2.3 From 1e5216e43846b7758b2a04b3612c475608a4b708 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 27 Jun 2011 16:18:20 -0700 Subject: drm/i915: more struct_mutex locking When auditing the locking in i915_gem.c (for a prospective change which I then abandoned), I noticed two places where struct_mutex is not held across GEM object manipulations that would usually require it. Since one is in initial setup and the other in driver unload, I'm guessing the mutex is not required for either; but post a patch in case it is. Signed-off-by: Hugh Dickins Cc: Chris Wilson Cc: Keith Packard Signed-off-by: Andrew Morton Signed-off-by: Keith Packard --- drivers/gpu/drm/i915/i915_dma.c | 3 +-- drivers/gpu/drm/i915/intel_overlay.c | 5 +++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 0239e9974bf2..2b79588541e7 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2182,9 +2182,8 @@ int i915_driver_unload(struct drm_device *dev) /* Flush any outstanding unpin_work. */ flush_workqueue(dev_priv->wq); - i915_gem_free_all_phys_object(dev); - mutex_lock(&dev->struct_mutex); + i915_gem_free_all_phys_object(dev); i915_gem_cleanup_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); if (I915_HAS_FBC(dev) && i915_powersave) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index fcf6fcb0b482..cffd3edd9bb4 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1412,6 +1412,8 @@ void intel_setup_overlay(struct drm_device *dev) goto out_free; overlay->reg_bo = reg_bo; + mutex_lock(&dev->struct_mutex); + if (OVERLAY_NEEDS_PHYSICAL(dev)) { ret = i915_gem_attach_phys_object(dev, reg_bo, I915_GEM_PHYS_OVERLAY_REGS, @@ -1436,6 +1438,8 @@ void intel_setup_overlay(struct drm_device *dev) } } + mutex_unlock(&dev->struct_mutex); + /* init all values */ overlay->color_key = 0x0101fe; overlay->brightness = -19; @@ -1460,6 +1464,7 @@ out_unpin_bo: i915_gem_object_unpin(reg_bo); out_free_bo: drm_gem_object_unreference(®_bo->base); + mutex_unlock(&dev->struct_mutex); out_free: kfree(overlay); return; -- cgit v1.2.3 From 3127c6b225c6893bdfcd4db64d4316ce317fc10f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 28 Jun 2011 13:44:37 +0900 Subject: serial: sh-sci: Regtype probing doesn't need to be fatal. This was using a BUG_ON(), but it's not strictly necessary, so relax the constraints a bit. 
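The essence of the change, as a sketch (same identifiers as the hunk below; the assumption is that sci_probe_regmap() returns 0 on success and a negative errno when it cannot identify the register map):

	if (p->regtype == SCIx_PROBE_REGTYPE) {
		ret = sci_probe_regmap(p);
		if (unlikely(ret != 0))
			return ret;	/* propagate the error instead of BUG() */
	}

Probe failure then propagates to the caller of sci_init_single() rather than taking the machine down.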
Signed-off-by: Paul Mundt --- drivers/tty/serial/sh-sci.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 8e55e0a2733a..5ff6657fd141 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1825,6 +1825,7 @@ static int __devinit sci_init_single(struct platform_device *dev, struct plat_sci_port *p) { struct uart_port *port = &sci_port->port; + int ret; port->ops = &sci_uart_ops; port->iotype = UPIO_MEM; @@ -1845,8 +1846,11 @@ static int __devinit sci_init_single(struct platform_device *dev, break; } - if (p->regtype == SCIx_PROBE_REGTYPE) - BUG_ON(sci_probe_regmap(p) != 0); + if (p->regtype == SCIx_PROBE_REGTYPE) { + ret = sci_probe_regmap(p); + if (unlikely(ret != 0)) + return ret; + } if (dev) { sci_port->iclk = clk_get(&dev->dev, "sci_ick"); -- cgit v1.2.3 From 7f405f9c3117acfa8a9775c467ab433b23abc5a7 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 28 Jun 2011 13:47:40 +0900 Subject: serial: sh-sci: Add missing module description/author bits. Signed-off-by: Paul Mundt --- drivers/tty/serial/sh-sci.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 5ff6657fd141..fa99b0063158 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2211,3 +2211,5 @@ module_exit(sci_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sh-sci"); +MODULE_AUTHOR("Paul Mundt"); +MODULE_DESCRIPTION("SuperH SCI(F) serial driver"); -- cgit v1.2.3 From 23241d43eac88f63a7f0bf4d5c12bbc496651585 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 28 Jun 2011 13:55:31 +0900 Subject: serial: sh-sci: Kill off per-port enable/disable callbacks. Ultimately we want everything to be going through the clock framework and runtime pm, so kill off the per-port callbacks that enabled ports to bypass the common infrastructure. 
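In rough terms, the per-port hooks collapse into one shared helper pair that ties runtime PM to the interface and function clocks. A sketch of the helpers added below (the NULL port->dev guard from the actual patch is dropped here for brevity):

	static void sci_port_enable(struct sci_port *sci_port)
	{
		pm_runtime_get_sync(sci_port->port.dev);

		clk_enable(sci_port->iclk);
		sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
		clk_enable(sci_port->fclk);
	}

	static void sci_port_disable(struct sci_port *sci_port)
	{
		clk_disable(sci_port->fclk);
		clk_disable(sci_port->iclk);

		pm_runtime_put_sync(sci_port->port.dev);
	}

Call sites then invoke sci_port_enable(s)/sci_port_disable(s) directly instead of testing the optional s->enable/s->disable pointers.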
Signed-off-by: Paul Mundt --- drivers/tty/serial/sh-sci.c | 79 ++++++++++++++++++--------------------------- 1 file changed, 32 insertions(+), 47 deletions(-) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index fa99b0063158..9c8624d9c803 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -62,12 +62,6 @@ struct sci_port { /* Platform configuration */ struct plat_sci_port *cfg; - /* Port enable callback */ - void (*enable)(struct uart_port *port); - - /* Port disable callback */ - void (*disable)(struct uart_port *port); - /* Break timer */ struct timer_list break_timer; int break_flag; @@ -366,6 +360,29 @@ static int sci_probe_regmap(struct plat_sci_port *cfg) return 0; } +static void sci_port_enable(struct sci_port *sci_port) +{ + if (!sci_port->port.dev) + return; + + pm_runtime_get_sync(sci_port->port.dev); + + clk_enable(sci_port->iclk); + sci_port->port.uartclk = clk_get_rate(sci_port->iclk); + clk_enable(sci_port->fclk); +} + +static void sci_port_disable(struct sci_port *sci_port) +{ + if (!sci_port->port.dev) + return; + + clk_disable(sci_port->fclk); + clk_disable(sci_port->iclk); + + pm_runtime_put_sync(sci_port->port.dev); +} + #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) #ifdef CONFIG_CONSOLE_POLL @@ -651,8 +668,7 @@ static void sci_break_timer(unsigned long data) { struct sci_port *port = (struct sci_port *)data; - if (port->enable) - port->enable(&port->port); + sci_port_enable(port); if (sci_rxd_in(&port->port) == 0) { port->break_flag = 1; @@ -664,8 +680,7 @@ static void sci_break_timer(unsigned long data) } else port->break_flag = 0; - if (port->disable) - port->disable(&port->port); + sci_port_disable(port); } static int sci_handle_errors(struct uart_port *port) @@ -939,27 +954,6 @@ static int sci_notifier(struct notifier_block *self, return NOTIFY_OK; } -static void sci_clk_enable(struct uart_port *port) -{ - struct sci_port *sci_port = to_sci_port(port); - - pm_runtime_get_sync(port->dev); - - clk_enable(sci_port->iclk); - sci_port->port.uartclk = clk_get_rate(sci_port->iclk); - clk_enable(sci_port->fclk); -} - -static void sci_clk_disable(struct uart_port *port) -{ - struct sci_port *sci_port = to_sci_port(port); - - clk_disable(sci_port->fclk); - clk_disable(sci_port->iclk); - - pm_runtime_put_sync(port->dev); -} - static int sci_request_irq(struct sci_port *port) { int i; @@ -1537,8 +1531,7 @@ static int sci_startup(struct uart_port *port) dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); - if (s->enable) - s->enable(port); + sci_port_enable(s); ret = sci_request_irq(s); if (unlikely(ret < 0)) @@ -1564,8 +1557,7 @@ static void sci_shutdown(struct uart_port *port) sci_free_dma(port); sci_free_irq(s); - if (s->disable) - s->disable(port); + sci_port_disable(s); } static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps, @@ -1612,8 +1604,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, if (likely(baud && port->uartclk)) t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk); - if (s->enable) - s->enable(port); + sci_port_enable(s); do { status = sci_in(port, SCxSR); @@ -1683,8 +1674,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, if ((termios->c_cflag & CREAD) != 0) sci_start_rx(port); - if (s->disable) - s->disable(port); + sci_port_disable(s); } static const char *sci_type(struct uart_port *port) @@ -1870,8 +1860,6 @@ static int __devinit sci_init_single(struct platform_device *dev, if 
(IS_ERR(sci_port->fclk)) sci_port->fclk = NULL; - sci_port->enable = sci_clk_enable; - sci_port->disable = sci_clk_disable; port->dev = &dev->dev; pm_runtime_enable(&dev->dev); @@ -1950,8 +1938,7 @@ static void serial_console_write(struct console *co, const char *s, struct uart_port *port = &sci_port->port; unsigned short bits; - if (sci_port->enable) - sci_port->enable(port); + sci_port_enable(sci_port); uart_console_write(port, s, count, serial_console_putchar); @@ -1960,8 +1947,7 @@ static void serial_console_write(struct console *co, const char *s, while ((sci_in(port, SCxSR) & bits) != bits) cpu_relax(); - if (sci_port->disable) - sci_port->disable(port); + sci_port_disable(sci_port); } static int __devinit serial_console_setup(struct console *co, char *options) @@ -1993,8 +1979,7 @@ static int __devinit serial_console_setup(struct console *co, char *options) if (unlikely(ret != 0)) return ret; - if (sci_port->enable) - sci_port->enable(port); + sci_port_enable(sci_port); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); -- cgit v1.2.3 From 9174fc8f111982e024a00512c521ad8f1056fccb Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 28 Jun 2011 15:25:36 +0900 Subject: serial: sh-sci: Fix up pretty name printing for port IRQs. Presently these were all using the same static string with no regard to dev_name() and the like. This implements a bit of rework to name the IRQ dynamically, as it should have been doing all along anyways. Signed-off-by: Paul Mundt --- drivers/tty/serial/sh-sci.c | 118 +++++++++++++++++++++++++++++++------------- include/linux/serial_sci.h | 7 +++ 2 files changed, 92 insertions(+), 33 deletions(-) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 9c8624d9c803..d0a56235c50e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -71,6 +71,8 @@ struct sci_port { /* Function clock */ struct clk *fclk; + char *irqstr[SCIx_NR_IRQS]; + struct dma_chan *chan_tx; struct dma_chan *chan_rx; @@ -954,53 +956,102 @@ static int sci_notifier(struct notifier_block *self, return NOTIFY_OK; } +static struct sci_irq_desc { + const char *desc; + irq_handler_t handler; +} sci_irq_desc[] = { + /* + * Split out handlers, the default case. + */ + [SCIx_ERI_IRQ] = { + .desc = "rx err", + .handler = sci_er_interrupt, + }, + + [SCIx_RXI_IRQ] = { + .desc = "rx full", + .handler = sci_rx_interrupt, + }, + + [SCIx_TXI_IRQ] = { + .desc = "tx empty", + .handler = sci_tx_interrupt, + }, + + [SCIx_BRI_IRQ] = { + .desc = "break", + .handler = sci_br_interrupt, + }, + + /* + * Special muxed handler. 
+ */ + [SCIx_MUX_IRQ] = { + .desc = "mux", + .handler = sci_mpxed_interrupt, + }, +}; + static int sci_request_irq(struct sci_port *port) { - int i; - irqreturn_t (*handlers[4])(int irq, void *ptr) = { - sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt, - sci_br_interrupt, - }; - const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full", - "SCI Transmit Data Empty", "SCI Break" }; - - if (port->cfg->irqs[0] == port->cfg->irqs[1]) { - if (unlikely(!port->cfg->irqs[0])) - return -ENODEV; - - if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt, - IRQF_DISABLED, "sci", port)) { - dev_err(port->port.dev, "Can't allocate IRQ\n"); - return -ENODEV; + struct uart_port *up = &port->port; + int i, j, ret = 0; + + for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) { + struct sci_irq_desc *desc; + unsigned int irq; + + if (SCIx_IRQ_IS_MUXED(port)) { + i = SCIx_MUX_IRQ; + irq = up->irq; + } else + irq = port->cfg->irqs[i]; + + desc = sci_irq_desc + i; + port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s", + dev_name(up->dev), desc->desc); + if (!port->irqstr[j]) { + dev_err(up->dev, "Failed to allocate %s IRQ string\n", + desc->desc); + goto out_nomem; } - } else { - for (i = 0; i < ARRAY_SIZE(handlers); i++) { - if (unlikely(!port->cfg->irqs[i])) - continue; - - if (request_irq(port->cfg->irqs[i], handlers[i], - IRQF_DISABLED, desc[i], port)) { - dev_err(port->port.dev, "Can't allocate IRQ\n"); - return -ENODEV; - } + + ret = request_irq(irq, desc->handler, up->irqflags, + port->irqstr[j], port); + if (unlikely(ret)) { + dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc); + goto out_noirq; } } return 0; + +out_noirq: + while (--i >= 0) + free_irq(port->cfg->irqs[i], port); + +out_nomem: + while (--j >= 0) + kfree(port->irqstr[j]); + + return ret; } static void sci_free_irq(struct sci_port *port) { int i; - if (port->cfg->irqs[0] == port->cfg->irqs[1]) - free_irq(port->cfg->irqs[0], port); - else { - for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) { - if (!port->cfg->irqs[i]) - continue; + /* + * Intentionally in reverse order so we iterate over the muxed + * IRQ first. + */ + for (i = 0; i < SCIx_NR_IRQS; i++) { + free_irq(port->cfg->irqs[i], port); + kfree(port->irqstr[i]); - free_irq(port->cfg->irqs[i], port); + if (SCIx_IRQ_IS_MUXED(port)) { + /* If there's only one IRQ, we're done. */ + return; } } } @@ -1910,6 +1961,7 @@ static int __devinit sci_init_single(struct platform_device *dev, * For the muxed case there's nothing more to do. */ port->irq = p->irqs[SCIx_RXI_IRQ]; + port->irqflags = IRQF_DISABLED; port->serial_in = sci_serial_in; port->serial_out = sci_serial_out; diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 4ca130a90ea5..8bffe9ae2ca0 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -56,6 +56,8 @@ enum { SCIx_TXI_IRQ, SCIx_BRI_IRQ, SCIx_NR_IRQS, + + SCIx_MUX_IRQ = SCIx_NR_IRQS, /* special case */ }; enum { @@ -82,6 +84,11 @@ enum { [SCIx_BRI_IRQ] = (irq), \ } +#define SCIx_IRQ_IS_MUXED(port) \ + ((port)->cfg->irqs[SCIx_ERI_IRQ] == \ + (port)->cfg->irqs[SCIx_RXI_IRQ]) || \ + ((port)->cfg->irqs[SCIx_ERI_IRQ] && \ + !(port)->cfg->irqs[SCIx_RXI_IRQ]) /* * SCI register subset common for all port types. * Not all registers will exist on all parts. 
-- cgit v1.2.3 From f86186b44b4164600cce03d0d93ad48ec21fa429 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Tue, 28 Jun 2011 10:01:31 -0400 Subject: ext4: refactor duplicated block placement code I found that ext4_ext_find_goal() and ext4_find_near() share the same code for returning a coloured start block based on i_block_group. We can refactor this into a common function so that they don't diverge in the future. Thanks to adilger for suggesting the new function name. Signed-off-by: Eric Sandeen Signed-off-by: "Theodore Ts'o" --- fs/ext4/balloc.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ fs/ext4/ext4.h | 1 + fs/ext4/extents.c | 37 +------------------------------------ fs/ext4/indirect.c | 28 +--------------------------- 4 files changed, 51 insertions(+), 63 deletions(-) diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 264f6949511e..f8224adf496e 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -620,3 +620,51 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group) } +/** + * ext4_inode_to_goal_block - return a hint for block allocation + * @inode: inode for block allocation + * + * Return the ideal location to start allocating blocks for a + * newly created inode. + */ +ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + ext4_group_t block_group; + ext4_grpblk_t colour; + int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); + ext4_fsblk_t bg_start; + ext4_fsblk_t last_block; + + block_group = ei->i_block_group; + if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { + /* + * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME + * block groups per flexgroup, reserve the first block + * group for directories and special files. Regular + * files will start at the second block group. This + * tends to speed up directory access and improves + * fsck times. + */ + block_group &= ~(flex_size-1); + if (S_ISREG(inode->i_mode)) + block_group++; + } + bg_start = ext4_group_first_block_no(inode->i_sb, block_group); + last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; + + /* + * If we are doing delayed allocation, we don't need take + * colour into account. 
+ */ + if (test_opt(inode->i_sb, DELALLOC)) + return bg_start; + + if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) + colour = (current->pid % 16) * + (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); + else + colour = (current->pid % 16) * ((last_block - bg_start) / 16); + return bg_start + colour; +} + diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ddaf5043fb38..49d2cea47382 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1743,6 +1743,7 @@ extern unsigned ext4_init_block_bitmap(struct super_block *sb, struct ext4_group_desc *desc); #define ext4_free_blocks_after_init(sb, group, desc) \ ext4_init_block_bitmap(sb, NULL, group, desc) +ext4_fsblk_t ext4_inode_to_goal_block(struct inode *); /* dir.c */ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index eb63c7b8dfd2..f331e5010f68 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -114,12 +114,6 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) { - struct ext4_inode_info *ei = EXT4_I(inode); - ext4_fsblk_t bg_start; - ext4_fsblk_t last_block; - ext4_grpblk_t colour; - ext4_group_t block_group; - int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); int depth; if (path) { @@ -161,36 +155,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, } /* OK. use inode's group */ - block_group = ei->i_block_group; - if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { - /* - * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME - * block groups per flexgroup, reserve the first block - * group for directories and special files. Regular - * files will start at the second block group. This - * tends to speed up directory access and improves - * fsck times. - */ - block_group &= ~(flex_size-1); - if (S_ISREG(inode->i_mode)) - block_group++; - } - bg_start = ext4_group_first_block_no(inode->i_sb, block_group); - last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; - - /* - * If we are doing delayed allocation, we don't need take - * colour into account. - */ - if (test_opt(inode->i_sb, DELALLOC)) - return bg_start; - - if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) - colour = (current->pid % 16) * - (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); - else - colour = (current->pid % 16) * ((last_block - bg_start) / 16); - return bg_start + colour + block; + return ext4_inode_to_goal_block(inode); } /* diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index c3e85a86e821..6c271115dbb6 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -207,11 +207,6 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) struct ext4_inode_info *ei = EXT4_I(inode); __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data; __le32 *p; - ext4_fsblk_t bg_start; - ext4_fsblk_t last_block; - ext4_grpblk_t colour; - ext4_group_t block_group; - int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); /* Try to find previous block */ for (p = ind->p - 1; p >= start; p--) { @@ -227,28 +222,7 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) * It is going to be referred to from the inode itself? OK, just put it * into the same cylinder group then. 
*/ - block_group = ei->i_block_group; - if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { - block_group &= ~(flex_size-1); - if (S_ISREG(inode->i_mode)) - block_group++; - } - bg_start = ext4_group_first_block_no(inode->i_sb, block_group); - last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; - - /* - * If we are doing delayed allocation, we don't need take - * colour into account. - */ - if (test_opt(inode->i_sb, DELALLOC)) - return bg_start; - - if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) - colour = (current->pid % 16) * - (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); - else - colour = (current->pid % 16) * ((last_block - bg_start) / 16); - return bg_start + colour; + return ext4_inode_to_goal_block(inode); } /** -- cgit v1.2.3 From 9331b6261058eb85ae7c57ab8ac279e7fdaa9f04 Mon Sep 17 00:00:00 2001 From: Yongqiang Yang Date: Tue, 28 Jun 2011 10:19:05 -0400 Subject: ext4: quiet 'unused variables' compile warnings Unused variables was deleted. Signed-off-by: Yongqiang Yang Signed-off-by: "Theodore Ts'o" --- fs/ext4/extents.c | 2 -- fs/ext4/mballoc.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index f331e5010f68..31ae5fbe89e5 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3073,12 +3073,10 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, struct ext4_ext_path *path) { struct ext4_extent *ex; - struct ext4_extent_header *eh; int depth; int err = 0; depth = ext_depth(inode); - eh = path[depth].p_hdr; ex = path[depth].p_ext; ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 6ed859d56850..389386b41c98 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4666,12 +4666,10 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, struct ext4_buddy e4b; int err = 0, ret, blk_free_count; ext4_grpblk_t blocks_freed; - struct ext4_group_info *grp; ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); ext4_get_group_no_and_offset(sb, block, &block_group, &bit); - grp = ext4_get_group_info(sb, block_group); /* * Check to see if we are freeing blocks across a group * boundary. -- cgit v1.2.3 From 3d73710880afa3d61cf57b5d4eb192e812eb7e4f Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 28 Jun 2011 10:59:12 -0700 Subject: cpufreq: expose a cpufreq_quick_get_max routine This allows drivers and other code to get the max reported CPU frequency. Initial use is for scaling ring frequency with GPU frequency in the i915 driver. Signed-off-by: Jesse Barnes Signed-off-by: Keith Packard --- drivers/cpufreq/cpufreq.c | 20 ++++++++++++++++++++ include/linux/cpufreq.h | 5 +++++ 2 files changed, 25 insertions(+) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 0a5bea9e3585..987a165ede26 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1199,6 +1199,26 @@ unsigned int cpufreq_quick_get(unsigned int cpu) } EXPORT_SYMBOL(cpufreq_quick_get); +/** + * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU + * @cpu: CPU number + * + * Just return the max possible frequency for a given CPU. 
+ */ +unsigned int cpufreq_quick_get_max(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + unsigned int ret_freq = 0; + + if (policy) { + ret_freq = policy->max; + cpufreq_cpu_put(policy); + } + + return ret_freq; +} +EXPORT_SYMBOL(cpufreq_quick_get_max); + static unsigned int __cpufreq_get(unsigned int cpu) { diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 11be48e0d168..6216115c7789 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -324,11 +324,16 @@ static inline unsigned int cpufreq_get(unsigned int cpu) /* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */ #ifdef CONFIG_CPU_FREQ unsigned int cpufreq_quick_get(unsigned int cpu); +unsigned int cpufreq_quick_get_max(unsigned int cpu); #else static inline unsigned int cpufreq_quick_get(unsigned int cpu) { return 0; } +static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) +{ + return 0; +} #endif -- cgit v1.2.3 From 23b2f8bb92feb83127679c53633def32d3108e70 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 28 Jun 2011 13:04:16 -0700 Subject: drm/i915: load a ring frequency scaling table v3 The ring frequency scaling table tells the PCU to treat certain GPU frequencies as if they were a given CPU frequency for purposes of scaling the ring frequency. Normally the PCU will scale the ring frequency based on the CPU P-state, but with the table present, it will also take the GPU frequency into account. The main downside of keeping the ring frequency high while the CPU is at a low frequency (or asleep altogether) is increased power consumption. But then if you're keeping your GPU busy, you probably want the extra performance. v2: - add units to debug table header (from Eric) - use tsc_khz as a fallback if the cpufreq driver doesn't give us a freq (from Chris) v3: - fix comments & debug output - remove unneeded force wake get/put Reviewed-by: Ben Widawsky Tested-by: Eric Anholt Reviewed-by: Eric Anholt Signed-off-by: Jesse Barnes Signed-off-by: Keith Packard --- drivers/gpu/drm/i915/i915_debugfs.c | 39 ++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 4 ++- drivers/gpu/drm/i915/i915_suspend.c | 4 ++- drivers/gpu/drm/i915/intel_display.c | 58 +++++++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/intel_drv.h | 1 + 5 files changed, 103 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4d46441cbe2d..8a5a032ec696 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1123,6 +1123,44 @@ static int i915_emon_status(struct seq_file *m, void *unused) return 0; } +static int i915_ring_freq_table(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + int gpu_freq, ia_freq; + + if (!IS_GEN6(dev)) { + seq_printf(m, "unsupported on this chipset\n"); + return 0; + } + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); + + for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; + gpu_freq++) { + I915_WRITE(GEN6_PCODE_DATA, gpu_freq); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | + GEN6_PCODE_READ_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & + GEN6_PCODE_READY) == 0, 10)) { + DRM_ERROR("pcode read of freq table timed out\n"); + continue; + } 
+ ia_freq = I915_READ(GEN6_PCODE_DATA); + seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); + } + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + static int i915_gfxec(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -1426,6 +1464,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_inttoext_table", i915_inttoext_table, 0}, {"i915_drpc_info", i915_drpc_info, 0}, {"i915_emon_status", i915_emon_status, 0}, + {"i915_ring_freq_table", i915_ring_freq_table, 0}, {"i915_gfxec", i915_gfxec, 0}, {"i915_fbc_status", i915_fbc_status, 0}, {"i915_sr_status", i915_sr_status, 0}, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5d5def756c9e..4a446b116e6a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3434,7 +3434,9 @@ #define GEN6_PCODE_MAILBOX 0x138124 #define GEN6_PCODE_READY (1<<31) #define GEN6_READ_OC_PARAMS 0xc -#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 +#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 +#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 #define GEN6_PCODE_DATA 0x138128 +#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index e8152d23d5b6..6fbd997f5a6c 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -875,8 +875,10 @@ int i915_restore_state(struct drm_device *dev) intel_init_emon(dev); } - if (IS_GEN6(dev)) + if (IS_GEN6(dev)) { gen6_enable_rps(dev_priv); + gen6_update_ring_freq(dev_priv); + } /* Cache mode state */ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e58627f580c6..804ac4d6cb48 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -24,6 +24,7 @@ * Eric Anholt */ +#include #include #include #include @@ -7273,6 +7274,59 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->dev->struct_mutex); } +void gen6_update_ring_freq(struct drm_i915_private *dev_priv) +{ + int min_freq = 15; + int gpu_freq, ia_freq, max_ia_freq; + int scaling_factor = 180; + + max_ia_freq = cpufreq_quick_get_max(0); + /* + * Default to measured freq if none found, PCU will ensure we don't go + * over + */ + if (!max_ia_freq) + max_ia_freq = tsc_khz; + + /* Convert from kHz to MHz */ + max_ia_freq /= 1000; + + mutex_lock(&dev_priv->dev->struct_mutex); + + /* + * For each potential GPU frequency, load a ring frequency we'd like + * to use for memory access. We do this by specifying the IA frequency + * the PCU should use as a reference to determine the ring frequency. + */ + for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; + gpu_freq--) { + int diff = dev_priv->max_delay - gpu_freq; + + /* + * For GPU frequencies less than 750MHz, just use the lowest + * ring freq. 
+ */ + if (gpu_freq < min_freq) + ia_freq = 800; + else + ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); + ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); + + I915_WRITE(GEN6_PCODE_DATA, + (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | + gpu_freq); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & + GEN6_PCODE_READY) == 0, 10)) { + DRM_ERROR("pcode write of freq table timed out\n"); + continue; + } + } + + mutex_unlock(&dev_priv->dev->struct_mutex); +} + static void ironlake_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -7916,8 +7970,10 @@ void intel_modeset_init(struct drm_device *dev) intel_init_emon(dev); } - if (IS_GEN6(dev)) + if (IS_GEN6(dev)) { gen6_enable_rps(dev_priv); + gen6_update_ring_freq(dev_priv); + } INIT_WORK(&dev_priv->idle_work, intel_idle_update); setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9ffa61eb4d7e..8ac3bd8b6faa 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -317,6 +317,7 @@ extern void intel_enable_clock_gating(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); extern void gen6_enable_rps(struct drm_i915_private *dev_priv); +extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv); extern void gen6_disable_rps(struct drm_device *dev); extern void intel_init_emon(struct drm_device *dev); -- cgit v1.2.3 From 7c75964f432d14062d8eccfc916aa290f56b5aab Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:15:31 +0900 Subject: TOMOYO: Cleanup part 1. In order to synchronize with TOMOYO 1.8's syntax, (1) Remove special handling for allow_read/write permission. (2) Replace deny_rewrite/allow_rewrite permission with allow_append permission. (3) Remove file_pattern keyword. (4) Remove allow_read permission from exception policy. (5) Allow creating domains in enforcing mode without calling supervisor. (6) Add permission check for opening directory for reading. (7) Add permission check for stat() operation. (8) Make "cat < /sys/kernel/security/tomoyo/self_domain" behave as if "cat /sys/kernel/security/tomoyo/self_domain". 
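A small standalone sketch of change (2), assuming illustrative MAY_* values rather than the kernel's: it prints which of the reworked allow_read/allow_write/allow_append checks an open request triggers, mirroring the logic this patch adds to tomoyo_check_open_permission() further below.

#include <stdio.h>
#include <fcntl.h>

#define MAY_READ  0x1	/* illustrative values, not the kernel's MAY_* bits */
#define MAY_WRITE 0x2

/*
 * With this cleanup, a write through O_APPEND needs "allow_append",
 * any other write needs "allow_write", and a read needs "allow_read".
 */
static void show_checks(int acc_mode, int open_flags)
{
	if (acc_mode & MAY_READ)
		printf("  check allow_read\n");
	if (acc_mode & MAY_WRITE)
		printf("  check %s\n", (open_flags & O_APPEND) ?
		       "allow_append" : "allow_write");
}

int main(void)
{
	printf("O_RDONLY:\n");
	show_checks(MAY_READ, 0);
	printf("O_WRONLY:\n");
	show_checks(MAY_WRITE, 0);
	printf("O_WRONLY | O_APPEND:\n");
	show_checks(MAY_WRITE, O_APPEND);
	printf("O_RDWR | O_APPEND:\n");
	show_checks(MAY_READ | MAY_WRITE, O_APPEND);
	return 0;
}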
Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/common.c | 60 +------- security/tomoyo/common.h | 93 ++---------- security/tomoyo/domain.c | 13 +- security/tomoyo/file.c | 368 +++++------------------------------------------ security/tomoyo/gc.c | 30 ---- security/tomoyo/mount.c | 5 +- security/tomoyo/tomoyo.c | 14 +- security/tomoyo/util.c | 23 +-- 8 files changed, 71 insertions(+), 535 deletions(-) diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index a0d09e56874b..0776173b7d2b 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -39,13 +39,13 @@ static const char *tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX [TOMOYO_MAC_FILE_OPEN] = "file::open", [TOMOYO_MAC_FILE_CREATE] = "file::create", [TOMOYO_MAC_FILE_UNLINK] = "file::unlink", + [TOMOYO_MAC_FILE_GETATTR] = "file::getattr", [TOMOYO_MAC_FILE_MKDIR] = "file::mkdir", [TOMOYO_MAC_FILE_RMDIR] = "file::rmdir", [TOMOYO_MAC_FILE_MKFIFO] = "file::mkfifo", [TOMOYO_MAC_FILE_MKSOCK] = "file::mksock", [TOMOYO_MAC_FILE_TRUNCATE] = "file::truncate", [TOMOYO_MAC_FILE_SYMLINK] = "file::symlink", - [TOMOYO_MAC_FILE_REWRITE] = "file::rewrite", [TOMOYO_MAC_FILE_MKBLOCK] = "file::mkblock", [TOMOYO_MAC_FILE_MKCHAR] = "file::mkchar", [TOMOYO_MAC_FILE_LINK] = "file::link", @@ -881,10 +881,6 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) domain->profile = (u8) profile; return 0; } - if (!strcmp(data, TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ)) { - domain->ignore_global_allow_read = !is_delete; - return 0; - } if (!strcmp(data, TOMOYO_KEYWORD_QUOTA_EXCEEDED)) { domain->quota_warned = !is_delete; return 0; @@ -942,11 +938,6 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head, if (head->r.print_execute_only && bit != TOMOYO_TYPE_EXECUTE) continue; - /* Print "read/write" instead of "read" and "write". 
*/ - if ((bit == TOMOYO_TYPE_READ || - bit == TOMOYO_TYPE_WRITE) - && (perm & (1 << TOMOYO_TYPE_READ_WRITE))) - continue; break; } if (bit >= TOMOYO_MAX_PATH_OPERATION) @@ -1055,10 +1046,6 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) tomoyo_set_string(head, "quota_exceeded\n"); if (domain->transition_failed) tomoyo_set_string(head, "transition_failed\n"); - if (domain->ignore_global_allow_read) - tomoyo_set_string(head, - TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ - "\n"); head->r.step++; tomoyo_set_lf(head); /* fall through */ @@ -1235,18 +1222,15 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head) static const struct { const char *keyword; int (*write) (char *, const bool); - } tomoyo_callback[4] = { + } tomoyo_callback[1] = { { TOMOYO_KEYWORD_AGGREGATOR, tomoyo_write_aggregator }, - { TOMOYO_KEYWORD_FILE_PATTERN, tomoyo_write_pattern }, - { TOMOYO_KEYWORD_DENY_REWRITE, tomoyo_write_no_rewrite }, - { TOMOYO_KEYWORD_ALLOW_READ, tomoyo_write_globally_readable }, }; for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++) if (tomoyo_str_starts(&data, tomoyo_transition_type[i])) return tomoyo_write_transition_control(data, is_delete, i); - for (i = 0; i < 4; i++) + for (i = 0; i < 1; i++) if (tomoyo_str_starts(&data, tomoyo_callback[i].keyword)) return tomoyo_callback[i].write(data, is_delete); for (i = 0; i < TOMOYO_MAX_GROUP; i++) @@ -1336,15 +1320,6 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) name); } break; - case TOMOYO_ID_GLOBALLY_READABLE: - { - struct tomoyo_readable_file *ptr = - container_of(acl, typeof(*ptr), head); - tomoyo_set_string(head, - TOMOYO_KEYWORD_ALLOW_READ); - tomoyo_set_string(head, ptr->filename->name); - } - break; case TOMOYO_ID_AGGREGATOR: { struct tomoyo_aggregator *ptr = @@ -1358,24 +1333,6 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) ptr->aggregated_name->name); } break; - case TOMOYO_ID_PATTERN: - { - struct tomoyo_no_pattern *ptr = - container_of(acl, typeof(*ptr), head); - tomoyo_set_string(head, - TOMOYO_KEYWORD_FILE_PATTERN); - tomoyo_set_string(head, ptr->pattern->name); - } - break; - case TOMOYO_ID_NO_REWRITE: - { - struct tomoyo_no_rewrite *ptr = - container_of(acl, typeof(*ptr), head); - tomoyo_set_string(head, - TOMOYO_KEYWORD_DENY_REWRITE); - tomoyo_set_string(head, ptr->pattern->name); - } - break; default: continue; } @@ -1890,22 +1847,13 @@ int tomoyo_open_control(const u8 type, struct file *file) if (type != TOMOYO_QUERY) head->reader_idx = tomoyo_read_lock(); file->private_data = head; - /* - * Call the handler now if the file is - * /sys/kernel/security/tomoyo/self_domain - * so that the user can use - * cat < /sys/kernel/security/tomoyo/self_domain" - * to know the current process's domainname. - */ - if (type == TOMOYO_SELFDOMAIN) - tomoyo_read_control(file, NULL, 0); /* * If the file is /sys/kernel/security/tomoyo/query , increment the * observer counter. * The obserber counter is used by tomoyo_supervisor() to see if * there is some process monitoring /sys/kernel/security/tomoyo/query. 
*/ - else if (type == TOMOYO_QUERY) + if (type == TOMOYO_QUERY) atomic_inc(&tomoyo_query_observers); return 0; } diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 7c66bd898782..a5d6e212b18f 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -52,9 +52,6 @@ enum tomoyo_policy_id { TOMOYO_ID_NUMBER_GROUP, TOMOYO_ID_TRANSITION_CONTROL, TOMOYO_ID_AGGREGATOR, - TOMOYO_ID_GLOBALLY_READABLE, - TOMOYO_ID_PATTERN, - TOMOYO_ID_NO_REWRITE, TOMOYO_ID_MANAGER, TOMOYO_ID_NAME, TOMOYO_ID_ACL, @@ -73,8 +70,6 @@ enum tomoyo_group_id { #define TOMOYO_KEYWORD_ALLOW_MOUNT "allow_mount " #define TOMOYO_KEYWORD_ALLOW_READ "allow_read " #define TOMOYO_KEYWORD_DELETE "delete " -#define TOMOYO_KEYWORD_DENY_REWRITE "deny_rewrite " -#define TOMOYO_KEYWORD_FILE_PATTERN "file_pattern " #define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain " #define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain " #define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain " @@ -83,7 +78,6 @@ enum tomoyo_group_id { #define TOMOYO_KEYWORD_NUMBER_GROUP "number_group " #define TOMOYO_KEYWORD_SELECT "select " #define TOMOYO_KEYWORD_USE_PROFILE "use_profile " -#define TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "ignore_global_allow_read" #define TOMOYO_KEYWORD_QUOTA_EXCEEDED "quota_exceeded" #define TOMOYO_KEYWORD_TRANSITION_FAILED "transition_failed" /* A domain definition starts with . */ @@ -115,35 +109,21 @@ enum tomoyo_acl_entry_type_index { }; /* Index numbers for File Controls. */ - -/* - * TOMOYO_TYPE_READ_WRITE is special. TOMOYO_TYPE_READ_WRITE is automatically - * set if both TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE are set. - * Both TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE are automatically set if - * TOMOYO_TYPE_READ_WRITE is set. - * TOMOYO_TYPE_READ_WRITE is automatically cleared if either TOMOYO_TYPE_READ - * or TOMOYO_TYPE_WRITE is cleared. - * Both TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE are automatically cleared if - * TOMOYO_TYPE_READ_WRITE is cleared. - */ - enum tomoyo_path_acl_index { - TOMOYO_TYPE_READ_WRITE, TOMOYO_TYPE_EXECUTE, TOMOYO_TYPE_READ, TOMOYO_TYPE_WRITE, + TOMOYO_TYPE_APPEND, TOMOYO_TYPE_UNLINK, + TOMOYO_TYPE_GETATTR, TOMOYO_TYPE_RMDIR, TOMOYO_TYPE_TRUNCATE, TOMOYO_TYPE_SYMLINK, - TOMOYO_TYPE_REWRITE, TOMOYO_TYPE_CHROOT, TOMOYO_TYPE_UMOUNT, TOMOYO_MAX_PATH_OPERATION }; -#define TOMOYO_RW_MASK ((1 << TOMOYO_TYPE_READ) | (1 << TOMOYO_TYPE_WRITE)) - enum tomoyo_mkdev_acl_index { TOMOYO_TYPE_MKBLOCK, TOMOYO_TYPE_MKCHAR, @@ -187,13 +167,13 @@ enum tomoyo_mac_index { TOMOYO_MAC_FILE_OPEN, TOMOYO_MAC_FILE_CREATE, TOMOYO_MAC_FILE_UNLINK, + TOMOYO_MAC_FILE_GETATTR, TOMOYO_MAC_FILE_MKDIR, TOMOYO_MAC_FILE_RMDIR, TOMOYO_MAC_FILE_MKFIFO, TOMOYO_MAC_FILE_MKSOCK, TOMOYO_MAC_FILE_TRUNCATE, TOMOYO_MAC_FILE_SYMLINK, - TOMOYO_MAC_FILE_REWRITE, TOMOYO_MAC_FILE_MKBLOCK, TOMOYO_MAC_FILE_MKCHAR, TOMOYO_MAC_FILE_LINK, @@ -388,9 +368,7 @@ struct tomoyo_acl_info { * "deleted", false otherwise. * (6) "quota_warned" is a bool which is used for suppressing warning message * when learning mode learned too much entries. - * (7) "ignore_global_allow_read" is a bool which is true if this domain - * should ignore "allow_read" directive in exception policy. - * (8) "transition_failed" is a bool which is set to true when this domain was + * (7) "transition_failed" is a bool which is set to true when this domain was * unable to create a new domain at tomoyo_find_next_domain() because the * name of the domain to be created was too long or it could not allocate * memory. 
If set to true, more than one process continued execve() @@ -415,7 +393,6 @@ struct tomoyo_domain_info { u8 profile; /* Profile number to use. */ bool is_deleted; /* Delete flag. */ bool quota_warned; /* Quota warnning flag. */ - bool ignore_global_allow_read; /* Ignore "allow_read" flag. */ bool transition_failed; /* Domain transition failed flag. */ atomic_t users; /* Number of referring credentials. */ }; @@ -429,10 +406,9 @@ struct tomoyo_domain_info { * (2) "perm" which is a bitmask of permitted operations. * (3) "name" is the pathname. * - * Directives held by this structure are "allow_read/write", "allow_execute", - * "allow_read", "allow_write", "allow_unlink", "allow_rmdir", - * "allow_truncate", "allow_symlink", "allow_rewrite", "allow_chroot" and - * "allow_unmount". + * Directives held by this structure are "allow_execute", "allow_read", + * "allow_write", "allow_append", "allow_unlink", "allow_rmdir", + * "allow_truncate", "allow_symlink", "allow_chroot" and "allow_unmount". */ struct tomoyo_path_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */ @@ -573,47 +549,6 @@ struct tomoyo_io_buffer { u8 type; }; -/* - * tomoyo_readable_file is a structure which is used for holding - * "allow_read" entries. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "filename" is a pathname which is allowed to open(O_RDONLY). - */ -struct tomoyo_readable_file { - struct tomoyo_acl_head head; - const struct tomoyo_path_info *filename; -}; - -/* - * tomoyo_no_pattern is a structure which is used for holding - * "file_pattern" entries. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "pattern" is a pathname pattern which is used for converting pathnames - * to pathname patterns during learning mode. - */ -struct tomoyo_no_pattern { - struct tomoyo_acl_head head; - const struct tomoyo_path_info *pattern; -}; - -/* - * tomoyo_no_rewrite is a structure which is used for holding - * "deny_rewrite" entries. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "pattern" is a pathname which is by default not permitted to modify - * already existing content. - */ -struct tomoyo_no_rewrite { - struct tomoyo_acl_head head; - const struct tomoyo_path_info *pattern; -}; - /* * tomoyo_transition_control is a structure which is used for holding * "initialize_domain"/"no_initialize_domain"/"keep_domain"/"no_keep_domain" @@ -764,23 +699,17 @@ int tomoyo_write_aggregator(char *data, const bool is_delete); int tomoyo_write_transition_control(char *data, const bool is_delete, const u8 type); /* - * Create "allow_read/write", "allow_execute", "allow_read", "allow_write", + * Create "allow_execute", "allow_read", "allow_write", "allow_append", * "allow_create", "allow_unlink", "allow_mkdir", "allow_rmdir", * "allow_mkfifo", "allow_mksock", "allow_mkblock", "allow_mkchar", - * "allow_truncate", "allow_symlink", "allow_rewrite", "allow_rename" and - * "allow_link" entry in domain policy. + * "allow_truncate", "allow_symlink", "allow_rename" and "allow_link" entry + * in domain policy. */ int tomoyo_write_file(char *data, struct tomoyo_domain_info *domain, const bool is_delete); -/* Create "allow_read" entry in exception policy. */ -int tomoyo_write_globally_readable(char *data, const bool is_delete); /* Create "allow_mount" entry in domain policy. */ int tomoyo_write_mount(char *data, struct tomoyo_domain_info *domain, const bool is_delete); -/* Create "deny_rewrite" entry in exception policy. 
*/ -int tomoyo_write_no_rewrite(char *data, const bool is_delete); -/* Create "file_pattern" entry in exception policy. */ -int tomoyo_write_pattern(char *data, const bool is_delete); /* Create "path_group"/"number_group" entry in exception policy. */ int tomoyo_write_group(char *data, const bool is_delete, const u8 type); int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) @@ -819,8 +748,6 @@ char *tomoyo_realpath_nofollow(const char *pathname); * ignores chroot'ed root and the pathname is already solved. */ char *tomoyo_realpath_from_path(struct path *path); -/* Get patterned pathname. */ -const char *tomoyo_pattern(const struct tomoyo_path_info *filename); /* Check memory quota. */ bool tomoyo_memory_ok(void *ptr); diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 35388408e475..355b536262b1 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -510,17 +510,8 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm) if (domain || strlen(tmp) >= TOMOYO_EXEC_TMPSIZE - 10) goto done; domain = tomoyo_find_domain(tmp); - if (domain) - goto done; - if (is_enforce) { - int error = tomoyo_supervisor(&r, "# wants to create domain\n" - "%s\n", tmp); - if (error == TOMOYO_RETRY_REQUEST) - goto retry; - if (error < 0) - goto done; - } - domain = tomoyo_assign_domain(tmp, old_domain->profile); + if (!domain) + domain = tomoyo_assign_domain(tmp, old_domain->profile); done: if (domain) goto out; diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index d64e8ecb6fb3..41ed7de44ef1 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -11,15 +11,15 @@ /* Keyword array for operations with one pathname. */ const char *tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = { - [TOMOYO_TYPE_READ_WRITE] = "read/write", [TOMOYO_TYPE_EXECUTE] = "execute", [TOMOYO_TYPE_READ] = "read", [TOMOYO_TYPE_WRITE] = "write", + [TOMOYO_TYPE_APPEND] = "append", [TOMOYO_TYPE_UNLINK] = "unlink", + [TOMOYO_TYPE_GETATTR] = "getattr", [TOMOYO_TYPE_RMDIR] = "rmdir", [TOMOYO_TYPE_TRUNCATE] = "truncate", [TOMOYO_TYPE_SYMLINK] = "symlink", - [TOMOYO_TYPE_REWRITE] = "rewrite", [TOMOYO_TYPE_CHROOT] = "chroot", [TOMOYO_TYPE_UMOUNT] = "unmount", }; @@ -50,15 +50,15 @@ const char *tomoyo_path_number_keyword[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { }; static const u8 tomoyo_p2mac[TOMOYO_MAX_PATH_OPERATION] = { - [TOMOYO_TYPE_READ_WRITE] = TOMOYO_MAC_FILE_OPEN, [TOMOYO_TYPE_EXECUTE] = TOMOYO_MAC_FILE_EXECUTE, [TOMOYO_TYPE_READ] = TOMOYO_MAC_FILE_OPEN, [TOMOYO_TYPE_WRITE] = TOMOYO_MAC_FILE_OPEN, + [TOMOYO_TYPE_APPEND] = TOMOYO_MAC_FILE_OPEN, [TOMOYO_TYPE_UNLINK] = TOMOYO_MAC_FILE_UNLINK, + [TOMOYO_TYPE_GETATTR] = TOMOYO_MAC_FILE_GETATTR, [TOMOYO_TYPE_RMDIR] = TOMOYO_MAC_FILE_RMDIR, [TOMOYO_TYPE_TRUNCATE] = TOMOYO_MAC_FILE_TRUNCATE, [TOMOYO_TYPE_SYMLINK] = TOMOYO_MAC_FILE_SYMLINK, - [TOMOYO_TYPE_REWRITE] = TOMOYO_MAC_FILE_REWRITE, [TOMOYO_TYPE_CHROOT] = TOMOYO_MAC_FILE_CHROOT, [TOMOYO_TYPE_UMOUNT] = TOMOYO_MAC_FILE_UMOUNT, }; @@ -131,24 +131,6 @@ static void tomoyo_add_slash(struct tomoyo_path_info *buf) tomoyo_fill_path_info(buf); } -/** - * tomoyo_strendswith - Check whether the token ends with the given token. - * - * @name: The token to check. - * @tail: The token to find. - * - * Returns true if @name ends with @tail, false otherwise. 
- */ -static bool tomoyo_strendswith(const char *name, const char *tail) -{ - int len; - - if (!name || !tail) - return false; - len = strlen(name) - strlen(tail); - return len >= 0 && !strcmp(name + len, tail); -} - /** * tomoyo_get_realpath - Get realpath. * @@ -182,7 +164,7 @@ static int tomoyo_audit_path_log(struct tomoyo_request_info *r) return 0; tomoyo_warn_log(r, "%s %s", operation, filename->name); return tomoyo_supervisor(r, "allow_%s %s\n", operation, - tomoyo_pattern(filename)); + filename->name); } /** @@ -202,8 +184,7 @@ static int tomoyo_audit_path2_log(struct tomoyo_request_info *r) tomoyo_warn_log(r, "%s %s %s", operation, filename1->name, filename2->name); return tomoyo_supervisor(r, "allow_%s %s %s\n", operation, - tomoyo_pattern(filename1), - tomoyo_pattern(filename2)); + filename1->name, filename2->name); } /** @@ -225,7 +206,7 @@ static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r) tomoyo_warn_log(r, "%s %s 0%o %u %u", operation, filename->name, mode, major, minor); return tomoyo_supervisor(r, "allow_%s %s 0%o %u %u\n", operation, - tomoyo_pattern(filename), mode, major, minor); + filename->name, mode, major, minor); } /** @@ -264,247 +245,7 @@ static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r) radix); tomoyo_warn_log(r, "%s %s %s", operation, filename->name, buffer); return tomoyo_supervisor(r, "allow_%s %s %s\n", operation, - tomoyo_pattern(filename), buffer); -} - -static bool tomoyo_same_globally_readable(const struct tomoyo_acl_head *a, - const struct tomoyo_acl_head *b) -{ - return container_of(a, struct tomoyo_readable_file, - head)->filename == - container_of(b, struct tomoyo_readable_file, - head)->filename; -} - -/** - * tomoyo_update_globally_readable_entry - Update "struct tomoyo_readable_file" list. - * - * @filename: Filename unconditionally permitted to open() for reading. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -static int tomoyo_update_globally_readable_entry(const char *filename, - const bool is_delete) -{ - struct tomoyo_readable_file e = { }; - int error; - - if (!tomoyo_correct_word(filename)) - return -EINVAL; - e.filename = tomoyo_get_name(filename); - if (!e.filename) - return -ENOMEM; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list - [TOMOYO_ID_GLOBALLY_READABLE], - tomoyo_same_globally_readable); - tomoyo_put_name(e.filename); - return error; -} - -/** - * tomoyo_globally_readable_file - Check if the file is unconditionnaly permitted to be open()ed for reading. - * - * @filename: The filename to check. - * - * Returns true if any domain can open @filename for reading, false otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -static bool tomoyo_globally_readable_file(const struct tomoyo_path_info * - filename) -{ - struct tomoyo_readable_file *ptr; - bool found = false; - - list_for_each_entry_rcu(ptr, &tomoyo_policy_list - [TOMOYO_ID_GLOBALLY_READABLE], head.list) { - if (!ptr->head.is_deleted && - tomoyo_path_matches_pattern(filename, ptr->filename)) { - found = true; - break; - } - } - return found; -} - -/** - * tomoyo_write_globally_readable - Write "struct tomoyo_readable_file" list. - * - * @data: String to parse. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). 
- */ -int tomoyo_write_globally_readable(char *data, const bool is_delete) -{ - return tomoyo_update_globally_readable_entry(data, is_delete); -} - -static bool tomoyo_same_pattern(const struct tomoyo_acl_head *a, - const struct tomoyo_acl_head *b) -{ - return container_of(a, struct tomoyo_no_pattern, head)->pattern == - container_of(b, struct tomoyo_no_pattern, head)->pattern; -} - -/** - * tomoyo_update_file_pattern_entry - Update "struct tomoyo_no_pattern" list. - * - * @pattern: Pathname pattern. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -static int tomoyo_update_file_pattern_entry(const char *pattern, - const bool is_delete) -{ - struct tomoyo_no_pattern e = { }; - int error; - - if (!tomoyo_correct_word(pattern)) - return -EINVAL; - e.pattern = tomoyo_get_name(pattern); - if (!e.pattern) - return -ENOMEM; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list[TOMOYO_ID_PATTERN], - tomoyo_same_pattern); - tomoyo_put_name(e.pattern); - return error; -} - -/** - * tomoyo_pattern - Get patterned pathname. - * - * @filename: The filename to find patterned pathname. - * - * Returns pointer to pathname pattern if matched, @filename otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -const char *tomoyo_pattern(const struct tomoyo_path_info *filename) -{ - struct tomoyo_no_pattern *ptr; - const struct tomoyo_path_info *pattern = NULL; - - list_for_each_entry_rcu(ptr, &tomoyo_policy_list[TOMOYO_ID_PATTERN], - head.list) { - if (ptr->head.is_deleted) - continue; - if (!tomoyo_path_matches_pattern(filename, ptr->pattern)) - continue; - pattern = ptr->pattern; - if (tomoyo_strendswith(pattern->name, "/\\*")) { - /* Do nothing. Try to find the better match. */ - } else { - /* This would be the better match. Use this. */ - break; - } - } - if (pattern) - filename = pattern; - return filename->name; -} - -/** - * tomoyo_write_pattern - Write "struct tomoyo_no_pattern" list. - * - * @data: String to parse. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -int tomoyo_write_pattern(char *data, const bool is_delete) -{ - return tomoyo_update_file_pattern_entry(data, is_delete); -} - -static bool tomoyo_same_no_rewrite(const struct tomoyo_acl_head *a, - const struct tomoyo_acl_head *b) -{ - return container_of(a, struct tomoyo_no_rewrite, head)->pattern - == container_of(b, struct tomoyo_no_rewrite, head) - ->pattern; -} - -/** - * tomoyo_update_no_rewrite_entry - Update "struct tomoyo_no_rewrite" list. - * - * @pattern: Pathname pattern that are not rewritable by default. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -static int tomoyo_update_no_rewrite_entry(const char *pattern, - const bool is_delete) -{ - struct tomoyo_no_rewrite e = { }; - int error; - - if (!tomoyo_correct_word(pattern)) - return -EINVAL; - e.pattern = tomoyo_get_name(pattern); - if (!e.pattern) - return -ENOMEM; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list[TOMOYO_ID_NO_REWRITE], - tomoyo_same_no_rewrite); - tomoyo_put_name(e.pattern); - return error; -} - -/** - * tomoyo_no_rewrite_file - Check if the given pathname is not permitted to be rewrited. - * - * @filename: Filename to check. 
- * - * Returns true if @filename is specified by "deny_rewrite" directive, - * false otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -static bool tomoyo_no_rewrite_file(const struct tomoyo_path_info *filename) -{ - struct tomoyo_no_rewrite *ptr; - bool found = false; - - list_for_each_entry_rcu(ptr, &tomoyo_policy_list[TOMOYO_ID_NO_REWRITE], - head.list) { - if (ptr->head.is_deleted) - continue; - if (!tomoyo_path_matches_pattern(filename, ptr->pattern)) - continue; - found = true; - break; - } - return found; -} - -/** - * tomoyo_write_no_rewrite - Write "struct tomoyo_no_rewrite" list. - * - * @data: String to parse. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -int tomoyo_write_no_rewrite(char *data, const bool is_delete) -{ - return tomoyo_update_no_rewrite_entry(data, is_delete); + filename->name, buffer); } static bool tomoyo_check_path_acl(struct tomoyo_request_info *r, @@ -569,6 +310,15 @@ static bool tomoyo_same_path_acl(const struct tomoyo_acl_info *a, tomoyo_same_name_union(&p1->name, &p2->name); } +/** + * tomoyo_merge_path_acl - Merge duplicated "struct tomoyo_path_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * @is_delete: True for @a &= ~@b, false for @a |= @b. + * + * Returns true if @a is empty, false otherwise. + */ static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a, struct tomoyo_acl_info *b, const bool is_delete) @@ -577,19 +327,10 @@ static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a, ->perm; u16 perm = *a_perm; const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm; - if (is_delete) { + if (is_delete) perm &= ~b_perm; - if ((perm & TOMOYO_RW_MASK) != TOMOYO_RW_MASK) - perm &= ~(1 << TOMOYO_TYPE_READ_WRITE); - else if (!(perm & (1 << TOMOYO_TYPE_READ_WRITE))) - perm &= ~TOMOYO_RW_MASK; - } else { + else perm |= b_perm; - if ((perm & TOMOYO_RW_MASK) == TOMOYO_RW_MASK) - perm |= (1 << TOMOYO_TYPE_READ_WRITE); - else if (perm & (1 << TOMOYO_TYPE_READ_WRITE)) - perm |= TOMOYO_RW_MASK; - } *a_perm = perm; return !perm; } @@ -615,8 +356,6 @@ static int tomoyo_update_path_acl(const u8 type, const char *filename, .perm = 1 << type }; int error; - if (e.perm == (1 << TOMOYO_TYPE_READ_WRITE)) - e.perm |= TOMOYO_RW_MASK; if (!tomoyo_parse_name_union(filename, &e.name)) return -EINVAL; error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, @@ -775,7 +514,6 @@ int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, { int error; - next: r->type = tomoyo_p2mac[operation]; r->mode = tomoyo_get_mode(r->profile, r->type); if (r->mode == TOMOYO_CONFIG_DISABLED) @@ -785,10 +523,6 @@ int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, r->param.path.operation = operation; do { tomoyo_check_acl(r, tomoyo_check_path_acl); - if (!r->granted && operation == TOMOYO_TYPE_READ && - !r->domain->ignore_global_allow_read && - tomoyo_globally_readable_file(filename)) - r->granted = true; error = tomoyo_audit_path_log(r); /* * Do not retry for execute request, for alias may have @@ -796,16 +530,6 @@ int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, */ } while (error == TOMOYO_RETRY_REQUEST && operation != TOMOYO_TYPE_EXECUTE); - /* - * Since "allow_truncate" doesn't imply "allow_rewrite" permission, - * we need to check "allow_rewrite" permission if the filename is - * specified by "deny_rewrite" keyword. 
- */ - if (!error && operation == TOMOYO_TYPE_TRUNCATE && - tomoyo_no_rewrite_file(filename)) { - operation = TOMOYO_TYPE_REWRITE; - goto next; - } return error; } @@ -932,43 +656,26 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, struct tomoyo_request_info r; int idx; - if (!path->mnt || - (path->dentry->d_inode && S_ISDIR(path->dentry->d_inode->i_mode))) + if (!path->mnt) return 0; buf.name = NULL; r.mode = TOMOYO_CONFIG_DISABLED; idx = tomoyo_read_lock(); - /* - * If the filename is specified by "deny_rewrite" keyword, - * we need to check "allow_rewrite" permission when the filename is not - * opened for append mode or the filename is truncated at open time. - */ - if ((acc_mode & MAY_WRITE) && !(flag & O_APPEND) - && tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_REWRITE) + if (acc_mode && + tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_OPEN) != TOMOYO_CONFIG_DISABLED) { if (!tomoyo_get_realpath(&buf, path)) { error = -ENOMEM; goto out; } - if (tomoyo_no_rewrite_file(&buf)) - error = tomoyo_path_permission(&r, TOMOYO_TYPE_REWRITE, + if (acc_mode & MAY_READ) + error = tomoyo_path_permission(&r, TOMOYO_TYPE_READ, + &buf); + if (!error && (acc_mode & MAY_WRITE)) + error = tomoyo_path_permission(&r, (flag & O_APPEND) ? + TOMOYO_TYPE_APPEND : + TOMOYO_TYPE_WRITE, &buf); - } - if (!error && acc_mode && - tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_OPEN) - != TOMOYO_CONFIG_DISABLED) { - u8 operation; - if (!buf.name && !tomoyo_get_realpath(&buf, path)) { - error = -ENOMEM; - goto out; - } - if (acc_mode == (MAY_READ | MAY_WRITE)) - operation = TOMOYO_TYPE_READ_WRITE; - else if (acc_mode == MAY_READ) - operation = TOMOYO_TYPE_READ; - else - operation = TOMOYO_TYPE_WRITE; - error = tomoyo_path_permission(&r, operation, &buf); } out: kfree(buf.name); @@ -979,7 +686,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, } /** - * tomoyo_path_perm - Check permission for "unlink", "rmdir", "truncate", "symlink", "rewrite", "chroot" and "unmount". + * tomoyo_path_perm - Check permission for "unlink", "rmdir", "truncate", "symlink", "append", "chroot" and "unmount". * * @operation: Type of operation. * @path: Pointer to "struct path". 
@@ -988,9 +695,10 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, */ int tomoyo_path_perm(const u8 operation, struct path *path) { - int error = -ENOMEM; - struct tomoyo_path_info buf; struct tomoyo_request_info r; + int error; + struct tomoyo_path_info buf; + bool is_enforce; int idx; if (!path->mnt) @@ -998,17 +706,13 @@ int tomoyo_path_perm(const u8 operation, struct path *path) if (tomoyo_init_request_info(&r, NULL, tomoyo_p2mac[operation]) == TOMOYO_CONFIG_DISABLED) return 0; + is_enforce = (r.mode == TOMOYO_CONFIG_ENFORCING); + error = -ENOMEM; buf.name = NULL; idx = tomoyo_read_lock(); if (!tomoyo_get_realpath(&buf, path)) goto out; switch (operation) { - case TOMOYO_TYPE_REWRITE: - if (!tomoyo_no_rewrite_file(&buf)) { - error = 0; - goto out; - } - break; case TOMOYO_TYPE_RMDIR: case TOMOYO_TYPE_CHROOT: tomoyo_add_slash(&buf); @@ -1018,7 +722,7 @@ int tomoyo_path_perm(const u8 operation, struct path *path) out: kfree(buf.name); tomoyo_read_unlock(idx); - if (r.mode != TOMOYO_CONFIG_ENFORCING) + if (!is_enforce) error = 0; return error; } diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c index a877e4c3b101..ba799b49ee3a 100644 --- a/security/tomoyo/gc.c +++ b/security/tomoyo/gc.c @@ -32,27 +32,6 @@ static bool tomoyo_add_to_gc(const int type, struct list_head *element) return true; } -static void tomoyo_del_allow_read(struct list_head *element) -{ - struct tomoyo_readable_file *ptr = - container_of(element, typeof(*ptr), head.list); - tomoyo_put_name(ptr->filename); -} - -static void tomoyo_del_file_pattern(struct list_head *element) -{ - struct tomoyo_no_pattern *ptr = - container_of(element, typeof(*ptr), head.list); - tomoyo_put_name(ptr->pattern); -} - -static void tomoyo_del_no_rewrite(struct list_head *element) -{ - struct tomoyo_no_rewrite *ptr = - container_of(element, typeof(*ptr), head.list); - tomoyo_put_name(ptr->pattern); -} - static void tomoyo_del_transition_control(struct list_head *element) { struct tomoyo_transition_control *ptr = @@ -290,15 +269,6 @@ static void tomoyo_kfree_entry(void) case TOMOYO_ID_AGGREGATOR: tomoyo_del_aggregator(element); break; - case TOMOYO_ID_GLOBALLY_READABLE: - tomoyo_del_allow_read(element); - break; - case TOMOYO_ID_PATTERN: - tomoyo_del_file_pattern(element); - break; - case TOMOYO_ID_NO_REWRITE: - tomoyo_del_no_rewrite(element); - break; case TOMOYO_ID_MANAGER: tomoyo_del_manager(element); break; diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c index 162a864dba24..f1d9e1a9eff4 100644 --- a/security/tomoyo/mount.c +++ b/security/tomoyo/mount.c @@ -55,9 +55,8 @@ static int tomoyo_audit_mount_log(struct tomoyo_request_info *r) flags); return tomoyo_supervisor(r, TOMOYO_KEYWORD_ALLOW_MOUNT "%s %s %s 0x%lX\n", - tomoyo_pattern(r->param.mount.dev), - tomoyo_pattern(r->param.mount.dir), type, - flags); + r->param.mount.dev->name, + r->param.mount.dir->name, type, flags); } static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r, diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c index 95d3f9572237..2615c7d43960 100644 --- a/security/tomoyo/tomoyo.c +++ b/security/tomoyo/tomoyo.c @@ -93,6 +93,12 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm) return tomoyo_check_open_permission(domain, &bprm->file->f_path, O_RDONLY); } +static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) +{ + struct path path = { mnt, dentry }; + return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path); +} + static int tomoyo_path_truncate(struct path *path) { return 
tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path); @@ -176,9 +182,10 @@ static int tomoyo_path_rename(struct path *old_parent, static int tomoyo_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { - if (cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND)) - return tomoyo_path_perm(TOMOYO_TYPE_REWRITE, &file->f_path); - return 0; + if (!(cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))) + return 0; + return tomoyo_check_open_permission(tomoyo_domain(), &file->f_path, + O_WRONLY | (arg & O_APPEND)); } static int tomoyo_dentry_open(struct file *f, const struct cred *cred) @@ -258,6 +265,7 @@ static struct security_operations tomoyo_security_ops = { .path_mknod = tomoyo_path_mknod, .path_link = tomoyo_path_link, .path_rename = tomoyo_path_rename, + .inode_getattr = tomoyo_inode_getattr, .file_ioctl = tomoyo_file_ioctl, .path_chmod = tomoyo_path_chmod, .path_chown = tomoyo_path_chown, diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index 6d5393204d95..7fb9bbf7021a 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -911,44 +911,33 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r) if (!domain) return true; list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) { + u16 perm; + u8 i; if (ptr->is_deleted) continue; switch (ptr->type) { - u16 perm; - u8 i; case TOMOYO_TYPE_PATH_ACL: perm = container_of(ptr, struct tomoyo_path_acl, head) ->perm; - for (i = 0; i < TOMOYO_MAX_PATH_OPERATION; i++) - if (perm & (1 << i)) - count++; - if (perm & (1 << TOMOYO_TYPE_READ_WRITE)) - count -= 2; break; case TOMOYO_TYPE_PATH2_ACL: perm = container_of(ptr, struct tomoyo_path2_acl, head) ->perm; - for (i = 0; i < TOMOYO_MAX_PATH2_OPERATION; i++) - if (perm & (1 << i)) - count++; break; case TOMOYO_TYPE_PATH_NUMBER_ACL: perm = container_of(ptr, struct tomoyo_path_number_acl, head)->perm; - for (i = 0; i < TOMOYO_MAX_PATH_NUMBER_OPERATION; i++) - if (perm & (1 << i)) - count++; break; case TOMOYO_TYPE_MKDEV_ACL: perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm; - for (i = 0; i < TOMOYO_MAX_MKDEV_OPERATION; i++) - if (perm & (1 << i)) - count++; break; default: - count++; + perm = 1; } + for (i = 0; i < 16; i++) + if (perm & (1 << i)) + count++; } if (count < tomoyo_profile(domain->profile)->learning-> learning_max_entry) -- cgit v1.2.3 From b5bc60b4ce313b6dbb42e7d32915dcf0a07c2a68 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:16:03 +0900 Subject: TOMOYO: Cleanup part 2. Update (or temporarily remove) comments. Remove or replace some of #define lines. 
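The mount.c hunk below shows the main pattern behind this cleanup: the per-keyword TOMOYO_MOUNT_*_KEYWORD #defines become a single const string table indexed by an enum, and because the special-mount cases overwrite @type with a pointer taken from that table, tomoyo_audit_mount_log() and tomoyo_mount_acl() can compare the pointer itself instead of calling strcmp(). A minimal stand-alone sketch of that idiom, using hypothetical demo_* names rather than the actual TOMOYO symbols:

enum demo_special_mount {
	DEMO_MOUNT_BIND,
	DEMO_MOUNT_REMOUNT,
	DEMO_MAX_SPECIAL_MOUNT
};

static const char * const demo_mounts[DEMO_MAX_SPECIAL_MOUNT] = {
	[DEMO_MOUNT_BIND]    = "--bind",
	[DEMO_MOUNT_REMOUNT] = "--remount",
};

static int demo_is_remount(const char *type)
{
	/*
	 * Pointer comparison is valid only because every caller passes a
	 * pointer taken from demo_mounts[]; a string built elsewhere would
	 * still need strcmp().
	 */
	return type == demo_mounts[DEMO_MOUNT_REMOUNT];
}
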
Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/common.c | 38 ++-- security/tomoyo/common.h | 441 ++++++++++++++-------------------------- security/tomoyo/file.c | 3 +- security/tomoyo/mount.c | 79 ++++--- security/tomoyo/securityfs_if.c | 2 +- 5 files changed, 204 insertions(+), 359 deletions(-) diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 0776173b7d2b..1c340217a06a 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -643,7 +643,7 @@ static int tomoyo_update_manager_entry(const char *manager, static int tomoyo_write_manager(struct tomoyo_io_buffer *head) { char *data = head->write_buf; - bool is_delete = tomoyo_str_starts(&data, TOMOYO_KEYWORD_DELETE); + bool is_delete = tomoyo_str_starts(&data, "delete "); if (!strcmp(data, "manage_by_non_root")) { tomoyo_manage_by_non_root = !is_delete; @@ -830,7 +830,7 @@ static int tomoyo_delete_domain(char *domainname) static int tomoyo_write_domain2(char *data, struct tomoyo_domain_info *domain, const bool is_delete) { - if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_ALLOW_MOUNT)) + if (tomoyo_str_starts(&data, "allow_mount ")) return tomoyo_write_mount(data, domain, is_delete); return tomoyo_write_file(data, domain, is_delete); } @@ -852,9 +852,9 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) bool is_select = false; unsigned int profile; - if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_DELETE)) + if (tomoyo_str_starts(&data, "delete ")) is_delete = true; - else if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_SELECT)) + else if (tomoyo_str_starts(&data, "select ")) is_select = true; if (is_select && tomoyo_select_one(head, data)) return 0; @@ -875,17 +875,17 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) if (!domain) return -EINVAL; - if (sscanf(data, TOMOYO_KEYWORD_USE_PROFILE "%u", &profile) == 1 + if (sscanf(data, "use_profile %u", &profile) == 1 && profile < TOMOYO_MAX_PROFILES) { if (tomoyo_profile_ptr[profile] || !tomoyo_policy_loaded) domain->profile = (u8) profile; return 0; } - if (!strcmp(data, TOMOYO_KEYWORD_QUOTA_EXCEEDED)) { + if (!strcmp(data, "quota_exceeded")) { domain->quota_warned = !is_delete; return 0; } - if (!strcmp(data, TOMOYO_KEYWORD_TRANSITION_FAILED)) { + if (!strcmp(data, "transition_failed")) { domain->transition_failed = !is_delete; return 0; } @@ -1039,8 +1039,7 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) /* Print domainname and flags. 
*/ tomoyo_set_string(head, domain->domainname->name); tomoyo_set_lf(head); - tomoyo_io_printf(head, - TOMOYO_KEYWORD_USE_PROFILE "%u\n", + tomoyo_io_printf(head, "use_profile %u\n", domain->profile); if (domain->quota_warned) tomoyo_set_string(head, "quota_exceeded\n"); @@ -1192,17 +1191,15 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head) } static const char *tomoyo_transition_type[TOMOYO_MAX_TRANSITION_TYPE] = { - [TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE] - = TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN, - [TOMOYO_TRANSITION_CONTROL_INITIALIZE] - = TOMOYO_KEYWORD_INITIALIZE_DOMAIN, - [TOMOYO_TRANSITION_CONTROL_NO_KEEP] = TOMOYO_KEYWORD_NO_KEEP_DOMAIN, - [TOMOYO_TRANSITION_CONTROL_KEEP] = TOMOYO_KEYWORD_KEEP_DOMAIN + [TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE] = "no_initialize_domain", + [TOMOYO_TRANSITION_CONTROL_INITIALIZE] = "initialize_domain", + [TOMOYO_TRANSITION_CONTROL_NO_KEEP] = "no_keep_domain", + [TOMOYO_TRANSITION_CONTROL_KEEP] = "keep_domain", }; static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = { - [TOMOYO_PATH_GROUP] = TOMOYO_KEYWORD_PATH_GROUP, - [TOMOYO_NUMBER_GROUP] = TOMOYO_KEYWORD_NUMBER_GROUP + [TOMOYO_PATH_GROUP] = "path_group ", + [TOMOYO_NUMBER_GROUP] = "number_group ", }; /** @@ -1217,13 +1214,13 @@ static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = { static int tomoyo_write_exception(struct tomoyo_io_buffer *head) { char *data = head->write_buf; - bool is_delete = tomoyo_str_starts(&data, TOMOYO_KEYWORD_DELETE); + bool is_delete = tomoyo_str_starts(&data, "delete "); u8 i; static const struct { const char *keyword; int (*write) (char *, const bool); } tomoyo_callback[1] = { - { TOMOYO_KEYWORD_AGGREGATOR, tomoyo_write_aggregator }, + { "aggregator ", tomoyo_write_aggregator }, }; for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++) @@ -1324,8 +1321,7 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) { struct tomoyo_aggregator *ptr = container_of(acl, typeof(*ptr), head); - tomoyo_set_string(head, - TOMOYO_KEYWORD_AGGREGATOR); + tomoyo_set_string(head, "aggregator "); tomoyo_set_string(head, ptr->original_name->name); tomoyo_set_space(head); diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index a5d6e212b18f..d0645733c102 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -38,6 +38,7 @@ struct linux_binprm; /* Profile number is an integer between 0 and 255. */ #define TOMOYO_MAX_PROFILES 256 +/* Index numbers for operation mode. */ enum tomoyo_mode_index { TOMOYO_CONFIG_DISABLED, TOMOYO_CONFIG_LEARNING, @@ -46,6 +47,7 @@ enum tomoyo_mode_index { TOMOYO_CONFIG_USE_DEFAULT = 255 }; +/* Index numbers for entry type. */ enum tomoyo_policy_id { TOMOYO_ID_GROUP, TOMOYO_ID_PATH_GROUP, @@ -59,37 +61,26 @@ enum tomoyo_policy_id { TOMOYO_MAX_POLICY }; +/* Index numbers for group entries. */ enum tomoyo_group_id { TOMOYO_PATH_GROUP, TOMOYO_NUMBER_GROUP, TOMOYO_MAX_GROUP }; -/* Keywords for ACLs. 
*/ -#define TOMOYO_KEYWORD_AGGREGATOR "aggregator " -#define TOMOYO_KEYWORD_ALLOW_MOUNT "allow_mount " -#define TOMOYO_KEYWORD_ALLOW_READ "allow_read " -#define TOMOYO_KEYWORD_DELETE "delete " -#define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain " -#define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain " -#define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain " -#define TOMOYO_KEYWORD_NO_KEEP_DOMAIN "no_keep_domain " -#define TOMOYO_KEYWORD_PATH_GROUP "path_group " -#define TOMOYO_KEYWORD_NUMBER_GROUP "number_group " -#define TOMOYO_KEYWORD_SELECT "select " -#define TOMOYO_KEYWORD_USE_PROFILE "use_profile " -#define TOMOYO_KEYWORD_QUOTA_EXCEEDED "quota_exceeded" -#define TOMOYO_KEYWORD_TRANSITION_FAILED "transition_failed" /* A domain definition starts with . */ #define TOMOYO_ROOT_NAME "" #define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1) -/* Value type definition. */ -#define TOMOYO_VALUE_TYPE_INVALID 0 -#define TOMOYO_VALUE_TYPE_DECIMAL 1 -#define TOMOYO_VALUE_TYPE_OCTAL 2 -#define TOMOYO_VALUE_TYPE_HEXADECIMAL 3 +/* Index numbers for type of numeric values. */ +enum tomoyo_value_type { + TOMOYO_VALUE_TYPE_INVALID, + TOMOYO_VALUE_TYPE_DECIMAL, + TOMOYO_VALUE_TYPE_OCTAL, + TOMOYO_VALUE_TYPE_HEXADECIMAL, +}; +/* Index numbers for domain transition control keywords. */ enum tomoyo_transition_type { /* Do not change this order, */ TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE, @@ -108,7 +99,7 @@ enum tomoyo_acl_entry_type_index { TOMOYO_TYPE_MOUNT_ACL, }; -/* Index numbers for File Controls. */ +/* Index numbers for access controls with one pathname. */ enum tomoyo_path_acl_index { TOMOYO_TYPE_EXECUTE, TOMOYO_TYPE_READ, @@ -130,6 +121,7 @@ enum tomoyo_mkdev_acl_index { TOMOYO_MAX_MKDEV_OPERATION }; +/* Index numbers for access controls with two pathnames. */ enum tomoyo_path2_acl_index { TOMOYO_TYPE_LINK, TOMOYO_TYPE_RENAME, @@ -137,6 +129,7 @@ enum tomoyo_path2_acl_index { TOMOYO_MAX_PATH2_OPERATION }; +/* Index numbers for access controls with one pathname and one number. */ enum tomoyo_path_number_acl_index { TOMOYO_TYPE_CREATE, TOMOYO_TYPE_MKDIR, @@ -149,6 +142,7 @@ enum tomoyo_path_number_acl_index { TOMOYO_MAX_PATH_NUMBER_OPERATION }; +/* Index numbers for /sys/kernel/security/tomoyo/ interfaces. */ enum tomoyo_securityfs_interface_index { TOMOYO_DOMAINPOLICY, TOMOYO_EXCEPTIONPOLICY, @@ -162,6 +156,19 @@ enum tomoyo_securityfs_interface_index { TOMOYO_MANAGER }; +/* Index numbers for special mount operations. */ +enum tomoyo_special_mount { + TOMOYO_MOUNT_BIND, /* mount --bind /source /dest */ + TOMOYO_MOUNT_MOVE, /* mount --move /old /new */ + TOMOYO_MOUNT_REMOUNT, /* mount -o remount /dir */ + TOMOYO_MOUNT_MAKE_UNBINDABLE, /* mount --make-unbindable /dir */ + TOMOYO_MOUNT_MAKE_PRIVATE, /* mount --make-private /dir */ + TOMOYO_MOUNT_MAKE_SLAVE, /* mount --make-slave /dir */ + TOMOYO_MOUNT_MAKE_SHARED, /* mount --make-shared /dir */ + TOMOYO_MAX_SPECIAL_MOUNT +}; + +/* Index numbers for functionality. */ enum tomoyo_mac_index { TOMOYO_MAC_FILE_EXECUTE, TOMOYO_MAC_FILE_OPEN, @@ -189,37 +196,30 @@ enum tomoyo_mac_index { TOMOYO_MAX_MAC_INDEX }; +/* Index numbers for category of functionality. */ enum tomoyo_mac_category_index { TOMOYO_MAC_CATEGORY_FILE, TOMOYO_MAX_MAC_CATEGORY_INDEX }; -#define TOMOYO_RETRY_REQUEST 1 /* Retry this request. */ - -/********** Structure definitions. **********/ - /* - * tomoyo_acl_head is a structure which is used for holding elements not in - * domain policy. - * It has following fields. + * Retry this request. 
Returned by tomoyo_supervisor() if policy violation has + * occurred in enforcing mode and the userspace daemon decided to retry. * - * (1) "list" which is linked to tomoyo_policy_list[] . - * (2) "is_deleted" is a bool which is true if marked as deleted, false - * otherwise. + * We must choose a positive value in order to distinguish "granted" (which is + * 0) and "rejected" (which is a negative value) and "retry". */ +#define TOMOYO_RETRY_REQUEST 1 + +/********** Structure definitions. **********/ + +/* Common header for holding ACL entries. */ struct tomoyo_acl_head { struct list_head list; bool is_deleted; } __packed; -/* - * tomoyo_request_info is a structure which is used for holding - * - * (1) Domain information of current process. - * (2) How many retries are made for this request. - * (3) Profile number used for this request. - * (4) Access control mode of the profile. - */ +/* Structure for request info. */ struct tomoyo_request_info { struct tomoyo_domain_info *domain; /* For holding parameters. */ @@ -228,11 +228,13 @@ struct tomoyo_request_info { const struct tomoyo_path_info *filename; /* For using wildcards at tomoyo_find_next_domain(). */ const struct tomoyo_path_info *matched_path; + /* One of values in "enum tomoyo_path_acl_index". */ u8 operation; } path; struct { const struct tomoyo_path_info *filename1; const struct tomoyo_path_info *filename2; + /* One of values in "enum tomoyo_path2_acl_index". */ u8 operation; } path2; struct { @@ -240,11 +242,16 @@ struct tomoyo_request_info { unsigned int mode; unsigned int major; unsigned int minor; + /* One of values in "enum tomoyo_mkdev_acl_index". */ u8 operation; } mkdev; struct { const struct tomoyo_path_info *filename; unsigned long number; + /* + * One of values in + * "enum tomoyo_path_number_acl_index". + */ u8 operation; } path_number; struct { @@ -263,26 +270,7 @@ struct tomoyo_request_info { u8 type; }; -/* - * tomoyo_path_info is a structure which is used for holding a string data - * used by TOMOYO. - * This structure has several fields for supporting pattern matching. - * - * (1) "name" is the '\0' terminated string data. - * (2) "hash" is full_name_hash(name, strlen(name)). - * This allows tomoyo_pathcmp() to compare by hash before actually compare - * using strcmp(). - * (3) "const_len" is the length of the initial segment of "name" which - * consists entirely of non wildcard characters. In other words, the length - * which we can compare two strings using strncmp(). - * (4) "is_dir" is a bool which is true if "name" ends with "/", - * false otherwise. - * TOMOYO distinguishes directory and non-directory. A directory ends with - * "/" and non-directory does not end with "/". - * (5) "is_patterned" is a bool which is true if "name" contains wildcard - * characters, false otherwise. This allows TOMOYO to use "hash" and - * strcmp() for string comparison if "is_patterned" is false. - */ +/* Structure for holding a token. */ struct tomoyo_path_info { const char *name; u32 hash; /* = full_name_hash(name, strlen(name)) */ @@ -291,27 +279,30 @@ struct tomoyo_path_info { bool is_patterned; /* = tomoyo_path_contains_pattern(name) */ }; -/* - * tomoyo_name is a structure which is used for linking - * "struct tomoyo_path_info" into tomoyo_name_list . - */ +/* Structure for holding string data. */ struct tomoyo_name { struct list_head list; atomic_t users; struct tomoyo_path_info entry; }; +/* Structure for holding a word. */ struct tomoyo_name_union { + /* Either @filename or @group is NULL. 
*/ const struct tomoyo_path_info *filename; struct tomoyo_group *group; + /* True if @group != NULL, false if @filename != NULL. */ u8 is_group; }; +/* Structure for holding a number. */ struct tomoyo_number_union { unsigned long values[2]; - struct tomoyo_group *group; + struct tomoyo_group *group; /* Maybe NULL. */ + /* One of values in "enum tomoyo_value_type". */ u8 min_type; u8 max_type; + /* True if @group != NULL, false otherwise. */ u8 is_group; }; @@ -335,56 +326,14 @@ struct tomoyo_number_group { struct tomoyo_number_union number; }; -/* - * tomoyo_acl_info is a structure which is used for holding - * - * (1) "list" which is linked to the ->acl_info_list of - * "struct tomoyo_domain_info" - * (2) "is_deleted" is a bool which is true if this domain is marked as - * "deleted", false otherwise. - * (3) "type" which tells type of the entry. - * - * Packing "struct tomoyo_acl_info" allows - * "struct tomoyo_path_acl" to embed "u16" and "struct tomoyo_path2_acl" - * "struct tomoyo_path_number_acl" "struct tomoyo_mkdev_acl" to embed - * "u8" without enlarging their structure size. - */ +/* Common header for individual entries. */ struct tomoyo_acl_info { struct list_head list; bool is_deleted; - u8 type; /* = one of values in "enum tomoyo_acl_entry_type_index". */ + u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */ } __packed; -/* - * tomoyo_domain_info is a structure which is used for holding permissions - * (e.g. "allow_read /lib/libc-2.5.so") given to each domain. - * It has following fields. - * - * (1) "list" which is linked to tomoyo_domain_list . - * (2) "acl_info_list" which is linked to "struct tomoyo_acl_info". - * (3) "domainname" which holds the name of the domain. - * (4) "profile" which remembers profile number assigned to this domain. - * (5) "is_deleted" is a bool which is true if this domain is marked as - * "deleted", false otherwise. - * (6) "quota_warned" is a bool which is used for suppressing warning message - * when learning mode learned too much entries. - * (7) "transition_failed" is a bool which is set to true when this domain was - * unable to create a new domain at tomoyo_find_next_domain() because the - * name of the domain to be created was too long or it could not allocate - * memory. If set to true, more than one process continued execve() - * without domain transition. - * (9) "users" is an atomic_t that holds how many "struct cred"->security - * are referring this "struct tomoyo_domain_info". If is_deleted == true - * and users == 0, this struct will be kfree()d upon next garbage - * collection. - * - * A domain's lifecycle is an analogy of files on / directory. - * Multiple domains with the same domainname cannot be created (as with - * creating files with the same filename fails with -EEXIST). - * If a process reached a domain, that process can reside in that domain after - * that domain is marked as "deleted" (as with a process can access an already - * open()ed file after that file was unlink()ed). - */ +/* Structure for domain information. */ struct tomoyo_domain_info { struct list_head list; struct list_head acl_info_list; @@ -398,63 +347,32 @@ struct tomoyo_domain_info { }; /* - * tomoyo_path_acl is a structure which is used for holding an - * entry with one pathname operation (e.g. open(), mkdir()). - * It has following fields. - * - * (1) "head" which is a "struct tomoyo_acl_info". - * (2) "perm" which is a bitmask of permitted operations. - * (3) "name" is the pathname. 
- * - * Directives held by this structure are "allow_execute", "allow_read", - * "allow_write", "allow_append", "allow_unlink", "allow_rmdir", - * "allow_truncate", "allow_symlink", "allow_chroot" and "allow_unmount". + * Structure for "file execute", "file read", "file write", "file append", + * "file unlink", "file getattr", "file rmdir", "file truncate", + * "file symlink", "file chroot" and "file unmount" directive. */ struct tomoyo_path_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */ - u16 perm; + u16 perm; /* Bitmask of values in "enum tomoyo_path_acl_index". */ struct tomoyo_name_union name; }; /* - * tomoyo_path_number_acl is a structure which is used for holding an - * entry with one pathname and one number operation. - * It has following fields. - * - * (1) "head" which is a "struct tomoyo_acl_info". - * (2) "perm" which is a bitmask of permitted operations. - * (3) "name" is the pathname. - * (4) "number" is the numeric value. - * - * Directives held by this structure are "allow_create", "allow_mkdir", - * "allow_ioctl", "allow_mkfifo", "allow_mksock", "allow_chmod", "allow_chown" - * and "allow_chgrp". - * + * Structure for "file create", "file mkdir", "file mkfifo", "file mksock", + * "file ioctl", "file chmod", "file chown" and "file chgrp" directive. */ struct tomoyo_path_number_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_NUMBER_ACL */ + /* Bitmask of values in "enum tomoyo_path_number_acl_index". */ u8 perm; struct tomoyo_name_union name; struct tomoyo_number_union number; }; -/* - * tomoyo_mkdev_acl is a structure which is used for holding an - * entry with one pathname and three numbers operation. - * It has following fields. - * - * (1) "head" which is a "struct tomoyo_acl_info". - * (2) "perm" which is a bitmask of permitted operations. - * (3) "mode" is the create mode. - * (4) "major" is the major number of device node. - * (5) "minor" is the minor number of device node. - * - * Directives held by this structure are "allow_mkchar", "allow_mkblock". - * - */ +/* Structure for "file mkblock" and "file mkchar" directive. */ struct tomoyo_mkdev_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MKDEV_ACL */ - u8 perm; + u8 perm; /* Bitmask of values in "enum tomoyo_mkdev_acl_index". */ struct tomoyo_name_union name; struct tomoyo_number_union mode; struct tomoyo_number_union major; @@ -462,38 +380,16 @@ struct tomoyo_mkdev_acl { }; /* - * tomoyo_path2_acl is a structure which is used for holding an - * entry with two pathnames operation (i.e. link(), rename() and pivot_root()). - * It has following fields. - * - * (1) "head" which is a "struct tomoyo_acl_info". - * (2) "perm" which is a bitmask of permitted operations. - * (3) "name1" is the source/old pathname. - * (4) "name2" is the destination/new pathname. - * - * Directives held by this structure are "allow_rename", "allow_link" and - * "allow_pivot_root". + * Structure for "file rename", "file link" and "file pivot_root" directive. */ struct tomoyo_path2_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH2_ACL */ - u8 perm; + u8 perm; /* Bitmask of values in "enum tomoyo_path2_acl_index". */ struct tomoyo_name_union name1; struct tomoyo_name_union name2; }; -/* - * tomoyo_mount_acl is a structure which is used for holding an - * entry for mount operation. - * It has following fields. - * - * (1) "head" which is a "struct tomoyo_acl_info". - * (2) "dev_name" is the device name. - * (3) "dir_name" is the mount point. - * (4) "fs_type" is the filesystem type. 
- * (5) "flags" is the mount flags. - * - * Directive held by this structure is "allow_mount". - */ +/* Structure for "file mount" directive. */ struct tomoyo_mount_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MOUNT_ACL */ struct tomoyo_name_union dev_name; @@ -550,18 +446,8 @@ struct tomoyo_io_buffer { }; /* - * tomoyo_transition_control is a structure which is used for holding - * "initialize_domain"/"no_initialize_domain"/"keep_domain"/"no_keep_domain" - * entries. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "type" is type of this entry. - * (3) "is_last_name" is a bool which is true if "domainname" is "the last - * component of a domainname", false otherwise. - * (4) "domainname" which is "a domainname" or "the last component of a - * domainname". - * (5) "program" which is a program's pathname. + * Structure for "initialize_domain"/"no_initialize_domain"/"keep_domain"/ + * "no_keep_domain" keyword. */ struct tomoyo_transition_control { struct tomoyo_acl_head head; @@ -572,32 +458,14 @@ struct tomoyo_transition_control { const struct tomoyo_path_info *program; /* Maybe NULL */ }; -/* - * tomoyo_aggregator is a structure which is used for holding - * "aggregator" entries. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "original_name" which is originally requested name. - * (3) "aggregated_name" which is name to rewrite. - */ +/* Structure for "aggregator" keyword. */ struct tomoyo_aggregator { struct tomoyo_acl_head head; const struct tomoyo_path_info *original_name; const struct tomoyo_path_info *aggregated_name; }; -/* - * tomoyo_manager is a structure which is used for holding list of - * domainnames or programs which are permitted to modify configuration via - * /sys/kernel/security/tomoyo/ interface. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "is_domain" is a bool which is true if "manager" is a domainname, false - * otherwise. - * (3) "manager" is a domainname or a program's pathname. - */ +/* Structure for policy manager. */ struct tomoyo_manager { struct tomoyo_acl_head head; bool is_domain; /* True if manager is a domainname. */ @@ -612,6 +480,7 @@ struct tomoyo_preference { bool permissive_verbose; }; +/* Structure for /sys/kernel/security/tomnoyo/profile interface. */ struct tomoyo_profile { const struct tomoyo_path_info *comment; struct tomoyo_preference *learning; @@ -624,148 +493,80 @@ struct tomoyo_profile { /********** Function prototypes. **********/ -/* Check whether the given string starts with the given keyword. */ bool tomoyo_str_starts(char **src, const char *find); -/* Get tomoyo_realpath() of current process. */ const char *tomoyo_get_exe(void); -/* Format string. */ void tomoyo_normalize_line(unsigned char *buffer); -/* Print warning or error message on console. */ void tomoyo_warn_log(struct tomoyo_request_info *r, const char *fmt, ...) __attribute__ ((format(printf, 2, 3))); -/* Check all profiles currently assigned to domains are defined. */ void tomoyo_check_profile(void); -/* Open operation for /sys/kernel/security/tomoyo/ interface. */ int tomoyo_open_control(const u8 type, struct file *file); -/* Close /sys/kernel/security/tomoyo/ interface. */ int tomoyo_close_control(struct file *file); -/* Poll operation for /sys/kernel/security/tomoyo/ interface. */ int tomoyo_poll_control(struct file *file, poll_table *wait); -/* Read operation for /sys/kernel/security/tomoyo/ interface. 
*/ int tomoyo_read_control(struct file *file, char __user *buffer, const int buffer_len); -/* Write operation for /sys/kernel/security/tomoyo/ interface. */ int tomoyo_write_control(struct file *file, const char __user *buffer, const int buffer_len); -/* Check whether the domain has too many ACL entries to hold. */ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r); -/* Print out of memory warning message. */ void tomoyo_warn_oom(const char *function); -/* Check whether the given name matches the given name_union. */ const struct tomoyo_path_info * tomoyo_compare_name_union(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr); -/* Check whether the given number matches the given number_union. */ bool tomoyo_compare_number_union(const unsigned long value, const struct tomoyo_number_union *ptr); int tomoyo_get_mode(const u8 profile, const u8 index); void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) __attribute__ ((format(printf, 2, 3))); -/* Check whether the domainname is correct. */ bool tomoyo_correct_domain(const unsigned char *domainname); -/* Check whether the token is correct. */ bool tomoyo_correct_path(const char *filename); bool tomoyo_correct_word(const char *string); -/* Check whether the token can be a domainname. */ bool tomoyo_domain_def(const unsigned char *buffer); bool tomoyo_parse_name_union(const char *filename, struct tomoyo_name_union *ptr); -/* Check whether the given filename matches the given path_group. */ const struct tomoyo_path_info * tomoyo_path_matches_group(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group); -/* Check whether the given value matches the given number_group. */ bool tomoyo_number_matches_group(const unsigned long min, const unsigned long max, const struct tomoyo_group *group); -/* Check whether the given filename matches the given pattern. */ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename, const struct tomoyo_path_info *pattern); - bool tomoyo_parse_number_union(char *data, struct tomoyo_number_union *num); -/* Tokenize a line. */ bool tomoyo_tokenize(char *buffer, char *w[], size_t size); -/* Write domain policy violation warning message to console? */ bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain); -/* Fill "struct tomoyo_request_info". */ int tomoyo_init_request_info(struct tomoyo_request_info *r, struct tomoyo_domain_info *domain, const u8 index); -/* Check permission for mount operation. */ -int tomoyo_mount_permission(char *dev_name, struct path *path, char *type, - unsigned long flags, void *data_page); -/* Create "aggregator" entry in exception policy. */ +int tomoyo_mount_permission(char *dev_name, struct path *path, + const char *type, unsigned long flags, + void *data_page); int tomoyo_write_aggregator(char *data, const bool is_delete); int tomoyo_write_transition_control(char *data, const bool is_delete, const u8 type); -/* - * Create "allow_execute", "allow_read", "allow_write", "allow_append", - * "allow_create", "allow_unlink", "allow_mkdir", "allow_rmdir", - * "allow_mkfifo", "allow_mksock", "allow_mkblock", "allow_mkchar", - * "allow_truncate", "allow_symlink", "allow_rename" and "allow_link" entry - * in domain policy. - */ int tomoyo_write_file(char *data, struct tomoyo_domain_info *domain, const bool is_delete); -/* Create "allow_mount" entry in domain policy. 
*/ int tomoyo_write_mount(char *data, struct tomoyo_domain_info *domain, const bool is_delete); -/* Create "path_group"/"number_group" entry in exception policy. */ int tomoyo_write_group(char *data, const bool is_delete, const u8 type); int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) __attribute__ ((format(printf, 2, 3))); -/* Find a domain by the given name. */ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname); -/* Find or create a domain by the given name. */ struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname, const u8 profile); struct tomoyo_profile *tomoyo_profile(const u8 profile); -/* - * Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group". - */ struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 type); - -/* Check mode for specified functionality. */ unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain, const u8 index); -/* Fill in "struct tomoyo_path_info" members. */ void tomoyo_fill_path_info(struct tomoyo_path_info *ptr); -/* Run policy loader when /sbin/init starts. */ void tomoyo_load_policy(const char *filename); - void tomoyo_put_number_union(struct tomoyo_number_union *ptr); - -/* Convert binary string to ascii string. */ char *tomoyo_encode(const char *str); - -/* - * Returns realpath(3) of the given pathname except that - * ignores chroot'ed root and does not follow the final symlink. - */ char *tomoyo_realpath_nofollow(const char *pathname); -/* - * Returns realpath(3) of the given pathname except that - * ignores chroot'ed root and the pathname is already solved. - */ char *tomoyo_realpath_from_path(struct path *path); - -/* Check memory quota. */ bool tomoyo_memory_ok(void *ptr); void *tomoyo_commit_ok(void *data, const unsigned int size); - -/* - * Keep the given name on the RAM. - * The RAM is shared, so NEVER try to modify or kfree() the returned name. - */ const struct tomoyo_path_info *tomoyo_get_name(const char *name); - -/* Check for memory usage. */ void tomoyo_read_memory_counter(struct tomoyo_io_buffer *head); - -/* Set memory quota. */ int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head); - -/* Initialize mm related code. */ void __init tomoyo_mm_init(void); int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, const struct tomoyo_path_info *filename); @@ -779,18 +580,11 @@ int tomoyo_path_perm(const u8 operation, struct path *path); int tomoyo_path2_perm(const u8 operation, struct path *path1, struct path *path2); int tomoyo_find_next_domain(struct linux_binprm *bprm); - void tomoyo_print_ulong(char *buffer, const int buffer_len, const unsigned long value, const u8 type); - -/* Drop refcount on tomoyo_name_union. */ void tomoyo_put_name_union(struct tomoyo_name_union *ptr); - -/* Run garbage collector. */ void tomoyo_run_gc(void); - void tomoyo_memory_free(void *ptr); - int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, bool is_delete, struct tomoyo_domain_info *domain, bool (*check_duplicate) (const struct tomoyo_acl_info @@ -841,17 +635,36 @@ extern unsigned int tomoyo_query_memory_size; /********** Inlined functions. **********/ +/** + * tomoyo_read_lock - Take lock for protecting policy. + * + * Returns index number for tomoyo_read_unlock(). + */ static inline int tomoyo_read_lock(void) { return srcu_read_lock(&tomoyo_ss); } +/** + * tomoyo_read_unlock - Release lock for protecting policy. + * + * @idx: Index number returned by tomoyo_read_lock(). + * + * Returns nothing. 
+ */ static inline void tomoyo_read_unlock(int idx) { srcu_read_unlock(&tomoyo_ss, idx); } -/* strcmp() for "struct tomoyo_path_info" structure. */ +/** + * tomoyo_pathcmp - strcmp() for "struct tomoyo_path_info" structure. + * + * @a: Pointer to "struct tomoyo_path_info". + * @b: Pointer to "struct tomoyo_path_info". + * + * Returns true if @a == @b, false otherwise. + */ static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a, const struct tomoyo_path_info *b) { @@ -882,6 +695,13 @@ static inline bool tomoyo_invalid(const unsigned char c) return c && (c <= ' ' || c >= 127); } +/** + * tomoyo_put_name - Drop reference on "struct tomoyo_name". + * + * @name: Pointer to "struct tomoyo_path_info". Maybe NULL. + * + * Returns nothing. + */ static inline void tomoyo_put_name(const struct tomoyo_path_info *name) { if (name) { @@ -891,17 +711,36 @@ static inline void tomoyo_put_name(const struct tomoyo_path_info *name) } } +/** + * tomoyo_put_group - Drop reference on "struct tomoyo_group". + * + * @group: Pointer to "struct tomoyo_group". Maybe NULL. + * + * Returns nothing. + */ static inline void tomoyo_put_group(struct tomoyo_group *group) { if (group) atomic_dec(&group->users); } +/** + * tomoyo_domain - Get "struct tomoyo_domain_info" for current thread. + * + * Returns pointer to "struct tomoyo_domain_info" for current thread. + */ static inline struct tomoyo_domain_info *tomoyo_domain(void) { return current_cred()->security; } +/** + * tomoyo_real_domain - Get "struct tomoyo_domain_info" for specified thread. + * + * @task: Pointer to "struct task_struct". + * + * Returns pointer to "struct tomoyo_security" for specified thread. + */ static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct *task) { @@ -909,24 +748,40 @@ static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct } static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *p1, - const struct tomoyo_acl_info *p2) + const struct tomoyo_acl_info *p2) { return p1->type == p2->type; } +/** + * tomoyo_same_name_union - Check for duplicated "struct tomoyo_name_union" entry. + * + * @a: Pointer to "struct tomoyo_name_union". + * @b: Pointer to "struct tomoyo_name_union". + * + * Returns true if @a == @b, false otherwise. + */ static inline bool tomoyo_same_name_union -(const struct tomoyo_name_union *p1, const struct tomoyo_name_union *p2) +(const struct tomoyo_name_union *a, const struct tomoyo_name_union *b) { - return p1->filename == p2->filename && p1->group == p2->group && - p1->is_group == p2->is_group; + return a->filename == b->filename && a->group == b->group && + a->is_group == b->is_group; } +/** + * tomoyo_same_number_union - Check for duplicated "struct tomoyo_number_union" entry. + * + * @a: Pointer to "struct tomoyo_number_union". + * @b: Pointer to "struct tomoyo_number_union". + * + * Returns true if @a == @b, false otherwise. 
+ */ static inline bool tomoyo_same_number_union -(const struct tomoyo_number_union *p1, const struct tomoyo_number_union *p2) +(const struct tomoyo_number_union *a, const struct tomoyo_number_union *b) { - return p1->values[0] == p2->values[0] && p1->values[1] == p2->values[1] - && p1->group == p2->group && p1->min_type == p2->min_type && - p1->max_type == p2->max_type && p1->is_group == p2->is_group; + return a->values[0] == b->values[0] && a->values[1] == b->values[1] && + a->group == b->group && a->min_type == b->min_type && + a->max_type == b->max_type && a->is_group == b->is_group; } /** diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 41ed7de44ef1..332380288078 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -212,8 +212,7 @@ static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r) /** * tomoyo_audit_path_number_log - Audit path/number request log. * - * @r: Pointer to "struct tomoyo_request_info". - * @error: Error code. + * @r: Pointer to "struct tomoyo_request_info". * * Returns 0 on success, negative value otherwise. */ diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c index f1d9e1a9eff4..5cfc72078742 100644 --- a/security/tomoyo/mount.c +++ b/security/tomoyo/mount.c @@ -7,22 +7,16 @@ #include #include "common.h" -/* Keywords for mount restrictions. */ - -/* Allow to call 'mount --bind /source_dir /dest_dir' */ -#define TOMOYO_MOUNT_BIND_KEYWORD "--bind" -/* Allow to call 'mount --move /old_dir /new_dir ' */ -#define TOMOYO_MOUNT_MOVE_KEYWORD "--move" -/* Allow to call 'mount -o remount /dir ' */ -#define TOMOYO_MOUNT_REMOUNT_KEYWORD "--remount" -/* Allow to call 'mount --make-unbindable /dir' */ -#define TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD "--make-unbindable" -/* Allow to call 'mount --make-private /dir' */ -#define TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD "--make-private" -/* Allow to call 'mount --make-slave /dir' */ -#define TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD "--make-slave" -/* Allow to call 'mount --make-shared /dir' */ -#define TOMOYO_MOUNT_MAKE_SHARED_KEYWORD "--make-shared" +/* String table for special mount operations. */ +static const char * const tomoyo_mounts[TOMOYO_MAX_SPECIAL_MOUNT] = { + [TOMOYO_MOUNT_BIND] = "--bind", + [TOMOYO_MOUNT_MOVE] = "--move", + [TOMOYO_MOUNT_REMOUNT] = "--remount", + [TOMOYO_MOUNT_MAKE_UNBINDABLE] = "--make-unbindable", + [TOMOYO_MOUNT_MAKE_PRIVATE] = "--make-private", + [TOMOYO_MOUNT_MAKE_SLAVE] = "--make-slave", + [TOMOYO_MOUNT_MAKE_SHARED] = "--make-shared", +}; /** * tomoyo_audit_mount_log - Audit mount log. 
@@ -39,22 +33,21 @@ static int tomoyo_audit_mount_log(struct tomoyo_request_info *r) const unsigned long flags = r->param.mount.flags; if (r->granted) return 0; - if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD)) + if (type == tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]) tomoyo_warn_log(r, "mount -o remount %s 0x%lX", dir, flags); - else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD) - || !strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD)) + else if (type == tomoyo_mounts[TOMOYO_MOUNT_BIND] + || type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) tomoyo_warn_log(r, "mount %s %s %s 0x%lX", type, dev, dir, flags); - else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD)) + else if (type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE] || + type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE] || + type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE] || + type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]) tomoyo_warn_log(r, "mount %s %s 0x%lX", type, dir, flags); else tomoyo_warn_log(r, "mount -t %s %s %s 0x%lX", type, dev, dir, flags); - return tomoyo_supervisor(r, - TOMOYO_KEYWORD_ALLOW_MOUNT "%s %s %s 0x%lX\n", + return tomoyo_supervisor(r, "allow_mount %s %s %s 0x%lX\n", r->param.mount.dev->name, r->param.mount.dir->name, type, flags); } @@ -85,7 +78,8 @@ static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r, * Caller holds tomoyo_read_lock(). */ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name, - struct path *dir, char *type, unsigned long flags) + struct path *dir, const char *type, + unsigned long flags) { struct path path; struct file_system_type *fstype = NULL; @@ -115,15 +109,15 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name, tomoyo_fill_path_info(&rdir); /* Compare fs name. */ - if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD)) { + if (type == tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]) { /* dev_name is ignored. */ - } else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD)) { + } else if (type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE] || + type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE] || + type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE] || + type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]) { /* dev_name is ignored. */ - } else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD)) { + } else if (type == tomoyo_mounts[TOMOYO_MOUNT_BIND] || + type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) { need_dev = -1; /* dev_name is a directory */ } else { fstype = get_fs_type(type); @@ -189,8 +183,9 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name, * * Returns 0 on success, negative value otherwise. 
*/ -int tomoyo_mount_permission(char *dev_name, struct path *path, char *type, - unsigned long flags, void *data_page) +int tomoyo_mount_permission(char *dev_name, struct path *path, + const char *type, unsigned long flags, + void *data_page) { struct tomoyo_request_info r; int error; @@ -202,31 +197,31 @@ int tomoyo_mount_permission(char *dev_name, struct path *path, char *type, if ((flags & MS_MGC_MSK) == MS_MGC_VAL) flags &= ~MS_MGC_MSK; if (flags & MS_REMOUNT) { - type = TOMOYO_MOUNT_REMOUNT_KEYWORD; + type = tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]; flags &= ~MS_REMOUNT; } if (flags & MS_MOVE) { - type = TOMOYO_MOUNT_MOVE_KEYWORD; + type = tomoyo_mounts[TOMOYO_MOUNT_MOVE]; flags &= ~MS_MOVE; } if (flags & MS_BIND) { - type = TOMOYO_MOUNT_BIND_KEYWORD; + type = tomoyo_mounts[TOMOYO_MOUNT_BIND]; flags &= ~MS_BIND; } if (flags & MS_UNBINDABLE) { - type = TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD; + type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE]; flags &= ~MS_UNBINDABLE; } if (flags & MS_PRIVATE) { - type = TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD; + type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE]; flags &= ~MS_PRIVATE; } if (flags & MS_SLAVE) { - type = TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD; + type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE]; flags &= ~MS_SLAVE; } if (flags & MS_SHARED) { - type = TOMOYO_MOUNT_MAKE_SHARED_KEYWORD; + type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]; flags &= ~MS_SHARED; } if (!type) diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c index e43d5554b506..a5bd76d7f6be 100644 --- a/security/tomoyo/securityfs_if.c +++ b/security/tomoyo/securityfs_if.c @@ -38,7 +38,7 @@ static int tomoyo_release(struct inode *inode, struct file *file) } /** - * tomoyo_poll - poll() for /proc/ccs/ interface. + * tomoyo_poll - poll() for /sys/kernel/security/tomoyo/ interface. * * @file: Pointer to "struct file". * @wait: Pointer to "poll_table". -- cgit v1.2.3 From 0df7e8b8f1c25c10820bdc679555f2fbfb897ca0 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:16:36 +0900 Subject: TOMOYO: Cleanup part 3. Use common structure for ACL with "struct list_head" + "atomic_t". Use array/struct where possible. Remove is_group from "struct tomoyo_name_union"/"struct tomoyo_number_union". Pass "struct file"->private_data rather than "struct file". Update some of comments. Bring tomoyo_same_acl_head() from common.h to domain.c . Bring tomoyo_invalid()/tomoyo_valid() from common.h to util.c . 
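Two of the points above can be read directly from the common.h hunks that follow: entries that used to carry their own "struct list_head" and "atomic_t" now share a small common header ("struct tomoyo_shared_acl_head"), and the name/number unions signal a group reference simply by a non-NULL @group pointer, so the separate is_group flag goes away. A minimal sketch of the resulting shapes, under hypothetical demo_* names (not the real TOMOYO declarations):

#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/types.h>

struct demo_path_info;
struct demo_group;

/* Shared header: list linkage plus a reference count. */
struct demo_shared_head {
	struct list_head list;
	atomic_t users;
};

/* Either @filename or @group is NULL; no explicit is_group flag is kept. */
struct demo_name_union {
	const struct demo_path_info *filename;
	struct demo_group *group;
};

static inline bool demo_is_group_member(const struct demo_name_union *ptr)
{
	return ptr->group != NULL;	/* replaces the old ->is_group test */
}
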
Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/common.c | 43 ++++----- security/tomoyo/common.h | 78 +++++---------- security/tomoyo/domain.c | 17 +++- security/tomoyo/file.c | 208 +++++++++++++++++++++++++++++++++------- security/tomoyo/gc.c | 127 +++++++++++++++++++++--- security/tomoyo/memory.c | 16 ++-- security/tomoyo/mount.c | 31 ++++-- security/tomoyo/securityfs_if.c | 6 +- security/tomoyo/util.c | 37 +++++-- 9 files changed, 410 insertions(+), 153 deletions(-) diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 1c340217a06a..2e6792ded357 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -192,7 +192,7 @@ static void tomoyo_print_name_union(struct tomoyo_io_buffer *head, const struct tomoyo_name_union *ptr) { tomoyo_set_space(head); - if (ptr->is_group) { + if (ptr->group) { tomoyo_set_string(head, "@"); tomoyo_set_string(head, ptr->group->group_name->name); } else { @@ -210,15 +210,15 @@ static void tomoyo_print_number_union(struct tomoyo_io_buffer *head, const struct tomoyo_number_union *ptr) { tomoyo_set_space(head); - if (ptr->is_group) { + if (ptr->group) { tomoyo_set_string(head, "@"); tomoyo_set_string(head, ptr->group->group_name->name); } else { int i; unsigned long min = ptr->values[0]; const unsigned long max = ptr->values[1]; - u8 min_type = ptr->min_type; - const u8 max_type = ptr->max_type; + u8 min_type = ptr->value_type[0]; + const u8 max_type = ptr->value_type[1]; char buffer[128]; buffer[0] = '\0'; for (i = 0; i < 2; i++) { @@ -769,7 +769,7 @@ static bool tomoyo_select_one(struct tomoyo_io_buffer *head, const char *data) domain = tomoyo_find_domain(data + 7); } else return false; - head->write_var1 = domain; + head->w.domain = domain; /* Accessing read_buf is safe because head->io_sem is held. */ if (!head->read_buf) return true; /* Do nothing if open(O_WRONLY). */ @@ -847,7 +847,7 @@ static int tomoyo_write_domain2(char *data, struct tomoyo_domain_info *domain, static int tomoyo_write_domain(struct tomoyo_io_buffer *head) { char *data = head->write_buf; - struct tomoyo_domain_info *domain = head->write_var1; + struct tomoyo_domain_info *domain = head->w.domain; bool is_delete = false; bool is_select = false; unsigned int profile; @@ -869,7 +869,7 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) domain = tomoyo_find_domain(data); else domain = tomoyo_assign_domain(data, 0); - head->write_var1 = domain; + head->w.domain = domain; return 0; } if (!domain) @@ -1250,7 +1250,7 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx) { list_for_each_cookie(head->r.group, &tomoyo_group_list[idx]) { struct tomoyo_group *group = - list_entry(head->r.group, typeof(*group), list); + list_entry(head->r.group, typeof(*group), head.list); list_for_each_cookie(head->r.acl, &group->member_list) { struct tomoyo_acl_head *ptr = list_entry(head->r.acl, typeof(*ptr), list); @@ -1874,7 +1874,7 @@ int tomoyo_poll_control(struct file *file, poll_table *wait) /** * tomoyo_read_control - read() for /sys/kernel/security/tomoyo/ interface. * - * @file: Pointer to "struct file". + * @head: Pointer to "struct tomoyo_io_buffer". * @buffer: Poiner to buffer to write to. * @buffer_len: Size of @buffer. * @@ -1882,11 +1882,10 @@ int tomoyo_poll_control(struct file *file, poll_table *wait) * * Caller holds tomoyo_read_lock(). 
*/ -int tomoyo_read_control(struct file *file, char __user *buffer, +int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, const int buffer_len) { int len; - struct tomoyo_io_buffer *head = file->private_data; if (!head->read) return -ENOSYS; @@ -1906,7 +1905,7 @@ int tomoyo_read_control(struct file *file, char __user *buffer, /** * tomoyo_write_control - write() for /sys/kernel/security/tomoyo/ interface. * - * @file: Pointer to "struct file". + * @head: Pointer to "struct tomoyo_io_buffer". * @buffer: Pointer to buffer to read from. * @buffer_len: Size of @buffer. * @@ -1914,10 +1913,9 @@ int tomoyo_read_control(struct file *file, char __user *buffer, * * Caller holds tomoyo_read_lock(). */ -int tomoyo_write_control(struct file *file, const char __user *buffer, - const int buffer_len) +int tomoyo_write_control(struct tomoyo_io_buffer *head, + const char __user *buffer, const int buffer_len) { - struct tomoyo_io_buffer *head = file->private_data; int error = buffer_len; int avail_len = buffer_len; char *cp0 = head->write_buf; @@ -1935,7 +1933,7 @@ int tomoyo_write_control(struct file *file, const char __user *buffer, /* Read a line and dispatch it to the policy handler. */ while (avail_len > 0) { char c; - if (head->write_avail >= head->writebuf_size - 1) { + if (head->w.avail >= head->writebuf_size - 1) { error = -ENOMEM; break; } else if (get_user(c, buffer)) { @@ -1944,11 +1942,11 @@ int tomoyo_write_control(struct file *file, const char __user *buffer, } buffer++; avail_len--; - cp0[head->write_avail++] = c; + cp0[head->w.avail++] = c; if (c != '\n') continue; - cp0[head->write_avail - 1] = '\0'; - head->write_avail = 0; + cp0[head->w.avail - 1] = '\0'; + head->w.avail = 0; tomoyo_normalize_line(cp0); head->write(head); } @@ -1959,15 +1957,14 @@ int tomoyo_write_control(struct file *file, const char __user *buffer, /** * tomoyo_close_control - close() for /sys/kernel/security/tomoyo/ interface. * - * @file: Pointer to "struct file". + * @head: Pointer to "struct tomoyo_io_buffer". * * Releases memory and returns 0. * * Caller looses tomoyo_read_lock(). */ -int tomoyo_close_control(struct file *file) +int tomoyo_close_control(struct tomoyo_io_buffer *head) { - struct tomoyo_io_buffer *head = file->private_data; const bool is_write = !!head->write_buf; /* @@ -1984,8 +1981,6 @@ int tomoyo_close_control(struct file *file) kfree(head->write_buf); head->write_buf = NULL; kfree(head); - head = NULL; - file->private_data = NULL; if (is_write) tomoyo_run_gc(); return 0; diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index d0645733c102..7aa55eef67bd 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -219,6 +219,12 @@ struct tomoyo_acl_head { bool is_deleted; } __packed; +/* Common header for shared entries. */ +struct tomoyo_shared_acl_head { + struct list_head list; + atomic_t users; +} __packed; + /* Structure for request info. */ struct tomoyo_request_info { struct tomoyo_domain_info *domain; @@ -281,8 +287,7 @@ struct tomoyo_path_info { /* Structure for holding string data. */ struct tomoyo_name { - struct list_head list; - atomic_t users; + struct tomoyo_shared_acl_head head; struct tomoyo_path_info entry; }; @@ -291,8 +296,6 @@ struct tomoyo_name_union { /* Either @filename or @group is NULL. */ const struct tomoyo_path_info *filename; struct tomoyo_group *group; - /* True if @group != NULL, false if @filename != NULL. */ - u8 is_group; }; /* Structure for holding a number. 
*/ @@ -300,18 +303,14 @@ struct tomoyo_number_union { unsigned long values[2]; struct tomoyo_group *group; /* Maybe NULL. */ /* One of values in "enum tomoyo_value_type". */ - u8 min_type; - u8 max_type; - /* True if @group != NULL, false otherwise. */ - u8 is_group; + u8 value_type[2]; }; /* Structure for "path_group"/"number_group" directive. */ struct tomoyo_group { - struct list_head list; + struct tomoyo_shared_acl_head head; const struct tomoyo_path_info *group_name; struct list_head member_list; - atomic_t users; }; /* Structure for "path_group" directive. */ @@ -429,16 +428,18 @@ struct tomoyo_io_buffer { bool print_execute_only; const char *w[TOMOYO_MAX_IO_READ_QUEUE]; } r; - /* The position currently writing to. */ - struct tomoyo_domain_info *write_var1; + struct { + /* The position currently writing to. */ + struct tomoyo_domain_info *domain; + /* Bytes available for writing. */ + int avail; + } w; /* Buffer for reading. */ char *read_buf; /* Size of read buffer. */ int readbuf_size; /* Buffer for writing. */ char *write_buf; - /* Bytes available for writing. */ - int write_avail; /* Size of write buffer. */ int writebuf_size; /* Type of this interface. */ @@ -500,12 +501,12 @@ void tomoyo_warn_log(struct tomoyo_request_info *r, const char *fmt, ...) __attribute__ ((format(printf, 2, 3))); void tomoyo_check_profile(void); int tomoyo_open_control(const u8 type, struct file *file); -int tomoyo_close_control(struct file *file); +int tomoyo_close_control(struct tomoyo_io_buffer *head); int tomoyo_poll_control(struct file *file, poll_table *wait); -int tomoyo_read_control(struct file *file, char __user *buffer, +int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, const int buffer_len); -int tomoyo_write_control(struct file *file, const char __user *buffer, - const int buffer_len); +int tomoyo_write_control(struct tomoyo_io_buffer *head, + const char __user *buffer, const int buffer_len); bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r); void tomoyo_warn_oom(const char *function); const struct tomoyo_path_info * @@ -671,30 +672,6 @@ static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a, return a->hash != b->hash || strcmp(a->name, b->name); } -/** - * tomoyo_valid - Check whether the character is a valid char. - * - * @c: The character to check. - * - * Returns true if @c is a valid character, false otherwise. - */ -static inline bool tomoyo_valid(const unsigned char c) -{ - return c > ' ' && c < 127; -} - -/** - * tomoyo_invalid - Check whether the character is an invalid char. - * - * @c: The character to check. - * - * Returns true if @c is an invalid character, false otherwise. - */ -static inline bool tomoyo_invalid(const unsigned char c) -{ - return c && (c <= ' ' || c >= 127); -} - /** * tomoyo_put_name - Drop reference on "struct tomoyo_name". 
* @@ -707,7 +684,7 @@ static inline void tomoyo_put_name(const struct tomoyo_path_info *name) if (name) { struct tomoyo_name *ptr = container_of(name, typeof(*ptr), entry); - atomic_dec(&ptr->users); + atomic_dec(&ptr->head.users); } } @@ -721,7 +698,7 @@ static inline void tomoyo_put_name(const struct tomoyo_path_info *name) static inline void tomoyo_put_group(struct tomoyo_group *group) { if (group) - atomic_dec(&group->users); + atomic_dec(&group->head.users); } /** @@ -747,12 +724,6 @@ static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct return task_cred_xxx(task, security); } -static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *p1, - const struct tomoyo_acl_info *p2) -{ - return p1->type == p2->type; -} - /** * tomoyo_same_name_union - Check for duplicated "struct tomoyo_name_union" entry. * @@ -764,8 +735,7 @@ static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *p1, static inline bool tomoyo_same_name_union (const struct tomoyo_name_union *a, const struct tomoyo_name_union *b) { - return a->filename == b->filename && a->group == b->group && - a->is_group == b->is_group; + return a->filename == b->filename && a->group == b->group; } /** @@ -780,8 +750,8 @@ static inline bool tomoyo_same_number_union (const struct tomoyo_number_union *a, const struct tomoyo_number_union *b) { return a->values[0] == b->values[0] && a->values[1] == b->values[1] && - a->group == b->group && a->min_type == b->min_type && - a->max_type == b->max_type && a->is_group == b->is_group; + a->group == b->group && a->value_type[0] == b->value_type[0] && + a->value_type[1] == b->value_type[1]; } /** diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 355b536262b1..43977083254b 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -58,6 +58,20 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, return error; } +/** + * tomoyo_same_acl_head - Check for duplicated "struct tomoyo_acl_info" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b, false otherwise. + */ +static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *a, + const struct tomoyo_acl_info *b) +{ + return a->type == b->type; +} + /** * tomoyo_update_domain - Update an entry for domain policy. * @@ -88,7 +102,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, if (mutex_lock_interruptible(&tomoyo_policy_lock)) return error; list_for_each_entry_rcu(entry, &domain->acl_info_list, list) { - if (!check_duplicate(entry, new_entry)) + if (!tomoyo_same_acl_head(entry, new_entry) || + !check_duplicate(entry, new_entry)) continue; if (merge_duplicate) entry->is_deleted = merge_duplicate(entry, new_entry, diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 332380288078..4259e0a136d8 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -49,6 +49,9 @@ const char *tomoyo_path_number_keyword[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { [TOMOYO_TYPE_CHGRP] = "chgrp", }; +/* + * Mapping table from "enum tomoyo_path_acl_index" to "enum tomoyo_mac_index". 
+ */ static const u8 tomoyo_p2mac[TOMOYO_MAX_PATH_OPERATION] = { [TOMOYO_TYPE_EXECUTE] = TOMOYO_MAC_FILE_EXECUTE, [TOMOYO_TYPE_READ] = TOMOYO_MAC_FILE_OPEN, @@ -63,17 +66,27 @@ static const u8 tomoyo_p2mac[TOMOYO_MAX_PATH_OPERATION] = { [TOMOYO_TYPE_UMOUNT] = TOMOYO_MAC_FILE_UMOUNT, }; +/* + * Mapping table from "enum tomoyo_mkdev_acl_index" to "enum tomoyo_mac_index". + */ static const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = { [TOMOYO_TYPE_MKBLOCK] = TOMOYO_MAC_FILE_MKBLOCK, [TOMOYO_TYPE_MKCHAR] = TOMOYO_MAC_FILE_MKCHAR, }; +/* + * Mapping table from "enum tomoyo_path2_acl_index" to "enum tomoyo_mac_index". + */ static const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = { [TOMOYO_TYPE_LINK] = TOMOYO_MAC_FILE_LINK, [TOMOYO_TYPE_RENAME] = TOMOYO_MAC_FILE_RENAME, [TOMOYO_TYPE_PIVOT_ROOT] = TOMOYO_MAC_FILE_PIVOT_ROOT, }; +/* + * Mapping table from "enum tomoyo_path_number_acl_index" to + * "enum tomoyo_mac_index". + */ static const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { [TOMOYO_TYPE_CREATE] = TOMOYO_MAC_FILE_CREATE, [TOMOYO_TYPE_MKDIR] = TOMOYO_MAC_FILE_MKDIR, @@ -85,41 +98,76 @@ static const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { [TOMOYO_TYPE_CHGRP] = TOMOYO_MAC_FILE_CHGRP, }; +/** + * tomoyo_put_name_union - Drop reference on "struct tomoyo_name_union". + * + * @ptr: Pointer to "struct tomoyo_name_union". + * + * Returns nothing. + */ void tomoyo_put_name_union(struct tomoyo_name_union *ptr) { - if (!ptr) - return; - if (ptr->is_group) - tomoyo_put_group(ptr->group); - else - tomoyo_put_name(ptr->filename); + tomoyo_put_group(ptr->group); + tomoyo_put_name(ptr->filename); } +/** + * tomoyo_compare_name_union - Check whether a name matches "struct tomoyo_name_union" or not. + * + * @name: Pointer to "struct tomoyo_path_info". + * @ptr: Pointer to "struct tomoyo_name_union". + * + * Returns "struct tomoyo_path_info" if @name matches @ptr, NULL otherwise. + */ const struct tomoyo_path_info * tomoyo_compare_name_union(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr) { - if (ptr->is_group) + if (ptr->group) return tomoyo_path_matches_group(name, ptr->group); if (tomoyo_path_matches_pattern(name, ptr->filename)) return ptr->filename; return NULL; } +/** + * tomoyo_put_number_union - Drop reference on "struct tomoyo_number_union". + * + * @ptr: Pointer to "struct tomoyo_number_union". + * + * Returns nothing. + */ void tomoyo_put_number_union(struct tomoyo_number_union *ptr) { - if (ptr && ptr->is_group) - tomoyo_put_group(ptr->group); + tomoyo_put_group(ptr->group); } +/** + * tomoyo_compare_number_union - Check whether a value matches "struct tomoyo_number_union" or not. + * + * @value: Number to check. + * @ptr: Pointer to "struct tomoyo_number_union". + * + * Returns true if @value matches @ptr, false otherwise. + */ bool tomoyo_compare_number_union(const unsigned long value, const struct tomoyo_number_union *ptr) { - if (ptr->is_group) + if (ptr->group) return tomoyo_number_matches_group(value, value, ptr->group); return value >= ptr->values[0] && value <= ptr->values[1]; } +/** + * tomoyo_add_slash - Add trailing '/' if needed. + * + * @buf: Pointer to "struct tomoyo_path_info". + * + * Returns nothing. + * + * @buf must be generated by tomoyo_encode() because this function does not + * allocate memory for adding '/'. 
+ */ static void tomoyo_add_slash(struct tomoyo_path_info *buf) { if (buf->is_dir) @@ -247,6 +295,18 @@ static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r) filename->name, buffer); } +/** + * tomoyo_check_path_acl - Check permission for path operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + * + * To be able to use wildcard for domain transition, this function sets + * matching entry on success. Since the caller holds tomoyo_read_lock(), + * it is safe to set matching entry. + */ static bool tomoyo_check_path_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { @@ -261,6 +321,14 @@ static bool tomoyo_check_path_acl(struct tomoyo_request_info *r, return false; } +/** + * tomoyo_check_path_number_acl - Check permission for path number operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + */ static bool tomoyo_check_path_number_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { @@ -273,6 +341,14 @@ static bool tomoyo_check_path_number_acl(struct tomoyo_request_info *r, &acl->name); } +/** + * tomoyo_check_path2_acl - Check permission for path path operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + */ static bool tomoyo_check_path2_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { @@ -284,8 +360,16 @@ static bool tomoyo_check_path2_acl(struct tomoyo_request_info *r, &acl->name2); } +/** + * tomoyo_check_mkdev_acl - Check permission for path number number number operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + */ static bool tomoyo_check_mkdev_acl(struct tomoyo_request_info *r, - const struct tomoyo_acl_info *ptr) + const struct tomoyo_acl_info *ptr) { const struct tomoyo_mkdev_acl *acl = container_of(ptr, typeof(*acl), head); @@ -300,13 +384,20 @@ static bool tomoyo_check_mkdev_acl(struct tomoyo_request_info *r, &acl->name); } +/** + * tomoyo_same_path_acl - Check for duplicated "struct tomoyo_path_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b except permission bits, false otherwise. + */ static bool tomoyo_same_path_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head); const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head); - return tomoyo_same_acl_head(&p1->head, &p2->head) && - tomoyo_same_name_union(&p1->name, &p2->name); + return tomoyo_same_name_union(&p1->name, &p2->name); } /** @@ -364,23 +455,37 @@ static int tomoyo_update_path_acl(const u8 type, const char *filename, return error; } +/** + * tomoyo_same_mkdev_acl - Check for duplicated "struct tomoyo_mkdev_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b except permission bits, false otherwise. 
+ */ static bool tomoyo_same_mkdev_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { - const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), - head); - const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), - head); - return tomoyo_same_acl_head(&p1->head, &p2->head) - && tomoyo_same_name_union(&p1->name, &p2->name) - && tomoyo_same_number_union(&p1->mode, &p2->mode) - && tomoyo_same_number_union(&p1->major, &p2->major) - && tomoyo_same_number_union(&p1->minor, &p2->minor); + const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head); + const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head); + return tomoyo_same_name_union(&p1->name, &p2->name) && + tomoyo_same_number_union(&p1->mode, &p2->mode) && + tomoyo_same_number_union(&p1->major, &p2->major) && + tomoyo_same_number_union(&p1->minor, &p2->minor); } +/** + * tomoyo_merge_mkdev_acl - Merge duplicated "struct tomoyo_mkdev_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * @is_delete: True for @a &= ~@b, false for @a |= @b. + * + * Returns true if @a is empty, false otherwise. + */ static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a, - struct tomoyo_acl_info *b, - const bool is_delete) + struct tomoyo_acl_info *b, + const bool is_delete) { u8 *const a_perm = &container_of(a, struct tomoyo_mkdev_acl, head)->perm; @@ -411,9 +516,9 @@ static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a, * Caller holds tomoyo_read_lock(). */ static int tomoyo_update_mkdev_acl(const u8 type, const char *filename, - char *mode, char *major, char *minor, - struct tomoyo_domain_info * const - domain, const bool is_delete) + char *mode, char *major, char *minor, + struct tomoyo_domain_info * const domain, + const bool is_delete) { struct tomoyo_mkdev_acl e = { .head.type = TOMOYO_TYPE_MKDEV_ACL, @@ -436,16 +541,32 @@ static int tomoyo_update_mkdev_acl(const u8 type, const char *filename, return error; } +/** + * tomoyo_same_path2_acl - Check for duplicated "struct tomoyo_path2_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b except permission bits, false otherwise. + */ static bool tomoyo_same_path2_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head); const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head); - return tomoyo_same_acl_head(&p1->head, &p2->head) - && tomoyo_same_name_union(&p1->name1, &p2->name1) - && tomoyo_same_name_union(&p1->name2, &p2->name2); + return tomoyo_same_name_union(&p1->name1, &p2->name1) && + tomoyo_same_name_union(&p1->name2, &p2->name2); } +/** + * tomoyo_merge_path2_acl - Merge duplicated "struct tomoyo_path2_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * @is_delete: True for @a &= ~@b, false for @a |= @b. + * + * Returns true if @a is empty, false otherwise. + */ static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a, struct tomoyo_acl_info *b, const bool is_delete) @@ -532,6 +653,14 @@ int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, return error; } +/** + * tomoyo_same_path_number_acl - Check for duplicated "struct tomoyo_path_number_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". 
+ * + * Returns true if @a == @b except permission bits, false otherwise. + */ static bool tomoyo_same_path_number_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { @@ -539,11 +668,19 @@ static bool tomoyo_same_path_number_acl(const struct tomoyo_acl_info *a, head); const struct tomoyo_path_number_acl *p2 = container_of(b, typeof(*p2), head); - return tomoyo_same_acl_head(&p1->head, &p2->head) - && tomoyo_same_name_union(&p1->name, &p2->name) - && tomoyo_same_number_union(&p1->number, &p2->number); + return tomoyo_same_name_union(&p1->name, &p2->name) && + tomoyo_same_number_union(&p1->number, &p2->number); } +/** + * tomoyo_merge_path_number_acl - Merge duplicated "struct tomoyo_path_number_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * @is_delete: True for @a &= ~@b, false for @a |= @b. + * + * Returns true if @a is empty, false otherwise. + */ static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a, struct tomoyo_acl_info *b, const bool is_delete) @@ -575,8 +712,7 @@ static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a, static int tomoyo_update_path_number_acl(const u8 type, const char *filename, char *number, struct tomoyo_domain_info * const - domain, - const bool is_delete) + domain, const bool is_delete) { struct tomoyo_path_number_acl e = { .head.type = TOMOYO_TYPE_PATH_NUMBER_ACL, @@ -737,7 +873,7 @@ int tomoyo_path_perm(const u8 operation, struct path *path) * Returns 0 on success, negative value otherwise. */ int tomoyo_mkdev_perm(const u8 operation, struct path *path, - const unsigned int mode, unsigned int dev) + const unsigned int mode, unsigned int dev) { struct tomoyo_request_info r; int error = -ENOMEM; diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c index ba799b49ee3a..de14030823cd 100644 --- a/security/tomoyo/gc.c +++ b/security/tomoyo/gc.c @@ -13,13 +13,30 @@ struct tomoyo_gc { struct list_head list; - int type; + enum tomoyo_policy_id type; struct list_head *element; }; static LIST_HEAD(tomoyo_gc_queue); static DEFINE_MUTEX(tomoyo_gc_mutex); -/* Caller holds tomoyo_policy_lock mutex. */ +/** + * tomoyo_add_to_gc - Add an entry to to be deleted list. + * + * @type: One of values in "enum tomoyo_policy_id". + * @element: Pointer to "struct list_head". + * + * Returns true on success, false otherwise. + * + * Caller holds tomoyo_policy_lock mutex. + * + * Adding an entry needs kmalloc(). Thus, if we try to add thousands of + * entries at once, it will take too long time. Thus, do not add more than 128 + * entries per a scan. But to be able to handle worst case where all entries + * are in-use, we accept one more entry per a scan. + * + * If we use singly linked list using "struct list_head"->prev (which is + * LIST_POISON2), we can avoid kmalloc(). + */ static bool tomoyo_add_to_gc(const int type, struct list_head *element) { struct tomoyo_gc *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); @@ -32,6 +49,13 @@ static bool tomoyo_add_to_gc(const int type, struct list_head *element) return true; } +/** + * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. 
+ */ static void tomoyo_del_transition_control(struct list_head *element) { struct tomoyo_transition_control *ptr = @@ -40,6 +64,13 @@ static void tomoyo_del_transition_control(struct list_head *element) tomoyo_put_name(ptr->program); } +/** + * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ static void tomoyo_del_aggregator(struct list_head *element) { struct tomoyo_aggregator *ptr = @@ -48,6 +79,13 @@ static void tomoyo_del_aggregator(struct list_head *element) tomoyo_put_name(ptr->aggregated_name); } +/** + * tomoyo_del_manager - Delete members in "struct tomoyo_manager". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ static void tomoyo_del_manager(struct list_head *element) { struct tomoyo_manager *ptr = @@ -55,6 +93,13 @@ static void tomoyo_del_manager(struct list_head *element) tomoyo_put_name(ptr->manager); } +/** + * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ static void tomoyo_del_acl(struct list_head *element) { struct tomoyo_acl_info *acl = @@ -145,12 +190,26 @@ static bool tomoyo_del_domain(struct list_head *element) } +/** + * tomoyo_del_name - Delete members in "struct tomoyo_name". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ static void tomoyo_del_name(struct list_head *element) { const struct tomoyo_name *ptr = - container_of(element, typeof(*ptr), list); + container_of(element, typeof(*ptr), head.list); } +/** + * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ static void tomoyo_del_path_group(struct list_head *element) { struct tomoyo_path_group *member = @@ -158,20 +217,43 @@ static void tomoyo_del_path_group(struct list_head *element) tomoyo_put_name(member->member_name); } +/** + * tomoyo_del_group - Delete "struct tomoyo_group". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ static void tomoyo_del_group(struct list_head *element) { struct tomoyo_group *group = - container_of(element, typeof(*group), list); + container_of(element, typeof(*group), head.list); tomoyo_put_name(group->group_name); } +/** + * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ static void tomoyo_del_number_group(struct list_head *element) { struct tomoyo_number_group *member = container_of(element, typeof(*member), head.list); } -static bool tomoyo_collect_member(struct list_head *member_list, int id) +/** + * tomoyo_collect_member - Delete elements with "struct tomoyo_acl_head". + * + * @id: One of values in "enum tomoyo_policy_id". + * @member_list: Pointer to "struct list_head". + * + * Returns true if some elements are deleted, false otherwise. + */ +static bool tomoyo_collect_member(const enum tomoyo_policy_id id, + struct list_head *member_list) { struct tomoyo_acl_head *member; list_for_each_entry(member, member_list, list) { @@ -195,13 +277,18 @@ static bool tomoyo_collect_acl(struct tomoyo_domain_info *domain) return true; } +/** + * tomoyo_collect_entry - Scan lists for deleted elements. + * + * Returns nothing. 
+ */ static void tomoyo_collect_entry(void) { int i; if (mutex_lock_interruptible(&tomoyo_policy_lock)) return; for (i = 0; i < TOMOYO_MAX_POLICY; i++) { - if (!tomoyo_collect_member(&tomoyo_policy_list[i], i)) + if (!tomoyo_collect_member(i, &tomoyo_policy_list[i])) goto unlock; } { @@ -222,10 +309,10 @@ static void tomoyo_collect_entry(void) } for (i = 0; i < TOMOYO_MAX_HASH; i++) { struct tomoyo_name *ptr; - list_for_each_entry_rcu(ptr, &tomoyo_name_list[i], list) { - if (atomic_read(&ptr->users)) + list_for_each_entry_rcu(ptr, &tomoyo_name_list[i], head.list) { + if (atomic_read(&ptr->head.users)) continue; - if (!tomoyo_add_to_gc(TOMOYO_ID_NAME, &ptr->list)) + if (!tomoyo_add_to_gc(TOMOYO_ID_NAME, &ptr->head.list)) goto unlock; } } @@ -241,13 +328,14 @@ static void tomoyo_collect_entry(void) id = TOMOYO_ID_NUMBER_GROUP; break; } - list_for_each_entry(group, list, list) { - if (!tomoyo_collect_member(&group->member_list, id)) + list_for_each_entry(group, list, head.list) { + if (!tomoyo_collect_member(id, &group->member_list)) goto unlock; if (!list_empty(&group->member_list) || - atomic_read(&group->users)) + atomic_read(&group->head.users)) continue; - if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP, &group->list)) + if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP, + &group->head.list)) goto unlock; } } @@ -291,6 +379,8 @@ static void tomoyo_kfree_entry(void) case TOMOYO_ID_NUMBER_GROUP: tomoyo_del_number_group(element); break; + case TOMOYO_MAX_POLICY: + break; } tomoyo_memory_free(element); list_del(&p->list); @@ -298,6 +388,17 @@ static void tomoyo_kfree_entry(void) } } +/** + * tomoyo_gc_thread - Garbage collector thread function. + * + * @unused: Unused. + * + * In case OOM-killer choose this thread for termination, we create this thread + * as a short live thread whenever /sys/kernel/security/tomoyo/ interface was + * close()d. + * + * Returns 0. 
+ */ static int tomoyo_gc_thread(void *unused) { daemonize("GC for TOMOYO"); diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c index 42a7b1ba8cbf..dfef0cb268dc 100644 --- a/security/tomoyo/memory.c +++ b/security/tomoyo/memory.c @@ -110,10 +110,10 @@ struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 idx) return NULL; if (mutex_lock_interruptible(&tomoyo_policy_lock)) goto out; - list_for_each_entry(group, &tomoyo_group_list[idx], list) { + list_for_each_entry(group, &tomoyo_group_list[idx], head.list) { if (e.group_name != group->group_name) continue; - atomic_inc(&group->users); + atomic_inc(&group->head.users); found = true; break; } @@ -121,8 +121,8 @@ struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 idx) struct tomoyo_group *entry = tomoyo_commit_ok(&e, sizeof(e)); if (entry) { INIT_LIST_HEAD(&entry->member_list); - atomic_set(&entry->users, 1); - list_add_tail_rcu(&entry->list, + atomic_set(&entry->head.users, 1); + list_add_tail_rcu(&entry->head.list, &tomoyo_group_list[idx]); group = entry; found = true; @@ -164,10 +164,10 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name) head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)]; if (mutex_lock_interruptible(&tomoyo_policy_lock)) return NULL; - list_for_each_entry(ptr, head, list) { + list_for_each_entry(ptr, head, head.list) { if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name)) continue; - atomic_inc(&ptr->users); + atomic_inc(&ptr->head.users); goto out; } ptr = kzalloc(sizeof(*ptr) + len, GFP_NOFS); @@ -183,9 +183,9 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name) atomic_add(allocated_len, &tomoyo_policy_memory_size); ptr->entry.name = ((char *) ptr) + sizeof(*ptr); memmove((char *) ptr->entry.name, name, len); - atomic_set(&ptr->users, 1); + atomic_set(&ptr->head.users, 1); tomoyo_fill_path_info(&ptr->entry); - list_add_tail(&ptr->list, head); + list_add_tail(&ptr->head.list, head); out: mutex_unlock(&tomoyo_policy_lock); return ptr ? &ptr->entry : NULL; diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c index 5cfc72078742..7649dbc6a56b 100644 --- a/security/tomoyo/mount.c +++ b/security/tomoyo/mount.c @@ -52,16 +52,28 @@ static int tomoyo_audit_mount_log(struct tomoyo_request_info *r) r->param.mount.dir->name, type, flags); } +/** + * tomoyo_check_mount_acl - Check permission for path path path number operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + */ static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { const struct tomoyo_mount_acl *acl = container_of(ptr, typeof(*acl), head); - return tomoyo_compare_number_union(r->param.mount.flags, &acl->flags) && - tomoyo_compare_name_union(r->param.mount.type, &acl->fs_type) && - tomoyo_compare_name_union(r->param.mount.dir, &acl->dir_name) && + return tomoyo_compare_number_union(r->param.mount.flags, + &acl->flags) && + tomoyo_compare_name_union(r->param.mount.type, + &acl->fs_type) && + tomoyo_compare_name_union(r->param.mount.dir, + &acl->dir_name) && (!r->param.mount.need_dev || - tomoyo_compare_name_union(r->param.mount.dev, &acl->dev_name)); + tomoyo_compare_name_union(r->param.mount.dev, + &acl->dev_name)); } /** @@ -232,13 +244,20 @@ int tomoyo_mount_permission(char *dev_name, struct path *path, return error; } +/** + * tomoyo_same_mount_acl - Check for duplicated "struct tomoyo_mount_acl" entry. 
+ * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b, false otherwise. + */ static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head); const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head); - return tomoyo_same_acl_head(&p1->head, &p2->head) && - tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) && + return tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) && tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) && tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) && tomoyo_same_number_union(&p1->flags, &p2->flags); diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c index a5bd76d7f6be..6410868c8a3d 100644 --- a/security/tomoyo/securityfs_if.c +++ b/security/tomoyo/securityfs_if.c @@ -34,7 +34,7 @@ static int tomoyo_open(struct inode *inode, struct file *file) */ static int tomoyo_release(struct inode *inode, struct file *file) { - return tomoyo_close_control(file); + return tomoyo_close_control(file->private_data); } /** @@ -63,7 +63,7 @@ static unsigned int tomoyo_poll(struct file *file, poll_table *wait) static ssize_t tomoyo_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - return tomoyo_read_control(file, buf, count); + return tomoyo_read_control(file->private_data, buf, count); } /** @@ -79,7 +79,7 @@ static ssize_t tomoyo_read(struct file *file, char __user *buf, size_t count, static ssize_t tomoyo_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - return tomoyo_write_control(file, buf, count); + return tomoyo_write_control(file->private_data, buf, count); } /* diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index 7fb9bbf7021a..abb177c2d7c2 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -21,7 +21,7 @@ bool tomoyo_policy_loaded; * @result: Pointer to "unsigned long". * @str: Pointer to string to parse. * - * Returns value type on success, 0 otherwise. + * Returns one of values in "enum tomoyo_value_type". * * The @src is updated to point the first character after the value * on success. 
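
The hunk just shown changes the documented return convention of tomoyo_parse_ulong() from a bare 0-on-failure to "enum tomoyo_value_type", and the hunks that follow switch the code and the tomoyo_number_union fields (value_type[0]/value_type[1] in place of min_type/max_type) over to it. The standalone C sketch below illustrates that convention only; it is not taken from the patch, and the DECIMAL, OCTAL and HEXADECIMAL constant names are assumptions made for the example, since only TOMOYO_VALUE_TYPE_INVALID is visible in the diff.

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative stand-in for "enum tomoyo_value_type"; only
 * TOMOYO_VALUE_TYPE_INVALID is named in the patch, the other
 * constants are assumed here for the sake of the example.
 */
enum tomoyo_value_type {
	TOMOYO_VALUE_TYPE_INVALID,
	TOMOYO_VALUE_TYPE_DECIMAL,
	TOMOYO_VALUE_TYPE_OCTAL,
	TOMOYO_VALUE_TYPE_HEXADECIMAL,
};

/* Parse an unsigned long and report which notation was used. */
static enum tomoyo_value_type parse_ulong(unsigned long *result, char **str)
{
	const char *cp = *str;
	char *ep;
	int base = 10;

	if (cp[0] == '0') {
		char c = cp[1];
		if (c == 'x' || c == 'X') {
			base = 16;
			cp += 2;
		} else if (c >= '0' && c <= '7') {
			base = 8;
			cp++;
		}
	}
	*result = strtoul(cp, &ep, base);
	if (cp == ep)
		return TOMOYO_VALUE_TYPE_INVALID; /* nothing was parsed */
	*str = ep;
	return base == 16 ? TOMOYO_VALUE_TYPE_HEXADECIMAL :
	       base == 8 ? TOMOYO_VALUE_TYPE_OCTAL :
	       TOMOYO_VALUE_TYPE_DECIMAL;
}

int main(void)
{
	unsigned long v;
	char buf[] = "0755-0777";
	char *p = buf;

	/*
	 * A range such as "0755-0777" is consumed as two values separated
	 * by '-', mirroring what tomoyo_parse_number_union() does; the
	 * caller checks for TOMOYO_VALUE_TYPE_INVALID instead of 0.
	 */
	if (parse_ulong(&v, &p) != TOMOYO_VALUE_TYPE_INVALID)
		printf("first value %lu, next char '%c'\n", v, *p);
	return 0;
}
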
@@ -43,7 +43,7 @@ static u8 tomoyo_parse_ulong(unsigned long *result, char **str) } *result = simple_strtoul(cp, &ep, base); if (cp == ep) - return 0; + return TOMOYO_VALUE_TYPE_INVALID; *str = ep; switch (base) { case 16: @@ -93,11 +93,9 @@ bool tomoyo_parse_name_union(const char *filename, return false; if (filename[0] == '@') { ptr->group = tomoyo_get_group(filename + 1, TOMOYO_PATH_GROUP); - ptr->is_group = true; return ptr->group != NULL; } ptr->filename = tomoyo_get_name(filename); - ptr->is_group = false; return ptr->filename != NULL; } @@ -118,17 +116,16 @@ bool tomoyo_parse_number_union(char *data, struct tomoyo_number_union *num) if (!tomoyo_correct_word(data)) return false; num->group = tomoyo_get_group(data + 1, TOMOYO_NUMBER_GROUP); - num->is_group = true; return num->group != NULL; } type = tomoyo_parse_ulong(&v, &data); if (!type) return false; num->values[0] = v; - num->min_type = type; + num->value_type[0] = type; if (!*data) { num->values[1] = v; - num->max_type = type; + num->value_type[1] = type; return true; } if (*data++ != '-') @@ -137,7 +134,7 @@ bool tomoyo_parse_number_union(char *data, struct tomoyo_number_union *num) if (!type || *data) return false; num->values[1] = v; - num->max_type = type; + num->value_type[1] = type; return true; } @@ -184,6 +181,30 @@ static inline u8 tomoyo_make_byte(const u8 c1, const u8 c2, const u8 c3) return ((c1 - '0') << 6) + ((c2 - '0') << 3) + (c3 - '0'); } +/** + * tomoyo_valid - Check whether the character is a valid char. + * + * @c: The character to check. + * + * Returns true if @c is a valid character, false otherwise. + */ +static inline bool tomoyo_valid(const unsigned char c) +{ + return c > ' ' && c < 127; +} + +/** + * tomoyo_invalid - Check whether the character is an invalid char. + * + * @c: The character to check. + * + * Returns true if @c is an invalid character, false otherwise. + */ +static inline bool tomoyo_invalid(const unsigned char c) +{ + return c && (c <= ' ' || c >= 127); +} + /** * tomoyo_str_starts - Check whether the given string starts with the given keyword. * -- cgit v1.2.3 From a238cf5b89ed5285be8de56335665d023972f7d5 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:17:10 +0900 Subject: TOMOYO: Use struct for passing ACL line. Use structure for passing ACL line, in preparation for supporting policy namespace and conditional parameters. Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/common.c | 77 ++++++++------- security/tomoyo/common.h | 32 ++++--- security/tomoyo/domain.c | 146 ++++++++++++----------------- security/tomoyo/file.c | 237 +++++++++++++++++++++++++---------------------- security/tomoyo/group.c | 33 +++---- security/tomoyo/memory.c | 20 ++-- security/tomoyo/mount.c | 53 ----------- security/tomoyo/util.c | 120 ++++++++++++++---------- 8 files changed, 347 insertions(+), 371 deletions(-) diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 2e6792ded357..2cfadafd02f5 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -611,8 +611,11 @@ static int tomoyo_update_manager_entry(const char *manager, const bool is_delete) { struct tomoyo_manager e = { }; - int error; - + struct tomoyo_acl_param param = { + .is_delete = is_delete, + .list = &tomoyo_policy_list[TOMOYO_ID_MANAGER], + }; + int error = is_delete ? 
-ENOENT : -ENOMEM; if (tomoyo_domain_def(manager)) { if (!tomoyo_correct_domain(manager)) return -EINVAL; @@ -622,12 +625,11 @@ static int tomoyo_update_manager_entry(const char *manager, return -EINVAL; } e.manager = tomoyo_get_name(manager); - if (!e.manager) - return -ENOMEM; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list[TOMOYO_ID_MANAGER], - tomoyo_same_manager); - tomoyo_put_name(e.manager); + if (e.manager) { + error = tomoyo_update_policy(&e.head, sizeof(e), ¶m, + tomoyo_same_manager); + tomoyo_put_name(e.manager); + } return error; } @@ -821,18 +823,36 @@ static int tomoyo_delete_domain(char *domainname) /** * tomoyo_write_domain2 - Write domain policy. * - * @head: Pointer to "struct tomoyo_io_buffer". + * @list: Pointer to "struct list_head". + * @data: Policy to be interpreted. + * @is_delete: True if it is a delete request. * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ -static int tomoyo_write_domain2(char *data, struct tomoyo_domain_info *domain, +static int tomoyo_write_domain2(struct list_head *list, char *data, const bool is_delete) { - if (tomoyo_str_starts(&data, "allow_mount ")) - return tomoyo_write_mount(data, domain, is_delete); - return tomoyo_write_file(data, domain, is_delete); + struct tomoyo_acl_param param = { + .list = list, + .data = data, + .is_delete = is_delete, + }; + static const struct { + const char *keyword; + int (*write) (struct tomoyo_acl_param *); + } tomoyo_callback[1] = { + { "file ", tomoyo_write_file }, + }; + u8 i; + for (i = 0; i < 1; i++) { + if (!tomoyo_str_starts(¶m.data, + tomoyo_callback[i].keyword)) + continue; + return tomoyo_callback[i].write(¶m); + } + return -EINVAL; } /** @@ -889,7 +909,7 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) domain->transition_failed = !is_delete; return 0; } - return tomoyo_write_domain2(data, domain, is_delete); + return tomoyo_write_domain2(&domain->acl_info_list, data, is_delete); } /** @@ -1213,26 +1233,19 @@ static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = { */ static int tomoyo_write_exception(struct tomoyo_io_buffer *head) { - char *data = head->write_buf; - bool is_delete = tomoyo_str_starts(&data, "delete "); - u8 i; - static const struct { - const char *keyword; - int (*write) (char *, const bool); - } tomoyo_callback[1] = { - { "aggregator ", tomoyo_write_aggregator }, + struct tomoyo_acl_param param = { + .data = head->write_buf, }; - + u8 i; + param.is_delete = tomoyo_str_starts(¶m.data, "delete "); + if (tomoyo_str_starts(¶m.data, "aggregator ")) + return tomoyo_write_aggregator(¶m); for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++) - if (tomoyo_str_starts(&data, tomoyo_transition_type[i])) - return tomoyo_write_transition_control(data, is_delete, - i); - for (i = 0; i < 1; i++) - if (tomoyo_str_starts(&data, tomoyo_callback[i].keyword)) - return tomoyo_callback[i].write(data, is_delete); + if (tomoyo_str_starts(¶m.data, tomoyo_transition_type[i])) + return tomoyo_write_transition_control(¶m, i); for (i = 0; i < TOMOYO_MAX_GROUP; i++) - if (tomoyo_str_starts(&data, tomoyo_group_name[i])) - return tomoyo_write_group(data, is_delete, i); + if (tomoyo_str_starts(¶m.data, tomoyo_group_name[i])) + return tomoyo_write_group(¶m, i); return -EINVAL; } @@ -1490,7 +1503,7 @@ int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) 
vsnprintf(buffer, len - 1, fmt, args); va_end(args); tomoyo_normalize_line(buffer); - tomoyo_write_domain2(buffer, r->domain, false); + tomoyo_write_domain2(&r->domain->acl_info_list, buffer, false); kfree(buffer); /* fall through */ case TOMOYO_CONFIG_PERMISSIVE: diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 7aa55eef67bd..6f9711ff73c1 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -397,6 +397,13 @@ struct tomoyo_mount_acl { struct tomoyo_number_union flags; }; +/* Structure for holding a line from /sys/kernel/security/tomoyo/ interface. */ +struct tomoyo_acl_param { + char *data; + struct list_head *list; + bool is_delete; +}; + #define TOMOYO_MAX_IO_READ_QUEUE 32 /* @@ -521,7 +528,7 @@ bool tomoyo_correct_domain(const unsigned char *domainname); bool tomoyo_correct_path(const char *filename); bool tomoyo_correct_word(const char *string); bool tomoyo_domain_def(const unsigned char *buffer); -bool tomoyo_parse_name_union(const char *filename, +bool tomoyo_parse_name_union(struct tomoyo_acl_param *param, struct tomoyo_name_union *ptr); const struct tomoyo_path_info * tomoyo_path_matches_group(const struct tomoyo_path_info *pathname, @@ -531,7 +538,8 @@ bool tomoyo_number_matches_group(const unsigned long min, const struct tomoyo_group *group); bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename, const struct tomoyo_path_info *pattern); -bool tomoyo_parse_number_union(char *data, struct tomoyo_number_union *num); +bool tomoyo_parse_number_union(struct tomoyo_acl_param *param, + struct tomoyo_number_union *ptr); bool tomoyo_tokenize(char *buffer, char *w[], size_t size); bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain); int tomoyo_init_request_info(struct tomoyo_request_info *r, @@ -540,21 +548,19 @@ int tomoyo_init_request_info(struct tomoyo_request_info *r, int tomoyo_mount_permission(char *dev_name, struct path *path, const char *type, unsigned long flags, void *data_page); -int tomoyo_write_aggregator(char *data, const bool is_delete); -int tomoyo_write_transition_control(char *data, const bool is_delete, +int tomoyo_write_aggregator(struct tomoyo_acl_param *param); +int tomoyo_write_transition_control(struct tomoyo_acl_param *param, const u8 type); -int tomoyo_write_file(char *data, struct tomoyo_domain_info *domain, - const bool is_delete); -int tomoyo_write_mount(char *data, struct tomoyo_domain_info *domain, - const bool is_delete); -int tomoyo_write_group(char *data, const bool is_delete, const u8 type); +int tomoyo_write_file(struct tomoyo_acl_param *param); +int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type); int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) 
__attribute__ ((format(printf, 2, 3))); struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname); struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname, const u8 profile); struct tomoyo_profile *tomoyo_profile(const u8 profile); -struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 type); +struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param, + const u8 idx); unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain, const u8 index); void tomoyo_fill_path_info(struct tomoyo_path_info *ptr); @@ -587,7 +593,7 @@ void tomoyo_put_name_union(struct tomoyo_name_union *ptr); void tomoyo_run_gc(void); void tomoyo_memory_free(void *ptr); int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, - bool is_delete, struct tomoyo_domain_info *domain, + struct tomoyo_acl_param *param, bool (*check_duplicate) (const struct tomoyo_acl_info *, const struct tomoyo_acl_info @@ -596,7 +602,7 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, struct tomoyo_acl_info *, const bool)); int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, - bool is_delete, struct list_head *list, + struct tomoyo_acl_param *param, bool (*check_duplicate) (const struct tomoyo_acl_head *, const struct tomoyo_acl_head @@ -604,6 +610,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, void tomoyo_check_acl(struct tomoyo_request_info *r, bool (*check_entry) (struct tomoyo_request_info *, const struct tomoyo_acl_info *)); +char *tomoyo_read_token(struct tomoyo_acl_param *param); +bool tomoyo_permstr(const char *string, const char *keyword); /********** External variable definitions. **********/ diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 43977083254b..d818717954f8 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -20,8 +20,7 @@ struct tomoyo_domain_info tomoyo_kernel_domain; * * @new_entry: Pointer to "struct tomoyo_acl_info". * @size: Size of @new_entry in bytes. - * @is_delete: True if it is a delete request. - * @list: Pointer to "struct list_head". + * @param: Pointer to "struct tomoyo_acl_param". * @check_duplicate: Callback function to find duplicated entry. * * Returns 0 on success, negative value otherwise. @@ -29,25 +28,26 @@ struct tomoyo_domain_info tomoyo_kernel_domain; * Caller holds tomoyo_read_lock(). */ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, - bool is_delete, struct list_head *list, + struct tomoyo_acl_param *param, bool (*check_duplicate) (const struct tomoyo_acl_head *, const struct tomoyo_acl_head *)) { - int error = is_delete ? -ENOENT : -ENOMEM; + int error = param->is_delete ? -ENOENT : -ENOMEM; struct tomoyo_acl_head *entry; + struct list_head *list = param->list; if (mutex_lock_interruptible(&tomoyo_policy_lock)) return -ENOMEM; list_for_each_entry_rcu(entry, list, list) { if (!check_duplicate(entry, new_entry)) continue; - entry->is_deleted = is_delete; + entry->is_deleted = param->is_delete; error = 0; break; } - if (error && !is_delete) { + if (error && !param->is_delete) { entry = tomoyo_commit_ok(new_entry, size); if (entry) { list_add_tail_rcu(&entry->list, list); @@ -77,8 +77,7 @@ static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *a, * * @new_entry: Pointer to "struct tomoyo_acl_info". * @size: Size of @new_entry in bytes. - * @is_delete: True if it is a delete request. - * @domain: Pointer to "struct tomoyo_domain_info". 
+ * @param: Pointer to "struct tomoyo_acl_param". * @check_duplicate: Callback function to find duplicated entry. * @merge_duplicate: Callback function to merge duplicated entry. * @@ -87,7 +86,7 @@ static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *a, * Caller holds tomoyo_read_lock(). */ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, - bool is_delete, struct tomoyo_domain_info *domain, + struct tomoyo_acl_param *param, bool (*check_duplicate) (const struct tomoyo_acl_info *, const struct tomoyo_acl_info @@ -96,12 +95,14 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, struct tomoyo_acl_info *, const bool)) { + const bool is_delete = param->is_delete; int error = is_delete ? -ENOENT : -ENOMEM; struct tomoyo_acl_info *entry; + struct list_head * const list = param->list; if (mutex_lock_interruptible(&tomoyo_policy_lock)) return error; - list_for_each_entry_rcu(entry, &domain->acl_info_list, list) { + list_for_each_entry_rcu(entry, list, list) { if (!tomoyo_same_acl_head(entry, new_entry) || !check_duplicate(entry, new_entry)) continue; @@ -116,7 +117,7 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, if (error && !is_delete) { entry = tomoyo_commit_ok(new_entry, size); if (entry) { - list_add_tail_rcu(&entry->list, &domain->acl_info_list); + list_add_tail_rcu(&entry->list, list); error = 0; } } @@ -163,6 +164,14 @@ static const char *tomoyo_last_word(const char *name) return name; } +/** + * tomoyo_same_transition_control - Check for duplicated "struct tomoyo_transition_control" entry. + * + * @a: Pointer to "struct tomoyo_acl_head". + * @b: Pointer to "struct tomoyo_acl_head". + * + * Returns true if @a == @b, false otherwise. + */ static bool tomoyo_same_transition_control(const struct tomoyo_acl_head *a, const struct tomoyo_acl_head *b) { @@ -178,22 +187,28 @@ static bool tomoyo_same_transition_control(const struct tomoyo_acl_head *a, } /** - * tomoyo_update_transition_control_entry - Update "struct tomoyo_transition_control" list. + * tomoyo_write_transition_control - Write "struct tomoyo_transition_control" list. * - * @domainname: The name of domain. Maybe NULL. - * @program: The name of program. Maybe NULL. - * @type: Type of transition. - * @is_delete: True if it is a delete request. + * @param: Pointer to "struct tomoyo_acl_param". + * @type: Type of this entry. * * Returns 0 on success, negative value otherwise. */ -static int tomoyo_update_transition_control_entry(const char *domainname, - const char *program, - const u8 type, - const bool is_delete) +int tomoyo_write_transition_control(struct tomoyo_acl_param *param, + const u8 type) { struct tomoyo_transition_control e = { .type = type }; - int error = is_delete ? -ENOENT : -ENOMEM; + int error = param->is_delete ? 
-ENOENT : -ENOMEM; + char *program = param->data; + char *domainname = strstr(program, " from "); + if (domainname) { + *domainname = '\0'; + domainname += 6; + } else if (type == TOMOYO_TRANSITION_CONTROL_NO_KEEP || + type == TOMOYO_TRANSITION_CONTROL_KEEP) { + domainname = program; + program = NULL; + } if (program) { if (!tomoyo_correct_path(program)) return -EINVAL; @@ -211,41 +226,15 @@ static int tomoyo_update_transition_control_entry(const char *domainname, if (!e.domainname) goto out; } - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list - [TOMOYO_ID_TRANSITION_CONTROL], + param->list = &tomoyo_policy_list[TOMOYO_ID_TRANSITION_CONTROL]; + error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_same_transition_control); - out: +out: tomoyo_put_name(e.domainname); tomoyo_put_name(e.program); return error; } -/** - * tomoyo_write_transition_control - Write "struct tomoyo_transition_control" list. - * - * @data: String to parse. - * @is_delete: True if it is a delete request. - * @type: Type of this entry. - * - * Returns 0 on success, negative value otherwise. - */ -int tomoyo_write_transition_control(char *data, const bool is_delete, - const u8 type) -{ - char *domainname = strstr(data, " from "); - if (domainname) { - *domainname = '\0'; - domainname += 6; - } else if (type == TOMOYO_TRANSITION_CONTROL_NO_KEEP || - type == TOMOYO_TRANSITION_CONTROL_KEEP) { - domainname = data; - data = NULL; - } - return tomoyo_update_transition_control_entry(domainname, data, type, - is_delete); -} - /** * tomoyo_transition_type - Get domain transition type. * @@ -303,34 +292,41 @@ static u8 tomoyo_transition_type(const struct tomoyo_path_info *domainname, return type; } +/** + * tomoyo_same_aggregator - Check for duplicated "struct tomoyo_aggregator" entry. + * + * @a: Pointer to "struct tomoyo_acl_head". + * @b: Pointer to "struct tomoyo_acl_head". + * + * Returns true if @a == @b, false otherwise. + */ static bool tomoyo_same_aggregator(const struct tomoyo_acl_head *a, const struct tomoyo_acl_head *b) { - const struct tomoyo_aggregator *p1 = container_of(a, typeof(*p1), head); - const struct tomoyo_aggregator *p2 = container_of(b, typeof(*p2), head); + const struct tomoyo_aggregator *p1 = container_of(a, typeof(*p1), + head); + const struct tomoyo_aggregator *p2 = container_of(b, typeof(*p2), + head); return p1->original_name == p2->original_name && p1->aggregated_name == p2->aggregated_name; } /** - * tomoyo_update_aggregator_entry - Update "struct tomoyo_aggregator" list. + * tomoyo_write_aggregator - Write "struct tomoyo_aggregator" list. * - * @original_name: The original program's name. - * @aggregated_name: The program name to use. - * @is_delete: True if it is a delete request. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ -static int tomoyo_update_aggregator_entry(const char *original_name, - const char *aggregated_name, - const bool is_delete) +int tomoyo_write_aggregator(struct tomoyo_acl_param *param) { struct tomoyo_aggregator e = { }; - int error = is_delete ? -ENOENT : -ENOMEM; - - if (!tomoyo_correct_path(original_name) || + int error = param->is_delete ? 
-ENOENT : -ENOMEM; + const char *original_name = tomoyo_read_token(param); + const char *aggregated_name = tomoyo_read_token(param); + if (!tomoyo_correct_word(original_name) || !tomoyo_correct_path(aggregated_name)) return -EINVAL; e.original_name = tomoyo_get_name(original_name); @@ -338,35 +334,15 @@ static int tomoyo_update_aggregator_entry(const char *original_name, if (!e.original_name || !e.aggregated_name || e.aggregated_name->is_patterned) /* No patterns allowed. */ goto out; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list[TOMOYO_ID_AGGREGATOR], + param->list = &tomoyo_policy_list[TOMOYO_ID_AGGREGATOR]; + error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_same_aggregator); - out: +out: tomoyo_put_name(e.original_name); tomoyo_put_name(e.aggregated_name); return error; } -/** - * tomoyo_write_aggregator - Write "struct tomoyo_aggregator" list. - * - * @data: String to parse. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -int tomoyo_write_aggregator(char *data, const bool is_delete) -{ - char *cp = strchr(data, ' '); - - if (!cp) - return -EINVAL; - *cp++ = '\0'; - return tomoyo_update_aggregator_entry(data, cp, is_delete); -} - /** * tomoyo_assign_domain - Create a domain. * diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 4259e0a136d8..e60745f9f31e 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -428,29 +428,27 @@ static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a, /** * tomoyo_update_path_acl - Update "struct tomoyo_path_acl" list. * - * @type: Type of operation. - * @filename: Filename. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. + * @perm: Permission. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ -static int tomoyo_update_path_acl(const u8 type, const char *filename, - struct tomoyo_domain_info * const domain, - const bool is_delete) +static int tomoyo_update_path_acl(const u16 perm, + struct tomoyo_acl_param *param) { struct tomoyo_path_acl e = { .head.type = TOMOYO_TYPE_PATH_ACL, - .perm = 1 << type + .perm = perm }; int error; - if (!tomoyo_parse_name_union(filename, &e.name)) - return -EINVAL; - error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, - tomoyo_same_path_acl, - tomoyo_merge_path_acl); + if (!tomoyo_parse_name_union(param, &e.name)) + error = -EINVAL; + else + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_path_acl, + tomoyo_merge_path_acl); tomoyo_put_name_union(&e.name); return error; } @@ -503,37 +501,30 @@ static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a, /** * tomoyo_update_mkdev_acl - Update "struct tomoyo_mkdev_acl" list. * - * @type: Type of operation. - * @filename: Filename. - * @mode: Create mode. - * @major: Device major number. - * @minor: Device minor number. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. + * @perm: Permission. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). 
*/ -static int tomoyo_update_mkdev_acl(const u8 type, const char *filename, - char *mode, char *major, char *minor, - struct tomoyo_domain_info * const domain, - const bool is_delete) +static int tomoyo_update_mkdev_acl(const u8 perm, + struct tomoyo_acl_param *param) { struct tomoyo_mkdev_acl e = { .head.type = TOMOYO_TYPE_MKDEV_ACL, - .perm = 1 << type + .perm = perm }; - int error = is_delete ? -ENOENT : -ENOMEM; - if (!tomoyo_parse_name_union(filename, &e.name) || - !tomoyo_parse_number_union(mode, &e.mode) || - !tomoyo_parse_number_union(major, &e.major) || - !tomoyo_parse_number_union(minor, &e.minor)) - goto out; - error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, - tomoyo_same_mkdev_acl, - tomoyo_merge_mkdev_acl); - out: + int error; + if (!tomoyo_parse_name_union(param, &e.name) || + !tomoyo_parse_number_union(param, &e.mode) || + !tomoyo_parse_number_union(param, &e.major) || + !tomoyo_parse_number_union(param, &e.minor)) + error = -EINVAL; + else + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_mkdev_acl, + tomoyo_merge_mkdev_acl); tomoyo_put_name_union(&e.name); tomoyo_put_number_union(&e.mode); tomoyo_put_number_union(&e.major); @@ -586,33 +577,28 @@ static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a, /** * tomoyo_update_path2_acl - Update "struct tomoyo_path2_acl" list. * - * @type: Type of operation. - * @filename1: First filename. - * @filename2: Second filename. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. + * @perm: Permission. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ -static int tomoyo_update_path2_acl(const u8 type, const char *filename1, - const char *filename2, - struct tomoyo_domain_info * const domain, - const bool is_delete) +static int tomoyo_update_path2_acl(const u8 perm, + struct tomoyo_acl_param *param) { struct tomoyo_path2_acl e = { .head.type = TOMOYO_TYPE_PATH2_ACL, - .perm = 1 << type + .perm = perm }; - int error = is_delete ? -ENOENT : -ENOMEM; - if (!tomoyo_parse_name_union(filename1, &e.name1) || - !tomoyo_parse_name_union(filename2, &e.name2)) - goto out; - error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, - tomoyo_same_path2_acl, - tomoyo_merge_path2_acl); - out: + int error; + if (!tomoyo_parse_name_union(param, &e.name1) || + !tomoyo_parse_name_union(param, &e.name2)) + error = -EINVAL; + else + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_path2_acl, + tomoyo_merge_path2_acl); tomoyo_put_name_union(&e.name1); tomoyo_put_name_union(&e.name2); return error; @@ -701,32 +687,26 @@ static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a, /** * tomoyo_update_path_number_acl - Update ioctl/chmod/chown/chgrp ACL. * - * @type: Type of operation. - * @filename: Filename. - * @number: Number. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. + * @perm: Permission. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. 
*/ -static int tomoyo_update_path_number_acl(const u8 type, const char *filename, - char *number, - struct tomoyo_domain_info * const - domain, const bool is_delete) +static int tomoyo_update_path_number_acl(const u8 perm, + struct tomoyo_acl_param *param) { struct tomoyo_path_number_acl e = { .head.type = TOMOYO_TYPE_PATH_NUMBER_ACL, - .perm = 1 << type + .perm = perm }; - int error = is_delete ? -ENOENT : -ENOMEM; - if (!tomoyo_parse_name_union(filename, &e.name)) - return -EINVAL; - if (!tomoyo_parse_number_union(number, &e.number)) - goto out; - error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, - tomoyo_same_path_number_acl, - tomoyo_merge_path_number_acl); - out: + int error; + if (!tomoyo_parse_name_union(param, &e.name) || + !tomoyo_parse_number_union(param, &e.number)) + error = -EINVAL; + else + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_path_number_acl, + tomoyo_merge_path_number_acl); tomoyo_put_name_union(&e.name); tomoyo_put_number_union(&e.number); return error; @@ -962,54 +942,89 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1, return error; } +/** + * tomoyo_same_mount_acl - Check for duplicated "struct tomoyo_mount_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b, false otherwise. + */ +static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a, + const struct tomoyo_acl_info *b) +{ + const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head); + const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head); + return tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) && + tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) && + tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) && + tomoyo_same_number_union(&p1->flags, &p2->flags); +} + +/** + * tomoyo_update_mount_acl - Write "struct tomoyo_mount_acl" list. + * + * @param: Pointer to "struct tomoyo_acl_param". + * + * Returns 0 on success, negative value otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +static int tomoyo_update_mount_acl(struct tomoyo_acl_param *param) +{ + struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL }; + int error; + if (!tomoyo_parse_name_union(param, &e.dev_name) || + !tomoyo_parse_name_union(param, &e.dir_name) || + !tomoyo_parse_name_union(param, &e.fs_type) || + !tomoyo_parse_number_union(param, &e.flags)) + error = -EINVAL; + else + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_mount_acl, NULL); + tomoyo_put_name_union(&e.dev_name); + tomoyo_put_name_union(&e.dir_name); + tomoyo_put_name_union(&e.fs_type); + tomoyo_put_number_union(&e.flags); + return error; +} + /** * tomoyo_write_file - Update file related list. * - * @data: String to parse. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). 
*/ -int tomoyo_write_file(char *data, struct tomoyo_domain_info *domain, - const bool is_delete) +int tomoyo_write_file(struct tomoyo_acl_param *param) { - char *w[5]; + u16 perm = 0; u8 type; - if (!tomoyo_tokenize(data, w, sizeof(w)) || !w[1][0]) - return -EINVAL; - if (strncmp(w[0], "allow_", 6)) - goto out; - w[0] += 6; - for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++) { - if (strcmp(w[0], tomoyo_path_keyword[type])) - continue; - return tomoyo_update_path_acl(type, w[1], domain, is_delete); - } - if (!w[2][0]) - goto out; - for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++) { - if (strcmp(w[0], tomoyo_path2_keyword[type])) - continue; - return tomoyo_update_path2_acl(type, w[1], w[2], domain, - is_delete); - } - for (type = 0; type < TOMOYO_MAX_PATH_NUMBER_OPERATION; type++) { - if (strcmp(w[0], tomoyo_path_number_keyword[type])) - continue; - return tomoyo_update_path_number_acl(type, w[1], w[2], domain, - is_delete); - } - if (!w[3][0] || !w[4][0]) - goto out; - for (type = 0; type < TOMOYO_MAX_MKDEV_OPERATION; type++) { - if (strcmp(w[0], tomoyo_mkdev_keyword[type])) - continue; - return tomoyo_update_mkdev_acl(type, w[1], w[2], w[3], - w[4], domain, is_delete); - } - out: + const char *operation = tomoyo_read_token(param); + for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++) + if (tomoyo_permstr(operation, tomoyo_path_keyword[type])) + perm |= 1 << type; + if (perm) + return tomoyo_update_path_acl(perm, param); + for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++) + if (tomoyo_permstr(operation, tomoyo_path2_keyword[type])) + perm |= 1 << type; + if (perm) + return tomoyo_update_path2_acl(perm, param); + for (type = 0; type < TOMOYO_MAX_PATH_NUMBER_OPERATION; type++) + if (tomoyo_permstr(operation, + tomoyo_path_number_keyword[type])) + perm |= 1 << type; + if (perm) + return tomoyo_update_path_number_acl(perm, param); + for (type = 0; type < TOMOYO_MAX_MKDEV_OPERATION; type++) + if (tomoyo_permstr(operation, tomoyo_mkdev_keyword[type])) + perm |= 1 << type; + if (perm) + return tomoyo_update_mkdev_acl(perm, param); + if (tomoyo_permstr(operation, "mount")) + return tomoyo_update_mount_acl(param); return -EINVAL; } diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c index e94352ce723f..2e5b7bc73264 100644 --- a/security/tomoyo/group.c +++ b/security/tomoyo/group.c @@ -28,48 +28,41 @@ static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a, /** * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group" list. * - * @data: String to parse. - * @is_delete: True if it is a delete request. + * @param: Pointer to "struct tomoyo_acl_param". * @type: Type of this group. * * Returns 0 on success, negative value otherwise. 
*/ -int tomoyo_write_group(char *data, const bool is_delete, const u8 type) +int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type) { - struct tomoyo_group *group; - struct list_head *member; - char *w[2]; + struct tomoyo_group *group = tomoyo_get_group(param, type); int error = -EINVAL; - if (!tomoyo_tokenize(data, w, sizeof(w)) || !w[1][0]) - return -EINVAL; - group = tomoyo_get_group(w[0], type); if (!group) return -ENOMEM; - member = &group->member_list; + param->list = &group->member_list; if (type == TOMOYO_PATH_GROUP) { struct tomoyo_path_group e = { }; - e.member_name = tomoyo_get_name(w[1]); + e.member_name = tomoyo_get_name(tomoyo_read_token(param)); if (!e.member_name) { error = -ENOMEM; goto out; } - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - member, tomoyo_same_path_group); + error = tomoyo_update_policy(&e.head, sizeof(e), param, + tomoyo_same_path_group); tomoyo_put_name(e.member_name); } else if (type == TOMOYO_NUMBER_GROUP) { struct tomoyo_number_group e = { }; - if (w[1][0] == '@' - || !tomoyo_parse_number_union(w[1], &e.number) - || e.number.values[0] > e.number.values[1]) + if (param->data[0] == '@' || + !tomoyo_parse_number_union(param, &e.number)) goto out; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - member, tomoyo_same_number_group); + error = tomoyo_update_policy(&e.head, sizeof(e), param, + tomoyo_same_number_group); /* * tomoyo_put_number_union() is not needed because - * w[1][0] != '@'. + * param->data[0] != '@'. */ } - out: +out: tomoyo_put_group(group); return error; } diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c index dfef0cb268dc..839b8ebc6fe6 100644 --- a/security/tomoyo/memory.c +++ b/security/tomoyo/memory.c @@ -93,15 +93,18 @@ void tomoyo_memory_free(void *ptr) /** * tomoyo_get_group - Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group". * - * @group_name: The name of address group. - * @idx: Index number. + * @param: Pointer to "struct tomoyo_acl_param". + * @idx: Index number. * * Returns pointer to "struct tomoyo_group" on success, NULL otherwise. */ -struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 idx) +struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param, + const u8 idx) { struct tomoyo_group e = { }; struct tomoyo_group *group = NULL; + struct list_head *list; + const char *group_name = tomoyo_read_token(param); bool found = false; if (!tomoyo_correct_word(group_name) || idx >= TOMOYO_MAX_GROUP) return NULL; @@ -110,7 +113,8 @@ struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 idx) return NULL; if (mutex_lock_interruptible(&tomoyo_policy_lock)) goto out; - list_for_each_entry(group, &tomoyo_group_list[idx], head.list) { + list = &tomoyo_group_list[idx]; + list_for_each_entry(group, list, head.list) { if (e.group_name != group->group_name) continue; atomic_inc(&group->head.users); @@ -122,14 +126,13 @@ struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 idx) if (entry) { INIT_LIST_HEAD(&entry->member_list); atomic_set(&entry->head.users, 1); - list_add_tail_rcu(&entry->head.list, - &tomoyo_group_list[idx]); + list_add_tail_rcu(&entry->head.list, list); group = entry; found = true; } } mutex_unlock(&tomoyo_policy_lock); - out: +out: tomoyo_put_name(e.group_name); return found ? 
group : NULL; } @@ -210,6 +213,8 @@ void __init tomoyo_mm_init(void) idx = tomoyo_read_lock(); if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain) panic("Can't register tomoyo_kernel_domain"); +#if 0 + /* Will be replaced with tomoyo_load_builtin_policy(). */ { /* Load built-in policy. */ tomoyo_write_transition_control("/sbin/hotplug", false, @@ -217,6 +222,7 @@ void __init tomoyo_mm_init(void) tomoyo_write_transition_control("/sbin/modprobe", false, TOMOYO_TRANSITION_CONTROL_INITIALIZE); } +#endif tomoyo_read_unlock(idx); } diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c index 7649dbc6a56b..1e610f96c99d 100644 --- a/security/tomoyo/mount.c +++ b/security/tomoyo/mount.c @@ -243,56 +243,3 @@ int tomoyo_mount_permission(char *dev_name, struct path *path, tomoyo_read_unlock(idx); return error; } - -/** - * tomoyo_same_mount_acl - Check for duplicated "struct tomoyo_mount_acl" entry. - * - * @a: Pointer to "struct tomoyo_acl_info". - * @b: Pointer to "struct tomoyo_acl_info". - * - * Returns true if @a == @b, false otherwise. - */ -static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a, - const struct tomoyo_acl_info *b) -{ - const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head); - const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head); - return tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) && - tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) && - tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) && - tomoyo_same_number_union(&p1->flags, &p2->flags); -} - -/** - * tomoyo_write_mount - Write "struct tomoyo_mount_acl" list. - * - * @data: String to parse. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -int tomoyo_write_mount(char *data, struct tomoyo_domain_info *domain, - const bool is_delete) -{ - struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL }; - int error = is_delete ? -ENOENT : -ENOMEM; - char *w[4]; - if (!tomoyo_tokenize(data, w, sizeof(w)) || !w[3][0]) - return -EINVAL; - if (!tomoyo_parse_name_union(w[0], &e.dev_name) || - !tomoyo_parse_name_union(w[1], &e.dir_name) || - !tomoyo_parse_name_union(w[2], &e.fs_type) || - !tomoyo_parse_number_union(w[3], &e.flags)) - goto out; - error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, - tomoyo_same_mount_acl, NULL); - out: - tomoyo_put_name_union(&e.dev_name); - tomoyo_put_name_union(&e.dir_name); - tomoyo_put_name_union(&e.fs_type); - tomoyo_put_number_union(&e.flags); - return error; -} diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index abb177c2d7c2..72cd2b97cae8 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -15,6 +15,46 @@ DEFINE_MUTEX(tomoyo_policy_lock); /* Has /sbin/init started? */ bool tomoyo_policy_loaded; +/** + * tomoyo_permstr - Find permission keywords. + * + * @string: String representation for permissions in foo/bar/buz format. + * @keyword: Keyword to find from @string/ + * + * Returns ture if @keyword was found in @string, false otherwise. + * + * This function assumes that strncmp(w1, w2, strlen(w1)) != 0 if w1 != w2. + */ +bool tomoyo_permstr(const char *string, const char *keyword) +{ + const char *cp = strstr(string, keyword); + if (cp) + return cp == string || *(cp - 1) == '/'; + return false; +} + +/** + * tomoyo_read_token - Read a word from a line. + * + * @param: Pointer to "struct tomoyo_acl_param". 
+ * + * Returns a word on success, "" otherwise. + * + * To allow the caller to skip NULL check, this function returns "" rather than + * NULL if there are no more words to read. + */ +char *tomoyo_read_token(struct tomoyo_acl_param *param) +{ + char *pos = param->data; + char *del = strchr(pos, ' '); + if (del) + *del++ = '\0'; + else + del = pos + strlen(pos); + param->data = del; + return pos; +} + /** * tomoyo_parse_ulong - Parse an "unsigned long" value. * @@ -81,20 +121,23 @@ void tomoyo_print_ulong(char *buffer, const int buffer_len, /** * tomoyo_parse_name_union - Parse a tomoyo_name_union. * - * @filename: Name or name group. - * @ptr: Pointer to "struct tomoyo_name_union". + * @param: Pointer to "struct tomoyo_acl_param". + * @ptr: Pointer to "struct tomoyo_name_union". * * Returns true on success, false otherwise. */ -bool tomoyo_parse_name_union(const char *filename, +bool tomoyo_parse_name_union(struct tomoyo_acl_param *param, struct tomoyo_name_union *ptr) { - if (!tomoyo_correct_word(filename)) - return false; - if (filename[0] == '@') { - ptr->group = tomoyo_get_group(filename + 1, TOMOYO_PATH_GROUP); + char *filename; + if (param->data[0] == '@') { + param->data++; + ptr->group = tomoyo_get_group(param, TOMOYO_PATH_GROUP); return ptr->group != NULL; } + filename = tomoyo_read_token(param); + if (!tomoyo_correct_word(filename)) + return false; ptr->filename = tomoyo_get_name(filename); return ptr->filename != NULL; } @@ -102,39 +145,41 @@ bool tomoyo_parse_name_union(const char *filename, /** * tomoyo_parse_number_union - Parse a tomoyo_number_union. * - * @data: Number or number range or number group. - * @ptr: Pointer to "struct tomoyo_number_union". + * @param: Pointer to "struct tomoyo_acl_param". + * @ptr: Pointer to "struct tomoyo_number_union". * * Returns true on success, false otherwise. */ -bool tomoyo_parse_number_union(char *data, struct tomoyo_number_union *num) +bool tomoyo_parse_number_union(struct tomoyo_acl_param *param, + struct tomoyo_number_union *ptr) { + char *data; u8 type; unsigned long v; - memset(num, 0, sizeof(*num)); - if (data[0] == '@') { - if (!tomoyo_correct_word(data)) - return false; - num->group = tomoyo_get_group(data + 1, TOMOYO_NUMBER_GROUP); - return num->group != NULL; + memset(ptr, 0, sizeof(*ptr)); + if (param->data[0] == '@') { + param->data++; + ptr->group = tomoyo_get_group(param, TOMOYO_NUMBER_GROUP); + return ptr->group != NULL; } + data = tomoyo_read_token(param); type = tomoyo_parse_ulong(&v, &data); - if (!type) + if (type == TOMOYO_VALUE_TYPE_INVALID) return false; - num->values[0] = v; - num->value_type[0] = type; + ptr->values[0] = v; + ptr->value_type[0] = type; if (!*data) { - num->values[1] = v; - num->value_type[1] = type; + ptr->values[1] = v; + ptr->value_type[1] = type; return true; } if (*data++ != '-') return false; type = tomoyo_parse_ulong(&v, &data); - if (!type || *data) + if (type == TOMOYO_VALUE_TYPE_INVALID || *data || ptr->values[0] > v) return false; - num->values[1] = v; - num->value_type[1] = type; + ptr->values[1] = v; + ptr->value_type[1] = type; return true; } @@ -258,33 +303,6 @@ void tomoyo_normalize_line(unsigned char *buffer) *dp = '\0'; } -/** - * tomoyo_tokenize - Tokenize string. - * - * @buffer: The line to tokenize. - * @w: Pointer to "char *". - * @size: Sizeof @w . - * - * Returns true on success, false otherwise.
- */ -bool tomoyo_tokenize(char *buffer, char *w[], size_t size) -{ - int count = size / sizeof(char *); - int i; - for (i = 0; i < count; i++) - w[i] = ""; - for (i = 0; i < count; i++) { - char *cp = strchr(buffer, ' '); - if (cp) - *cp = '\0'; - w[i] = buffer; - if (!cp) - break; - buffer = cp + 1; - } - return i < count || !*buffer; -} - /** * tomoyo_correct_word2 - Validate a string. * -- cgit v1.2.3 From 0d2171d711cbfca84cc0001121be8a6cc8e4d148 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:17:46 +0900 Subject: TOMOYO: Rename directives. Convert "allow_..." style directives to "file ..." style directives. By converting to the latter style, we can pack policy like "file read/write/execute /path/to/file". Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/common.c | 147 +++++++++++++++++++++++++++++++---------------- security/tomoyo/common.h | 6 +- security/tomoyo/domain.c | 4 +- security/tomoyo/file.c | 15 +++-- 4 files changed, 113 insertions(+), 59 deletions(-) diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 2cfadafd02f5..465df022c211 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -56,7 +56,7 @@ static const char *tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX [TOMOYO_MAC_FILE_IOCTL] = "file::ioctl", [TOMOYO_MAC_FILE_CHROOT] = "file::chroot", [TOMOYO_MAC_FILE_MOUNT] = "file::mount", - [TOMOYO_MAC_FILE_UMOUNT] = "file::umount", + [TOMOYO_MAC_FILE_UMOUNT] = "file::unmount", [TOMOYO_MAC_FILE_PIVOT_ROOT] = "file::pivot_root", [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file", }; @@ -171,17 +171,43 @@ void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) tomoyo_set_string(head, head->read_buf + pos); } +/** + * tomoyo_set_space - Put a space to "struct tomoyo_io_buffer" structure. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. + */ static void tomoyo_set_space(struct tomoyo_io_buffer *head) { tomoyo_set_string(head, " "); } +/** + * tomoyo_set_lf - Put a line feed to "struct tomoyo_io_buffer" structure. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns true if all data was flushed, false otherwise. + */ static bool tomoyo_set_lf(struct tomoyo_io_buffer *head) { tomoyo_set_string(head, "\n"); return !head->r.w_pos; } +/** + * tomoyo_set_slash - Put a slash to "struct tomoyo_io_buffer" structure. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. + */ +static void tomoyo_set_slash(struct tomoyo_io_buffer *head) +{ + tomoyo_set_string(head, "/"); +} + /** * tomoyo_print_name_union - Print a tomoyo_name_union. * @@ -913,19 +939,17 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) } /** - * tomoyo_fns - Find next set bit. + * tomoyo_set_group - Print category name. * - * @perm: 8 bits value. - * @bit: First bit to find. + * @head: Pointer to "struct tomoyo_io_buffer". + * @category: Category name. * - * Returns next on-bit on success, 8 otherwise. + * Returns nothing.
*/ -static u8 tomoyo_fns(const u8 perm, u8 bit) +static void tomoyo_set_group(struct tomoyo_io_buffer *head, + const char *category) { - for ( ; bit < 8; bit++) - if (perm & (1 << bit)) - break; - return bit; + tomoyo_set_string(head, category); } /** @@ -940,58 +964,94 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head, struct tomoyo_acl_info *acl) { const u8 acl_type = acl->type; + bool first = true; u8 bit; if (acl->is_deleted) return true; - next: - bit = head->r.bit; if (!tomoyo_flush(head)) return false; else if (acl_type == TOMOYO_TYPE_PATH_ACL) { struct tomoyo_path_acl *ptr = container_of(acl, typeof(*ptr), head); const u16 perm = ptr->perm; - for ( ; bit < TOMOYO_MAX_PATH_OPERATION; bit++) { + for (bit = 0; bit < TOMOYO_MAX_PATH_OPERATION; bit++) { if (!(perm & (1 << bit))) continue; if (head->r.print_execute_only && bit != TOMOYO_TYPE_EXECUTE) continue; - break; + if (first) { + tomoyo_set_group(head, "file "); + first = false; + } else { + tomoyo_set_slash(head); + } + tomoyo_set_string(head, tomoyo_path_keyword[bit]); } - if (bit >= TOMOYO_MAX_PATH_OPERATION) - goto done; - tomoyo_io_printf(head, "allow_%s", tomoyo_path_keyword[bit]); + if (first) + return true; tomoyo_print_name_union(head, &ptr->name); } else if (head->r.print_execute_only) { return true; } else if (acl_type == TOMOYO_TYPE_PATH2_ACL) { struct tomoyo_path2_acl *ptr = container_of(acl, typeof(*ptr), head); - bit = tomoyo_fns(ptr->perm, bit); - if (bit >= TOMOYO_MAX_PATH2_OPERATION) - goto done; - tomoyo_io_printf(head, "allow_%s", tomoyo_path2_keyword[bit]); + const u8 perm = ptr->perm; + for (bit = 0; bit < TOMOYO_MAX_PATH2_OPERATION; bit++) { + if (!(perm & (1 << bit))) + continue; + if (first) { + tomoyo_set_group(head, "file "); + first = false; + } else { + tomoyo_set_slash(head); + } + tomoyo_set_string(head, tomoyo_mac_keywords + [tomoyo_pp2mac[bit]]); + } + if (first) + return true; tomoyo_print_name_union(head, &ptr->name1); tomoyo_print_name_union(head, &ptr->name2); } else if (acl_type == TOMOYO_TYPE_PATH_NUMBER_ACL) { struct tomoyo_path_number_acl *ptr = container_of(acl, typeof(*ptr), head); - bit = tomoyo_fns(ptr->perm, bit); - if (bit >= TOMOYO_MAX_PATH_NUMBER_OPERATION) - goto done; - tomoyo_io_printf(head, "allow_%s", - tomoyo_path_number_keyword[bit]); + const u8 perm = ptr->perm; + for (bit = 0; bit < TOMOYO_MAX_PATH_NUMBER_OPERATION; bit++) { + if (!(perm & (1 << bit))) + continue; + if (first) { + tomoyo_set_group(head, "file "); + first = false; + } else { + tomoyo_set_slash(head); + } + tomoyo_set_string(head, tomoyo_mac_keywords + [tomoyo_pn2mac[bit]]); + } + if (first) + return true; tomoyo_print_name_union(head, &ptr->name); tomoyo_print_number_union(head, &ptr->number); } else if (acl_type == TOMOYO_TYPE_MKDEV_ACL) { struct tomoyo_mkdev_acl *ptr = container_of(acl, typeof(*ptr), head); - bit = tomoyo_fns(ptr->perm, bit); - if (bit >= TOMOYO_MAX_MKDEV_OPERATION) - goto done; - tomoyo_io_printf(head, "allow_%s", tomoyo_mkdev_keyword[bit]); + const u8 perm = ptr->perm; + for (bit = 0; bit < TOMOYO_MAX_MKDEV_OPERATION; bit++) { + if (!(perm & (1 << bit))) + continue; + if (first) { + tomoyo_set_group(head, "file "); + first = false; + } else { + tomoyo_set_slash(head); + } + tomoyo_set_string(head, tomoyo_mac_keywords + [tomoyo_pnnn2mac[bit]]); + } + if (first) + return true; tomoyo_print_name_union(head, &ptr->name); tomoyo_print_number_union(head, &ptr->mode); tomoyo_print_number_union(head, &ptr->major); @@ -999,18 +1059,13 @@ static bool tomoyo_print_entry(struct 
tomoyo_io_buffer *head, } else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) { struct tomoyo_mount_acl *ptr = container_of(acl, typeof(*ptr), head); - tomoyo_io_printf(head, "allow_mount"); + tomoyo_set_group(head, "file mount"); tomoyo_print_name_union(head, &ptr->dev_name); tomoyo_print_name_union(head, &ptr->dir_name); tomoyo_print_name_union(head, &ptr->fs_type); tomoyo_print_number_union(head, &ptr->flags); } - head->r.bit = bit + 1; - tomoyo_io_printf(head, "\n"); - if (acl_type != TOMOYO_TYPE_MOUNT_ACL) - goto next; - done: - head->r.bit = 0; + tomoyo_set_lf(head); return true; } @@ -1316,18 +1371,14 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) { struct tomoyo_transition_control *ptr = container_of(acl, typeof(*ptr), head); - tomoyo_set_string(head, - tomoyo_transition_type + tomoyo_set_string(head, tomoyo_transition_type [ptr->type]); - if (ptr->program) - tomoyo_set_string(head, - ptr->program->name); - if (ptr->program && ptr->domainname) - tomoyo_set_string(head, " from "); - if (ptr->domainname) - tomoyo_set_string(head, - ptr->domainname-> - name); + tomoyo_set_string(head, ptr->program ? + ptr->program->name : "any"); + tomoyo_set_string(head, " from "); + tomoyo_set_string(head, ptr->domainname ? + ptr->domainname->name : + "any"); } break; case TOMOYO_ID_AGGREGATOR: diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 6f9711ff73c1..139ad7544460 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -404,7 +404,7 @@ struct tomoyo_acl_param { bool is_delete; }; -#define TOMOYO_MAX_IO_READ_QUEUE 32 +#define TOMOYO_MAX_IO_READ_QUEUE 64 /* * Structure for reading/writing policy via /sys/kernel/security/tomoyo @@ -639,6 +639,10 @@ extern const char *tomoyo_mkdev_keyword[TOMOYO_MAX_MKDEV_OPERATION]; extern const char *tomoyo_path2_keyword[TOMOYO_MAX_PATH2_OPERATION]; extern const char *tomoyo_path_number_keyword[TOMOYO_MAX_PATH_NUMBER_OPERATION]; +extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION]; +extern const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION]; +extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION]; + extern unsigned int tomoyo_quota_for_query; extern unsigned int tomoyo_query_memory_size; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index d818717954f8..cb5d2b05c244 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -209,14 +209,14 @@ int tomoyo_write_transition_control(struct tomoyo_acl_param *param, domainname = program; program = NULL; } - if (program) { + if (program && strcmp(program, "any")) { if (!tomoyo_correct_path(program)) return -EINVAL; e.program = tomoyo_get_name(program); if (!e.program) goto out; } - if (domainname) { + if (domainname && strcmp(domainname, "any")) { if (!tomoyo_correct_domain(domainname)) { if (!tomoyo_correct_path(domainname)) goto out; diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index e60745f9f31e..0673a69b1320 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -69,7 +69,7 @@ static const u8 tomoyo_p2mac[TOMOYO_MAX_PATH_OPERATION] = { /* * Mapping table from "enum tomoyo_mkdev_acl_index" to "enum tomoyo_mac_index". 
*/ -static const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = { +const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = { [TOMOYO_TYPE_MKBLOCK] = TOMOYO_MAC_FILE_MKBLOCK, [TOMOYO_TYPE_MKCHAR] = TOMOYO_MAC_FILE_MKCHAR, }; @@ -77,7 +77,7 @@ static const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = { /* * Mapping table from "enum tomoyo_path2_acl_index" to "enum tomoyo_mac_index". */ -static const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = { +const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = { [TOMOYO_TYPE_LINK] = TOMOYO_MAC_FILE_LINK, [TOMOYO_TYPE_RENAME] = TOMOYO_MAC_FILE_RENAME, [TOMOYO_TYPE_PIVOT_ROOT] = TOMOYO_MAC_FILE_PIVOT_ROOT, @@ -87,7 +87,7 @@ static const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = { * Mapping table from "enum tomoyo_path_number_acl_index" to * "enum tomoyo_mac_index". */ -static const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { +const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { [TOMOYO_TYPE_CREATE] = TOMOYO_MAC_FILE_CREATE, [TOMOYO_TYPE_MKDIR] = TOMOYO_MAC_FILE_MKDIR, [TOMOYO_TYPE_MKFIFO] = TOMOYO_MAC_FILE_MKFIFO, @@ -211,8 +211,7 @@ static int tomoyo_audit_path_log(struct tomoyo_request_info *r) if (r->granted) return 0; tomoyo_warn_log(r, "%s %s", operation, filename->name); - return tomoyo_supervisor(r, "allow_%s %s\n", operation, - filename->name); + return tomoyo_supervisor(r, "file %s %s\n", operation, filename->name); } /** @@ -231,7 +230,7 @@ static int tomoyo_audit_path2_log(struct tomoyo_request_info *r) return 0; tomoyo_warn_log(r, "%s %s %s", operation, filename1->name, filename2->name); - return tomoyo_supervisor(r, "allow_%s %s %s\n", operation, + return tomoyo_supervisor(r, "file %s %s %s\n", operation, filename1->name, filename2->name); } @@ -253,7 +252,7 @@ static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r) return 0; tomoyo_warn_log(r, "%s %s 0%o %u %u", operation, filename->name, mode, major, minor); - return tomoyo_supervisor(r, "allow_%s %s 0%o %u %u\n", operation, + return tomoyo_supervisor(r, "file %s %s 0%o %u %u\n", operation, filename->name, mode, major, minor); } @@ -291,7 +290,7 @@ static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r) tomoyo_print_ulong(buffer, sizeof(buffer), r->param.path_number.number, radix); tomoyo_warn_log(r, "%s %s %s", operation, filename->name, buffer); - return tomoyo_supervisor(r, "allow_%s %s %s\n", operation, + return tomoyo_supervisor(r, "file %s %s %s\n", operation, filename->name, buffer); } -- cgit v1.2.3 From d5ca1725ac9ba876c2dd614bb9826d0c4e13d818 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:18:21 +0900 Subject: TOMOYO: Simplify profile structure. Remove global preference from profile structure in order to make code simpler. Due to this structure change, printk() warnings upon policy violation are temporarily disabled. They will be replaced by /sys/kernel/security/tomoyo/audit by next patch. 
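For illustration only (profile number, comment and values are invented, not taken from the patch), a profile read back through /sys/kernel/security/tomoyo/profile after this change carries its preferences inline, roughly like:

  PROFILE_VERSION=20090903
  1-COMMENT=
  1-PREFERENCE={ max_learning_entry=2048 }

The former PREFERENCE::learning / PREFERENCE::permissive / PREFERENCE::enforcing blocks and their verbose= switches are dropped; only per-profile numeric preferences such as max_learning_entry remain.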
Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/common.c | 205 ++++++++++++++++------------------------------- security/tomoyo/common.h | 7 ++ security/tomoyo/util.c | 39 ++------- 3 files changed, 83 insertions(+), 168 deletions(-) diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 465df022c211..2b280350708f 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -11,16 +11,6 @@ #include #include "common.h" -static struct tomoyo_profile tomoyo_default_profile = { - .learning = &tomoyo_default_profile.preference, - .permissive = &tomoyo_default_profile.preference, - .enforcing = &tomoyo_default_profile.preference, - .preference.enforcing_verbose = true, - .preference.learning_max_entry = 2048, - .preference.learning_verbose = false, - .preference.permissive_verbose = true -}; - /* Profile version. Currently only 20090903 is defined. */ static unsigned int tomoyo_profile_version; @@ -61,6 +51,11 @@ static const char *tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file", }; +/* String table for PREFERENCE keyword. */ +static const char * const tomoyo_pref_keywords[TOMOYO_MAX_PREF] = { + [TOMOYO_PREF_MAX_LEARNING_ENTRY] = "max_learning_entry", +}; + /* Permit policy management by non-root user? */ static bool tomoyo_manage_by_non_root; @@ -71,11 +66,22 @@ static bool tomoyo_manage_by_non_root; * * @value: Bool value. */ +/* static const char *tomoyo_yesno(const unsigned int value) { return value ? "yes" : "no"; } +*/ +/** + * tomoyo_addprintf - strncat()-like-snprintf(). + * + * @buffer: Buffer to write to. Must be '\0'-terminated. + * @len: Size of @buffer. + * @fmt: The printf()'s format string, followed by parameters. + * + * Returns nothing. + */ static void tomoyo_addprintf(char *buffer, int len, const char *fmt, ...) { va_list args; @@ -294,12 +300,10 @@ static struct tomoyo_profile *tomoyo_assign_profile(const unsigned int profile) ptr = tomoyo_profile_ptr[profile]; if (!ptr && tomoyo_memory_ok(entry)) { ptr = entry; - ptr->learning = &tomoyo_default_profile.preference; - ptr->permissive = &tomoyo_default_profile.preference; - ptr->enforcing = &tomoyo_default_profile.preference; ptr->default_config = TOMOYO_CONFIG_DISABLED; memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT, sizeof(ptr->config)); + ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] = 2048; mb(); /* Avoid out-of-order execution. */ tomoyo_profile_ptr[profile] = ptr; entry = NULL; @@ -319,13 +323,22 @@ static struct tomoyo_profile *tomoyo_assign_profile(const unsigned int profile) */ struct tomoyo_profile *tomoyo_profile(const u8 profile) { + static struct tomoyo_profile tomoyo_null_profile; struct tomoyo_profile *ptr = tomoyo_profile_ptr[profile]; - if (!tomoyo_policy_loaded) - return &tomoyo_default_profile; - BUG_ON(!ptr); + if (!ptr) + ptr = &tomoyo_null_profile; return ptr; } +/** + * tomoyo_find_yesno - Find values for specified keyword. + * + * @string: String to check. + * @find: Name of keyword. + * + * Returns 1 if "@find=yes" was found, 0 if "@find=no" was found, -1 otherwise. 
+ */ +/* static s8 tomoyo_find_yesno(const char *string, const char *find) { const char *cp = strstr(string, find); @@ -338,19 +351,17 @@ static s8 tomoyo_find_yesno(const char *string, const char *find) } return -1; } +*/ -static void tomoyo_set_bool(bool *b, const char *string, const char *find) -{ - switch (tomoyo_find_yesno(string, find)) { - case 1: - *b = true; - break; - case 0: - *b = false; - break; - } -} - +/** + * tomoyo_set_uint - Set value for specified preference. + * + * @i: Pointer to "unsigned int". + * @string: String to check. + * @find: Name of keyword. + * + * Returns nothing. + */ static void tomoyo_set_uint(unsigned int *i, const char *string, const char *find) { @@ -359,51 +370,16 @@ static void tomoyo_set_uint(unsigned int *i, const char *string, sscanf(cp + strlen(find), "=%u", i); } -static void tomoyo_set_pref(const char *name, const char *value, - const bool use_default, - struct tomoyo_profile *profile) -{ - struct tomoyo_preference **pref; - bool *verbose; - if (!strcmp(name, "enforcing")) { - if (use_default) { - pref = &profile->enforcing; - goto set_default; - } - profile->enforcing = &profile->preference; - verbose = &profile->preference.enforcing_verbose; - goto set_verbose; - } - if (!strcmp(name, "permissive")) { - if (use_default) { - pref = &profile->permissive; - goto set_default; - } - profile->permissive = &profile->preference; - verbose = &profile->preference.permissive_verbose; - goto set_verbose; - } - if (!strcmp(name, "learning")) { - if (use_default) { - pref = &profile->learning; - goto set_default; - } - profile->learning = &profile->preference; - tomoyo_set_uint(&profile->preference.learning_max_entry, value, - "max_entry"); - verbose = &profile->preference.learning_verbose; - goto set_verbose; - } - return; - set_default: - *pref = &tomoyo_default_profile.preference; - return; - set_verbose: - tomoyo_set_bool(verbose, value, "verbose"); -} - +/** + * tomoyo_set_mode - Set mode for specified profile. + * + * @name: Name of functionality. + * @value: Mode for @name. + * @profile: Pointer to "struct tomoyo_profile". + * + * Returns 0 on success, negative value otherwise. 
+ */ static int tomoyo_set_mode(char *name, const char *value, - const bool use_default, struct tomoyo_profile *profile) { u8 i; @@ -425,7 +401,7 @@ static int tomoyo_set_mode(char *name, const char *value, } else { return -EINVAL; } - if (use_default) { + if (strstr(value, "use_default")) { config = TOMOYO_CONFIG_USE_DEFAULT; } else { u8 mode; @@ -455,34 +431,21 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head) { char *data = head->write_buf; unsigned int i; - bool use_default = false; char *cp; struct tomoyo_profile *profile; if (sscanf(data, "PROFILE_VERSION=%u", &tomoyo_profile_version) == 1) return 0; i = simple_strtoul(data, &cp, 10); - if (data == cp) { - profile = &tomoyo_default_profile; - } else { - if (*cp != '-') - return -EINVAL; - data = cp + 1; - profile = tomoyo_assign_profile(i); - if (!profile) - return -EINVAL; - } + if (*cp != '-') + return -EINVAL; + data = cp + 1; + profile = tomoyo_assign_profile(i); + if (!profile) + return -EINVAL; cp = strchr(data, '='); if (!cp) return -EINVAL; *cp++ = '\0'; - if (profile != &tomoyo_default_profile) - use_default = strstr(cp, "use_default") != NULL; - if (tomoyo_str_starts(&data, "PREFERENCE::")) { - tomoyo_set_pref(data, cp, use_default, profile); - return 0; - } - if (profile == &tomoyo_default_profile) - return -EINVAL; if (!strcmp(data, "COMMENT")) { static DEFINE_SPINLOCK(lock); const struct tomoyo_path_info *new_comment @@ -497,48 +460,13 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head) tomoyo_put_name(old_comment); return 0; } - return tomoyo_set_mode(data, cp, use_default, profile); -} - -static void tomoyo_print_preference(struct tomoyo_io_buffer *head, - const int idx) -{ - struct tomoyo_preference *pref = &tomoyo_default_profile.preference; - const struct tomoyo_profile *profile = idx >= 0 ? - tomoyo_profile_ptr[idx] : NULL; - char buffer[16] = ""; - if (profile) { - buffer[sizeof(buffer) - 1] = '\0'; - snprintf(buffer, sizeof(buffer) - 1, "%u-", idx); - } - if (profile) { - pref = profile->learning; - if (pref == &tomoyo_default_profile.preference) - goto skip1; - } - tomoyo_io_printf(head, "%sPREFERENCE::%s={ " - "verbose=%s max_entry=%u }\n", - buffer, "learning", - tomoyo_yesno(pref->learning_verbose), - pref->learning_max_entry); - skip1: - if (profile) { - pref = profile->permissive; - if (pref == &tomoyo_default_profile.preference) - goto skip2; - } - tomoyo_io_printf(head, "%sPREFERENCE::%s={ verbose=%s }\n", - buffer, "permissive", - tomoyo_yesno(pref->permissive_verbose)); - skip2: - if (profile) { - pref = profile->enforcing; - if (pref == &tomoyo_default_profile.preference) - return; + if (!strcmp(data, "PREFERENCE")) { + for (i = 0; i < TOMOYO_MAX_PREF; i++) + tomoyo_set_uint(&profile->pref[i], cp, + tomoyo_pref_keywords[i]); + return 0; } - tomoyo_io_printf(head, "%sPREFERENCE::%s={ verbose=%s }\n", - buffer, "enforcing", - tomoyo_yesno(pref->enforcing_verbose)); + return tomoyo_set_mode(data, cp, profile); } static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config) @@ -561,7 +489,6 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) switch (head->r.step) { case 0: tomoyo_io_printf(head, "PROFILE_VERSION=%s\n", "20090903"); - tomoyo_print_preference(head, -1); head->r.step++; break; case 1: @@ -575,11 +502,18 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) break; case 2: { + u8 i; const struct tomoyo_path_info *comment = profile->comment; tomoyo_io_printf(head, "%u-COMMENT=", index); tomoyo_set_string(head, comment ? 
comment->name : ""); tomoyo_set_lf(head); + tomoyo_io_printf(head, "%u-PREFERENCE={ ", index); + for (i = 0; i < TOMOYO_MAX_PREF; i++) + tomoyo_io_printf(head, "%s=%u ", + tomoyo_pref_keywords[i], + profile->pref[i]); + tomoyo_set_string(head, "}\n"); head->r.step++; } break; @@ -606,7 +540,6 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) } if (head->r.bit == TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX) { - tomoyo_print_preference(head, index); head->r.index++; head->r.step = 1; } @@ -1777,7 +1710,7 @@ static int tomoyo_write_answer(struct tomoyo_io_buffer *head) static void tomoyo_read_version(struct tomoyo_io_buffer *head) { if (!head->r.eof) { - tomoyo_io_printf(head, "2.3.0"); + tomoyo_io_printf(head, "2.4.0"); head->r.eof = true; } } diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 139ad7544460..2b39e63234c8 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -211,6 +211,12 @@ enum tomoyo_mac_category_index { */ #define TOMOYO_RETRY_REQUEST 1 +/* Index numbers for profile's PREFERENCE values. */ +enum tomoyo_pref_index { + TOMOYO_PREF_MAX_LEARNING_ENTRY, + TOMOYO_MAX_PREF +}; + /********** Structure definitions. **********/ /* Common header for holding ACL entries. */ @@ -497,6 +503,7 @@ struct tomoyo_profile { struct tomoyo_preference preference; u8 default_config; u8 config[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX]; + unsigned int pref[TOMOYO_MAX_PREF]; }; /********** Function prototypes. **********/ diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index 72cd2b97cae8..adcbdebd7352 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -899,35 +899,10 @@ const char *tomoyo_last_word(const char *name) */ void tomoyo_warn_log(struct tomoyo_request_info *r, const char *fmt, ...) { - va_list args; - char *buffer; - const struct tomoyo_domain_info * const domain = r->domain; - const struct tomoyo_profile *profile = tomoyo_profile(domain->profile); - switch (r->mode) { - case TOMOYO_CONFIG_ENFORCING: - if (!profile->enforcing->enforcing_verbose) - return; - break; - case TOMOYO_CONFIG_PERMISSIVE: - if (!profile->permissive->permissive_verbose) - return; - break; - case TOMOYO_CONFIG_LEARNING: - if (!profile->learning->learning_verbose) - return; - break; - } - buffer = kmalloc(4096, GFP_NOFS); - if (!buffer) - return; - va_start(args, fmt); - vsnprintf(buffer, 4095, fmt, args); - va_end(args); - buffer[4095] = '\0'; - printk(KERN_WARNING "%s: Access %s denied for %s\n", - r->mode == TOMOYO_CONFIG_ENFORCING ? "ERROR" : "WARNING", buffer, - tomoyo_last_word(domain->domainname->name)); - kfree(buffer); + /* + * Temporarily disabled. + * Will be replaced with /sys/kernel/security/tomoyo/audit interface. + */ } /** @@ -978,13 +953,13 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r) if (perm & (1 << i)) count++; } - if (count < tomoyo_profile(domain->profile)->learning-> - learning_max_entry) + if (count < tomoyo_profile(domain->profile)-> + pref[TOMOYO_PREF_MAX_LEARNING_ENTRY]) return true; if (!domain->quota_warned) { domain->quota_warned = true; printk(KERN_WARNING "TOMOYO-WARNING: " - "Domain '%s' has so many ACLs to hold. " + "Domain '%s' has too many ACLs to hold. " "Stopped learning mode.\n", domain->domainname->name); } return false; -- cgit v1.2.3 From eadd99cc85347b4f9eb10122ac90032eb4971b02 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:18:58 +0900 Subject: TOMOYO: Add auditing interface. Add /sys/kernel/security/tomoyo/audit interface. 
This interface generates audit logs in the form of domain policy so that /usr/sbin/tomoyo-auditd can reuse audit logs for appending to /sys/kernel/security/tomoyo/domain_policy interface. Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/Makefile | 2 +- security/tomoyo/audit.c | 300 ++++++++++++++++++++++++++++++++++++++ security/tomoyo/common.c | 311 +++++++++++++++++++--------------------- security/tomoyo/common.h | 83 ++++++++++- security/tomoyo/file.c | 49 +++---- security/tomoyo/memory.c | 5 + security/tomoyo/mount.c | 26 +--- security/tomoyo/securityfs_if.c | 2 + security/tomoyo/util.c | 14 -- 9 files changed, 557 insertions(+), 235 deletions(-) create mode 100644 security/tomoyo/audit.c diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile index 91640e96bd06..b13f7f9fbb52 100644 --- a/security/tomoyo/Makefile +++ b/security/tomoyo/Makefile @@ -1 +1 @@ -obj-y = common.o domain.o file.o gc.o group.o load_policy.o memory.o mount.o realpath.o securityfs_if.o tomoyo.o util.o +obj-y = audit.o common.o domain.o file.o gc.o group.o load_policy.o memory.o mount.o realpath.o securityfs_if.o tomoyo.o util.o diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c new file mode 100644 index 000000000000..e882f17065f2 --- /dev/null +++ b/security/tomoyo/audit.c @@ -0,0 +1,300 @@ +/* + * security/tomoyo/audit.c + * + * Pathname restriction functions. + * + * Copyright (C) 2005-2010 NTT DATA CORPORATION + */ + +#include "common.h" +#include + +/** + * tomoyo_convert_time - Convert time_t to YYYY/MM/DD hh/mm/ss. + * + * @time: Seconds since 1970/01/01 00:00:00. + * @stamp: Pointer to "struct tomoyo_time". + * + * Returns nothing. + * + * This function does not handle Y2038 problem. + */ +static void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp) +{ + static const u16 tomoyo_eom[2][12] = { + { 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, + { 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } + }; + u16 y; + u8 m; + bool r; + stamp->sec = time % 60; + time /= 60; + stamp->min = time % 60; + time /= 60; + stamp->hour = time % 24; + time /= 24; + for (y = 1970; ; y++) { + const unsigned short days = (y & 3) ? 365 : 366; + if (time < days) + break; + time -= days; + } + r = (y & 3) == 0; + for (m = 0; m < 11 && time >= tomoyo_eom[r][m]; m++) + ; + if (m) + time -= tomoyo_eom[r][m - 1]; + stamp->year = y; + stamp->month = ++m; + stamp->day = ++time; +} + +/** + * tomoyo_print_header - Get header line of audit log. + * + * @r: Pointer to "struct tomoyo_request_info". + * + * Returns string representation. + * + * This function uses kmalloc(), so caller must kfree() if this function + * didn't return NULL. 
+ */ +static char *tomoyo_print_header(struct tomoyo_request_info *r) +{ + struct tomoyo_time stamp; + const pid_t gpid = task_pid_nr(current); + static const int tomoyo_buffer_len = 4096; + char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); + pid_t ppid; + if (!buffer) + return NULL; + { + struct timeval tv; + do_gettimeofday(&tv); + tomoyo_convert_time(tv.tv_sec, &stamp); + } + rcu_read_lock(); + ppid = task_tgid_vnr(current->real_parent); + rcu_read_unlock(); + snprintf(buffer, tomoyo_buffer_len - 1, + "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s " + "granted=%s (global-pid=%u) task={ pid=%u ppid=%u " + "uid=%u gid=%u euid=%u egid=%u suid=%u sgid=%u " + "fsuid=%u fsgid=%u }", + stamp.year, stamp.month, stamp.day, stamp.hour, + stamp.min, stamp.sec, r->profile, tomoyo_mode[r->mode], + tomoyo_yesno(r->granted), gpid, task_tgid_vnr(current), ppid, + current_uid(), current_gid(), current_euid(), current_egid(), + current_suid(), current_sgid(), current_fsuid(), + current_fsgid()); + return buffer; +} + +/** + * tomoyo_init_log - Allocate buffer for audit logs. + * + * @r: Pointer to "struct tomoyo_request_info". + * @len: Buffer size needed for @fmt and @args. + * @fmt: The printf()'s format string. + * @args: va_list structure for @fmt. + * + * Returns pointer to allocated memory. + * + * This function uses kzalloc(), so caller must kfree() if this function + * didn't return NULL. + */ +char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt, + va_list args) +{ + char *buf = NULL; + const char *header = NULL; + int pos; + const char *domainname = tomoyo_domain()->domainname->name; + header = tomoyo_print_header(r); + if (!header) + return NULL; + /* +10 is for '\n' etc. and '\0'. */ + len += strlen(domainname) + strlen(header) + 10; + len = tomoyo_round2(len); + buf = kzalloc(len, GFP_NOFS); + if (!buf) + goto out; + len--; + pos = snprintf(buf, len, "%s", header); + pos += snprintf(buf + pos, len - pos, "\n%s\n", domainname); + vsnprintf(buf + pos, len - pos, fmt, args); +out: + kfree(header); + return buf; +} + +/* Wait queue for /sys/kernel/security/tomoyo/audit. */ +static DECLARE_WAIT_QUEUE_HEAD(tomoyo_log_wait); + +/* Structure for audit log. */ +struct tomoyo_log { + struct list_head list; + char *log; + int size; +}; + +/* The list for "struct tomoyo_log". */ +static LIST_HEAD(tomoyo_log); + +/* Lock for "struct list_head tomoyo_log". */ +static DEFINE_SPINLOCK(tomoyo_log_lock); + +/* Length of "struct list_head tomoyo_log". */ +static unsigned int tomoyo_log_count; + +/** + * tomoyo_get_audit - Get audit mode. + * + * @profile: Profile number. + * @index: Index number of functionality. + * @is_granted: True if granted log, false otherwise. + * + * Returns true if this request should be audited, false otherwise. + */ +static bool tomoyo_get_audit(const u8 profile, const u8 index, + const bool is_granted) +{ + u8 mode; + const u8 category = TOMOYO_MAC_CATEGORY_FILE + TOMOYO_MAX_MAC_INDEX; + struct tomoyo_profile *p; + if (!tomoyo_policy_loaded) + return false; + p = tomoyo_profile(profile); + if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG]) + return false; + mode = p->config[index]; + if (mode == TOMOYO_CONFIG_USE_DEFAULT) + mode = p->config[category]; + if (mode == TOMOYO_CONFIG_USE_DEFAULT) + mode = p->default_config; + if (is_granted) + return mode & TOMOYO_CONFIG_WANT_GRANT_LOG; + return mode & TOMOYO_CONFIG_WANT_REJECT_LOG; +} + +/** + * tomoyo_write_log2 - Write an audit log. + * + * @r: Pointer to "struct tomoyo_request_info".
+ * @len: Buffer size needed for @fmt and @args. + * @fmt: The printf()'s format string. + * @args: va_list structure for @fmt. + * + * Returns nothing. + */ +void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt, + va_list args) +{ + char *buf; + struct tomoyo_log *entry; + bool quota_exceeded = false; + if (!tomoyo_get_audit(r->profile, r->type, r->granted)) + goto out; + buf = tomoyo_init_log(r, len, fmt, args); + if (!buf) + goto out; + entry = kzalloc(sizeof(*entry), GFP_NOFS); + if (!entry) { + kfree(buf); + goto out; + } + entry->log = buf; + len = tomoyo_round2(strlen(buf) + 1); + /* + * The entry->size is used for memory quota checks. + * Don't go beyond strlen(entry->log). + */ + entry->size = len + tomoyo_round2(sizeof(*entry)); + spin_lock(&tomoyo_log_lock); + if (tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT] && + tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] + entry->size >= + tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT]) { + quota_exceeded = true; + } else { + tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] += entry->size; + list_add_tail(&entry->list, &tomoyo_log); + tomoyo_log_count++; + } + spin_unlock(&tomoyo_log_lock); + if (quota_exceeded) { + kfree(buf); + kfree(entry); + goto out; + } + wake_up(&tomoyo_log_wait); +out: + return; +} + +/** + * tomoyo_write_log - Write an audit log. + * + * @r: Pointer to "struct tomoyo_request_info". + * @fmt: The printf()'s format string, followed by parameters. + * + * Returns nothing. + */ +void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...) +{ + va_list args; + int len; + va_start(args, fmt); + len = vsnprintf((char *) &len, 1, fmt, args) + 1; + va_end(args); + va_start(args, fmt); + tomoyo_write_log2(r, len, fmt, args); + va_end(args); +} + +/** + * tomoyo_read_log - Read an audit log. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. + */ +void tomoyo_read_log(struct tomoyo_io_buffer *head) +{ + struct tomoyo_log *ptr = NULL; + if (head->r.w_pos) + return; + kfree(head->read_buf); + head->read_buf = NULL; + spin_lock(&tomoyo_log_lock); + if (!list_empty(&tomoyo_log)) { + ptr = list_entry(tomoyo_log.next, typeof(*ptr), list); + list_del(&ptr->list); + tomoyo_log_count--; + tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] -= ptr->size; + } + spin_unlock(&tomoyo_log_lock); + if (ptr) { + head->read_buf = ptr->log; + head->r.w[head->r.w_pos++] = head->read_buf; + kfree(ptr); + } +} + +/** + * tomoyo_poll_log - Wait for an audit log. + * + * @file: Pointer to "struct file". + * @wait: Pointer to "poll_table". + * + * Returns POLLIN | POLLRDNORM when ready to read an audit log. + */ +int tomoyo_poll_log(struct file *file, poll_table *wait) +{ + if (tomoyo_log_count) + return POLLIN | POLLRDNORM; + poll_wait(file, &tomoyo_log_wait, wait); + if (tomoyo_log_count) + return POLLIN | POLLRDNORM; + return 0; +} diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 2b280350708f..6580ef35074b 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -17,9 +17,12 @@ static unsigned int tomoyo_profile_version; /* Profile table. Memory is allocated as needed. */ static struct tomoyo_profile *tomoyo_profile_ptr[TOMOYO_MAX_PROFILES]; -/* String table for functionality that takes 4 modes. */ -static const char *tomoyo_mode[4] = { - "disabled", "learning", "permissive", "enforcing" +/* String table for operation mode. 
*/ +const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = { + [TOMOYO_CONFIG_DISABLED] = "disabled", + [TOMOYO_CONFIG_LEARNING] = "learning", + [TOMOYO_CONFIG_PERMISSIVE] = "permissive", + [TOMOYO_CONFIG_ENFORCING] = "enforcing" }; /* String table for /sys/kernel/security/tomoyo/profile */ @@ -53,6 +56,7 @@ static const char *tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX /* String table for PREFERENCE keyword. */ static const char * const tomoyo_pref_keywords[TOMOYO_MAX_PREF] = { + [TOMOYO_PREF_MAX_AUDIT_LOG] = "max_audit_log", [TOMOYO_PREF_MAX_LEARNING_ENTRY] = "max_learning_entry", }; @@ -66,12 +70,10 @@ static bool tomoyo_manage_by_non_root; * * @value: Bool value. */ -/* -static const char *tomoyo_yesno(const unsigned int value) +const char *tomoyo_yesno(const unsigned int value) { return value ? "yes" : "no"; } -*/ /** * tomoyo_addprintf - strncat()-like-snprintf(). @@ -117,7 +119,7 @@ static bool tomoyo_flush(struct tomoyo_io_buffer *head) head->r.w[0] = w; if (*w) return false; - /* Add '\0' for query. */ + /* Add '\0' for audit logs and query. */ if (head->poll) { if (!head->read_user_buf_avail || copy_to_user(head->read_user_buf, "", 1)) @@ -300,9 +302,12 @@ static struct tomoyo_profile *tomoyo_assign_profile(const unsigned int profile) ptr = tomoyo_profile_ptr[profile]; if (!ptr && tomoyo_memory_ok(entry)) { ptr = entry; - ptr->default_config = TOMOYO_CONFIG_DISABLED; + ptr->default_config = TOMOYO_CONFIG_DISABLED | + TOMOYO_CONFIG_WANT_GRANT_LOG | + TOMOYO_CONFIG_WANT_REJECT_LOG; memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT, sizeof(ptr->config)); + ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] = 1024; ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] = 2048; mb(); /* Avoid out-of-order execution. */ tomoyo_profile_ptr[profile] = ptr; @@ -338,7 +343,6 @@ struct tomoyo_profile *tomoyo_profile(const u8 profile) * * Returns 1 if "@find=yes" was found, 0 if "@find=no" was found, -1 otherwise. */ -/* static s8 tomoyo_find_yesno(const char *string, const char *find) { const char *cp = strstr(string, find); @@ -351,7 +355,6 @@ static s8 tomoyo_find_yesno(const char *string, const char *find) } return -1; } -*/ /** * tomoyo_set_uint - Set value for specified preference. @@ -412,6 +415,24 @@ static int tomoyo_set_mode(char *name, const char *value, * 'config' from 'TOMOYO_CONFIG_USE_DEAFULT'. */ config = (config & ~7) | mode; + if (config != TOMOYO_CONFIG_USE_DEFAULT) { + switch (tomoyo_find_yesno(value, "grant_log")) { + case 1: + config |= TOMOYO_CONFIG_WANT_GRANT_LOG; + break; + case 0: + config &= ~TOMOYO_CONFIG_WANT_GRANT_LOG; + break; + } + switch (tomoyo_find_yesno(value, "reject_log")) { + case 1: + config |= TOMOYO_CONFIG_WANT_REJECT_LOG; + break; + case 0: + config &= ~TOMOYO_CONFIG_WANT_REJECT_LOG; + break; + } + } } if (i < TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX) profile->config[i] = config; @@ -469,15 +490,30 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head) return tomoyo_set_mode(data, cp, profile); } +/** + * tomoyo_print_config - Print mode for specified functionality. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * @config: Mode for that functionality. + * + * Returns nothing. + * + * Caller prints functionality's name. 
+ */ static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config) { - tomoyo_io_printf(head, "={ mode=%s }\n", tomoyo_mode[config & 3]); + tomoyo_io_printf(head, "={ mode=%s grant_log=%s reject_log=%s }\n", + tomoyo_mode[config & 3], + tomoyo_yesno(config & TOMOYO_CONFIG_WANT_GRANT_LOG), + tomoyo_yesno(config & TOMOYO_CONFIG_WANT_REJECT_LOG)); } /** * tomoyo_read_profile - Read profile table. * * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. */ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) { @@ -488,7 +524,7 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) profile = tomoyo_profile_ptr[index]; switch (head->r.step) { case 0: - tomoyo_io_printf(head, "PROFILE_VERSION=%s\n", "20090903"); + tomoyo_io_printf(head, "PROFILE_VERSION=%u\n", 20090903); head->r.step++; break; case 1: @@ -1359,103 +1395,68 @@ static void tomoyo_read_exception(struct tomoyo_io_buffer *head) head->r.eof = true; } -/** - * tomoyo_print_header - Get header line of audit log. - * - * @r: Pointer to "struct tomoyo_request_info". - * - * Returns string representation. - * - * This function uses kmalloc(), so caller must kfree() if this function - * didn't return NULL. - */ -static char *tomoyo_print_header(struct tomoyo_request_info *r) -{ - struct timeval tv; - const pid_t gpid = task_pid_nr(current); - static const int tomoyo_buffer_len = 4096; - char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); - pid_t ppid; - if (!buffer) - return NULL; - do_gettimeofday(&tv); - rcu_read_lock(); - ppid = task_tgid_vnr(current->real_parent); - rcu_read_unlock(); - snprintf(buffer, tomoyo_buffer_len - 1, - "#timestamp=%lu profile=%u mode=%s (global-pid=%u)" - " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u" - " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }", - tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid, - task_tgid_vnr(current), ppid, - current_uid(), current_gid(), current_euid(), - current_egid(), current_suid(), current_sgid(), - current_fsuid(), current_fsgid()); - return buffer; -} - -/** - * tomoyo_init_audit_log - Allocate buffer for audit logs. - * - * @len: Required size. - * @r: Pointer to "struct tomoyo_request_info". - * - * Returns pointer to allocated memory. - * - * The @len is updated to add the header lines' size on success. - * - * This function uses kzalloc(), so caller must kfree() if this function - * didn't return NULL. - */ -static char *tomoyo_init_audit_log(int *len, struct tomoyo_request_info *r) -{ - char *buf = NULL; - const char *header; - const char *domainname; - if (!r->domain) - r->domain = tomoyo_domain(); - domainname = r->domain->domainname->name; - header = tomoyo_print_header(r); - if (!header) - return NULL; - *len += strlen(domainname) + strlen(header) + 10; - buf = kzalloc(*len, GFP_NOFS); - if (buf) - snprintf(buf, (*len) - 1, "%s\n%s\n", header, domainname); - kfree(header); - return buf; -} - -/* Wait queue for tomoyo_query_list. */ +/* Wait queue for kernel -> userspace notification. */ static DECLARE_WAIT_QUEUE_HEAD(tomoyo_query_wait); - -/* Lock for manipulating tomoyo_query_list. */ -static DEFINE_SPINLOCK(tomoyo_query_list_lock); +/* Wait queue for userspace -> kernel notification. */ +static DECLARE_WAIT_QUEUE_HEAD(tomoyo_answer_wait); /* Structure for query. */ struct tomoyo_query { struct list_head list; char *query; - int query_len; + size_t query_len; unsigned int serial; - int timer; - int answer; + u8 timer; + u8 answer; + u8 retry; }; /* The list for "struct tomoyo_query". 
*/ static LIST_HEAD(tomoyo_query_list); +/* Lock for manipulating tomoyo_query_list. */ +static DEFINE_SPINLOCK(tomoyo_query_list_lock); + /* * Number of "struct file" referring /sys/kernel/security/tomoyo/query * interface. */ static atomic_t tomoyo_query_observers = ATOMIC_INIT(0); +/** + * tomoyo_add_entry - Add an ACL to current thread's domain. Used by learning mode. + * + * @domain: Pointer to "struct tomoyo_domain_info". + * @header: Lines containing ACL. + * + * Returns nothing. + */ +static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header) +{ + char *buffer; + char *cp = strchr(header, '\n'); + int len; + if (!cp) + return; + cp = strchr(cp + 1, '\n'); + if (!cp) + return; + *cp++ = '\0'; + len = strlen(cp) + 1; + buffer = kmalloc(len, GFP_NOFS); + if (!buffer) + return; + snprintf(buffer, len - 1, "%s", cp); + tomoyo_normalize_line(buffer); + tomoyo_write_domain2(&domain->acl_info_list, buffer, false); + kfree(buffer); +} + /** * tomoyo_supervisor - Ask for the supervisor's decision. * - * @r: Pointer to "struct tomoyo_request_info". - * @fmt: The printf()'s format string, followed by parameters. + * @r: Pointer to "struct tomoyo_request_info". + * @fmt: The printf()'s format string, followed by parameters. * * Returns 0 if the supervisor decided to permit the access request which * violated the policy in enforcing mode, TOMOYO_RETRY_REQUEST if the @@ -1465,88 +1466,77 @@ static atomic_t tomoyo_query_observers = ATOMIC_INIT(0); int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) { va_list args; - int error = -EPERM; - int pos; + int error; int len; static unsigned int tomoyo_serial; - struct tomoyo_query *entry = NULL; + struct tomoyo_query entry = { }; bool quota_exceeded = false; - char *header; + va_start(args, fmt); + len = vsnprintf((char *) &len, 1, fmt, args) + 1; + va_end(args); + /* Write /sys/kernel/security/tomoyo/audit. */ + va_start(args, fmt); + tomoyo_write_log2(r, len, fmt, args); + va_end(args); + /* Nothing more to do if granted. */ + if (r->granted) + return 0; switch (r->mode) { - char *buffer; + case TOMOYO_CONFIG_ENFORCING: + error = -EPERM; + if (atomic_read(&tomoyo_query_observers)) + break; + goto out; case TOMOYO_CONFIG_LEARNING: - if (!tomoyo_domain_quota_is_ok(r)) - return 0; - va_start(args, fmt); - len = vsnprintf((char *) &pos, sizeof(pos) - 1, fmt, args) + 4; - va_end(args); - buffer = kmalloc(len, GFP_NOFS); - if (!buffer) - return 0; - va_start(args, fmt); - vsnprintf(buffer, len - 1, fmt, args); - va_end(args); - tomoyo_normalize_line(buffer); - tomoyo_write_domain2(&r->domain->acl_info_list, buffer, false); - kfree(buffer); + error = 0; + /* Check max_learning_entry parameter. */ + if (tomoyo_domain_quota_is_ok(r)) + break; /* fall through */ - case TOMOYO_CONFIG_PERMISSIVE: + default: return 0; } - if (!r->domain) - r->domain = tomoyo_domain(); - if (!atomic_read(&tomoyo_query_observers)) - return -EPERM; + /* Get message. 
*/ va_start(args, fmt); - len = vsnprintf((char *) &pos, sizeof(pos) - 1, fmt, args) + 32; + entry.query = tomoyo_init_log(r, len, fmt, args); va_end(args); - header = tomoyo_init_audit_log(&len, r); - if (!header) + if (!entry.query) goto out; - entry = kzalloc(sizeof(*entry), GFP_NOFS); - if (!entry) - goto out; - entry->query = kzalloc(len, GFP_NOFS); - if (!entry->query) + entry.query_len = strlen(entry.query) + 1; + if (!error) { + tomoyo_add_entry(r->domain, entry.query); goto out; - len = ksize(entry->query); + } + len = tomoyo_round2(entry.query_len); spin_lock(&tomoyo_query_list_lock); - if (tomoyo_quota_for_query && tomoyo_query_memory_size + len + - sizeof(*entry) >= tomoyo_quota_for_query) { + if (tomoyo_memory_quota[TOMOYO_MEMORY_QUERY] && + tomoyo_memory_used[TOMOYO_MEMORY_QUERY] + len + >= tomoyo_memory_quota[TOMOYO_MEMORY_QUERY]) { quota_exceeded = true; } else { - tomoyo_query_memory_size += len + sizeof(*entry); - entry->serial = tomoyo_serial++; + entry.serial = tomoyo_serial++; + entry.retry = r->retry; + tomoyo_memory_used[TOMOYO_MEMORY_QUERY] += len; + list_add_tail(&entry.list, &tomoyo_query_list); } spin_unlock(&tomoyo_query_list_lock); if (quota_exceeded) goto out; - pos = snprintf(entry->query, len - 1, "Q%u-%hu\n%s", - entry->serial, r->retry, header); - kfree(header); - header = NULL; - va_start(args, fmt); - vsnprintf(entry->query + pos, len - 1 - pos, fmt, args); - entry->query_len = strlen(entry->query) + 1; - va_end(args); - spin_lock(&tomoyo_query_list_lock); - list_add_tail(&entry->list, &tomoyo_query_list); - spin_unlock(&tomoyo_query_list_lock); /* Give 10 seconds for supervisor's opinion. */ - for (entry->timer = 0; - atomic_read(&tomoyo_query_observers) && entry->timer < 100; - entry->timer++) { - wake_up(&tomoyo_query_wait); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ / 10); - if (entry->answer) + while (entry.timer < 10) { + wake_up_all(&tomoyo_query_wait); + if (wait_event_interruptible_timeout + (tomoyo_answer_wait, entry.answer || + !atomic_read(&tomoyo_query_observers), HZ)) break; + else + entry.timer++; } spin_lock(&tomoyo_query_list_lock); - list_del(&entry->list); - tomoyo_query_memory_size -= len + sizeof(*entry); + list_del(&entry.list); + tomoyo_memory_used[TOMOYO_MEMORY_QUERY] -= len; spin_unlock(&tomoyo_query_list_lock); - switch (entry->answer) { + switch (entry.answer) { case 3: /* Asked to retry by administrator. */ error = TOMOYO_RETRY_REQUEST; r->retry++; @@ -1555,18 +1545,12 @@ int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) /* Granted by administrator. */ error = 0; break; - case 0: - /* Timed out. */ - break; default: - /* Rejected by administrator. */ + /* Timed out or rejected by administrator. */ break; } - out: - if (entry) - kfree(entry->query); - kfree(entry); - kfree(header); +out: + kfree(entry.query); return error; } @@ -1637,7 +1621,7 @@ static void tomoyo_read_query(struct tomoyo_io_buffer *head) head->r.query_index = 0; return; } - buf = kzalloc(len, GFP_NOFS); + buf = kzalloc(len + 32, GFP_NOFS); if (!buf) return; pos = 0; @@ -1653,7 +1637,8 @@ static void tomoyo_read_query(struct tomoyo_io_buffer *head) * can change, but I don't care. 
*/ if (len == ptr->query_len) - memmove(buf, ptr->query, len); + snprintf(buf, len + 31, "Q%u-%hu\n%s", ptr->serial, + ptr->retry, ptr->query); break; } spin_unlock(&tomoyo_query_list_lock); @@ -1764,6 +1749,11 @@ int tomoyo_open_control(const u8 type, struct file *file) head->write = tomoyo_write_exception; head->read = tomoyo_read_exception; break; + case TOMOYO_AUDIT: + /* /sys/kernel/security/tomoyo/audit */ + head->poll = tomoyo_poll_log; + head->read = tomoyo_read_log; + break; case TOMOYO_SELFDOMAIN: /* /sys/kernel/security/tomoyo/self_domain */ head->read = tomoyo_read_self_domain; @@ -1837,7 +1827,7 @@ int tomoyo_open_control(const u8 type, struct file *file) return -ENOMEM; } } - if (type != TOMOYO_QUERY) + if (type != TOMOYO_QUERY && type != TOMOYO_AUDIT) head->reader_idx = tomoyo_read_lock(); file->private_data = head; /* @@ -1858,7 +1848,8 @@ int tomoyo_open_control(const u8 type, struct file *file) * @wait: Pointer to "poll_table". * * Waits for read readiness. - * /sys/kernel/security/tomoyo/query is handled by /usr/sbin/tomoyo-queryd . + * /sys/kernel/security/tomoyo/query is handled by /usr/sbin/tomoyo-queryd and + * /sys/kernel/security/tomoyo/audit is handled by /usr/sbin/tomoyo-auditd. */ int tomoyo_poll_control(struct file *file, poll_table *wait) { @@ -1970,7 +1961,7 @@ int tomoyo_close_control(struct tomoyo_io_buffer *head) */ if (head->type == TOMOYO_QUERY) atomic_dec(&tomoyo_query_observers); - else + else if (head->type != TOMOYO_AUDIT) tomoyo_read_unlock(head->reader_idx); /* Release memory used for policy I/O. */ kfree(head->read_buf); diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 2b39e63234c8..f40ec1fcbc5d 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -44,7 +44,10 @@ enum tomoyo_mode_index { TOMOYO_CONFIG_LEARNING, TOMOYO_CONFIG_PERMISSIVE, TOMOYO_CONFIG_ENFORCING, - TOMOYO_CONFIG_USE_DEFAULT = 255 + TOMOYO_CONFIG_MAX_MODE, + TOMOYO_CONFIG_WANT_REJECT_LOG = 64, + TOMOYO_CONFIG_WANT_GRANT_LOG = 128, + TOMOYO_CONFIG_USE_DEFAULT = 255, }; /* Index numbers for entry type. */ @@ -115,6 +118,13 @@ enum tomoyo_path_acl_index { TOMOYO_MAX_PATH_OPERATION }; +enum tomoyo_memory_stat_type { + TOMOYO_MEMORY_POLICY, + TOMOYO_MEMORY_AUDIT, + TOMOYO_MEMORY_QUERY, + TOMOYO_MAX_MEMORY_STAT +}; + enum tomoyo_mkdev_acl_index { TOMOYO_TYPE_MKBLOCK, TOMOYO_TYPE_MKCHAR, @@ -150,6 +160,7 @@ enum tomoyo_securityfs_interface_index { TOMOYO_PROCESS_STATUS, TOMOYO_MEMINFO, TOMOYO_SELFDOMAIN, + TOMOYO_AUDIT, TOMOYO_VERSION, TOMOYO_PROFILE, TOMOYO_QUERY, @@ -213,6 +224,7 @@ enum tomoyo_mac_category_index { /* Index numbers for profile's PREFERENCE values. */ enum tomoyo_pref_index { + TOMOYO_PREF_MAX_AUDIT_LOG, TOMOYO_PREF_MAX_LEARNING_ENTRY, TOMOYO_MAX_PREF }; @@ -506,13 +518,21 @@ struct tomoyo_profile { unsigned int pref[TOMOYO_MAX_PREF]; }; +/* Structure for representing YYYY/MM/DD hh/mm/ss. */ +struct tomoyo_time { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 min; + u8 sec; +}; + /********** Function prototypes. **********/ bool tomoyo_str_starts(char **src, const char *find); const char *tomoyo_get_exe(void); void tomoyo_normalize_line(unsigned char *buffer); -void tomoyo_warn_log(struct tomoyo_request_info *r, const char *fmt, ...) 
- __attribute__ ((format(printf, 2, 3))); void tomoyo_check_profile(void); int tomoyo_open_control(const u8 type, struct file *file); int tomoyo_close_control(struct tomoyo_io_buffer *head); @@ -620,6 +640,14 @@ void tomoyo_check_acl(struct tomoyo_request_info *r, char *tomoyo_read_token(struct tomoyo_acl_param *param); bool tomoyo_permstr(const char *string, const char *keyword); +const char *tomoyo_yesno(const unsigned int value); +void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt, + va_list args); +void tomoyo_read_log(struct tomoyo_io_buffer *head); +int tomoyo_poll_log(struct file *file, poll_table *wait); +char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt, + va_list args); + /********** External variable definitions. **********/ /* Lock for GC. */ @@ -650,8 +678,9 @@ extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION]; extern const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION]; extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION]; -extern unsigned int tomoyo_quota_for_query; -extern unsigned int tomoyo_query_memory_size; +extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE]; +extern unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT]; +extern unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT]; /********** Inlined functions. **********/ @@ -773,6 +802,50 @@ static inline bool tomoyo_same_number_union a->value_type[1] == b->value_type[1]; } +#if defined(CONFIG_SLOB) + +/** + * tomoyo_round2 - Round up to power of 2 for calculating memory usage. + * + * @size: Size to be rounded up. + * + * Returns @size. + * + * Since SLOB does not round up, this function simply returns @size. + */ +static inline int tomoyo_round2(size_t size) +{ + return size; +} + +#else + +/** + * tomoyo_round2 - Round up to power of 2 for calculating memory usage. + * + * @size: Size to be rounded up. + * + * Returns rounded size. + * + * Strictly speaking, SLAB may be able to allocate (e.g.) 96 bytes instead of + * (e.g.) 128 bytes. + */ +static inline int tomoyo_round2(size_t size) +{ +#if PAGE_SIZE == 4096 + size_t bsize = 32; +#else + size_t bsize = 64; +#endif + if (!size) + return 0; + while (size > bsize) + bsize <<= 1; + return bsize; +} + +#endif + /** * list_for_each_cookie - iterate over a list with cookie. * @pos: the &struct list_head to use as a loop cursor. 
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 0673a69b1320..4f8526af9069 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -206,12 +206,9 @@ static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, struct path *path) */ static int tomoyo_audit_path_log(struct tomoyo_request_info *r) { - const char *operation = tomoyo_path_keyword[r->param.path.operation]; - const struct tomoyo_path_info *filename = r->param.path.filename; - if (r->granted) - return 0; - tomoyo_warn_log(r, "%s %s", operation, filename->name); - return tomoyo_supervisor(r, "file %s %s\n", operation, filename->name); + return tomoyo_supervisor(r, "file %s %s\n", tomoyo_path_keyword + [r->param.path.operation], + r->param.path.filename->name); } /** @@ -223,15 +220,10 @@ static int tomoyo_audit_path_log(struct tomoyo_request_info *r) */ static int tomoyo_audit_path2_log(struct tomoyo_request_info *r) { - const char *operation = tomoyo_path2_keyword[r->param.path2.operation]; - const struct tomoyo_path_info *filename1 = r->param.path2.filename1; - const struct tomoyo_path_info *filename2 = r->param.path2.filename2; - if (r->granted) - return 0; - tomoyo_warn_log(r, "%s %s %s", operation, filename1->name, - filename2->name); - return tomoyo_supervisor(r, "file %s %s %s\n", operation, - filename1->name, filename2->name); + return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_path2_keyword + [r->param.path2.operation], + r->param.path2.filename1->name, + r->param.path2.filename2->name); } /** @@ -243,17 +235,12 @@ static int tomoyo_audit_path2_log(struct tomoyo_request_info *r) */ static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r) { - const char *operation = tomoyo_mkdev_keyword[r->param.mkdev.operation]; - const struct tomoyo_path_info *filename = r->param.mkdev.filename; - const unsigned int major = r->param.mkdev.major; - const unsigned int minor = r->param.mkdev.minor; - const unsigned int mode = r->param.mkdev.mode; - if (r->granted) - return 0; - tomoyo_warn_log(r, "%s %s 0%o %u %u", operation, filename->name, mode, - major, minor); - return tomoyo_supervisor(r, "file %s %s 0%o %u %u\n", operation, - filename->name, mode, major, minor); + return tomoyo_supervisor(r, "file %s %s 0%o %u %u\n", + tomoyo_mkdev_keyword + [r->param.mkdev.operation], + r->param.mkdev.filename->name, + r->param.mkdev.mode, r->param.mkdev.major, + r->param.mkdev.minor); } /** @@ -267,11 +254,7 @@ static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r) { const u8 type = r->param.path_number.operation; u8 radix; - const struct tomoyo_path_info *filename = r->param.path_number.filename; - const char *operation = tomoyo_path_number_keyword[type]; char buffer[64]; - if (r->granted) - return 0; switch (type) { case TOMOYO_TYPE_CREATE: case TOMOYO_TYPE_MKDIR: @@ -289,9 +272,9 @@ static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r) } tomoyo_print_ulong(buffer, sizeof(buffer), r->param.path_number.number, radix); - tomoyo_warn_log(r, "%s %s %s", operation, filename->name, buffer); - return tomoyo_supervisor(r, "file %s %s %s\n", operation, - filename->name, buffer); + return tomoyo_supervisor(r, "file %s %s %s\n", + tomoyo_path_number_keyword[type], + r->param.path_number.filename->name, buffer); } /** diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c index 839b8ebc6fe6..598282cd0bdd 100644 --- a/security/tomoyo/memory.c +++ b/security/tomoyo/memory.c @@ -29,6 +29,11 @@ void tomoyo_warn_oom(const char *function) panic("MAC Initialization failed.\n"); } 
+/* Memoy currently used by policy/audit log/query. */ +unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT]; +/* Memory quota for "policy"/"audit log"/"query". */ +unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT]; + /* Memory allocated for policy. */ static atomic_t tomoyo_policy_memory_size; /* Quota for holding policy. */ diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c index 1e610f96c99d..8ba28fda4727 100644 --- a/security/tomoyo/mount.c +++ b/security/tomoyo/mount.c @@ -27,29 +27,11 @@ static const char * const tomoyo_mounts[TOMOYO_MAX_SPECIAL_MOUNT] = { */ static int tomoyo_audit_mount_log(struct tomoyo_request_info *r) { - const char *dev = r->param.mount.dev->name; - const char *dir = r->param.mount.dir->name; - const char *type = r->param.mount.type->name; - const unsigned long flags = r->param.mount.flags; - if (r->granted) - return 0; - if (type == tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]) - tomoyo_warn_log(r, "mount -o remount %s 0x%lX", dir, flags); - else if (type == tomoyo_mounts[TOMOYO_MOUNT_BIND] - || type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) - tomoyo_warn_log(r, "mount %s %s %s 0x%lX", type, dev, dir, - flags); - else if (type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE] || - type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE] || - type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE] || - type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]) - tomoyo_warn_log(r, "mount %s %s 0x%lX", type, dir, flags); - else - tomoyo_warn_log(r, "mount -t %s %s %s 0x%lX", type, dev, dir, - flags); - return tomoyo_supervisor(r, "allow_mount %s %s %s 0x%lX\n", + return tomoyo_supervisor(r, "file mount %s %s %s 0x%lX\n", r->param.mount.dev->name, - r->param.mount.dir->name, type, flags); + r->param.mount.dir->name, + r->param.mount.type->name, + r->param.mount.flags); } /** diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c index 6410868c8a3d..e056609b422b 100644 --- a/security/tomoyo/securityfs_if.c +++ b/security/tomoyo/securityfs_if.c @@ -135,6 +135,8 @@ static int __init tomoyo_initerface_init(void) TOMOYO_DOMAINPOLICY); tomoyo_create_entry("exception_policy", 0600, tomoyo_dir, TOMOYO_EXCEPTIONPOLICY); + tomoyo_create_entry("audit", 0400, tomoyo_dir, + TOMOYO_AUDIT); tomoyo_create_entry("self_domain", 0400, tomoyo_dir, TOMOYO_SELFDOMAIN); tomoyo_create_entry(".domain_status", 0600, tomoyo_dir, diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index adcbdebd7352..bc71528ff440 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -891,20 +891,6 @@ const char *tomoyo_last_word(const char *name) return name; } -/** - * tomoyo_warn_log - Print warning or error message on console. - * - * @r: Pointer to "struct tomoyo_request_info". - * @fmt: The printf()'s format string, followed by parameters. - */ -void tomoyo_warn_log(struct tomoyo_request_info *r, const char *fmt, ...) -{ - /* - * Temporarily disabled. - * Will be replaced with /sys/kernel/security/tomoyo/audit interface. - */ -} - /** * tomoyo_domain_quota_is_ok - Check for domain's quota. * -- cgit v1.2.3 From 32997144fd9925fc4d506a16990a0c405f766526 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:19:28 +0900 Subject: TOMOYO: Add ACL group support. ACL group allows administrator to globally grant not only "file read" permission but also other permissions. 
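A minimal illustration of how the two new keywords fit together (the pathnames, group number and profile number below are placeholders, not taken from this patch). Lines written to exception_policy define the shared ACL entries for a group:

    acl_group 0 file read /etc/ld.so.cache
    acl_group 0 file read /proc/meminfo

and a domain opts in from domain_policy with "use_group":

    <kernel> /usr/sbin/sshd
    use_profile 1
    use_group 0

With that in place, tomoyo_check_acl() in the domain.c hunk below first scans the domain's own acl_info_list and, only if nothing matched, retries against tomoyo_acl_group[0], so the group acts as a global fallback rather than an override.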
Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/common.c | 51 +++++++++++++++++++++++++++++++++++++++++------- security/tomoyo/common.h | 7 +++++++ security/tomoyo/domain.c | 23 +++++++++++++++++++++- security/tomoyo/gc.c | 16 ++++++++++++--- security/tomoyo/memory.c | 2 ++ 5 files changed, 88 insertions(+), 11 deletions(-) diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 6580ef35074b..507ebf01e43b 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -896,6 +896,12 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) domain->profile = (u8) profile; return 0; } + if (sscanf(data, "use_group %u\n", &profile) == 1 + && profile < TOMOYO_MAX_ACL_GROUPS) { + if (!is_delete) + domain->group = (u8) profile; + return 0; + } if (!strcmp(data, "quota_exceeded")) { domain->quota_warned = !is_delete; return 0; @@ -908,7 +914,7 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) } /** - * tomoyo_set_group - Print category name. + * tomoyo_set_group - Print "acl_group " header keyword and category name. * * @head: Pointer to "struct tomoyo_io_buffer". * @category: Category name. @@ -918,6 +924,9 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) static void tomoyo_set_group(struct tomoyo_io_buffer *head, const char *category) { + if (head->type == TOMOYO_EXCEPTIONPOLICY) + tomoyo_io_printf(head, "acl_group %u ", + head->r.acl_group_index); tomoyo_set_string(head, category); } @@ -1041,17 +1050,17 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head, /** * tomoyo_read_domain2 - Read domain policy. * - * @head: Pointer to "struct tomoyo_io_buffer". - * @domain: Pointer to "struct tomoyo_domain_info". + * @head: Pointer to "struct tomoyo_io_buffer". + * @list: Pointer to "struct list_head". * * Caller holds tomoyo_read_lock(). * * Returns true on success, false otherwise. */ static bool tomoyo_read_domain2(struct tomoyo_io_buffer *head, - struct tomoyo_domain_info *domain) + struct list_head *list) { - list_for_each_cookie(head->r.acl, &domain->acl_info_list) { + list_for_each_cookie(head->r.acl, list) { struct tomoyo_acl_info *ptr = list_entry(head->r.acl, typeof(*ptr), list); if (!tomoyo_print_entry(head, ptr)) @@ -1085,6 +1094,8 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) tomoyo_set_lf(head); tomoyo_io_printf(head, "use_profile %u\n", domain->profile); + tomoyo_io_printf(head, "use_group %u\n", + domain->group); if (domain->quota_warned) tomoyo_set_string(head, "quota_exceeded\n"); if (domain->transition_failed) @@ -1093,7 +1104,7 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) tomoyo_set_lf(head); /* fall through */ case 1: - if (!tomoyo_read_domain2(head, domain)) + if (!tomoyo_read_domain2(head, &domain->acl_info_list)) return; head->r.step++; if (!tomoyo_set_lf(head)) @@ -1262,6 +1273,14 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head) }; u8 i; param.is_delete = tomoyo_str_starts(¶m.data, "delete "); + if (!param.is_delete && tomoyo_str_starts(¶m.data, "select ") && + !strcmp(param.data, "execute_only")) { + head->r.print_execute_only = true; + return 0; + } + /* Don't allow updating policies by non manager programs. 
*/ + if (!tomoyo_manager()) + return -EPERM; if (tomoyo_str_starts(¶m.data, "aggregator ")) return tomoyo_write_aggregator(¶m); for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++) @@ -1270,6 +1289,14 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head) for (i = 0; i < TOMOYO_MAX_GROUP; i++) if (tomoyo_str_starts(¶m.data, tomoyo_group_name[i])) return tomoyo_write_group(¶m, i); + if (tomoyo_str_starts(¶m.data, "acl_group ")) { + unsigned int group; + char *data; + group = simple_strtoul(param.data, &data, 10); + if (group < TOMOYO_MAX_ACL_GROUPS && *data++ == ' ') + return tomoyo_write_domain2(&tomoyo_acl_group[group], + data, param.is_delete); + } return -EINVAL; } @@ -1392,6 +1419,15 @@ static void tomoyo_read_exception(struct tomoyo_io_buffer *head) head->r.step++; if (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP) return; + while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP + + TOMOYO_MAX_ACL_GROUPS) { + head->r.acl_group_index = head->r.step - TOMOYO_MAX_POLICY + - TOMOYO_MAX_GROUP; + if (!tomoyo_read_domain2(head, &tomoyo_acl_group + [head->r.acl_group_index])) + return; + head->r.step++; + } head->r.eof = true; } @@ -1914,7 +1950,8 @@ int tomoyo_write_control(struct tomoyo_io_buffer *head, return -EFAULT; /* Don't allow updating policies by non manager programs. */ if (head->write != tomoyo_write_pid && - head->write != tomoyo_write_domain && !tomoyo_manager()) + head->write != tomoyo_write_domain && + head->write != tomoyo_write_exception && !tomoyo_manager()) return -EPERM; if (mutex_lock_interruptible(&head->io_sem)) return -EINTR; diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index f40ec1fcbc5d..4bc3975516cb 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -38,6 +38,9 @@ struct linux_binprm; /* Profile number is an integer between 0 and 255. */ #define TOMOYO_MAX_PROFILES 256 +/* Group number is an integer between 0 and 255. */ +#define TOMOYO_MAX_ACL_GROUPS 256 + /* Index numbers for operation mode. */ enum tomoyo_mode_index { TOMOYO_CONFIG_DISABLED, @@ -357,6 +360,7 @@ struct tomoyo_domain_info { /* Name of this domain. Never NULL. */ const struct tomoyo_path_info *domainname; u8 profile; /* Profile number to use. */ + u8 group; /* Group number to use. */ bool is_deleted; /* Delete flag. */ bool quota_warned; /* Quota warnning flag. */ bool transition_failed; /* Domain transition failed flag. */ @@ -446,6 +450,7 @@ struct tomoyo_io_buffer { int step; int query_index; u16 index; + u8 acl_group_index; u8 bit; u8 w_pos; bool eof; @@ -666,6 +671,8 @@ extern struct mutex tomoyo_policy_lock; /* Has /sbin/init started? */ extern bool tomoyo_policy_loaded; +extern struct list_head tomoyo_acl_group[TOMOYO_MAX_ACL_GROUPS]; + /* The kernel's domain. */ extern struct tomoyo_domain_info tomoyo_kernel_domain; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index cb5d2b05c244..af5f325e2f33 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -12,6 +12,9 @@ /* Variables definitions.*/ +/* The global ACL referred by "use_group" keyword. */ +struct list_head tomoyo_acl_group[TOMOYO_MAX_ACL_GROUPS]; + /* The initial domain. */ struct tomoyo_domain_info tomoyo_kernel_domain; @@ -125,14 +128,27 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, return error; } +/** + * tomoyo_check_acl - Do permission check. + * + * @r: Pointer to "struct tomoyo_request_info". + * @check_entry: Callback function to check type specific parameters. 
+ * + * Returns 0 on success, negative value otherwise. + * + * Caller holds tomoyo_read_lock(). + */ void tomoyo_check_acl(struct tomoyo_request_info *r, bool (*check_entry) (struct tomoyo_request_info *, const struct tomoyo_acl_info *)) { const struct tomoyo_domain_info *domain = r->domain; struct tomoyo_acl_info *ptr; + bool retried = false; + const struct list_head *list = &domain->acl_info_list; - list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) { +retry: + list_for_each_entry_rcu(ptr, list, list) { if (ptr->is_deleted || ptr->type != r->param_type) continue; if (check_entry(r, ptr)) { @@ -140,6 +156,11 @@ void tomoyo_check_acl(struct tomoyo_request_info *r, return; } } + if (!retried) { + retried = true; + list = &tomoyo_acl_group[domain->group]; + goto retry; + } r->granted = false; } diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c index de14030823cd..412ee8309c23 100644 --- a/security/tomoyo/gc.c +++ b/security/tomoyo/gc.c @@ -265,10 +265,17 @@ static bool tomoyo_collect_member(const enum tomoyo_policy_id id, return true; } -static bool tomoyo_collect_acl(struct tomoyo_domain_info *domain) +/** + * tomoyo_collect_acl - Delete elements in "struct tomoyo_domain_info". + * + * @list: Pointer to "struct list_head". + * + * Returns true if some elements are deleted, false otherwise. + */ +static bool tomoyo_collect_acl(struct list_head *list) { struct tomoyo_acl_info *acl; - list_for_each_entry(acl, &domain->acl_info_list, list) { + list_for_each_entry(acl, list, list) { if (!acl->is_deleted) continue; if (!tomoyo_add_to_gc(TOMOYO_ID_ACL, &acl->list)) @@ -291,10 +298,13 @@ static void tomoyo_collect_entry(void) if (!tomoyo_collect_member(i, &tomoyo_policy_list[i])) goto unlock; } + for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++) + if (!tomoyo_collect_acl(&tomoyo_acl_group[i])) + goto unlock; { struct tomoyo_domain_info *domain; list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { - if (!tomoyo_collect_acl(domain)) + if (!tomoyo_collect_acl(&domain->acl_info_list)) goto unlock; if (!domain->is_deleted || atomic_read(&domain->users)) continue; diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c index 598282cd0bdd..7a0493943d6d 100644 --- a/security/tomoyo/memory.c +++ b/security/tomoyo/memory.c @@ -213,6 +213,8 @@ void __init tomoyo_mm_init(void) for (idx = 0; idx < TOMOYO_MAX_HASH; idx++) INIT_LIST_HEAD(&tomoyo_name_list[idx]); INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list); + for (idx = 0; idx < TOMOYO_MAX_ACL_GROUPS; idx++) + INIT_LIST_HEAD(&tomoyo_acl_group[idx]); tomoyo_kernel_domain.domainname = tomoyo_get_name(TOMOYO_ROOT_NAME); list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list); idx = tomoyo_read_lock(); -- cgit v1.2.3 From bd03a3e4c9a9df0c6b007045fa7fc8889111a478 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:19:52 +0900 Subject: TOMOYO: Add policy namespace support. Mauras Olivier reported that it is difficult to use TOMOYO in LXC environments, for TOMOYO cannot distinguish between environments outside the container and environments inside the container since LXC environments are created using pivot_root(). To address this problem, this patch introduces policy namespace. Each policy namespace has its own set of domain policy, exception policy and profiles, which are all independent of other namespaces. This independency allows users to develop policy without worrying interference among namespaces. 
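A sketch of how this looks from the /sys/kernel/security/tomoyo/ interface once a second namespace exists (namespace names and paths below are illustrative, not taken from this patch). Profile and exception policy lines are printed and parsed with a leading namespace name, and a line written without a "<...>" prefix goes to the built-in <kernel> namespace. The profile interface might then read:

    <kernel> PROFILE_VERSION=20100903
    <kernel> 0-COMMENT=host profile
    </usr/lib/lxc/lxc-init> PROFILE_VERSION=20100903
    </usr/lib/lxc/lxc-init> 0-COMMENT=container profile

and exception_policy likewise carries the prefix on every entry:

    <kernel> acl_group 0 file read /etc/ld.so.cache
    </usr/lib/lxc/lxc-init> acl_group 0 file read /etc/ld.so.cache

A domain such as "</usr/lib/lxc/lxc-init> /usr/sbin/sshd" resolves its profiles, groups and acl_group entries only within its own namespace, and the new "reset_domain" transition control is what moves a process into the root domain of such a namespace in the first place.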
Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/audit.c | 8 +- security/tomoyo/common.c | 383 ++++++++++++++++++++++++++++++++++------------- security/tomoyo/common.h | 63 ++++++-- security/tomoyo/domain.c | 360 +++++++++++++++++++++++++++++++------------- security/tomoyo/file.c | 2 +- security/tomoyo/gc.c | 73 +++++---- security/tomoyo/memory.c | 21 +-- security/tomoyo/util.c | 58 ++++--- 8 files changed, 669 insertions(+), 299 deletions(-) diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c index e882f17065f2..ef2172f29583 100644 --- a/security/tomoyo/audit.c +++ b/security/tomoyo/audit.c @@ -151,13 +151,15 @@ static unsigned int tomoyo_log_count; /** * tomoyo_get_audit - Get audit mode. * + * @ns: Pointer to "struct tomoyo_policy_namespace". * @profile: Profile number. * @index: Index number of functionality. * @is_granted: True if granted log, false otherwise. * * Returns true if this request should be audited, false otherwise. */ -static bool tomoyo_get_audit(const u8 profile, const u8 index, +static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns, + const u8 profile, const u8 index, const bool is_granted) { u8 mode; @@ -165,7 +167,7 @@ static bool tomoyo_get_audit(const u8 profile, const u8 index, struct tomoyo_profile *p; if (!tomoyo_policy_loaded) return false; - p = tomoyo_profile(profile); + p = tomoyo_profile(ns, profile); if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG]) return false; mode = p->config[index]; @@ -194,7 +196,7 @@ void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt, char *buf; struct tomoyo_log *entry; bool quota_exceeded = false; - if (!tomoyo_get_audit(r->profile, r->type, r->granted)) + if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type, r->granted)) goto out; buf = tomoyo_init_log(r, len, fmt, args); if (!buf) diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 507ebf01e43b..50481d2cf970 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -11,12 +11,6 @@ #include #include "common.h" -/* Profile version. Currently only 20090903 is defined. */ -static unsigned int tomoyo_profile_version; - -/* Profile table. Memory is allocated as needed. */ -static struct tomoyo_profile *tomoyo_profile_ptr[TOMOYO_MAX_PROFILES]; - /* String table for operation mode. */ const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = { [TOMOYO_CONFIG_DISABLED] = "disabled", @@ -216,6 +210,50 @@ static void tomoyo_set_slash(struct tomoyo_io_buffer *head) tomoyo_set_string(head, "/"); } +/* List of namespaces. */ +LIST_HEAD(tomoyo_namespace_list); +/* True if namespace other than tomoyo_kernel_namespace is defined. */ +static bool tomoyo_namespace_enabled; + +/** + * tomoyo_init_policy_namespace - Initialize namespace. + * + * @ns: Pointer to "struct tomoyo_policy_namespace". + * + * Returns nothing. + */ +void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns) +{ + unsigned int idx; + for (idx = 0; idx < TOMOYO_MAX_ACL_GROUPS; idx++) + INIT_LIST_HEAD(&ns->acl_group[idx]); + for (idx = 0; idx < TOMOYO_MAX_GROUP; idx++) + INIT_LIST_HEAD(&ns->group_list[idx]); + for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++) + INIT_LIST_HEAD(&ns->policy_list[idx]); + ns->profile_version = 20100903; + tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list); + list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list); +} + +/** + * tomoyo_print_namespace - Print namespace header. + * + * @head: Pointer to "struct tomoyo_io_buffer". 
+ * + * Returns nothing. + */ +static void tomoyo_print_namespace(struct tomoyo_io_buffer *head) +{ + if (!tomoyo_namespace_enabled) + return; + tomoyo_set_string(head, + container_of(head->r.ns, + struct tomoyo_policy_namespace, + namespace_list)->name); + tomoyo_set_space(head); +} + /** * tomoyo_print_name_union - Print a tomoyo_name_union. * @@ -283,23 +321,25 @@ static void tomoyo_print_number_union(struct tomoyo_io_buffer *head, /** * tomoyo_assign_profile - Create a new profile. * + * @ns: Pointer to "struct tomoyo_policy_namespace". * @profile: Profile number to create. * * Returns pointer to "struct tomoyo_profile" on success, NULL otherwise. */ -static struct tomoyo_profile *tomoyo_assign_profile(const unsigned int profile) +static struct tomoyo_profile *tomoyo_assign_profile +(struct tomoyo_policy_namespace *ns, const unsigned int profile) { struct tomoyo_profile *ptr; struct tomoyo_profile *entry; if (profile >= TOMOYO_MAX_PROFILES) return NULL; - ptr = tomoyo_profile_ptr[profile]; + ptr = ns->profile_ptr[profile]; if (ptr) return ptr; entry = kzalloc(sizeof(*entry), GFP_NOFS); if (mutex_lock_interruptible(&tomoyo_policy_lock)) goto out; - ptr = tomoyo_profile_ptr[profile]; + ptr = ns->profile_ptr[profile]; if (!ptr && tomoyo_memory_ok(entry)) { ptr = entry; ptr->default_config = TOMOYO_CONFIG_DISABLED | @@ -310,7 +350,7 @@ static struct tomoyo_profile *tomoyo_assign_profile(const unsigned int profile) ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] = 1024; ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] = 2048; mb(); /* Avoid out-of-order execution. */ - tomoyo_profile_ptr[profile] = ptr; + ns->profile_ptr[profile] = ptr; entry = NULL; } mutex_unlock(&tomoyo_policy_lock); @@ -322,14 +362,16 @@ static struct tomoyo_profile *tomoyo_assign_profile(const unsigned int profile) /** * tomoyo_profile - Find a profile. * + * @ns: Pointer to "struct tomoyo_policy_namespace". * @profile: Profile number to find. * * Returns pointer to "struct tomoyo_profile". 
*/ -struct tomoyo_profile *tomoyo_profile(const u8 profile) +struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns, + const u8 profile) { static struct tomoyo_profile tomoyo_null_profile; - struct tomoyo_profile *ptr = tomoyo_profile_ptr[profile]; + struct tomoyo_profile *ptr = ns->profile_ptr[profile]; if (!ptr) ptr = &tomoyo_null_profile; return ptr; @@ -454,13 +496,14 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head) unsigned int i; char *cp; struct tomoyo_profile *profile; - if (sscanf(data, "PROFILE_VERSION=%u", &tomoyo_profile_version) == 1) + if (sscanf(data, "PROFILE_VERSION=%u", &head->w.ns->profile_version) + == 1) return 0; i = simple_strtoul(data, &cp, 10); if (*cp != '-') return -EINVAL; data = cp + 1; - profile = tomoyo_assign_profile(i); + profile = tomoyo_assign_profile(head->w.ns, i); if (!profile) return -EINVAL; cp = strchr(data, '='); @@ -518,19 +561,25 @@ static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config) static void tomoyo_read_profile(struct tomoyo_io_buffer *head) { u8 index; + struct tomoyo_policy_namespace *ns = + container_of(head->r.ns, typeof(*ns), namespace_list); const struct tomoyo_profile *profile; + if (head->r.eof) + return; next: index = head->r.index; - profile = tomoyo_profile_ptr[index]; + profile = ns->profile_ptr[index]; switch (head->r.step) { case 0: - tomoyo_io_printf(head, "PROFILE_VERSION=%u\n", 20090903); + tomoyo_print_namespace(head); + tomoyo_io_printf(head, "PROFILE_VERSION=%u\n", + ns->profile_version); head->r.step++; break; case 1: for ( ; head->r.index < TOMOYO_MAX_PROFILES; head->r.index++) - if (tomoyo_profile_ptr[head->r.index]) + if (ns->profile_ptr[head->r.index]) break; if (head->r.index == TOMOYO_MAX_PROFILES) return; @@ -541,6 +590,7 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) u8 i; const struct tomoyo_path_info *comment = profile->comment; + tomoyo_print_namespace(head); tomoyo_io_printf(head, "%u-COMMENT=", index); tomoyo_set_string(head, comment ? comment->name : ""); tomoyo_set_lf(head); @@ -555,6 +605,7 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) break; case 3: { + tomoyo_print_namespace(head); tomoyo_io_printf(head, "%u-%s", index, "CONFIG"); tomoyo_print_config(head, profile->default_config); head->r.bit = 0; @@ -568,6 +619,7 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) const u8 config = profile->config[i]; if (config == TOMOYO_CONFIG_USE_DEFAULT) continue; + tomoyo_print_namespace(head); tomoyo_io_printf(head, "%u-%s%s", index, "CONFIG::", tomoyo_mac_keywords[i]); tomoyo_print_config(head, config); @@ -607,8 +659,10 @@ static int tomoyo_update_manager_entry(const char *manager, { struct tomoyo_manager e = { }; struct tomoyo_acl_param param = { + /* .ns = &tomoyo_kernel_namespace, */ .is_delete = is_delete, - .list = &tomoyo_policy_list[TOMOYO_ID_MANAGER], + .list = &tomoyo_kernel_namespace. + policy_list[TOMOYO_ID_MANAGER], }; int error = is_delete ? 
-ENOENT : -ENOMEM; if (tomoyo_domain_def(manager)) { @@ -640,13 +694,12 @@ static int tomoyo_update_manager_entry(const char *manager, static int tomoyo_write_manager(struct tomoyo_io_buffer *head) { char *data = head->write_buf; - bool is_delete = tomoyo_str_starts(&data, "delete "); if (!strcmp(data, "manage_by_non_root")) { - tomoyo_manage_by_non_root = !is_delete; + tomoyo_manage_by_non_root = !head->w.is_delete; return 0; } - return tomoyo_update_manager_entry(data, is_delete); + return tomoyo_update_manager_entry(data, head->w.is_delete); } /** @@ -660,8 +713,8 @@ static void tomoyo_read_manager(struct tomoyo_io_buffer *head) { if (head->r.eof) return; - list_for_each_cookie(head->r.acl, - &tomoyo_policy_list[TOMOYO_ID_MANAGER]) { + list_for_each_cookie(head->r.acl, &tomoyo_kernel_namespace. + policy_list[TOMOYO_ID_MANAGER]) { struct tomoyo_manager *ptr = list_entry(head->r.acl, typeof(*ptr), head.list); if (ptr->head.is_deleted) @@ -694,8 +747,8 @@ static bool tomoyo_manager(void) return true; if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid)) return false; - list_for_each_entry_rcu(ptr, &tomoyo_policy_list[TOMOYO_ID_MANAGER], - head.list) { + list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace. + policy_list[TOMOYO_ID_MANAGER], head.list) { if (!ptr->head.is_deleted && ptr->is_domain && !tomoyo_pathcmp(domainname, ptr->manager)) { found = true; @@ -707,8 +760,8 @@ static bool tomoyo_manager(void) exe = tomoyo_get_exe(); if (!exe) return false; - list_for_each_entry_rcu(ptr, &tomoyo_policy_list[TOMOYO_ID_MANAGER], - head.list) { + list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace. + policy_list[TOMOYO_ID_MANAGER], head.list) { if (!ptr->head.is_deleted && !ptr->is_domain && !strcmp(exe, ptr->manager->name)) { found = true; @@ -729,7 +782,7 @@ static bool tomoyo_manager(void) } /** - * tomoyo_select_one - Parse select command. + * tomoyo_select_domain - Parse select command. * * @head: Pointer to "struct tomoyo_io_buffer". * @data: String to parse. @@ -738,16 +791,15 @@ static bool tomoyo_manager(void) * * Caller holds tomoyo_read_lock(). */ -static bool tomoyo_select_one(struct tomoyo_io_buffer *head, const char *data) +static bool tomoyo_select_domain(struct tomoyo_io_buffer *head, + const char *data) { unsigned int pid; struct tomoyo_domain_info *domain = NULL; bool global_pid = false; - - if (!strcmp(data, "allow_execute")) { - head->r.print_execute_only = true; - return true; - } + if (strncmp(data, "select ", 7)) + return false; + data += 7; if (sscanf(data, "pid=%u", &pid) == 1 || (global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) { struct task_struct *p; @@ -818,6 +870,7 @@ static int tomoyo_delete_domain(char *domainname) /** * tomoyo_write_domain2 - Write domain policy. * + * @ns: Pointer to "struct tomoyo_policy_namespace". * @list: Pointer to "struct list_head". * @data: Policy to be interpreted. * @is_delete: True if it is a delete request. @@ -826,10 +879,12 @@ static int tomoyo_delete_domain(char *domainname) * * Caller holds tomoyo_read_lock(). 
*/ -static int tomoyo_write_domain2(struct list_head *list, char *data, +static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns, + struct list_head *list, char *data, const bool is_delete) { struct tomoyo_acl_param param = { + .ns = ns, .list = list, .data = data, .is_delete = is_delete, @@ -862,37 +917,28 @@ static int tomoyo_write_domain2(struct list_head *list, char *data, static int tomoyo_write_domain(struct tomoyo_io_buffer *head) { char *data = head->write_buf; + struct tomoyo_policy_namespace *ns; struct tomoyo_domain_info *domain = head->w.domain; - bool is_delete = false; - bool is_select = false; + const bool is_delete = head->w.is_delete; + bool is_select = !is_delete && tomoyo_str_starts(&data, "select "); unsigned int profile; - - if (tomoyo_str_starts(&data, "delete ")) - is_delete = true; - else if (tomoyo_str_starts(&data, "select ")) - is_select = true; - if (is_select && tomoyo_select_one(head, data)) - return 0; - /* Don't allow updating policies by non manager programs. */ - if (!tomoyo_manager()) - return -EPERM; - if (tomoyo_domain_def(data)) { + if (*data == '<') { domain = NULL; if (is_delete) tomoyo_delete_domain(data); else if (is_select) domain = tomoyo_find_domain(data); else - domain = tomoyo_assign_domain(data, 0); + domain = tomoyo_assign_domain(data, false); head->w.domain = domain; return 0; } if (!domain) return -EINVAL; - + ns = domain->ns; if (sscanf(data, "use_profile %u", &profile) == 1 && profile < TOMOYO_MAX_PROFILES) { - if (tomoyo_profile_ptr[profile] || !tomoyo_policy_loaded) + if (!tomoyo_policy_loaded || ns->profile_ptr[profile]) domain->profile = (u8) profile; return 0; } @@ -910,7 +956,8 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) domain->transition_failed = !is_delete; return 0; } - return tomoyo_write_domain2(&domain->acl_info_list, data, is_delete); + return tomoyo_write_domain2(ns, &domain->acl_info_list, data, + is_delete); } /** @@ -924,9 +971,11 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) static void tomoyo_set_group(struct tomoyo_io_buffer *head, const char *category) { - if (head->type == TOMOYO_EXCEPTIONPOLICY) + if (head->type == TOMOYO_EXCEPTIONPOLICY) { + tomoyo_print_namespace(head); tomoyo_io_printf(head, "acl_group %u ", head->r.acl_group_index); + } tomoyo_set_string(head, category); } @@ -956,7 +1005,7 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head, for (bit = 0; bit < TOMOYO_MAX_PATH_OPERATION; bit++) { if (!(perm & (1 << bit))) continue; - if (head->r.print_execute_only && + if (head->r.print_transition_related_only && bit != TOMOYO_TYPE_EXECUTE) continue; if (first) { @@ -970,7 +1019,7 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head, if (first) return true; tomoyo_print_name_union(head, &ptr->name); - } else if (head->r.print_execute_only) { + } else if (head->r.print_transition_related_only) { return true; } else if (acl_type == TOMOYO_TYPE_PATH2_ACL) { struct tomoyo_path2_acl *ptr = @@ -1147,8 +1196,8 @@ static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head) domain = tomoyo_find_domain(cp + 1); if (strict_strtoul(data, 10, &profile)) return -EINVAL; - if (domain && profile < TOMOYO_MAX_PROFILES - && (tomoyo_profile_ptr[profile] || !tomoyo_policy_loaded)) + if (domain && (!tomoyo_policy_loaded || + head->w.ns->profile_ptr[(u8) profile])) domain->profile = (u8) profile; return 0; } @@ -1246,10 +1295,12 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head) } static const char 
*tomoyo_transition_type[TOMOYO_MAX_TRANSITION_TYPE] = { - [TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE] = "no_initialize_domain", - [TOMOYO_TRANSITION_CONTROL_INITIALIZE] = "initialize_domain", - [TOMOYO_TRANSITION_CONTROL_NO_KEEP] = "no_keep_domain", - [TOMOYO_TRANSITION_CONTROL_KEEP] = "keep_domain", + [TOMOYO_TRANSITION_CONTROL_NO_RESET] = "no_reset_domain ", + [TOMOYO_TRANSITION_CONTROL_RESET] = "reset_domain ", + [TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE] = "no_initialize_domain ", + [TOMOYO_TRANSITION_CONTROL_INITIALIZE] = "initialize_domain ", + [TOMOYO_TRANSITION_CONTROL_NO_KEEP] = "no_keep_domain ", + [TOMOYO_TRANSITION_CONTROL_KEEP] = "keep_domain ", }; static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = { @@ -1268,19 +1319,13 @@ static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = { */ static int tomoyo_write_exception(struct tomoyo_io_buffer *head) { + const bool is_delete = head->w.is_delete; struct tomoyo_acl_param param = { + .ns = head->w.ns, + .is_delete = is_delete, .data = head->write_buf, }; u8 i; - param.is_delete = tomoyo_str_starts(¶m.data, "delete "); - if (!param.is_delete && tomoyo_str_starts(¶m.data, "select ") && - !strcmp(param.data, "execute_only")) { - head->r.print_execute_only = true; - return 0; - } - /* Don't allow updating policies by non manager programs. */ - if (!tomoyo_manager()) - return -EPERM; if (tomoyo_str_starts(¶m.data, "aggregator ")) return tomoyo_write_aggregator(¶m); for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++) @@ -1294,8 +1339,9 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head) char *data; group = simple_strtoul(param.data, &data, 10); if (group < TOMOYO_MAX_ACL_GROUPS && *data++ == ' ') - return tomoyo_write_domain2(&tomoyo_acl_group[group], - data, param.is_delete); + return tomoyo_write_domain2 + (head->w.ns, &head->w.ns->acl_group[group], + data, is_delete); } return -EINVAL; } @@ -1312,7 +1358,10 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head) */ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx) { - list_for_each_cookie(head->r.group, &tomoyo_group_list[idx]) { + struct tomoyo_policy_namespace *ns = + container_of(head->r.ns, typeof(*ns), namespace_list); + struct list_head *list = &ns->group_list[idx]; + list_for_each_cookie(head->r.group, list) { struct tomoyo_group *group = list_entry(head->r.group, typeof(*group), head.list); list_for_each_cookie(head->r.acl, &group->member_list) { @@ -1322,6 +1371,7 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx) continue; if (!tomoyo_flush(head)) return false; + tomoyo_print_namespace(head); tomoyo_set_string(head, tomoyo_group_name[idx]); tomoyo_set_string(head, group->group_name->name); if (idx == TOMOYO_PATH_GROUP) { @@ -1355,7 +1405,10 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx) */ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) { - list_for_each_cookie(head->r.acl, &tomoyo_policy_list[idx]) { + struct tomoyo_policy_namespace *ns = + container_of(head->r.ns, typeof(*ns), namespace_list); + struct list_head *list = &ns->policy_list[idx]; + list_for_each_cookie(head->r.acl, list) { struct tomoyo_acl_head *acl = container_of(head->r.acl, typeof(*acl), list); if (acl->is_deleted) @@ -1367,6 +1420,7 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) { struct tomoyo_transition_control *ptr = container_of(acl, typeof(*ptr), head); + tomoyo_print_namespace(head); tomoyo_set_string(head, 
tomoyo_transition_type [ptr->type]); tomoyo_set_string(head, ptr->program ? @@ -1381,6 +1435,7 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) { struct tomoyo_aggregator *ptr = container_of(acl, typeof(*ptr), head); + tomoyo_print_namespace(head); tomoyo_set_string(head, "aggregator "); tomoyo_set_string(head, ptr->original_name->name); @@ -1407,6 +1462,8 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) */ static void tomoyo_read_exception(struct tomoyo_io_buffer *head) { + struct tomoyo_policy_namespace *ns = + container_of(head->r.ns, typeof(*ns), namespace_list); if (head->r.eof) return; while (head->r.step < TOMOYO_MAX_POLICY && @@ -1423,7 +1480,7 @@ static void tomoyo_read_exception(struct tomoyo_io_buffer *head) + TOMOYO_MAX_ACL_GROUPS) { head->r.acl_group_index = head->r.step - TOMOYO_MAX_POLICY - TOMOYO_MAX_GROUP; - if (!tomoyo_read_domain2(head, &tomoyo_acl_group + if (!tomoyo_read_domain2(head, &ns->acl_group [head->r.acl_group_index])) return; head->r.step++; @@ -1484,7 +1541,8 @@ static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header) return; snprintf(buffer, len - 1, "%s", cp); tomoyo_normalize_line(buffer); - tomoyo_write_domain2(&domain->acl_info_list, buffer, false); + tomoyo_write_domain2(domain->ns, &domain->acl_info_list, buffer, + false); kfree(buffer); } @@ -1895,6 +1953,45 @@ int tomoyo_poll_control(struct file *file, poll_table *wait) return head->poll(file, wait); } +/** + * tomoyo_set_namespace_cursor - Set namespace to read. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. + */ +static inline void tomoyo_set_namespace_cursor(struct tomoyo_io_buffer *head) +{ + struct list_head *ns; + if (head->type != TOMOYO_EXCEPTIONPOLICY && + head->type != TOMOYO_PROFILE) + return; + /* + * If this is the first read, or reading previous namespace finished + * and has more namespaces to read, update the namespace cursor. + */ + ns = head->r.ns; + if (!ns || (head->r.eof && ns->next != &tomoyo_namespace_list)) { + /* Clearing is OK because tomoyo_flush() returned true. */ + memset(&head->r, 0, sizeof(head->r)); + head->r.ns = ns ? ns->next : tomoyo_namespace_list.next; + } +} + +/** + * tomoyo_has_more_namespace - Check for unread namespaces. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns true if we have more entries to print, false otherwise. + */ +static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head) +{ + return (head->type == TOMOYO_EXCEPTIONPOLICY || + head->type == TOMOYO_PROFILE) && head->r.eof && + head->r.ns->next != &tomoyo_namespace_list; +} + /** * tomoyo_read_control - read() for /sys/kernel/security/tomoyo/ interface. * @@ -1919,13 +2016,53 @@ int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, head->read_user_buf_avail = buffer_len; if (tomoyo_flush(head)) /* Call the policy handler. */ - head->read(head); - tomoyo_flush(head); + do { + tomoyo_set_namespace_cursor(head); + head->read(head); + } while (tomoyo_flush(head) && + tomoyo_has_more_namespace(head)); len = head->read_user_buf - buffer; mutex_unlock(&head->io_sem); return len; } +/** + * tomoyo_parse_policy - Parse a policy line. + * + * @head: Poiter to "struct tomoyo_io_buffer". + * @line: Line to parse. + * + * Returns 0 on success, negative value otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line) +{ + /* Delete request? 
*/ + head->w.is_delete = !strncmp(line, "delete ", 7); + if (head->w.is_delete) + memmove(line, line + 7, strlen(line + 7) + 1); + /* Selecting namespace to update. */ + if (head->type == TOMOYO_EXCEPTIONPOLICY || + head->type == TOMOYO_PROFILE) { + if (*line == '<') { + char *cp = strchr(line, ' '); + if (cp) { + *cp++ = '\0'; + head->w.ns = tomoyo_assign_namespace(line); + memmove(line, cp, strlen(cp) + 1); + } else + head->w.ns = NULL; + } else + head->w.ns = &tomoyo_kernel_namespace; + /* Don't allow updating if namespace is invalid. */ + if (!head->w.ns) + return -ENOENT; + } + /* Do the update. */ + return head->write(head); +} + /** * tomoyo_write_control - write() for /sys/kernel/security/tomoyo/ interface. * @@ -1941,27 +2078,31 @@ int tomoyo_write_control(struct tomoyo_io_buffer *head, const char __user *buffer, const int buffer_len) { int error = buffer_len; - int avail_len = buffer_len; + size_t avail_len = buffer_len; char *cp0 = head->write_buf; - if (!head->write) return -ENOSYS; if (!access_ok(VERIFY_READ, buffer, buffer_len)) return -EFAULT; - /* Don't allow updating policies by non manager programs. */ - if (head->write != tomoyo_write_pid && - head->write != tomoyo_write_domain && - head->write != tomoyo_write_exception && !tomoyo_manager()) - return -EPERM; if (mutex_lock_interruptible(&head->io_sem)) return -EINTR; /* Read a line and dispatch it to the policy handler. */ while (avail_len > 0) { char c; if (head->w.avail >= head->writebuf_size - 1) { - error = -ENOMEM; - break; - } else if (get_user(c, buffer)) { + const int len = head->writebuf_size * 2; + char *cp = kzalloc(len, GFP_NOFS); + if (!cp) { + error = -ENOMEM; + break; + } + memmove(cp, cp0, head->w.avail); + kfree(cp0); + head->write_buf = cp; + cp0 = cp; + head->writebuf_size = len; + } + if (get_user(c, buffer)) { error = -EFAULT; break; } @@ -1973,8 +2114,40 @@ int tomoyo_write_control(struct tomoyo_io_buffer *head, cp0[head->w.avail - 1] = '\0'; head->w.avail = 0; tomoyo_normalize_line(cp0); - head->write(head); + if (!strcmp(cp0, "reset")) { + head->w.ns = &tomoyo_kernel_namespace; + head->w.domain = NULL; + memset(&head->r, 0, sizeof(head->r)); + continue; + } + /* Don't allow updating policies by non manager programs. */ + switch (head->type) { + case TOMOYO_PROCESS_STATUS: + /* This does not write anything. */ + break; + case TOMOYO_DOMAINPOLICY: + if (tomoyo_select_domain(head, cp0)) + continue; + /* fall through */ + case TOMOYO_EXCEPTIONPOLICY: + if (!strcmp(cp0, "select transition_only")) { + head->r.print_transition_related_only = true; + continue; + } + /* fall through */ + default: + if (!tomoyo_manager()) { + error = -EPERM; + goto out; + } + } + switch (tomoyo_parse_policy(head, cp0)) { + case -EPERM: + error = -EPERM; + goto out; + } } +out: mutex_unlock(&head->io_sem); return error; } @@ -2019,27 +2192,27 @@ void tomoyo_check_profile(void) struct tomoyo_domain_info *domain; const int idx = tomoyo_read_lock(); tomoyo_policy_loaded = true; - /* Check all profiles currently assigned to domains are defined. 
*/ + printk(KERN_INFO "TOMOYO: 2.4.0\n"); list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { const u8 profile = domain->profile; - if (tomoyo_profile_ptr[profile]) + const struct tomoyo_policy_namespace *ns = domain->ns; + if (ns->profile_version != 20100903) + printk(KERN_ERR + "Profile version %u is not supported.\n", + ns->profile_version); + else if (!ns->profile_ptr[profile]) + printk(KERN_ERR + "Profile %u (used by '%s') is not defined.\n", + profile, domain->domainname->name); + else continue; - printk(KERN_ERR "You need to define profile %u before using it.\n", - profile); - printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ " + printk(KERN_ERR + "Userland tools for TOMOYO 2.4 must be installed and " + "policy must be initialized.\n"); + printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.4/ " "for more information.\n"); - panic("Profile %u (used by '%s') not defined.\n", - profile, domain->domainname->name); + panic("STOP!"); } tomoyo_read_unlock(idx); - if (tomoyo_profile_version != 20090903) { - printk(KERN_ERR "You need to install userland programs for " - "TOMOYO 2.3 and initialize policy configuration.\n"); - printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ " - "for more information.\n"); - panic("Profile version %u is not supported.\n", - tomoyo_profile_version); - } - printk(KERN_INFO "TOMOYO: 2.3.0\n"); printk(KERN_INFO "Mandatory Access Control activated.\n"); } diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 4bc3975516cb..53c8798e38b7 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -74,10 +74,6 @@ enum tomoyo_group_id { TOMOYO_MAX_GROUP }; -/* A domain definition starts with . */ -#define TOMOYO_ROOT_NAME "" -#define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1) - /* Index numbers for type of numeric values. */ enum tomoyo_value_type { TOMOYO_VALUE_TYPE_INVALID, @@ -89,6 +85,8 @@ enum tomoyo_value_type { /* Index numbers for domain transition control keywords. */ enum tomoyo_transition_type { /* Do not change this order, */ + TOMOYO_TRANSITION_CONTROL_NO_RESET, + TOMOYO_TRANSITION_CONTROL_RESET, TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE, TOMOYO_TRANSITION_CONTROL_INITIALIZE, TOMOYO_TRANSITION_CONTROL_NO_KEEP, @@ -246,6 +244,8 @@ struct tomoyo_shared_acl_head { atomic_t users; } __packed; +struct tomoyo_policy_namespace; + /* Structure for request info. */ struct tomoyo_request_info { struct tomoyo_domain_info *domain; @@ -359,6 +359,8 @@ struct tomoyo_domain_info { struct list_head acl_info_list; /* Name of this domain. Never NULL. */ const struct tomoyo_path_info *domainname; + /* Namespace for this domain. Never NULL. */ + struct tomoyo_policy_namespace *ns; u8 profile; /* Profile number to use. */ u8 group; /* Group number to use. */ bool is_deleted; /* Delete flag. */ @@ -423,6 +425,7 @@ struct tomoyo_mount_acl { struct tomoyo_acl_param { char *data; struct list_head *list; + struct tomoyo_policy_namespace *ns; bool is_delete; }; @@ -443,6 +446,7 @@ struct tomoyo_io_buffer { char __user *read_user_buf; int read_user_buf_avail; struct { + struct list_head *ns; struct list_head *domain; struct list_head *group; struct list_head *acl; @@ -455,14 +459,16 @@ struct tomoyo_io_buffer { u8 w_pos; bool eof; bool print_this_domain_only; - bool print_execute_only; + bool print_transition_related_only; const char *w[TOMOYO_MAX_IO_READ_QUEUE]; } r; struct { + struct tomoyo_policy_namespace *ns; /* The position currently writing to. 
*/ struct tomoyo_domain_info *domain; /* Bytes available for writing. */ int avail; + bool is_delete; } w; /* Buffer for reading. */ char *read_buf; @@ -533,8 +539,27 @@ struct tomoyo_time { u8 sec; }; +/* Structure for policy namespace. */ +struct tomoyo_policy_namespace { + /* Profile table. Memory is allocated as needed. */ + struct tomoyo_profile *profile_ptr[TOMOYO_MAX_PROFILES]; + /* List of "struct tomoyo_group". */ + struct list_head group_list[TOMOYO_MAX_GROUP]; + /* List of policy. */ + struct list_head policy_list[TOMOYO_MAX_POLICY]; + /* The global ACL referred by "use_group" keyword. */ + struct list_head acl_group[TOMOYO_MAX_ACL_GROUPS]; + /* List for connecting to tomoyo_namespace_list list. */ + struct list_head namespace_list; + /* Profile version. Currently only 20100903 is defined. */ + unsigned int profile_version; + /* Name of this namespace (e.g. "", "" ). */ + const char *name; +}; + /********** Function prototypes. **********/ +void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns); bool tomoyo_str_starts(char **src, const char *find); const char *tomoyo_get_exe(void); void tomoyo_normalize_line(unsigned char *buffer); @@ -553,7 +578,8 @@ tomoyo_compare_name_union(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr); bool tomoyo_compare_number_union(const unsigned long value, const struct tomoyo_number_union *ptr); -int tomoyo_get_mode(const u8 profile, const u8 index); +int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile, + const u8 index); void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) __attribute__ ((format(printf, 2, 3))); bool tomoyo_correct_domain(const unsigned char *domainname); @@ -589,8 +615,11 @@ int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) __attribute__ ((format(printf, 2, 3))); struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname); struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname, - const u8 profile); -struct tomoyo_profile *tomoyo_profile(const u8 profile); + const bool transit); +struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns, + const u8 profile); +struct tomoyo_policy_namespace *tomoyo_assign_namespace +(const char *domainname); struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param, const u8 idx); unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain, @@ -646,6 +675,8 @@ char *tomoyo_read_token(struct tomoyo_acl_param *param); bool tomoyo_permstr(const char *string, const char *keyword); const char *tomoyo_yesno(const unsigned int value); +void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt, va_list args); void tomoyo_read_log(struct tomoyo_io_buffer *head); @@ -661,8 +692,6 @@ extern struct srcu_struct tomoyo_ss; /* The list for "struct tomoyo_domain_info". */ extern struct list_head tomoyo_domain_list; -extern struct list_head tomoyo_policy_list[TOMOYO_MAX_POLICY]; -extern struct list_head tomoyo_group_list[TOMOYO_MAX_GROUP]; extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH]; /* Lock for protecting policy. */ @@ -671,10 +700,10 @@ extern struct mutex tomoyo_policy_lock; /* Has /sbin/init started? */ extern bool tomoyo_policy_loaded; -extern struct list_head tomoyo_acl_group[TOMOYO_MAX_ACL_GROUPS]; - /* The kernel's domain. 
*/ extern struct tomoyo_domain_info tomoyo_kernel_domain; +extern struct tomoyo_policy_namespace tomoyo_kernel_namespace; +extern struct list_head tomoyo_namespace_list; extern const char *tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION]; extern const char *tomoyo_mkdev_keyword[TOMOYO_MAX_MKDEV_OPERATION]; @@ -809,6 +838,16 @@ static inline bool tomoyo_same_number_union a->value_type[1] == b->value_type[1]; } +/** + * tomoyo_current_namespace - Get "struct tomoyo_policy_namespace" for current thread. + * + * Returns pointer to "struct tomoyo_policy_namespace" for current thread. + */ +static inline struct tomoyo_policy_namespace *tomoyo_current_namespace(void) +{ + return tomoyo_domain()->ns; +} + #if defined(CONFIG_SLOB) /** diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index af5f325e2f33..71acebc747c3 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -12,9 +12,6 @@ /* Variables definitions.*/ -/* The global ACL referred by "use_group" keyword. */ -struct list_head tomoyo_acl_group[TOMOYO_MAX_ACL_GROUPS]; - /* The initial domain. */ struct tomoyo_domain_info tomoyo_kernel_domain; @@ -158,7 +155,7 @@ retry: } if (!retried) { retried = true; - list = &tomoyo_acl_group[domain->group]; + list = &domain->ns->acl_group[domain->group]; goto retry; } r->granted = false; @@ -167,13 +164,10 @@ retry: /* The list for "struct tomoyo_domain_info". */ LIST_HEAD(tomoyo_domain_list); -struct list_head tomoyo_policy_list[TOMOYO_MAX_POLICY]; -struct list_head tomoyo_group_list[TOMOYO_MAX_GROUP]; - /** * tomoyo_last_word - Get last component of a domainname. * - * @domainname: Domainname to check. + * @name: Domainname to check. * * Returns the last word of @domainname. */ @@ -247,7 +241,7 @@ int tomoyo_write_transition_control(struct tomoyo_acl_param *param, if (!e.domainname) goto out; } - param->list = &tomoyo_policy_list[TOMOYO_ID_TRANSITION_CONTROL]; + param->list = ¶m->ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL]; error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_same_transition_control); out: @@ -257,59 +251,88 @@ out: } /** - * tomoyo_transition_type - Get domain transition type. + * tomoyo_scan_transition - Try to find specific domain transition type. * - * @domainname: The name of domain. - * @program: The name of program. + * @list: Pointer to "struct list_head". + * @domainname: The name of current domain. + * @program: The name of requested program. + * @last_name: The last component of @domainname. + * @type: One of values in "enum tomoyo_transition_type". * - * Returns TOMOYO_TRANSITION_CONTROL_INITIALIZE if executing @program - * reinitializes domain transition, TOMOYO_TRANSITION_CONTROL_KEEP if executing - * @program suppresses domain transition, others otherwise. + * Returns true if found one, false otherwise. * * Caller holds tomoyo_read_lock(). 
*/ -static u8 tomoyo_transition_type(const struct tomoyo_path_info *domainname, - const struct tomoyo_path_info *program) +static inline bool tomoyo_scan_transition +(const struct list_head *list, const struct tomoyo_path_info *domainname, + const struct tomoyo_path_info *program, const char *last_name, + const enum tomoyo_transition_type type) { const struct tomoyo_transition_control *ptr; - const char *last_name = tomoyo_last_word(domainname->name); - u8 type; - for (type = 0; type < TOMOYO_MAX_TRANSITION_TYPE; type++) { - next: - list_for_each_entry_rcu(ptr, &tomoyo_policy_list - [TOMOYO_ID_TRANSITION_CONTROL], - head.list) { - if (ptr->head.is_deleted || ptr->type != type) - continue; - if (ptr->domainname) { - if (!ptr->is_last_name) { - if (ptr->domainname != domainname) - continue; - } else { - /* - * Use direct strcmp() since this is - * unlikely used. - */ - if (strcmp(ptr->domainname->name, - last_name)) - continue; - } - } - if (ptr->program && - tomoyo_pathcmp(ptr->program, program)) - continue; - if (type == TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE) { + list_for_each_entry_rcu(ptr, list, head.list) { + if (ptr->head.is_deleted || ptr->type != type) + continue; + if (ptr->domainname) { + if (!ptr->is_last_name) { + if (ptr->domainname != domainname) + continue; + } else { /* - * Do not check for initialize_domain if - * no_initialize_domain matched. + * Use direct strcmp() since this is + * unlikely used. */ - type = TOMOYO_TRANSITION_CONTROL_NO_KEEP; - goto next; + if (strcmp(ptr->domainname->name, last_name)) + continue; } - goto done; } + if (ptr->program && tomoyo_pathcmp(ptr->program, program)) + continue; + return true; + } + return false; +} + +/** + * tomoyo_transition_type - Get domain transition type. + * + * @ns: Pointer to "struct tomoyo_policy_namespace". + * @domainname: The name of current domain. + * @program: The name of requested program. + * + * Returns TOMOYO_TRANSITION_CONTROL_TRANSIT if executing @program causes + * domain transition across namespaces, TOMOYO_TRANSITION_CONTROL_INITIALIZE if + * executing @program reinitializes domain transition within that namespace, + * TOMOYO_TRANSITION_CONTROL_KEEP if executing @program stays at @domainname , + * others otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +static enum tomoyo_transition_type tomoyo_transition_type +(const struct tomoyo_policy_namespace *ns, + const struct tomoyo_path_info *domainname, + const struct tomoyo_path_info *program) +{ + const char *last_name = tomoyo_last_word(domainname->name); + enum tomoyo_transition_type type = TOMOYO_TRANSITION_CONTROL_NO_RESET; + while (type < TOMOYO_MAX_TRANSITION_TYPE) { + const struct list_head * const list = + &ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL]; + if (!tomoyo_scan_transition(list, domainname, program, + last_name, type)) { + type++; + continue; + } + if (type != TOMOYO_TRANSITION_CONTROL_NO_RESET && + type != TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE) + break; + /* + * Do not check for reset_domain if no_reset_domain matched. + * Do not check for initialize_domain if no_initialize_domain + * matched. + */ + type++; + type++; } - done: return type; } @@ -355,7 +378,7 @@ int tomoyo_write_aggregator(struct tomoyo_acl_param *param) if (!e.original_name || !e.aggregated_name || e.aggregated_name->is_patterned) /* No patterns allowed. 
*/ goto out; - param->list = &tomoyo_policy_list[TOMOYO_ID_AGGREGATOR]; + param->list = ¶m->ns->policy_list[TOMOYO_ID_AGGREGATOR]; error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_same_aggregator); out: @@ -365,53 +388,171 @@ out: } /** - * tomoyo_assign_domain - Create a domain. + * tomoyo_find_namespace - Find specified namespace. * - * @domainname: The name of domain. - * @profile: Profile number to assign if the domain was newly created. + * @name: Name of namespace to find. + * @len: Length of @name. * - * Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise. + * Returns pointer to "struct tomoyo_policy_namespace" if found, + * NULL otherwise. * * Caller holds tomoyo_read_lock(). */ -struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname, - const u8 profile) +static struct tomoyo_policy_namespace *tomoyo_find_namespace +(const char *name, const unsigned int len) { - struct tomoyo_domain_info *entry; - struct tomoyo_domain_info *domain = NULL; - const struct tomoyo_path_info *saved_domainname; - bool found = false; + struct tomoyo_policy_namespace *ns; + list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) { + if (strncmp(name, ns->name, len) || + (name[len] && name[len] != ' ')) + continue; + return ns; + } + return NULL; +} - if (!tomoyo_correct_domain(domainname)) +/** + * tomoyo_assign_namespace - Create a new namespace. + * + * @domainname: Name of namespace to create. + * + * Returns pointer to "struct tomoyo_policy_namespace" on success, + * NULL otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +struct tomoyo_policy_namespace *tomoyo_assign_namespace(const char *domainname) +{ + struct tomoyo_policy_namespace *ptr; + struct tomoyo_policy_namespace *entry; + const char *cp = domainname; + unsigned int len = 0; + while (*cp && *cp++ != ' ') + len++; + ptr = tomoyo_find_namespace(domainname, len); + if (ptr) + return ptr; + if (len >= TOMOYO_EXEC_TMPSIZE - 10 || !tomoyo_domain_def(domainname)) return NULL; - saved_domainname = tomoyo_get_name(domainname); - if (!saved_domainname) + entry = kzalloc(sizeof(*entry) + len + 1, GFP_NOFS); + if (!entry) return NULL; - entry = kzalloc(sizeof(*entry), GFP_NOFS); if (mutex_lock_interruptible(&tomoyo_policy_lock)) goto out; - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { - if (domain->is_deleted || - tomoyo_pathcmp(saved_domainname, domain->domainname)) - continue; - found = true; - break; - } - if (!found && tomoyo_memory_ok(entry)) { - INIT_LIST_HEAD(&entry->acl_info_list); - entry->domainname = saved_domainname; - saved_domainname = NULL; - entry->profile = profile; - list_add_tail_rcu(&entry->list, &tomoyo_domain_list); - domain = entry; + ptr = tomoyo_find_namespace(domainname, len); + if (!ptr && tomoyo_memory_ok(entry)) { + char *name = (char *) (entry + 1); + ptr = entry; + memmove(name, domainname, len); + name[len] = '\0'; + entry->name = name; + tomoyo_init_policy_namespace(entry); entry = NULL; - found = true; } mutex_unlock(&tomoyo_policy_lock); - out: - tomoyo_put_name(saved_domainname); +out: kfree(entry); - return found ? domain : NULL; + return ptr; +} + +/** + * tomoyo_namespace_jump - Check for namespace jump. + * + * @domainname: Name of domain. + * + * Returns true if namespace differs, false otherwise. 
+ */ +static bool tomoyo_namespace_jump(const char *domainname) +{ + const char *namespace = tomoyo_current_namespace()->name; + const int len = strlen(namespace); + return strncmp(domainname, namespace, len) || + (domainname[len] && domainname[len] != ' '); +} + +/** + * tomoyo_assign_domain - Create a domain or a namespace. + * + * @domainname: The name of domain. + * @transit: True if transit to domain found or created. + * + * Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname, + const bool transit) +{ + struct tomoyo_domain_info e = { }; + struct tomoyo_domain_info *entry = tomoyo_find_domain(domainname); + bool created = false; + if (entry) { + if (transit) { + /* + * Since namespace is created at runtime, profiles may + * not be created by the moment the process transits to + * that domain. Do not perform domain transition if + * profile for that domain is not yet created. + */ + if (!entry->ns->profile_ptr[entry->profile]) + return NULL; + } + return entry; + } + /* Requested domain does not exist. */ + /* Don't create requested domain if domainname is invalid. */ + if (strlen(domainname) >= TOMOYO_EXEC_TMPSIZE - 10 || + !tomoyo_correct_domain(domainname)) + return NULL; + /* + * Since definition of profiles and acl_groups may differ across + * namespaces, do not inherit "use_profile" and "use_group" settings + * by automatically creating requested domain upon domain transition. + */ + if (transit && tomoyo_namespace_jump(domainname)) + return NULL; + e.ns = tomoyo_assign_namespace(domainname); + if (!e.ns) + return NULL; + /* + * "use_profile" and "use_group" settings for automatically created + * domains are inherited from current domain. These are 0 for manually + * created domains. + */ + if (transit) { + const struct tomoyo_domain_info *domain = tomoyo_domain(); + e.profile = domain->profile; + e.group = domain->group; + } + e.domainname = tomoyo_get_name(domainname); + if (!e.domainname) + return NULL; + if (mutex_lock_interruptible(&tomoyo_policy_lock)) + goto out; + entry = tomoyo_find_domain(domainname); + if (!entry) { + entry = tomoyo_commit_ok(&e, sizeof(e)); + if (entry) { + INIT_LIST_HEAD(&entry->acl_info_list); + list_add_tail_rcu(&entry->list, &tomoyo_domain_list); + created = true; + } + } + mutex_unlock(&tomoyo_policy_lock); +out: + tomoyo_put_name(e.domainname); + if (entry && transit) { + if (created) { + struct tomoyo_request_info r; + tomoyo_init_request_info(&r, entry, + TOMOYO_MAC_FILE_EXECUTE); + r.granted = false; + tomoyo_write_log(&r, "use_profile %u\n", + entry->profile); + tomoyo_write_log(&r, "use_group %u\n", entry->group); + } + } + return entry; } /** @@ -434,6 +575,7 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm) bool is_enforce; int retval = -ENOMEM; bool need_kfree = false; + bool reject_on_transition_failure = false; struct tomoyo_path_info rn = { }; /* real name */ mode = tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE); @@ -457,8 +599,10 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm) /* Check 'aggregator' directive. */ { struct tomoyo_aggregator *ptr; - list_for_each_entry_rcu(ptr, &tomoyo_policy_list - [TOMOYO_ID_AGGREGATOR], head.list) { + struct list_head *list = + &old_domain->ns->policy_list[TOMOYO_ID_AGGREGATOR]; + /* Check 'aggregator' directive. 
*/ + list_for_each_entry_rcu(ptr, list, head.list) { if (ptr->head.is_deleted || !tomoyo_path_matches_pattern(&rn, ptr->original_name)) @@ -492,11 +636,21 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm) } /* Calculate domain to transit to. */ - switch (tomoyo_transition_type(old_domain->domainname, &rn)) { + switch (tomoyo_transition_type(old_domain->ns, old_domain->domainname, + &rn)) { + case TOMOYO_TRANSITION_CONTROL_RESET: + /* Transit to the root of specified namespace. */ + snprintf(tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>", rn.name); + /* + * Make do_execve() fail if domain transition across namespaces + * has failed. + */ + reject_on_transition_failure = true; + break; case TOMOYO_TRANSITION_CONTROL_INITIALIZE: - /* Transit to the child of tomoyo_kernel_domain domain. */ - snprintf(tmp, TOMOYO_EXEC_TMPSIZE - 1, TOMOYO_ROOT_NAME " " - "%s", rn.name); + /* Transit to the child of current namespace's root. */ + snprintf(tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s", + old_domain->ns->name, rn.name); break; case TOMOYO_TRANSITION_CONTROL_KEEP: /* Keep current domain. */ @@ -519,19 +673,25 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm) } break; } - if (domain || strlen(tmp) >= TOMOYO_EXEC_TMPSIZE - 10) - goto done; - domain = tomoyo_find_domain(tmp); if (!domain) - domain = tomoyo_assign_domain(tmp, old_domain->profile); - done: + domain = tomoyo_assign_domain(tmp, true); if (domain) - goto out; - printk(KERN_WARNING "TOMOYO-ERROR: Domain '%s' not defined.\n", tmp); - if (is_enforce) - retval = -EPERM; - else - old_domain->transition_failed = true; + retval = 0; + else if (reject_on_transition_failure) { + printk(KERN_WARNING "ERROR: Domain '%s' not ready.\n", tmp); + retval = -ENOMEM; + } else if (r.mode == TOMOYO_CONFIG_ENFORCING) + retval = -ENOMEM; + else { + retval = 0; + if (!old_domain->transition_failed) { + old_domain->transition_failed = true; + r.granted = false; + tomoyo_write_log(&r, "%s", "transition_failed\n"); + printk(KERN_WARNING + "ERROR: Domain '%s' not defined.\n", tmp); + } + } out: if (!domain) domain = old_domain; diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 4f8526af9069..323ddc73a125 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -603,7 +603,7 @@ int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, int error; r->type = tomoyo_p2mac[operation]; - r->mode = tomoyo_get_mode(r->profile, r->type); + r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type); if (r->mode == TOMOYO_CONFIG_DISABLED) return 0; r->param_type = TOMOYO_TYPE_PATH_ACL; diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c index 412ee8309c23..782e844dca7f 100644 --- a/security/tomoyo/gc.c +++ b/security/tomoyo/gc.c @@ -292,15 +292,12 @@ static bool tomoyo_collect_acl(struct list_head *list) static void tomoyo_collect_entry(void) { int i; + enum tomoyo_policy_id id; + struct tomoyo_policy_namespace *ns; + int idx; if (mutex_lock_interruptible(&tomoyo_policy_lock)) return; - for (i = 0; i < TOMOYO_MAX_POLICY; i++) { - if (!tomoyo_collect_member(i, &tomoyo_policy_list[i])) - goto unlock; - } - for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++) - if (!tomoyo_collect_acl(&tomoyo_acl_group[i])) - goto unlock; + idx = tomoyo_read_lock(); { struct tomoyo_domain_info *domain; list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { @@ -317,39 +314,49 @@ static void tomoyo_collect_entry(void) goto unlock; } } - for (i = 0; i < TOMOYO_MAX_HASH; i++) { - struct tomoyo_name *ptr; - list_for_each_entry_rcu(ptr, &tomoyo_name_list[i], 
head.list) { - if (atomic_read(&ptr->head.users)) - continue; - if (!tomoyo_add_to_gc(TOMOYO_ID_NAME, &ptr->head.list)) + list_for_each_entry_rcu(ns, &tomoyo_namespace_list, namespace_list) { + for (id = 0; id < TOMOYO_MAX_POLICY; id++) + if (!tomoyo_collect_member(id, &ns->policy_list[id])) goto unlock; + for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++) + if (!tomoyo_collect_acl(&ns->acl_group[i])) + goto unlock; + for (i = 0; i < TOMOYO_MAX_GROUP; i++) { + struct list_head *list = &ns->group_list[i]; + struct tomoyo_group *group; + switch (i) { + case 0: + id = TOMOYO_ID_PATH_GROUP; + break; + default: + id = TOMOYO_ID_NUMBER_GROUP; + break; + } + list_for_each_entry(group, list, head.list) { + if (!tomoyo_collect_member + (id, &group->member_list)) + goto unlock; + if (!list_empty(&group->member_list) || + atomic_read(&group->head.users)) + continue; + if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP, + &group->head.list)) + goto unlock; + } } } - for (i = 0; i < TOMOYO_MAX_GROUP; i++) { - struct list_head *list = &tomoyo_group_list[i]; - int id; - struct tomoyo_group *group; - switch (i) { - case 0: - id = TOMOYO_ID_PATH_GROUP; - break; - default: - id = TOMOYO_ID_NUMBER_GROUP; - break; - } - list_for_each_entry(group, list, head.list) { - if (!tomoyo_collect_member(id, &group->member_list)) - goto unlock; - if (!list_empty(&group->member_list) || - atomic_read(&group->head.users)) + for (i = 0; i < TOMOYO_MAX_HASH; i++) { + struct list_head *list = &tomoyo_name_list[i]; + struct tomoyo_shared_acl_head *ptr; + list_for_each_entry(ptr, list, list) { + if (atomic_read(&ptr->users)) continue; - if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP, - &group->head.list)) + if (!tomoyo_add_to_gc(TOMOYO_ID_NAME, &ptr->list)) goto unlock; } } - unlock: +unlock: + tomoyo_read_unlock(idx); mutex_unlock(&tomoyo_policy_lock); } diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c index 7a0493943d6d..39d012823f84 100644 --- a/security/tomoyo/memory.c +++ b/security/tomoyo/memory.c @@ -118,7 +118,7 @@ struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param, return NULL; if (mutex_lock_interruptible(&tomoyo_policy_lock)) goto out; - list = &tomoyo_group_list[idx]; + list = ¶m->ns->group_list[idx]; list_for_each_entry(group, list, head.list) { if (e.group_name != group->group_name) continue; @@ -199,27 +199,23 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name) return ptr ? &ptr->entry : NULL; } +/* Initial namespace.*/ +struct tomoyo_policy_namespace tomoyo_kernel_namespace; + /** * tomoyo_mm_init - Initialize mm related code. 
*/ void __init tomoyo_mm_init(void) { int idx; - - for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++) - INIT_LIST_HEAD(&tomoyo_policy_list[idx]); - for (idx = 0; idx < TOMOYO_MAX_GROUP; idx++) - INIT_LIST_HEAD(&tomoyo_group_list[idx]); for (idx = 0; idx < TOMOYO_MAX_HASH; idx++) INIT_LIST_HEAD(&tomoyo_name_list[idx]); + tomoyo_kernel_namespace.name = ""; + tomoyo_init_policy_namespace(&tomoyo_kernel_namespace); + tomoyo_kernel_domain.ns = &tomoyo_kernel_namespace; INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list); - for (idx = 0; idx < TOMOYO_MAX_ACL_GROUPS; idx++) - INIT_LIST_HEAD(&tomoyo_acl_group[idx]); - tomoyo_kernel_domain.domainname = tomoyo_get_name(TOMOYO_ROOT_NAME); + tomoyo_kernel_domain.domainname = tomoyo_get_name(""); list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list); - idx = tomoyo_read_lock(); - if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain) - panic("Can't register tomoyo_kernel_domain"); #if 0 /* Will be replaced with tomoyo_load_builtin_policy(). */ { @@ -230,7 +226,6 @@ void __init tomoyo_mm_init(void) TOMOYO_TRANSITION_CONTROL_INITIALIZE); } #endif - tomoyo_read_unlock(idx); } diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index bc71528ff440..fda15c1fc1c0 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -416,26 +416,21 @@ bool tomoyo_correct_path(const char *filename) */ bool tomoyo_correct_domain(const unsigned char *domainname) { - if (!domainname || strncmp(domainname, TOMOYO_ROOT_NAME, - TOMOYO_ROOT_NAME_LEN)) - goto out; - domainname += TOMOYO_ROOT_NAME_LEN; - if (!*domainname) + if (!domainname || !tomoyo_domain_def(domainname)) + return false; + domainname = strchr(domainname, ' '); + if (!domainname++) return true; - if (*domainname++ != ' ') - goto out; while (1) { const unsigned char *cp = strchr(domainname, ' '); if (!cp) break; if (*domainname != '/' || !tomoyo_correct_word2(domainname, cp - domainname)) - goto out; + return false; domainname = cp + 1; } return tomoyo_correct_path(domainname); - out: - return false; } /** @@ -447,7 +442,19 @@ bool tomoyo_correct_domain(const unsigned char *domainname) */ bool tomoyo_domain_def(const unsigned char *buffer) { - return !strncmp(buffer, TOMOYO_ROOT_NAME, TOMOYO_ROOT_NAME_LEN); + const unsigned char *cp; + int len; + if (*buffer != '<') + return false; + cp = strchr(buffer, ' '); + if (!cp) + len = strlen(buffer); + else + len = cp - buffer; + if (buffer[len - 1] != '>' || + !tomoyo_correct_word2(buffer + 1, len - 2)) + return false; + return true; } /** @@ -833,22 +840,24 @@ const char *tomoyo_get_exe(void) /** * tomoyo_get_mode - Get MAC mode. * + * @ns: Pointer to "struct tomoyo_policy_namespace". * @profile: Profile number. * @index: Index number of functionality. * * Returns mode. 
*/ -int tomoyo_get_mode(const u8 profile, const u8 index) +int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile, + const u8 index) { u8 mode; const u8 category = TOMOYO_MAC_CATEGORY_FILE; if (!tomoyo_policy_loaded) return TOMOYO_CONFIG_DISABLED; - mode = tomoyo_profile(profile)->config[index]; + mode = tomoyo_profile(ns, profile)->config[index]; if (mode == TOMOYO_CONFIG_USE_DEFAULT) - mode = tomoyo_profile(profile)->config[category]; + mode = tomoyo_profile(ns, profile)->config[category]; if (mode == TOMOYO_CONFIG_USE_DEFAULT) - mode = tomoyo_profile(profile)->default_config; + mode = tomoyo_profile(ns, profile)->default_config; return mode & 3; } @@ -872,25 +881,10 @@ int tomoyo_init_request_info(struct tomoyo_request_info *r, profile = domain->profile; r->profile = profile; r->type = index; - r->mode = tomoyo_get_mode(profile, index); + r->mode = tomoyo_get_mode(domain->ns, profile, index); return r->mode; } -/** - * tomoyo_last_word - Get last component of a line. - * - * @line: A line. - * - * Returns the last word of a line. - */ -const char *tomoyo_last_word(const char *name) -{ - const char *cp = strrchr(name, ' '); - if (cp) - return cp + 1; - return name; -} - /** * tomoyo_domain_quota_is_ok - Check for domain's quota. * @@ -939,7 +933,7 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r) if (perm & (1 << i)) count++; } - if (count < tomoyo_profile(domain->profile)-> + if (count < tomoyo_profile(domain->ns, domain->profile)-> pref[TOMOYO_PREF_MAX_LEARNING_ENTRY]) return true; if (!domain->quota_warned) { -- cgit v1.2.3 From 5625f2e3266319fd29fe4f1c76ccd3f550c79ac4 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:20:23 +0900 Subject: TOMOYO: Change pathname for non-rename()able filesystems. TOMOYO wants to use /proc/self/ rather than /proc/$PID/ if $PID matches current thread's process ID in order to prevent current thread from accessing other process's information unless needed. But since procfs can be mounted on various locations (e.g. /proc/ /proc2/ /p/ /tmp/foo/100/p/ ), TOMOYO cannot tell that whether the numeric part in the string returned by __d_path() represents process ID or not. Therefore, to be able to convert from $PID to self no matter where procfs is mounted, this patch changes pathname representations for filesystems which do not support rename() operation (e.g. proc, sysfs, securityfs). 
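As a rough sketch of the decision rule described above (the helper name below is illustrative only; the actual implementation is the tomoyo_get_local_path()/tomoyo_get_absolute_path() split added by this patch):

static bool example_uses_local_name(struct path *path)
{
	struct inode *root = path->dentry->d_sb->s_root->d_inode;

	/* No vfsmount, or a filesystem that cannot rename() its entries
	 * (proc, sysfs, securityfs, ...): use the filesystem-local name,
	 * prefixed with the filesystem type or device number. */
	return !path->mnt || (root->i_op && !root->i_op->rename);
}

With that rule in place, a leading "/$PID/" component under procfs can be rewritten to "/self/" no matter where procfs is mounted, giving representations such as the examples below.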
Examples: /proc/self/mounts => proc:/self/mounts /sys/kernel/security/ => sys:/kernel/security/ /dev/pts/0 => devpts:/0 Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/file.c | 12 +-- security/tomoyo/realpath.c | 222 ++++++++++++++++++++++++++++++++++++--------- 2 files changed, 180 insertions(+), 54 deletions(-) diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 323ddc73a125..8410f28a35e0 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -712,7 +712,7 @@ int tomoyo_path_number_perm(const u8 type, struct path *path, int idx; if (tomoyo_init_request_info(&r, NULL, tomoyo_pn2mac[type]) - == TOMOYO_CONFIG_DISABLED || !path->mnt || !path->dentry) + == TOMOYO_CONFIG_DISABLED || !path->dentry) return 0; idx = tomoyo_read_lock(); if (!tomoyo_get_realpath(&buf, path)) @@ -753,8 +753,6 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, struct tomoyo_request_info r; int idx; - if (!path->mnt) - return 0; buf.name = NULL; r.mode = TOMOYO_CONFIG_DISABLED; idx = tomoyo_read_lock(); @@ -798,8 +796,6 @@ int tomoyo_path_perm(const u8 operation, struct path *path) bool is_enforce; int idx; - if (!path->mnt) - return 0; if (tomoyo_init_request_info(&r, NULL, tomoyo_p2mac[operation]) == TOMOYO_CONFIG_DISABLED) return 0; @@ -842,8 +838,7 @@ int tomoyo_mkdev_perm(const u8 operation, struct path *path, struct tomoyo_path_info buf; int idx; - if (!path->mnt || - tomoyo_init_request_info(&r, NULL, tomoyo_pnnn2mac[operation]) + if (tomoyo_init_request_info(&r, NULL, tomoyo_pnnn2mac[operation]) == TOMOYO_CONFIG_DISABLED) return 0; idx = tomoyo_read_lock(); @@ -884,8 +879,7 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1, struct tomoyo_request_info r; int idx; - if (!path1->mnt || !path2->mnt || - tomoyo_init_request_info(&r, NULL, tomoyo_pp2mac[operation]) + if (tomoyo_init_request_info(&r, NULL, tomoyo_pp2mac[operation]) == TOMOYO_CONFIG_DISABLED) return 0; buf1.name = NULL; diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c index d1e05b047715..1a785777118b 100644 --- a/security/tomoyo/realpath.c +++ b/security/tomoyo/realpath.c @@ -69,6 +69,161 @@ char *tomoyo_encode(const char *str) return cp0; } +/** + * tomoyo_get_absolute_path - Get the path of a dentry but ignores chroot'ed root. + * + * @path: Pointer to "struct path". + * @buffer: Pointer to buffer to return value in. + * @buflen: Sizeof @buffer. + * + * Returns the buffer on success, an error code otherwise. + * + * If dentry is a directory, trailing '/' is appended. + */ +static char *tomoyo_get_absolute_path(struct path *path, char * const buffer, + const int buflen) +{ + char *pos = ERR_PTR(-ENOMEM); + if (buflen >= 256) { + struct path ns_root = { }; + /* go to whatever namespace root we are under */ + pos = __d_path(path, &ns_root, buffer, buflen - 1); + if (!IS_ERR(pos) && *pos == '/' && pos[1]) { + struct inode *inode = path->dentry->d_inode; + if (inode && S_ISDIR(inode->i_mode)) { + buffer[buflen - 2] = '/'; + buffer[buflen - 1] = '\0'; + } + } + } + return pos; +} + +/** + * tomoyo_get_dentry_path - Get the path of a dentry. + * + * @dentry: Pointer to "struct dentry". + * @buffer: Pointer to buffer to return value in. + * @buflen: Sizeof @buffer. + * + * Returns the buffer on success, an error code otherwise. + * + * If dentry is a directory, trailing '/' is appended. 
+ */ +static char *tomoyo_get_dentry_path(struct dentry *dentry, char * const buffer, + const int buflen) +{ + char *pos = ERR_PTR(-ENOMEM); + if (buflen >= 256) { + pos = dentry_path_raw(dentry, buffer, buflen - 1); + if (!IS_ERR(pos) && *pos == '/' && pos[1]) { + struct inode *inode = dentry->d_inode; + if (inode && S_ISDIR(inode->i_mode)) { + buffer[buflen - 2] = '/'; + buffer[buflen - 1] = '\0'; + } + } + } + return pos; +} + +/** + * tomoyo_get_local_path - Get the path of a dentry. + * + * @dentry: Pointer to "struct dentry". + * @buffer: Pointer to buffer to return value in. + * @buflen: Sizeof @buffer. + * + * Returns the buffer on success, an error code otherwise. + */ +static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer, + const int buflen) +{ + struct super_block *sb = dentry->d_sb; + char *pos = tomoyo_get_dentry_path(dentry, buffer, buflen); + if (IS_ERR(pos)) + return pos; + /* Convert from $PID to self if $PID is current thread. */ + if (sb->s_magic == PROC_SUPER_MAGIC && *pos == '/') { + char *ep; + const pid_t pid = (pid_t) simple_strtoul(pos + 1, &ep, 10); + if (*ep == '/' && pid && pid == + task_tgid_nr_ns(current, sb->s_fs_info)) { + pos = ep - 5; + if (pos < buffer) + goto out; + memmove(pos, "/self", 5); + } + goto prepend_filesystem_name; + } + /* Use filesystem name for unnamed devices. */ + if (!MAJOR(sb->s_dev)) + goto prepend_filesystem_name; + { + struct inode *inode = sb->s_root->d_inode; + /* + * Use filesystem name if filesystem does not support rename() + * operation. + */ + if (inode->i_op && !inode->i_op->rename) + goto prepend_filesystem_name; + } + /* Prepend device name. */ + { + char name[64]; + int name_len; + const dev_t dev = sb->s_dev; + name[sizeof(name) - 1] = '\0'; + snprintf(name, sizeof(name) - 1, "dev(%u,%u):", MAJOR(dev), + MINOR(dev)); + name_len = strlen(name); + pos -= name_len; + if (pos < buffer) + goto out; + memmove(pos, name, name_len); + return pos; + } + /* Prepend filesystem name. */ +prepend_filesystem_name: + { + const char *name = sb->s_type->name; + const int name_len = strlen(name); + pos -= name_len + 1; + if (pos < buffer) + goto out; + memmove(pos, name, name_len); + pos[name_len] = ':'; + } + return pos; +out: + return ERR_PTR(-ENOMEM); +} + +/** + * tomoyo_get_socket_name - Get the name of a socket. + * + * @path: Pointer to "struct path". + * @buffer: Pointer to buffer to return value in. + * @buflen: Sizeof @buffer. + * + * Returns the buffer. + */ +static char *tomoyo_get_socket_name(struct path *path, char * const buffer, + const int buflen) +{ + struct inode *inode = path->dentry->d_inode; + struct socket *sock = inode ? SOCKET_I(inode) : NULL; + struct sock *sk = sock ? sock->sk : NULL; + if (sk) { + snprintf(buffer, buflen, "socket:[family=%u:type=%u:" + "protocol=%u]", sk->sk_family, sk->sk_type, + sk->sk_protocol); + } else { + snprintf(buffer, buflen, "socket:[unknown]"); + } + return buffer; +} + /** * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root. 
* @@ -90,55 +245,42 @@ char *tomoyo_realpath_from_path(struct path *path) char *name = NULL; unsigned int buf_len = PAGE_SIZE / 2; struct dentry *dentry = path->dentry; - bool is_dir; + struct super_block *sb; if (!dentry) return NULL; - is_dir = dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode); + sb = dentry->d_sb; while (1) { - struct path ns_root = { .mnt = NULL, .dentry = NULL }; char *pos; + struct inode *inode; buf_len <<= 1; kfree(buf); buf = kmalloc(buf_len, GFP_NOFS); if (!buf) break; + /* To make sure that pos is '\0' terminated. */ + buf[buf_len - 1] = '\0'; /* Get better name for socket. */ - if (dentry->d_sb && dentry->d_sb->s_magic == SOCKFS_MAGIC) { - struct inode *inode = dentry->d_inode; - struct socket *sock = inode ? SOCKET_I(inode) : NULL; - struct sock *sk = sock ? sock->sk : NULL; - if (sk) { - snprintf(buf, buf_len - 1, "socket:[family=%u:" - "type=%u:protocol=%u]", sk->sk_family, - sk->sk_type, sk->sk_protocol); - } else { - snprintf(buf, buf_len - 1, "socket:[unknown]"); - } - name = tomoyo_encode(buf); - break; + if (sb->s_magic == SOCKFS_MAGIC) { + pos = tomoyo_get_socket_name(path, buf, buf_len - 1); + goto encode; } - /* For "socket:[\$]" and "pipe:[\$]". */ + /* For "pipe:[\$]". */ if (dentry->d_op && dentry->d_op->d_dname) { pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1); - if (IS_ERR(pos)) - continue; - name = tomoyo_encode(pos); - break; - } - /* If we don't have a vfsmount, we can't calculate. */ - if (!path->mnt) - break; - /* go to whatever namespace root we are under */ - pos = __d_path(path, &ns_root, buf, buf_len); - /* Prepend "/proc" prefix if using internal proc vfs mount. */ - if (!IS_ERR(pos) && (path->mnt->mnt_flags & MNT_INTERNAL) && - (path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) { - pos -= 5; - if (pos >= buf) - memcpy(pos, "/proc", 5); - else - pos = ERR_PTR(-ENOMEM); + goto encode; } + inode = sb->s_root->d_inode; + /* + * Get local name for filesystems without rename() operation + * or dentry without vfsmount. + */ + if (!path->mnt || (inode->i_op && !inode->i_op->rename)) + pos = tomoyo_get_local_path(path->dentry, buf, + buf_len - 1); + /* Get absolute name for the rest. */ + else + pos = tomoyo_get_absolute_path(path, buf, buf_len - 1); +encode: if (IS_ERR(pos)) continue; name = tomoyo_encode(pos); @@ -147,16 +289,6 @@ char *tomoyo_realpath_from_path(struct path *path) kfree(buf); if (!name) tomoyo_warn_oom(__func__); - else if (is_dir && *name) { - /* Append trailing '/' if dentry is a directory. */ - char *pos = name + strlen(name) - 1; - if (*pos != '/') - /* - * This is OK because tomoyo_encode() reserves space - * for appending "/". - */ - *++pos = '/'; - } return name; } -- cgit v1.2.3 From 2e503bbb435ae418aebbe4aeede1c6f2a33d6f74 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:20:55 +0900 Subject: TOMOYO: Fix lockdep warning. Currently TOMOYO holds SRCU lock upon open() and releases it upon close() because list elements stored in the "struct tomoyo_io_buffer" instances are accessed until close() is called. However, such SRCU usage causes lockdep to complain about leaving the kernel with SRCU lock held. This patch solves the warning by holding/releasing SRCU upon each read()/write(). This patch is doing something similar to calling kfree() without calling synchronize_srcu(), by selectively deferring kfree() by keeping track of the "struct tomoyo_io_buffer" instances. 
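The resulting locking pattern looks roughly like the sketch below (illustrative names; the real code paths are tomoyo_read_control()/tomoyo_write_control() using the global tomoyo_ss SRCU domain, and example_do_io() is a hypothetical placeholder for the per-interface handler):

static ssize_t example_io(struct tomoyo_io_buffer *head,
			  char __user *buffer, size_t count)
{
	ssize_t ret;
	int idx;

	if (mutex_lock_interruptible(&head->io_sem))
		return -EINTR;
	idx = srcu_read_lock(&tomoyo_ss);	/* held for this call only */
	ret = example_do_io(head, buffer, count);
	srcu_read_unlock(&tomoyo_ss, idx);	/* dropped before returning */
	mutex_unlock(&head->io_sem);
	return ret;
}

Because the SRCU read-side critical section no longer spans the whole open()..close() window, list elements that an io_buffer is still iterating over must instead be protected from kfree() by the garbage collector, which is what the io_buffer tracking added below provides.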
Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/common.c | 41 +++---- security/tomoyo/common.h | 8 +- security/tomoyo/gc.c | 278 +++++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 276 insertions(+), 51 deletions(-) diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 50481d2cf970..691c34025a4a 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -1820,9 +1820,7 @@ static void tomoyo_read_self_domain(struct tomoyo_io_buffer *head) * @type: Type of interface. * @file: Pointer to "struct file". * - * Associates policy handler and returns 0 on success, -ENOMEM otherwise. - * - * Caller acquires tomoyo_read_lock(). + * Returns 0 on success, negative value otherwise. */ int tomoyo_open_control(const u8 type, struct file *file) { @@ -1921,9 +1919,6 @@ int tomoyo_open_control(const u8 type, struct file *file) return -ENOMEM; } } - if (type != TOMOYO_QUERY && type != TOMOYO_AUDIT) - head->reader_idx = tomoyo_read_lock(); - file->private_data = head; /* * If the file is /sys/kernel/security/tomoyo/query , increment the * observer counter. @@ -1932,6 +1927,8 @@ int tomoyo_open_control(const u8 type, struct file *file) */ if (type == TOMOYO_QUERY) atomic_inc(&tomoyo_query_observers); + file->private_data = head; + tomoyo_notify_gc(head, true); return 0; } @@ -2000,13 +1997,12 @@ static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head) * @buffer_len: Size of @buffer. * * Returns bytes read on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). */ int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, const int buffer_len) { int len; + int idx; if (!head->read) return -ENOSYS; @@ -2014,6 +2010,7 @@ int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, return -EINTR; head->read_user_buf = buffer; head->read_user_buf_avail = buffer_len; + idx = tomoyo_read_lock(); if (tomoyo_flush(head)) /* Call the policy handler. */ do { @@ -2021,6 +2018,7 @@ int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, head->read(head); } while (tomoyo_flush(head) && tomoyo_has_more_namespace(head)); + tomoyo_read_unlock(idx); len = head->read_user_buf - buffer; mutex_unlock(&head->io_sem); return len; @@ -2071,8 +2069,6 @@ static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line) * @buffer_len: Size of @buffer. * * Returns @buffer_len on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). */ int tomoyo_write_control(struct tomoyo_io_buffer *head, const char __user *buffer, const int buffer_len) @@ -2080,12 +2076,14 @@ int tomoyo_write_control(struct tomoyo_io_buffer *head, int error = buffer_len; size_t avail_len = buffer_len; char *cp0 = head->write_buf; + int idx; if (!head->write) return -ENOSYS; if (!access_ok(VERIFY_READ, buffer, buffer_len)) return -EFAULT; if (mutex_lock_interruptible(&head->io_sem)) return -EINTR; + idx = tomoyo_read_lock(); /* Read a line and dispatch it to the policy handler. */ while (avail_len > 0) { char c; @@ -2148,6 +2146,7 @@ int tomoyo_write_control(struct tomoyo_io_buffer *head, } } out: + tomoyo_read_unlock(idx); mutex_unlock(&head->io_sem); return error; } @@ -2157,30 +2156,18 @@ out: * * @head: Pointer to "struct tomoyo_io_buffer". * - * Releases memory and returns 0. - * - * Caller looses tomoyo_read_lock(). + * Returns 0. 
*/ int tomoyo_close_control(struct tomoyo_io_buffer *head) { - const bool is_write = !!head->write_buf; - /* * If the file is /sys/kernel/security/tomoyo/query , decrement the * observer counter. */ - if (head->type == TOMOYO_QUERY) - atomic_dec(&tomoyo_query_observers); - else if (head->type != TOMOYO_AUDIT) - tomoyo_read_unlock(head->reader_idx); - /* Release memory used for policy I/O. */ - kfree(head->read_buf); - head->read_buf = NULL; - kfree(head->write_buf); - head->write_buf = NULL; - kfree(head); - if (is_write) - tomoyo_run_gc(); + if (head->type == TOMOYO_QUERY && + atomic_dec_and_test(&tomoyo_query_observers)) + wake_up_all(&tomoyo_answer_wait); + tomoyo_notify_gc(head, false); return 0; } diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 53c8798e38b7..a5eeabcc0738 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -441,8 +441,6 @@ struct tomoyo_io_buffer { int (*poll) (struct file *file, poll_table *wait); /* Exclusive lock for this structure. */ struct mutex io_sem; - /* Index returned by tomoyo_read_lock(). */ - int reader_idx; char __user *read_user_buf; int read_user_buf_avail; struct { @@ -480,6 +478,10 @@ struct tomoyo_io_buffer { int writebuf_size; /* Type of this interface. */ u8 type; + /* Users counter protected by tomoyo_io_buffer_list_lock. */ + u8 users; + /* List for telling GC not to kfree() elements. */ + struct list_head list; }; /* @@ -651,7 +653,7 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm); void tomoyo_print_ulong(char *buffer, const int buffer_len, const unsigned long value, const u8 type); void tomoyo_put_name_union(struct tomoyo_name_union *ptr); -void tomoyo_run_gc(void); +void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register); void tomoyo_memory_free(void *ptr); int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, struct tomoyo_acl_param *param, diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c index 782e844dca7f..1e1a6c8c832c 100644 --- a/security/tomoyo/gc.c +++ b/security/tomoyo/gc.c @@ -11,13 +11,123 @@ #include #include +/* The list for "struct tomoyo_io_buffer". */ +static LIST_HEAD(tomoyo_io_buffer_list); +/* Lock for protecting tomoyo_io_buffer_list. */ +static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock); + +/* Size of an element. */ +static const u8 tomoyo_element_size[TOMOYO_MAX_POLICY] = { + [TOMOYO_ID_GROUP] = sizeof(struct tomoyo_group), + [TOMOYO_ID_PATH_GROUP] = sizeof(struct tomoyo_path_group), + [TOMOYO_ID_NUMBER_GROUP] = sizeof(struct tomoyo_number_group), + [TOMOYO_ID_AGGREGATOR] = sizeof(struct tomoyo_aggregator), + [TOMOYO_ID_TRANSITION_CONTROL] = + sizeof(struct tomoyo_transition_control), + [TOMOYO_ID_MANAGER] = sizeof(struct tomoyo_manager), + /* [TOMOYO_ID_NAME] = "struct tomoyo_name"->size, */ + /* [TOMOYO_ID_ACL] = + tomoyo_acl_size["struct tomoyo_acl_info"->type], */ + [TOMOYO_ID_DOMAIN] = sizeof(struct tomoyo_domain_info), +}; + +/* Size of a domain ACL element. */ +static const u8 tomoyo_acl_size[] = { + [TOMOYO_TYPE_PATH_ACL] = sizeof(struct tomoyo_path_acl), + [TOMOYO_TYPE_PATH2_ACL] = sizeof(struct tomoyo_path2_acl), + [TOMOYO_TYPE_PATH_NUMBER_ACL] = sizeof(struct tomoyo_path_number_acl), + [TOMOYO_TYPE_MKDEV_ACL] = sizeof(struct tomoyo_mkdev_acl), + [TOMOYO_TYPE_MOUNT_ACL] = sizeof(struct tomoyo_mount_acl), +}; + +/** + * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not. + * + * @element: Pointer to "struct list_head". 
+ * + * Returns true if @element is used by /sys/kernel/security/tomoyo/ users, + * false otherwise. + */ +static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element) +{ + struct tomoyo_io_buffer *head; + bool in_use = false; + + spin_lock(&tomoyo_io_buffer_list_lock); + list_for_each_entry(head, &tomoyo_io_buffer_list, list) { + head->users++; + spin_unlock(&tomoyo_io_buffer_list_lock); + if (mutex_lock_interruptible(&head->io_sem)) { + in_use = true; + goto out; + } + if (head->r.domain == element || head->r.group == element || + head->r.acl == element || &head->w.domain->list == element) + in_use = true; + mutex_unlock(&head->io_sem); +out: + spin_lock(&tomoyo_io_buffer_list_lock); + head->users--; + if (in_use) + break; + } + spin_unlock(&tomoyo_io_buffer_list_lock); + return in_use; +} + +/** + * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not. + * + * @string: String to check. + * @size: Memory allocated for @string . + * + * Returns true if @string is used by /sys/kernel/security/tomoyo/ users, + * false otherwise. + */ +static bool tomoyo_name_used_by_io_buffer(const char *string, + const size_t size) +{ + struct tomoyo_io_buffer *head; + bool in_use = false; + + spin_lock(&tomoyo_io_buffer_list_lock); + list_for_each_entry(head, &tomoyo_io_buffer_list, list) { + int i; + head->users++; + spin_unlock(&tomoyo_io_buffer_list_lock); + if (mutex_lock_interruptible(&head->io_sem)) { + in_use = true; + goto out; + } + for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) { + const char *w = head->r.w[i]; + if (w < string || w > string + size) + continue; + in_use = true; + break; + } + mutex_unlock(&head->io_sem); +out: + spin_lock(&tomoyo_io_buffer_list_lock); + head->users--; + if (in_use) + break; + } + spin_unlock(&tomoyo_io_buffer_list_lock); + return in_use; +} + +/* Structure for garbage collection. */ struct tomoyo_gc { struct list_head list; enum tomoyo_policy_id type; + size_t size; struct list_head *element; }; -static LIST_HEAD(tomoyo_gc_queue); -static DEFINE_MUTEX(tomoyo_gc_mutex); +/* List of entries to be deleted. */ +static LIST_HEAD(tomoyo_gc_list); +/* Length of tomoyo_gc_list. */ +static int tomoyo_gc_list_len; /** * tomoyo_add_to_gc - Add an entry to to be deleted list. @@ -43,10 +153,42 @@ static bool tomoyo_add_to_gc(const int type, struct list_head *element) if (!entry) return false; entry->type = type; + if (type == TOMOYO_ID_ACL) + entry->size = tomoyo_acl_size[ + container_of(element, + typeof(struct tomoyo_acl_info), + list)->type]; + else if (type == TOMOYO_ID_NAME) + entry->size = strlen(container_of(element, + typeof(struct tomoyo_name), + head.list)->entry.name) + 1; + else + entry->size = tomoyo_element_size[type]; entry->element = element; - list_add(&entry->list, &tomoyo_gc_queue); + list_add(&entry->list, &tomoyo_gc_list); list_del_rcu(element); - return true; + return tomoyo_gc_list_len++ < 128; +} + +/** + * tomoyo_element_linked_by_gc - Validate next element of an entry. + * + * @element: Pointer to an element. + * @size: Size of @element in byte. + * + * Returns true if @element is linked by other elements in the garbage + * collector's queue, false otherwise. 
+ */ +static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size) +{ + struct tomoyo_gc *p; + list_for_each_entry(p, &tomoyo_gc_list, list) { + const u8 *ptr = (const u8 *) p->element->next; + if (ptr < element || element + size < ptr) + continue; + return true; + } + return false; } /** @@ -151,6 +293,13 @@ static void tomoyo_del_acl(struct list_head *element) } } +/** + * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info". + * + * @element: Pointer to "struct list_head". + * + * Returns true if deleted, false otherwise. + */ static bool tomoyo_del_domain(struct list_head *element) { struct tomoyo_domain_info *domain = @@ -360,13 +509,44 @@ unlock: mutex_unlock(&tomoyo_policy_lock); } -static void tomoyo_kfree_entry(void) +/** + * tomoyo_kfree_entry - Delete entries in tomoyo_gc_list. + * + * Returns true if some entries were kfree()d, false otherwise. + */ +static bool tomoyo_kfree_entry(void) { struct tomoyo_gc *p; struct tomoyo_gc *tmp; + bool result = false; - list_for_each_entry_safe(p, tmp, &tomoyo_gc_queue, list) { + list_for_each_entry_safe(p, tmp, &tomoyo_gc_list, list) { struct list_head *element = p->element; + + /* + * list_del_rcu() in tomoyo_add_to_gc() guarantees that the + * list element became no longer reachable from the list which + * the element was originally on (e.g. tomoyo_domain_list). + * Also, synchronize_srcu() in tomoyo_gc_thread() guarantees + * that the list element became no longer referenced by syscall + * users. + * + * However, there are three users which may still be using the + * list element. We need to defer until all of these users + * forget the list element. + * + * Firstly, defer until "struct tomoyo_io_buffer"->r.{domain, + * group,acl} and "struct tomoyo_io_buffer"->w.domain forget + * the list element. + */ + if (tomoyo_struct_used_by_io_buffer(element)) + continue; + /* + * Secondly, defer until all other elements in the + * tomoyo_gc_list list forget the list element. + */ + if (tomoyo_element_linked_by_gc((const u8 *) element, p->size)) + continue; switch (p->type) { case TOMOYO_ID_TRANSITION_CONTROL: tomoyo_del_transition_control(element); @@ -378,6 +558,14 @@ static void tomoyo_kfree_entry(void) tomoyo_del_manager(element); break; case TOMOYO_ID_NAME: + /* + * Thirdly, defer until all "struct tomoyo_io_buffer" + * ->r.w[] forget the list element. + */ + if (tomoyo_name_used_by_io_buffer( + container_of(element, typeof(struct tomoyo_name), + head.list)->entry.name, p->size)) + continue; tomoyo_del_name(element); break; case TOMOYO_ID_ACL: @@ -402,7 +590,10 @@ static void tomoyo_kfree_entry(void) tomoyo_memory_free(element); list_del(&p->list); kfree(p); + tomoyo_gc_list_len--; + result = true; } + return result; } /** @@ -418,25 +609,70 @@ static void tomoyo_kfree_entry(void) */ static int tomoyo_gc_thread(void *unused) { + /* Garbage collector thread is exclusive. 
*/ + static DEFINE_MUTEX(tomoyo_gc_mutex); + if (!mutex_trylock(&tomoyo_gc_mutex)) + goto out; daemonize("GC for TOMOYO"); - if (mutex_trylock(&tomoyo_gc_mutex)) { - int i; - for (i = 0; i < 10; i++) { - tomoyo_collect_entry(); - if (list_empty(&tomoyo_gc_queue)) - break; - synchronize_srcu(&tomoyo_ss); - tomoyo_kfree_entry(); + do { + tomoyo_collect_entry(); + if (list_empty(&tomoyo_gc_list)) + break; + synchronize_srcu(&tomoyo_ss); + } while (tomoyo_kfree_entry()); + { + struct tomoyo_io_buffer *head; + struct tomoyo_io_buffer *tmp; + + spin_lock(&tomoyo_io_buffer_list_lock); + list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list, + list) { + if (head->users) + continue; + list_del(&head->list); + kfree(head->read_buf); + kfree(head->write_buf); + kfree(head); } - mutex_unlock(&tomoyo_gc_mutex); + spin_unlock(&tomoyo_io_buffer_list_lock); } - do_exit(0); + mutex_unlock(&tomoyo_gc_mutex); +out: + /* This acts as do_exit(0). */ + return 0; } -void tomoyo_run_gc(void) +/** + * tomoyo_notify_gc - Register/unregister /sys/kernel/security/tomoyo/ users. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * @is_register: True if register, false if unregister. + * + * Returns nothing. + */ +void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register) { - struct task_struct *task = kthread_create(tomoyo_gc_thread, NULL, - "GC for TOMOYO"); - if (!IS_ERR(task)) - wake_up_process(task); + bool is_write = false; + + spin_lock(&tomoyo_io_buffer_list_lock); + if (is_register) { + head->users = 1; + list_add(&head->list, &tomoyo_io_buffer_list); + } else { + is_write = head->write_buf != NULL; + if (!--head->users) { + list_del(&head->list); + kfree(head->read_buf); + kfree(head->write_buf); + kfree(head); + } + } + spin_unlock(&tomoyo_io_buffer_list_lock); + if (is_write) { + struct task_struct *task = kthread_create(tomoyo_gc_thread, + NULL, + "GC for TOMOYO"); + if (!IS_ERR(task)) + wake_up_process(task); + } } -- cgit v1.2.3 From 2c47ab9353242b0f061959318f83c55360b88fa4 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:21:19 +0900 Subject: TOMOYO: Cleanup part 4. Gather string constants to one file in order to make the object size smaller. Use unsigned type where appropriate. read()/write() returns ssize_t. 
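A small illustration of the ssize_t point (hypothetical helper, not taken from this patch): byte counts stay unsigned, while the value handed back toward the VFS read()/write() path is signed so that a negative errno survives the return:

static ssize_t example_copy_out(char __user *buffer, size_t buffer_len,
				const char *src, size_t src_len)
{
	size_t len = src_len < buffer_len ? src_len : buffer_len;

	if (copy_to_user(buffer, src, len))
		return -EFAULT;		/* needs a signed return type */
	return len;
}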
Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/audit.c | 3 +- security/tomoyo/common.c | 135 +++++++++++++++++++++++++++++++---------------- security/tomoyo/common.h | 51 +++++++++++------- security/tomoyo/domain.c | 7 +-- security/tomoyo/file.c | 63 +++++----------------- security/tomoyo/util.c | 39 ++++++++++++-- 6 files changed, 177 insertions(+), 121 deletions(-) diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c index ef2172f29583..45e0a9f3c384 100644 --- a/security/tomoyo/audit.c +++ b/security/tomoyo/audit.c @@ -163,7 +163,8 @@ static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns, const bool is_granted) { u8 mode; - const u8 category = TOMOYO_MAC_CATEGORY_FILE + TOMOYO_MAX_MAC_INDEX; + const u8 category = tomoyo_index2category[index] + + TOMOYO_MAX_MAC_INDEX; struct tomoyo_profile *p; if (!tomoyo_policy_loaded) return false; diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 691c34025a4a..6402183e2a6b 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -20,31 +20,31 @@ const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = { }; /* String table for /sys/kernel/security/tomoyo/profile */ -static const char *tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX +const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX] = { - [TOMOYO_MAC_FILE_EXECUTE] = "file::execute", - [TOMOYO_MAC_FILE_OPEN] = "file::open", - [TOMOYO_MAC_FILE_CREATE] = "file::create", - [TOMOYO_MAC_FILE_UNLINK] = "file::unlink", - [TOMOYO_MAC_FILE_GETATTR] = "file::getattr", - [TOMOYO_MAC_FILE_MKDIR] = "file::mkdir", - [TOMOYO_MAC_FILE_RMDIR] = "file::rmdir", - [TOMOYO_MAC_FILE_MKFIFO] = "file::mkfifo", - [TOMOYO_MAC_FILE_MKSOCK] = "file::mksock", - [TOMOYO_MAC_FILE_TRUNCATE] = "file::truncate", - [TOMOYO_MAC_FILE_SYMLINK] = "file::symlink", - [TOMOYO_MAC_FILE_MKBLOCK] = "file::mkblock", - [TOMOYO_MAC_FILE_MKCHAR] = "file::mkchar", - [TOMOYO_MAC_FILE_LINK] = "file::link", - [TOMOYO_MAC_FILE_RENAME] = "file::rename", - [TOMOYO_MAC_FILE_CHMOD] = "file::chmod", - [TOMOYO_MAC_FILE_CHOWN] = "file::chown", - [TOMOYO_MAC_FILE_CHGRP] = "file::chgrp", - [TOMOYO_MAC_FILE_IOCTL] = "file::ioctl", - [TOMOYO_MAC_FILE_CHROOT] = "file::chroot", - [TOMOYO_MAC_FILE_MOUNT] = "file::mount", - [TOMOYO_MAC_FILE_UMOUNT] = "file::unmount", - [TOMOYO_MAC_FILE_PIVOT_ROOT] = "file::pivot_root", + [TOMOYO_MAC_FILE_EXECUTE] = "execute", + [TOMOYO_MAC_FILE_OPEN] = "open", + [TOMOYO_MAC_FILE_CREATE] = "create", + [TOMOYO_MAC_FILE_UNLINK] = "unlink", + [TOMOYO_MAC_FILE_GETATTR] = "getattr", + [TOMOYO_MAC_FILE_MKDIR] = "mkdir", + [TOMOYO_MAC_FILE_RMDIR] = "rmdir", + [TOMOYO_MAC_FILE_MKFIFO] = "mkfifo", + [TOMOYO_MAC_FILE_MKSOCK] = "mksock", + [TOMOYO_MAC_FILE_TRUNCATE] = "truncate", + [TOMOYO_MAC_FILE_SYMLINK] = "symlink", + [TOMOYO_MAC_FILE_MKBLOCK] = "mkblock", + [TOMOYO_MAC_FILE_MKCHAR] = "mkchar", + [TOMOYO_MAC_FILE_LINK] = "link", + [TOMOYO_MAC_FILE_RENAME] = "rename", + [TOMOYO_MAC_FILE_CHMOD] = "chmod", + [TOMOYO_MAC_FILE_CHOWN] = "chown", + [TOMOYO_MAC_FILE_CHGRP] = "chgrp", + [TOMOYO_MAC_FILE_IOCTL] = "ioctl", + [TOMOYO_MAC_FILE_CHROOT] = "chroot", + [TOMOYO_MAC_FILE_MOUNT] = "mount", + [TOMOYO_MAC_FILE_UMOUNT] = "unmount", + [TOMOYO_MAC_FILE_PIVOT_ROOT] = "pivot_root", [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file", }; @@ -54,6 +54,27 @@ static const char * const tomoyo_pref_keywords[TOMOYO_MAX_PREF] = { [TOMOYO_PREF_MAX_LEARNING_ENTRY] = "max_learning_entry", }; +/* String table for path operation. 
*/ +const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = { + [TOMOYO_TYPE_EXECUTE] = "execute", + [TOMOYO_TYPE_READ] = "read", + [TOMOYO_TYPE_WRITE] = "write", + [TOMOYO_TYPE_APPEND] = "append", + [TOMOYO_TYPE_UNLINK] = "unlink", + [TOMOYO_TYPE_GETATTR] = "getattr", + [TOMOYO_TYPE_RMDIR] = "rmdir", + [TOMOYO_TYPE_TRUNCATE] = "truncate", + [TOMOYO_TYPE_SYMLINK] = "symlink", + [TOMOYO_TYPE_CHROOT] = "chroot", + [TOMOYO_TYPE_UMOUNT] = "unmount", +}; + +/* String table for categories. */ +static const char * const tomoyo_category_keywords +[TOMOYO_MAX_MAC_CATEGORY_INDEX] = { + [TOMOYO_MAC_CATEGORY_FILE] = "file", +}; + /* Permit policy management by non-root user? */ static bool tomoyo_manage_by_non_root; @@ -98,7 +119,7 @@ static bool tomoyo_flush(struct tomoyo_io_buffer *head) { while (head->r.w_pos) { const char *w = head->r.w[0]; - int len = strlen(w); + size_t len = strlen(w); if (len) { if (len > head->read_user_buf_avail) len = head->read_user_buf_avail; @@ -157,8 +178,8 @@ static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string) void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) { va_list args; - int len; - int pos = head->r.avail; + size_t len; + size_t pos = head->r.avail; int size = head->readbuf_size - pos; if (size <= 0) return; @@ -436,7 +457,17 @@ static int tomoyo_set_mode(char *name, const char *value, config = 0; for (i = 0; i < TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX; i++) { - if (strcmp(name, tomoyo_mac_keywords[i])) + int len = 0; + if (i < TOMOYO_MAX_MAC_INDEX) { + const u8 c = tomoyo_index2category[i]; + const char *category = + tomoyo_category_keywords[c]; + len = strlen(category); + if (strncmp(name, category, len) || + name[len++] != ':' || name[len++] != ':') + continue; + } + if (strcmp(name + len, tomoyo_mac_keywords[i])) continue; config = profile->config[i]; break; @@ -620,8 +651,15 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) if (config == TOMOYO_CONFIG_USE_DEFAULT) continue; tomoyo_print_namespace(head); - tomoyo_io_printf(head, "%u-%s%s", index, "CONFIG::", - tomoyo_mac_keywords[i]); + if (i < TOMOYO_MAX_MAC_INDEX) + tomoyo_io_printf(head, "%u-CONFIG::%s::%s", + index, + tomoyo_category_keywords + [tomoyo_index2category[i]], + tomoyo_mac_keywords[i]); + else + tomoyo_io_printf(head, "%u-CONFIG::%s", index, + tomoyo_mac_keywords[i]); tomoyo_print_config(head, config); head->r.bit++; break; @@ -905,6 +943,12 @@ static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns, return -EINVAL; } +/* String table for domain flags. */ +const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS] = { + [TOMOYO_DIF_QUOTA_WARNED] = "quota_exceeded\n", + [TOMOYO_DIF_TRANSITION_FAILED] = "transition_failed\n", +}; + /** * tomoyo_write_domain - Write domain policy. 
* @@ -948,12 +992,11 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head) domain->group = (u8) profile; return 0; } - if (!strcmp(data, "quota_exceeded")) { - domain->quota_warned = !is_delete; - return 0; - } - if (!strcmp(data, "transition_failed")) { - domain->transition_failed = !is_delete; + for (profile = 0; profile < TOMOYO_MAX_DOMAIN_INFO_FLAGS; profile++) { + const char *cp = tomoyo_dif[profile]; + if (strncmp(data, cp, strlen(cp) - 1)) + continue; + domain->flags[profile] = !is_delete; return 0; } return tomoyo_write_domain2(ns, &domain->acl_info_list, data, @@ -1134,6 +1177,7 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) struct tomoyo_domain_info *domain = list_entry(head->r.domain, typeof(*domain), list); switch (head->r.step) { + u8 i; case 0: if (domain->is_deleted && !head->r.print_this_domain_only) @@ -1145,10 +1189,9 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) domain->profile); tomoyo_io_printf(head, "use_group %u\n", domain->group); - if (domain->quota_warned) - tomoyo_set_string(head, "quota_exceeded\n"); - if (domain->transition_failed) - tomoyo_set_string(head, "transition_failed\n"); + for (i = 0; i < TOMOYO_MAX_DOMAIN_INFO_FLAGS; i++) + if (domain->flags[i]) + tomoyo_set_string(head, tomoyo_dif[i]); head->r.step++; tomoyo_set_lf(head); /* fall through */ @@ -1691,8 +1734,8 @@ static int tomoyo_poll_query(struct file *file, poll_table *wait) static void tomoyo_read_query(struct tomoyo_io_buffer *head) { struct list_head *tmp; - int pos = 0; - int len = 0; + unsigned int pos = 0; + size_t len = 0; char *buf; if (head->r.w_pos) return; @@ -1998,8 +2041,8 @@ static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head) * * Returns bytes read on success, negative value otherwise. */ -int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, - const int buffer_len) +ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, + const int buffer_len) { int len; int idx; @@ -2070,8 +2113,8 @@ static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line) * * Returns @buffer_len on success, negative value otherwise. */ -int tomoyo_write_control(struct tomoyo_io_buffer *head, - const char __user *buffer, const int buffer_len) +ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, + const char __user *buffer, const int buffer_len) { int error = buffer_len; size_t avail_len = buffer_len; diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index a5eeabcc0738..b54455dfe0ca 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -67,6 +67,20 @@ enum tomoyo_policy_id { TOMOYO_MAX_POLICY }; +/* Index numbers for domain's attributes. */ +enum tomoyo_domain_info_flags_index { + /* Quota warnning flag. */ + TOMOYO_DIF_QUOTA_WARNED, + /* + * This domain was unable to create a new domain at + * tomoyo_find_next_domain() because the name of the domain to be + * created was too long or it could not allocate memory. + * More than one process continued execve() without domain transition. + */ + TOMOYO_DIF_TRANSITION_FAILED, + TOMOYO_MAX_DOMAIN_INFO_FLAGS +}; + /* Index numbers for group entries. */ enum tomoyo_group_id { TOMOYO_PATH_GROUP, @@ -364,8 +378,7 @@ struct tomoyo_domain_info { u8 profile; /* Profile number to use. */ u8 group; /* Group number to use. */ bool is_deleted; /* Delete flag. */ - bool quota_warned; /* Quota warnning flag. */ - bool transition_failed; /* Domain transition failed flag. 
*/ + bool flags[TOMOYO_MAX_DOMAIN_INFO_FLAGS]; atomic_t users; /* Number of referring credentials. */ }; @@ -442,15 +455,15 @@ struct tomoyo_io_buffer { /* Exclusive lock for this structure. */ struct mutex io_sem; char __user *read_user_buf; - int read_user_buf_avail; + size_t read_user_buf_avail; struct { struct list_head *ns; struct list_head *domain; struct list_head *group; struct list_head *acl; - int avail; - int step; - int query_index; + size_t avail; + unsigned int step; + unsigned int query_index; u16 index; u8 acl_group_index; u8 bit; @@ -465,19 +478,19 @@ struct tomoyo_io_buffer { /* The position currently writing to. */ struct tomoyo_domain_info *domain; /* Bytes available for writing. */ - int avail; + size_t avail; bool is_delete; } w; /* Buffer for reading. */ char *read_buf; /* Size of read buffer. */ - int readbuf_size; + size_t readbuf_size; /* Buffer for writing. */ char *write_buf; /* Size of write buffer. */ - int writebuf_size; + size_t writebuf_size; /* Type of this interface. */ - u8 type; + enum tomoyo_securityfs_interface_index type; /* Users counter protected by tomoyo_io_buffer_list_lock. */ u8 users; /* List for telling GC not to kfree() elements. */ @@ -569,10 +582,10 @@ void tomoyo_check_profile(void); int tomoyo_open_control(const u8 type, struct file *file); int tomoyo_close_control(struct tomoyo_io_buffer *head); int tomoyo_poll_control(struct file *file, poll_table *wait); -int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, - const int buffer_len); -int tomoyo_write_control(struct tomoyo_io_buffer *head, - const char __user *buffer, const int buffer_len); +ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, + const int buffer_len); +ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, + const char __user *buffer, const int buffer_len); bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r); void tomoyo_warn_oom(const char *function); const struct tomoyo_path_info * @@ -707,15 +720,17 @@ extern struct tomoyo_domain_info tomoyo_kernel_domain; extern struct tomoyo_policy_namespace tomoyo_kernel_namespace; extern struct list_head tomoyo_namespace_list; -extern const char *tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION]; -extern const char *tomoyo_mkdev_keyword[TOMOYO_MAX_MKDEV_OPERATION]; -extern const char *tomoyo_path2_keyword[TOMOYO_MAX_PATH2_OPERATION]; -extern const char *tomoyo_path_number_keyword[TOMOYO_MAX_PATH_NUMBER_OPERATION]; +extern const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX + + TOMOYO_MAX_MAC_CATEGORY_INDEX]; +extern const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION]; +extern const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX]; + extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION]; extern const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION]; extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION]; +extern const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS]; extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE]; extern unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT]; extern unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT]; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 71acebc747c3..7893127d8770 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -684,10 +684,11 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm) retval = -ENOMEM; else { retval = 0; - if (!old_domain->transition_failed) { - old_domain->transition_failed = true; + if 
(!old_domain->flags[TOMOYO_DIF_TRANSITION_FAILED]) { + old_domain->flags[TOMOYO_DIF_TRANSITION_FAILED] = true; r.granted = false; - tomoyo_write_log(&r, "%s", "transition_failed\n"); + tomoyo_write_log(&r, "%s", tomoyo_dif + [TOMOYO_DIF_TRANSITION_FAILED]); printk(KERN_WARNING "ERROR: Domain '%s' not defined.\n", tmp); } diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 8410f28a35e0..6ab9e4cdd61f 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -9,46 +9,6 @@ #include "common.h" #include -/* Keyword array for operations with one pathname. */ -const char *tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = { - [TOMOYO_TYPE_EXECUTE] = "execute", - [TOMOYO_TYPE_READ] = "read", - [TOMOYO_TYPE_WRITE] = "write", - [TOMOYO_TYPE_APPEND] = "append", - [TOMOYO_TYPE_UNLINK] = "unlink", - [TOMOYO_TYPE_GETATTR] = "getattr", - [TOMOYO_TYPE_RMDIR] = "rmdir", - [TOMOYO_TYPE_TRUNCATE] = "truncate", - [TOMOYO_TYPE_SYMLINK] = "symlink", - [TOMOYO_TYPE_CHROOT] = "chroot", - [TOMOYO_TYPE_UMOUNT] = "unmount", -}; - -/* Keyword array for operations with one pathname and three numbers. */ -const char *tomoyo_mkdev_keyword[TOMOYO_MAX_MKDEV_OPERATION] = { - [TOMOYO_TYPE_MKBLOCK] = "mkblock", - [TOMOYO_TYPE_MKCHAR] = "mkchar", -}; - -/* Keyword array for operations with two pathnames. */ -const char *tomoyo_path2_keyword[TOMOYO_MAX_PATH2_OPERATION] = { - [TOMOYO_TYPE_LINK] = "link", - [TOMOYO_TYPE_RENAME] = "rename", - [TOMOYO_TYPE_PIVOT_ROOT] = "pivot_root", -}; - -/* Keyword array for operations with one pathname and one number. */ -const char *tomoyo_path_number_keyword[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { - [TOMOYO_TYPE_CREATE] = "create", - [TOMOYO_TYPE_MKDIR] = "mkdir", - [TOMOYO_TYPE_MKFIFO] = "mkfifo", - [TOMOYO_TYPE_MKSOCK] = "mksock", - [TOMOYO_TYPE_IOCTL] = "ioctl", - [TOMOYO_TYPE_CHMOD] = "chmod", - [TOMOYO_TYPE_CHOWN] = "chown", - [TOMOYO_TYPE_CHGRP] = "chgrp", -}; - /* * Mapping table from "enum tomoyo_path_acl_index" to "enum tomoyo_mac_index". 
*/ @@ -220,8 +180,8 @@ static int tomoyo_audit_path_log(struct tomoyo_request_info *r) */ static int tomoyo_audit_path2_log(struct tomoyo_request_info *r) { - return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_path2_keyword - [r->param.path2.operation], + return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords + [tomoyo_pp2mac[r->param.path2.operation]], r->param.path2.filename1->name, r->param.path2.filename2->name); } @@ -236,8 +196,8 @@ static int tomoyo_audit_path2_log(struct tomoyo_request_info *r) static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r) { return tomoyo_supervisor(r, "file %s %s 0%o %u %u\n", - tomoyo_mkdev_keyword - [r->param.mkdev.operation], + tomoyo_mac_keywords + [tomoyo_pnnn2mac[r->param.mkdev.operation]], r->param.mkdev.filename->name, r->param.mkdev.mode, r->param.mkdev.major, r->param.mkdev.minor); @@ -272,8 +232,8 @@ static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r) } tomoyo_print_ulong(buffer, sizeof(buffer), r->param.path_number.number, radix); - return tomoyo_supervisor(r, "file %s %s %s\n", - tomoyo_path_number_keyword[type], + return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords + [tomoyo_pn2mac[type]], r->param.path_number.filename->name, buffer); } @@ -985,22 +945,25 @@ int tomoyo_write_file(struct tomoyo_acl_param *param) if (perm) return tomoyo_update_path_acl(perm, param); for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++) - if (tomoyo_permstr(operation, tomoyo_path2_keyword[type])) + if (tomoyo_permstr(operation, + tomoyo_mac_keywords[tomoyo_pp2mac[type]])) perm |= 1 << type; if (perm) return tomoyo_update_path2_acl(perm, param); for (type = 0; type < TOMOYO_MAX_PATH_NUMBER_OPERATION; type++) if (tomoyo_permstr(operation, - tomoyo_path_number_keyword[type])) + tomoyo_mac_keywords[tomoyo_pn2mac[type]])) perm |= 1 << type; if (perm) return tomoyo_update_path_number_acl(perm, param); for (type = 0; type < TOMOYO_MAX_MKDEV_OPERATION; type++) - if (tomoyo_permstr(operation, tomoyo_mkdev_keyword[type])) + if (tomoyo_permstr(operation, + tomoyo_mac_keywords[tomoyo_pnnn2mac[type]])) perm |= 1 << type; if (perm) return tomoyo_update_mkdev_acl(perm, param); - if (tomoyo_permstr(operation, "mount")) + if (tomoyo_permstr(operation, + tomoyo_mac_keywords[TOMOYO_MAC_FILE_MOUNT])) return tomoyo_update_mount_acl(param); return -EINVAL; } diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index fda15c1fc1c0..daf7a45f70f1 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -15,6 +15,37 @@ DEFINE_MUTEX(tomoyo_policy_lock); /* Has /sbin/init started? */ bool tomoyo_policy_loaded; +/* + * Mapping table from "enum tomoyo_mac_index" to + * "enum tomoyo_mac_category_index". 
+ */ +const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = { + /* CONFIG::file group */ + [TOMOYO_MAC_FILE_EXECUTE] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_OPEN] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_CREATE] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_UNLINK] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_GETATTR] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MKDIR] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_RMDIR] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MKFIFO] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MKSOCK] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_TRUNCATE] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_SYMLINK] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MKBLOCK] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MKCHAR] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_LINK] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_RENAME] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_CHMOD] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_CHOWN] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_CHGRP] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_IOCTL] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_CHROOT] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MOUNT] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_UMOUNT] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE, +}; + /** * tomoyo_permstr - Find permission keywords. * @@ -936,9 +967,11 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r) if (count < tomoyo_profile(domain->ns, domain->profile)-> pref[TOMOYO_PREF_MAX_LEARNING_ENTRY]) return true; - if (!domain->quota_warned) { - domain->quota_warned = true; - printk(KERN_WARNING "TOMOYO-WARNING: " + if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) { + domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true; + /* r->granted = false; */ + tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]); + printk(KERN_WARNING "WARNING: " "Domain '%s' has too many ACLs to hold. " "Stopped learning mode.\n", domain->domainname->name); } -- cgit v1.2.3 From b22b8b9fd90eecfb7133e56b4e113595f09f4492 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:21:50 +0900 Subject: TOMOYO: Rename meminfo to stat and show more statistics. Show statistics such as last policy update time and last policy violation time in addition to memory usage. Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/audit.c | 41 ------------- security/tomoyo/common.c | 129 +++++++++++++++++++++++++++++++++++++--- security/tomoyo/common.h | 17 +++++- security/tomoyo/memory.c | 117 +++++++++--------------------------- security/tomoyo/securityfs_if.c | 4 +- security/tomoyo/util.c | 41 +++++++++++++ 6 files changed, 206 insertions(+), 143 deletions(-) diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c index 45e0a9f3c384..f2c869767d79 100644 --- a/security/tomoyo/audit.c +++ b/security/tomoyo/audit.c @@ -9,47 +9,6 @@ #include "common.h" #include -/** - * tomoyo_convert_time - Convert time_t to YYYY/MM/DD hh/mm/ss. - * - * @time: Seconds since 1970/01/01 00:00:00. - * @stamp: Pointer to "struct tomoyo_time". - * - * Returns nothing. - * - * This function does not handle Y2038 problem. 
- */ -static void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp) -{ - static const u16 tomoyo_eom[2][12] = { - { 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, - { 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } - }; - u16 y; - u8 m; - bool r; - stamp->sec = time % 60; - time /= 60; - stamp->min = time % 60; - time /= 60; - stamp->hour = time % 24; - time /= 24; - for (y = 1970; ; y++) { - const unsigned short days = (y & 3) ? 365 : 366; - if (time < days) - break; - time -= days; - } - r = (y & 3) == 0; - for (m = 0; m < 11 && time >= tomoyo_eom[r][m]; m++) - ; - if (m) - time -= tomoyo_eom[r][m - 1]; - stamp->year = y; - stamp->month = ++m; - stamp->day = ++time; -} - /** * tomoyo_print_header - Get header line of audit log. * diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 6402183e2a6b..7bc0d1d95867 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -1584,8 +1584,9 @@ static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header) return; snprintf(buffer, len - 1, "%s", cp); tomoyo_normalize_line(buffer); - tomoyo_write_domain2(domain->ns, &domain->acl_info_list, buffer, - false); + if (!tomoyo_write_domain2(domain->ns, &domain->acl_info_list, buffer, + false)) + tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES); kfree(buffer); } @@ -1618,6 +1619,8 @@ int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) /* Nothing more to do if granted. */ if (r->granted) return 0; + if (r->mode) + tomoyo_update_stat(r->mode); switch (r->mode) { case TOMOYO_CONFIG_ENFORCING: error = -EPERM; @@ -1857,6 +1860,104 @@ static void tomoyo_read_self_domain(struct tomoyo_io_buffer *head) } } +/* String table for /sys/kernel/security/tomoyo/stat interface. */ +static const char * const tomoyo_policy_headers[TOMOYO_MAX_POLICY_STAT] = { + [TOMOYO_STAT_POLICY_UPDATES] = "update:", + [TOMOYO_STAT_POLICY_LEARNING] = "violation in learning mode:", + [TOMOYO_STAT_POLICY_PERMISSIVE] = "violation in permissive mode:", + [TOMOYO_STAT_POLICY_ENFORCING] = "violation in enforcing mode:", +}; + +/* String table for /sys/kernel/security/tomoyo/stat interface. */ +static const char * const tomoyo_memory_headers[TOMOYO_MAX_MEMORY_STAT] = { + [TOMOYO_MEMORY_POLICY] = "policy:", + [TOMOYO_MEMORY_AUDIT] = "audit log:", + [TOMOYO_MEMORY_QUERY] = "query message:", +}; + +/* Timestamp counter for last updated. */ +static unsigned int tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT]; +/* Counter for number of updates. */ +static unsigned int tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT]; + +/** + * tomoyo_update_stat - Update statistic counters. + * + * @index: Index for policy type. + * + * Returns nothing. + */ +void tomoyo_update_stat(const u8 index) +{ + struct timeval tv; + do_gettimeofday(&tv); + /* + * I don't use atomic operations because race condition is not fatal. + */ + tomoyo_stat_updated[index]++; + tomoyo_stat_modified[index] = tv.tv_sec; +} + +/** + * tomoyo_read_stat - Read statistic data. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. 
+ */ +static void tomoyo_read_stat(struct tomoyo_io_buffer *head) +{ + u8 i; + unsigned int total = 0; + if (head->r.eof) + return; + for (i = 0; i < TOMOYO_MAX_POLICY_STAT; i++) { + tomoyo_io_printf(head, "Policy %-30s %10u", + tomoyo_policy_headers[i], + tomoyo_stat_updated[i]); + if (tomoyo_stat_modified[i]) { + struct tomoyo_time stamp; + tomoyo_convert_time(tomoyo_stat_modified[i], &stamp); + tomoyo_io_printf(head, " (Last: %04u/%02u/%02u " + "%02u:%02u:%02u)", + stamp.year, stamp.month, stamp.day, + stamp.hour, stamp.min, stamp.sec); + } + tomoyo_set_lf(head); + } + for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++) { + unsigned int used = tomoyo_memory_used[i]; + total += used; + tomoyo_io_printf(head, "Memory used by %-22s %10u", + tomoyo_memory_headers[i], used); + used = tomoyo_memory_quota[i]; + if (used) + tomoyo_io_printf(head, " (Quota: %10u)", used); + tomoyo_set_lf(head); + } + tomoyo_io_printf(head, "Total memory used: %10u\n", + total); + head->r.eof = true; +} + +/** + * tomoyo_write_stat - Set memory quota. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns 0. + */ +static int tomoyo_write_stat(struct tomoyo_io_buffer *head) +{ + char *data = head->write_buf; + u8 i; + if (tomoyo_str_starts(&data, "Memory used by ")) + for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++) + if (tomoyo_str_starts(&data, tomoyo_memory_headers[i])) + sscanf(data, "%u", &tomoyo_memory_quota[i]); + return 0; +} + /** * tomoyo_open_control - open() for /sys/kernel/security/tomoyo/ interface. * @@ -1908,11 +2009,11 @@ int tomoyo_open_control(const u8 type, struct file *file) head->read = tomoyo_read_version; head->readbuf_size = 128; break; - case TOMOYO_MEMINFO: - /* /sys/kernel/security/tomoyo/meminfo */ - head->write = tomoyo_write_memory_quota; - head->read = tomoyo_read_memory_counter; - head->readbuf_size = 512; + case TOMOYO_STAT: + /* /sys/kernel/security/tomoyo/stat */ + head->write = tomoyo_write_stat; + head->read = tomoyo_read_stat; + head->readbuf_size = 1024; break; case TOMOYO_PROFILE: /* /sys/kernel/security/tomoyo/profile */ @@ -2186,6 +2287,20 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, case -EPERM: error = -EPERM; goto out; + case 0: + switch (head->type) { + case TOMOYO_DOMAINPOLICY: + case TOMOYO_EXCEPTIONPOLICY: + case TOMOYO_DOMAIN_STATUS: + case TOMOYO_STAT: + case TOMOYO_PROFILE: + case TOMOYO_MANAGER: + tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES); + break; + default: + break; + } + break; } } out: diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index b54455dfe0ca..7984a0ed548b 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -133,6 +133,7 @@ enum tomoyo_path_acl_index { TOMOYO_MAX_PATH_OPERATION }; +/* Index numbers for /sys/kernel/security/tomoyo/stat interface. */ enum tomoyo_memory_stat_type { TOMOYO_MEMORY_POLICY, TOMOYO_MEMORY_AUDIT, @@ -173,7 +174,7 @@ enum tomoyo_securityfs_interface_index { TOMOYO_EXCEPTIONPOLICY, TOMOYO_DOMAIN_STATUS, TOMOYO_PROCESS_STATUS, - TOMOYO_MEMINFO, + TOMOYO_STAT, TOMOYO_SELFDOMAIN, TOMOYO_AUDIT, TOMOYO_VERSION, @@ -237,6 +238,16 @@ enum tomoyo_mac_category_index { */ #define TOMOYO_RETRY_REQUEST 1 +/* Index numbers for /sys/kernel/security/tomoyo/stat interface. */ +enum tomoyo_policy_stat_type { + /* Do not change this order. 
*/ + TOMOYO_STAT_POLICY_UPDATES, + TOMOYO_STAT_POLICY_LEARNING, /* == TOMOYO_CONFIG_LEARNING */ + TOMOYO_STAT_POLICY_PERMISSIVE, /* == TOMOYO_CONFIG_PERMISSIVE */ + TOMOYO_STAT_POLICY_ENFORCING, /* == TOMOYO_CONFIG_ENFORCING */ + TOMOYO_MAX_POLICY_STAT +}; + /* Index numbers for profile's PREFERENCE values. */ enum tomoyo_pref_index { TOMOYO_PREF_MAX_AUDIT_LOG, @@ -648,8 +659,8 @@ char *tomoyo_realpath_from_path(struct path *path); bool tomoyo_memory_ok(void *ptr); void *tomoyo_commit_ok(void *data, const unsigned int size); const struct tomoyo_path_info *tomoyo_get_name(const char *name); -void tomoyo_read_memory_counter(struct tomoyo_io_buffer *head); -int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head); +void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp); +void tomoyo_update_stat(const u8 index); void __init tomoyo_mm_init(void); int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, const struct tomoyo_path_info *filename); diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c index 39d012823f84..78b6143068de 100644 --- a/security/tomoyo/memory.c +++ b/security/tomoyo/memory.c @@ -29,16 +29,13 @@ void tomoyo_warn_oom(const char *function) panic("MAC Initialization failed.\n"); } +/* Lock for protecting tomoyo_memory_used. */ +static DEFINE_SPINLOCK(tomoyo_policy_memory_lock); /* Memoy currently used by policy/audit log/query. */ unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT]; /* Memory quota for "policy"/"audit log"/"query". */ unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT]; -/* Memory allocated for policy. */ -static atomic_t tomoyo_policy_memory_size; -/* Quota for holding policy. */ -static unsigned int tomoyo_quota_for_policy; - /** * tomoyo_memory_ok - Check memory quota. * @@ -50,15 +47,20 @@ static unsigned int tomoyo_quota_for_policy; */ bool tomoyo_memory_ok(void *ptr) { - size_t s = ptr ? ksize(ptr) : 0; - atomic_add(s, &tomoyo_policy_memory_size); - if (ptr && (!tomoyo_quota_for_policy || - atomic_read(&tomoyo_policy_memory_size) - <= tomoyo_quota_for_policy)) { - memset(ptr, 0, s); - return true; + if (ptr) { + const size_t s = ksize(ptr); + bool result; + spin_lock(&tomoyo_policy_memory_lock); + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s; + result = !tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] || + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <= + tomoyo_memory_quota[TOMOYO_MEMORY_POLICY]; + if (!result) + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s; + spin_unlock(&tomoyo_policy_memory_lock); + if (result) + return true; } - atomic_sub(s, &tomoyo_policy_memory_size); tomoyo_warn_oom(__func__); return false; } @@ -91,7 +93,10 @@ void *tomoyo_commit_ok(void *data, const unsigned int size) */ void tomoyo_memory_free(void *ptr) { - atomic_sub(ksize(ptr), &tomoyo_policy_memory_size); + size_t s = ksize(ptr); + spin_lock(&tomoyo_policy_memory_lock); + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s; + spin_unlock(&tomoyo_policy_memory_lock); kfree(ptr); } @@ -162,7 +167,6 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name) struct tomoyo_name *ptr; unsigned int hash; int len; - int allocated_len; struct list_head *head; if (!name) @@ -179,22 +183,17 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name) goto out; } ptr = kzalloc(sizeof(*ptr) + len, GFP_NOFS); - allocated_len = ptr ? 
ksize(ptr) : 0; - if (!ptr || (tomoyo_quota_for_policy && - atomic_read(&tomoyo_policy_memory_size) + allocated_len - > tomoyo_quota_for_policy)) { + if (tomoyo_memory_ok(ptr)) { + ptr->entry.name = ((char *) ptr) + sizeof(*ptr); + memmove((char *) ptr->entry.name, name, len); + atomic_set(&ptr->head.users, 1); + tomoyo_fill_path_info(&ptr->entry); + list_add_tail(&ptr->head.list, head); + } else { kfree(ptr); ptr = NULL; - tomoyo_warn_oom(__func__); - goto out; } - atomic_add(allocated_len, &tomoyo_policy_memory_size); - ptr->entry.name = ((char *) ptr) + sizeof(*ptr); - memmove((char *) ptr->entry.name, name, len); - atomic_set(&ptr->head.users, 1); - tomoyo_fill_path_info(&ptr->entry); - list_add_tail(&ptr->head.list, head); - out: +out: mutex_unlock(&tomoyo_policy_lock); return ptr ? &ptr->entry : NULL; } @@ -227,65 +226,3 @@ void __init tomoyo_mm_init(void) } #endif } - - -/* Memory allocated for query lists. */ -unsigned int tomoyo_query_memory_size; -/* Quota for holding query lists. */ -unsigned int tomoyo_quota_for_query; - -/** - * tomoyo_read_memory_counter - Check for memory usage in bytes. - * - * @head: Pointer to "struct tomoyo_io_buffer". - * - * Returns memory usage. - */ -void tomoyo_read_memory_counter(struct tomoyo_io_buffer *head) -{ - if (!head->r.eof) { - const unsigned int policy - = atomic_read(&tomoyo_policy_memory_size); - const unsigned int query = tomoyo_query_memory_size; - char buffer[64]; - - memset(buffer, 0, sizeof(buffer)); - if (tomoyo_quota_for_policy) - snprintf(buffer, sizeof(buffer) - 1, - " (Quota: %10u)", - tomoyo_quota_for_policy); - else - buffer[0] = '\0'; - tomoyo_io_printf(head, "Policy: %10u%s\n", policy, - buffer); - if (tomoyo_quota_for_query) - snprintf(buffer, sizeof(buffer) - 1, - " (Quota: %10u)", - tomoyo_quota_for_query); - else - buffer[0] = '\0'; - tomoyo_io_printf(head, "Query lists: %10u%s\n", query, - buffer); - tomoyo_io_printf(head, "Total: %10u\n", policy + query); - head->r.eof = true; - } -} - -/** - * tomoyo_write_memory_quota - Set memory quota. - * - * @head: Pointer to "struct tomoyo_io_buffer". - * - * Returns 0. - */ -int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head) -{ - char *data = head->write_buf; - unsigned int size; - - if (sscanf(data, "Policy: %u", &size) == 1) - tomoyo_quota_for_policy = size; - else if (sscanf(data, "Query lists: %u", &size) == 1) - tomoyo_quota_for_query = size; - return 0; -} diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c index e056609b422b..b509e2cd2ab1 100644 --- a/security/tomoyo/securityfs_if.c +++ b/security/tomoyo/securityfs_if.c @@ -143,8 +143,8 @@ static int __init tomoyo_initerface_init(void) TOMOYO_DOMAIN_STATUS); tomoyo_create_entry(".process_status", 0600, tomoyo_dir, TOMOYO_PROCESS_STATUS); - tomoyo_create_entry("meminfo", 0600, tomoyo_dir, - TOMOYO_MEMINFO); + tomoyo_create_entry("stat", 0644, tomoyo_dir, + TOMOYO_STAT); tomoyo_create_entry("profile", 0600, tomoyo_dir, TOMOYO_PROFILE); tomoyo_create_entry("manager", 0600, tomoyo_dir, diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index daf7a45f70f1..7ff54c95e1f2 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -46,6 +46,47 @@ const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = { [TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE, }; +/** + * tomoyo_convert_time - Convert time_t to YYYY/MM/DD hh/mm/ss. + * + * @time: Seconds since 1970/01/01 00:00:00. + * @stamp: Pointer to "struct tomoyo_time". + * + * Returns nothing. 
+ * + * This function does not handle Y2038 problem. + */ +void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp) +{ + static const u16 tomoyo_eom[2][12] = { + { 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, + { 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } + }; + u16 y; + u8 m; + bool r; + stamp->sec = time % 60; + time /= 60; + stamp->min = time % 60; + time /= 60; + stamp->hour = time % 24; + time /= 24; + for (y = 1970; ; y++) { + const unsigned short days = (y & 3) ? 365 : 366; + if (time < days) + break; + time -= days; + } + r = (y & 3) == 0; + for (m = 0; m < 11 && time >= tomoyo_eom[r][m]; m++) + ; + if (m) + time -= tomoyo_eom[r][m - 1]; + stamp->year = y; + stamp->month = ++m; + stamp->day = ++time; +} + /** * tomoyo_permstr - Find permission keywords. * -- cgit v1.2.3 From efe836ab2b514ae7b59528af36d452978b42d266 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:22:18 +0900 Subject: TOMOYO: Add built-in policy support. To be able to start using enforcing mode from the early stage of boot sequence, this patch adds support for built-in policy configuration (and next patch adds support for activating access control without calling external policy loader program). Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/Makefile | 47 +++++++++++++++++++++++++++++++++++++ security/tomoyo/common.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++ security/tomoyo/common.h | 1 + security/tomoyo/memory.c | 10 -------- 4 files changed, 108 insertions(+), 10 deletions(-) diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile index b13f7f9fbb52..04f676a940ae 100644 --- a/security/tomoyo/Makefile +++ b/security/tomoyo/Makefile @@ -1 +1,48 @@ obj-y = audit.o common.o domain.o file.o gc.o group.o load_policy.o memory.o mount.o realpath.o securityfs_if.o tomoyo.o util.o + +$(obj)/policy/profile.conf: + @mkdir -p $(obj)/policy/ + @echo Creating an empty policy/profile.conf + @touch $@ + +$(obj)/policy/exception_policy.conf: + @mkdir -p $(obj)/policy/ + @echo Creating a default policy/exception_policy.conf + @echo initialize_domain /sbin/modprobe from any >> $@ + @echo initialize_domain /sbin/hotplug from any >> $@ + +$(obj)/policy/domain_policy.conf: + @mkdir -p $(obj)/policy/ + @echo Creating an empty policy/domain_policy.conf + @touch $@ + +$(obj)/policy/manager.conf: + @mkdir -p $(obj)/policy/ + @echo Creating an empty policy/manager.conf + @touch $@ + +$(obj)/policy/stat.conf: + @mkdir -p $(obj)/policy/ + @echo Creating an empty policy/stat.conf + @touch $@ + +$(obj)/builtin-policy.h: $(obj)/policy/profile.conf $(obj)/policy/exception_policy.conf $(obj)/policy/domain_policy.conf $(obj)/policy/manager.conf $(obj)/policy/stat.conf + @echo Generating built-in policy for TOMOYO 2.4.x. 
+ @echo "static char tomoyo_builtin_profile[] __initdata =" > $@.tmp + @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/profile.conf >> $@.tmp + @echo "\"\";" >> $@.tmp + @echo "static char tomoyo_builtin_exception_policy[] __initdata =" >> $@.tmp + @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/exception_policy.conf >> $@.tmp + @echo "\"\";" >> $@.tmp + @echo "static char tomoyo_builtin_domain_policy[] __initdata =" >> $@.tmp + @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/domain_policy.conf >> $@.tmp + @echo "\"\";" >> $@.tmp + @echo "static char tomoyo_builtin_manager[] __initdata =" >> $@.tmp + @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/manager.conf >> $@.tmp + @echo "\"\";" >> $@.tmp + @echo "static char tomoyo_builtin_stat[] __initdata =" >> $@.tmp + @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/stat.conf >> $@.tmp + @echo "\"\";" >> $@.tmp + @mv $@.tmp $@ + +$(obj)/common.o: $(obj)/builtin-policy.h diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 7bc0d1d95867..01e60ad68b3a 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -2361,3 +2361,63 @@ void tomoyo_check_profile(void) tomoyo_read_unlock(idx); printk(KERN_INFO "Mandatory Access Control activated.\n"); } + +/** + * tomoyo_load_builtin_policy - Load built-in policy. + * + * Returns nothing. + */ +void __init tomoyo_load_builtin_policy(void) +{ + /* + * This include file is manually created and contains built-in policy + * named "tomoyo_builtin_profile", "tomoyo_builtin_exception_policy", + * "tomoyo_builtin_domain_policy", "tomoyo_builtin_manager", + * "tomoyo_builtin_stat" in the form of "static char [] __initdata". 
+ */ +#include "builtin-policy.h" + u8 i; + const int idx = tomoyo_read_lock(); + for (i = 0; i < 5; i++) { + struct tomoyo_io_buffer head = { }; + char *start = ""; + switch (i) { + case 0: + start = tomoyo_builtin_profile; + head.type = TOMOYO_PROFILE; + head.write = tomoyo_write_profile; + break; + case 1: + start = tomoyo_builtin_exception_policy; + head.type = TOMOYO_EXCEPTIONPOLICY; + head.write = tomoyo_write_exception; + break; + case 2: + start = tomoyo_builtin_domain_policy; + head.type = TOMOYO_DOMAINPOLICY; + head.write = tomoyo_write_domain; + break; + case 3: + start = tomoyo_builtin_manager; + head.type = TOMOYO_MANAGER; + head.write = tomoyo_write_manager; + break; + case 4: + start = tomoyo_builtin_stat; + head.type = TOMOYO_STAT; + head.write = tomoyo_write_stat; + break; + } + while (1) { + char *end = strchr(start, '\n'); + if (!end) + break; + *end = '\0'; + tomoyo_normalize_line(start); + head.write_buf = start; + tomoyo_parse_policy(&head, start); + start = end + 1; + } + } + tomoyo_read_unlock(idx); +} diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 7984a0ed548b..a15fe29740a4 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -662,6 +662,7 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name); void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp); void tomoyo_update_stat(const u8 index); void __init tomoyo_mm_init(void); +void __init tomoyo_load_builtin_policy(void); int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, const struct tomoyo_path_info *filename); int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c index 78b6143068de..46538ce47d72 100644 --- a/security/tomoyo/memory.c +++ b/security/tomoyo/memory.c @@ -215,14 +215,4 @@ void __init tomoyo_mm_init(void) INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list); tomoyo_kernel_domain.domainname = tomoyo_get_name(""); list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list); -#if 0 - /* Will be replaced with tomoyo_load_builtin_policy(). */ - { - /* Load built-in policy. */ - tomoyo_write_transition_control("/sbin/hotplug", false, - TOMOYO_TRANSITION_CONTROL_INITIALIZE); - tomoyo_write_transition_control("/sbin/modprobe", false, - TOMOYO_TRANSITION_CONTROL_INITIALIZE); - } -#endif } -- cgit v1.2.3 From 0e4ae0e0dec634b2ae53ac57d14141b140467dbe Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 26 Jun 2011 23:22:59 +0900 Subject: TOMOYO: Make several options configurable. To be able to start using enforcing mode from the early stage of boot sequence, this patch adds support for activating access control without calling external policy loader program. This will be useful for systems where operations which can lead to the hijacking of the boot sequence are needed before loading the policy. For example, you can activate immediately after loading the fixed part of policy which will allow only operations needed for mounting a partition which contains the variant part of policy and verifying (e.g. running GPG check) and loading the variant part of policy. Since you can start using enforcing mode from the beginning, you can reduce the possibility of hijacking the boot sequence. This patch makes several variables configurable on build time. This patch also adds TOMOYO_loader= and TOMOYO_trigger= kernel command line option to boot the same kernel in two different init systems (BSD-style init and systemd). 
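For illustration, a boot command line for a systemd-based system that keeps the userspace loader enabled might look like the line below. The values are simply the defaults and examples named in this patch's Kconfig help text; the exact paths depend on the distribution:

    init=/bin/systemd TOMOYO_trigger=/bin/systemd TOMOYO_loader=/sbin/tomoyo-init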
Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/Kconfig | 61 ++++++++++++++++++++++++++++++++++ security/tomoyo/common.c | 3 ++ security/tomoyo/load_policy.c | 76 ++++++++++++++++++++++++++++++------------- 3 files changed, 117 insertions(+), 23 deletions(-) diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig index c8f385793235..7c7f8c16c10f 100644 --- a/security/tomoyo/Kconfig +++ b/security/tomoyo/Kconfig @@ -9,3 +9,64 @@ config SECURITY_TOMOYO Required userspace tools and further information may be found at . If you are unsure how to answer this question, answer N. + +config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY + int "Default maximal count for learning mode" + default 2048 + range 0 2147483647 + depends on SECURITY_TOMOYO + help + This is the default value for maximal ACL entries + that are automatically appended into policy at "learning mode". + Some programs access thousands of objects, so running + such programs in "learning mode" dulls the system response + and consumes much memory. + This is the safeguard for such programs. + +config SECURITY_TOMOYO_MAX_AUDIT_LOG + int "Default maximal count for audit log" + default 1024 + range 0 2147483647 + depends on SECURITY_TOMOYO + help + This is the default value for maximal entries for + audit logs that the kernel can hold on memory. + You can read the log via /sys/kernel/security/tomoyo/audit. + If you don't need audit logs, you may set this value to 0. + +config SECURITY_TOMOYO_OMIT_USERSPACE_LOADER + bool "Activate without calling userspace policy loader." + default n + depends on SECURITY_TOMOYO + ---help--- + Say Y here if you want to activate access control as soon as built-in + policy was loaded. This option will be useful for systems where + operations which can lead to the hijacking of the boot sequence are + needed before loading the policy. For example, you can activate + immediately after loading the fixed part of policy which will allow + only operations needed for mounting a partition which contains the + variant part of policy and verifying (e.g. running GPG check) and + loading the variant part of policy. Since you can start using + enforcing mode from the beginning, you can reduce the possibility of + hijacking the boot sequence. + +config SECURITY_TOMOYO_POLICY_LOADER + string "Location of userspace policy loader" + default "/sbin/tomoyo-init" + depends on SECURITY_TOMOYO + depends on !SECURITY_TOMOYO_OMIT_USERSPACE_LOADER + ---help--- + This is the default pathname of policy loader which is called before + activation. You can override this setting via TOMOYO_loader= kernel + command line option. + +config SECURITY_TOMOYO_ACTIVATION_TRIGGER + string "Trigger for calling userspace policy loader" + default "/sbin/init" + depends on SECURITY_TOMOYO + depends on !SECURITY_TOMOYO_OMIT_USERSPACE_LOADER + ---help--- + This is the default pathname of activation trigger. + You can override this setting via TOMOYO_trigger= kernel command line + option. For example, if you pass init=/bin/systemd option, you may + want to also pass TOMOYO_trigger=/bin/systemd option. 
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 01e60ad68b3a..8b14cef2338d 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -2420,4 +2420,7 @@ void __init tomoyo_load_builtin_policy(void) } } tomoyo_read_unlock(idx); +#ifdef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER + tomoyo_check_profile(); +#endif } diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c index 3312e5624f24..6a5463d26635 100644 --- a/security/tomoyo/load_policy.c +++ b/security/tomoyo/load_policy.c @@ -8,8 +8,27 @@ #include "common.h" -/* path to policy loader */ -static const char *tomoyo_loader = "/sbin/tomoyo-init"; +#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER + +/* + * Path to the policy loader. (default = CONFIG_SECURITY_TOMOYO_POLICY_LOADER) + */ +static const char *tomoyo_loader; + +/** + * tomoyo_loader_setup - Set policy loader. + * + * @str: Program to use as a policy loader (e.g. /sbin/tomoyo-init ). + * + * Returns 0. + */ +static int __init tomoyo_loader_setup(char *str) +{ + tomoyo_loader = str; + return 0; +} + +__setup("TOMOYO_loader=", tomoyo_loader_setup); /** * tomoyo_policy_loader_exists - Check whether /sbin/tomoyo-init exists. @@ -18,24 +37,38 @@ static const char *tomoyo_loader = "/sbin/tomoyo-init"; */ static bool tomoyo_policy_loader_exists(void) { - /* - * Don't activate MAC if the policy loader doesn't exist. - * If the initrd includes /sbin/init but real-root-dev has not - * mounted on / yet, activating MAC will block the system since - * policies are not loaded yet. - * Thus, let do_execve() call this function every time. - */ struct path path; - + if (!tomoyo_loader) + tomoyo_loader = CONFIG_SECURITY_TOMOYO_POLICY_LOADER; if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) { - printk(KERN_INFO "Not activating Mandatory Access Control now " - "since %s doesn't exist.\n", tomoyo_loader); + printk(KERN_INFO "Not activating Mandatory Access Control " + "as %s does not exist.\n", tomoyo_loader); return false; } path_put(&path); return true; } +/* + * Path to the trigger. (default = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER) + */ +static const char *tomoyo_trigger; + +/** + * tomoyo_trigger_setup - Set trigger for activation. + * + * @str: Program to use as an activation trigger (e.g. /sbin/init ). + * + * Returns 0. + */ +static int __init tomoyo_trigger_setup(char *str) +{ + tomoyo_trigger = str; + return 0; +} + +__setup("TOMOYO_trigger=", tomoyo_trigger_setup); + /** * tomoyo_load_policy - Run external policy loader to load policy. * @@ -51,24 +84,19 @@ static bool tomoyo_policy_loader_exists(void) */ void tomoyo_load_policy(const char *filename) { + static bool done; char *argv[2]; char *envp[3]; - if (tomoyo_policy_loaded) + if (tomoyo_policy_loaded || done) return; - /* - * Check filename is /sbin/init or /sbin/tomoyo-start. - * /sbin/tomoyo-start is a dummy filename in case where /sbin/init can't - * be passed. - * You can create /sbin/tomoyo-start by - * "ln -s /bin/true /sbin/tomoyo-start". - */ - if (strcmp(filename, "/sbin/init") && - strcmp(filename, "/sbin/tomoyo-start")) + if (!tomoyo_trigger) + tomoyo_trigger = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER; + if (strcmp(filename, tomoyo_trigger)) return; if (!tomoyo_policy_loader_exists()) return; - + done = true; printk(KERN_INFO "Calling %s to load policy. 
Please wait.\n", tomoyo_loader); argv[0] = (char *) tomoyo_loader; @@ -79,3 +107,5 @@ void tomoyo_load_policy(const char *filename) call_usermodehelper(argv[0], argv, envp, 1); tomoyo_check_profile(); } + +#endif -- cgit v1.2.3 From 04fdc099f9c80c7775dbac388fc97e156d4d47e7 Mon Sep 17 00:00:00 2001 From: John Johansen Date: Tue, 28 Jun 2011 15:06:38 +0100 Subject: AppArmor: Fix reference to rcu protected pointer outside of rcu_read_lock The pointer returned from tracehook_tracer_task() is only valid inside the rcu_read_lock. However the tracer pointer obtained is being passed to aa_may_ptrace outside of the rcu_read_lock critical section. Mover the aa_may_ptrace test into the rcu_read_lock critical section, to fix this. Kernels affected: 2.6.36 - 3.0 Reported-by: Oleg Nesterov Cc: stable@kernel.org Signed-off-by: John Johansen --- security/apparmor/domain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index c825c6e0b636..78adc4303efa 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -73,7 +73,6 @@ static int may_change_ptraced_domain(struct task_struct *task, cred = get_task_cred(tracer); tracerp = aa_cred_profile(cred); } - rcu_read_unlock(); /* not ptraced */ if (!tracer || unconfined(tracerp)) @@ -82,6 +81,7 @@ static int may_change_ptraced_domain(struct task_struct *task, error = aa_may_ptrace(tracer, tracerp, to_profile, PTRACE_MODE_ATTACH); out: + rcu_read_unlock(); if (cred) put_cred(cred); -- cgit v1.2.3 From 25e75dff519bcce2cb35023105e7df51d7b9e691 Mon Sep 17 00:00:00 2001 From: John Johansen Date: Sat, 25 Jun 2011 16:57:07 +0100 Subject: AppArmor: Fix masking of capabilities in complain mode AppArmor is masking the capabilities returned by capget against the capabilities mask in the profile. This is wrong, in complain mode the profile has effectively all capabilities, as the profile restrictions are not being enforced, merely tested against to determine if an access is known by the profile. This can result in the wrong behavior of security conscience applications like sshd which examine their capability set, and change their behavior accordingly. In this case because of the masked capability set being returned sshd fails due to DAC checks, even when the profile is in complain mode. Kernels affected: 2.6.36 - 3.0. 
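For context, the affected check is a userspace program inspecting its own capability sets. The sketch below is an illustration only (it is not taken from sshd or from this patch) and assumes libcap is available; with the old behaviour, a complain-mode profile would still mask the reported bits, so a check like this could wrongly take the unprivileged path:

    /* Illustrative only: query the effective capability set via libcap
     * (which wraps capget()), i.e. the data apparmor_capget() was
     * masking even in complain mode.  Build with -lcap. */
    #include <stdio.h>
    #include <sys/capability.h>

    int main(void)
    {
            cap_t caps = cap_get_proc();
            cap_flag_value_t setuid_eff = CAP_CLEAR;

            if (!caps)
                    return 1;
            cap_get_flag(caps, CAP_SETUID, CAP_EFFECTIVE, &setuid_eff);
            printf("CAP_SETUID effective: %s\n",
                   setuid_eff == CAP_SET ? "yes" : "no");
            cap_free(caps);
            return 0;
    }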
Signed-off-by: John Johansen --- security/apparmor/lsm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 3d2fd141dff7..37832026e58a 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -127,7 +127,7 @@ static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective, *inheritable = cred->cap_inheritable; *permitted = cred->cap_permitted; - if (!unconfined(profile)) { + if (!unconfined(profile) && !COMPLAIN_MODE(profile)) { *effective = cap_intersect(*effective, profile->caps.allow); *permitted = cap_intersect(*permitted, profile->caps.allow); } -- cgit v1.2.3 From 1638207910019368253fc4c4a930c49ce2e98432 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Tue, 28 Jun 2011 14:23:30 -0700 Subject: Input: gpio_keys - fix a memory leak Signed-off-by: Axel Lin Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/gpio_keys.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 320b59ab8902..97bada4b680d 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -569,6 +569,7 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev) } input_unregister_device(input); + kfree(ddata); return 0; } -- cgit v1.2.3 From 7c40952295db64867a45938b860a217b622cc3ed Mon Sep 17 00:00:00 2001 From: Hans-Christian Egtvedt Date: Wed, 29 Jun 2011 00:13:26 -0700 Subject: Input: update author email for gpio_mouse, at32psif, and atmel-wm97xx This patch updates the email address of the gpio_mouse, at32psif, and atmel-wm97xx drivers supported by me to an email account I will use on a more regular basis in the future. Signed-off-by: Hans-Christian Egtvedt Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/gpio_mouse.c | 2 +- drivers/input/serio/at32psif.c | 2 +- drivers/input/touchscreen/atmel-wm97xx.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c index 7b6ce178f1b6..58902fbb9896 100644 --- a/drivers/input/mouse/gpio_mouse.c +++ b/drivers/input/mouse/gpio_mouse.c @@ -191,7 +191,7 @@ static void __exit gpio_mouse_exit(void) } module_exit(gpio_mouse_exit); -MODULE_AUTHOR("Hans-Christian Egtvedt "); +MODULE_AUTHOR("Hans-Christian Egtvedt "); MODULE_DESCRIPTION("GPIO mouse driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */ diff --git a/drivers/input/serio/at32psif.c b/drivers/input/serio/at32psif.c index 6ee8f0ddad51..95280f9207e1 100644 --- a/drivers/input/serio/at32psif.c +++ b/drivers/input/serio/at32psif.c @@ -372,6 +372,6 @@ static void __exit psif_exit(void) module_init(psif_init); module_exit(psif_exit); -MODULE_AUTHOR("Hans-Christian Egtvedt "); +MODULE_AUTHOR("Hans-Christian Egtvedt "); MODULE_DESCRIPTION("Atmel AVR32 PSIF PS/2 driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c index fa8e56bd9094..c804189ccc3b 100644 --- a/drivers/input/touchscreen/atmel-wm97xx.c +++ b/drivers/input/touchscreen/atmel-wm97xx.c @@ -442,6 +442,6 @@ static void __exit atmel_wm97xx_exit(void) } module_exit(atmel_wm97xx_exit); -MODULE_AUTHOR("Hans-Christian Egtvedt "); +MODULE_AUTHOR("Hans-Christian Egtvedt "); MODULE_DESCRIPTION("wm97xx continuous touch driver for Atmel AT91 and AVR32"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 631b16e81eab82e2894425a94c3fc14bf21adb26 Mon Sep 17 00:00:00 
2001 From: Joseph Lai Date: Mon, 27 Jun 2011 13:26:53 -0700 Subject: Input: add a driver to support InvenSense mpu3050 gyroscope chip This driver is registered as an input device. An IRQ is required in this basic driver configuration. Signed-off-by: Joseph Lai [Cleaned up PM_RUNTIME defines] Signed-off-by: Alan Cox [dtor@mail.ru: consolidated PM methods, some code rearrangement] Signed-off-by: Dmitry Torokhov --- drivers/input/misc/Kconfig | 10 ++ drivers/input/misc/Makefile | 1 + drivers/input/misc/mpu3050.c | 376 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 387 insertions(+) create mode 100644 drivers/input/misc/mpu3050.c diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 0f22918ad9ce..6fdce4b86856 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -111,6 +111,16 @@ config INPUT_MMA8450 To compile this driver as a module, choose M here: the module will be called mma8450. +config INPUT_MPU3050 + tristate "MPU3050 Triaxial gyroscope sensor" + depends on I2C + help + Say Y here if you want to support InvenSense MPU3050 + connected via an I2C bus. + + To compile this driver as a module, choose M here: the + module will be called mpu3050. + config INPUT_APANEL tristate "Fujitsu Lifebook Application Panel buttons" depends on X86 && I2C && LEDS_CLASS diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 99953c3c442c..b7e227aa117a 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o obj-$(CONFIG_INPUT_MMA8450) += mma8450.o +obj-$(CONFIG_INPUT_MPU3050) += mpu3050.o obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c new file mode 100644 index 000000000000..b95fac15b2ea --- /dev/null +++ b/drivers/input/misc/mpu3050.c @@ -0,0 +1,376 @@ +/* + * MPU3050 Tri-axis gyroscope driver + * + * Copyright (C) 2011 Wistron Co.Ltd + * Joseph Lai + * + * Trimmed down by Alan Cox to produce this version + * + * This is a 'lite' version of the driver, while we consider the right way + * to present the other features to user space. In particular it requires the + * device has an IRQ, and it only provides an input interface, so is not much + * use for device orientation. A fuller version is available from the Meego + * tree. + * + * This program is based on bma023.c. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MPU3050_CHIP_ID_REG 0x00 +#define MPU3050_CHIP_ID 0x69 +#define MPU3050_XOUT_H 0x1D +#define MPU3050_PWR_MGM 0x3E +#define MPU3050_PWR_MGM_POS 6 +#define MPU3050_PWR_MGM_MASK 0x40 + +#define MPU3050_AUTO_DELAY 1000 + +#define MPU3050_MIN_VALUE -32768 +#define MPU3050_MAX_VALUE 32767 + +struct axis_data { + s16 x; + s16 y; + s16 z; +}; + +struct mpu3050_sensor { + struct i2c_client *client; + struct device *dev; + struct input_dev *idev; +}; + +/** + * mpu3050_xyz_read_reg - read the axes values + * @buffer: provide register addr and get register + * @length: length of register + * + * Reads the register values in one transaction or returns a negative + * error code on failure. + */ +static int mpu3050_xyz_read_reg(struct i2c_client *client, + u8 *buffer, int length) +{ + /* + * Annoying we can't make this const because the i2c layer doesn't + * declare input buffers const. + */ + char cmd = MPU3050_XOUT_H; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = 1, + .buf = &cmd, + }, + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = buffer, + }, + }; + + return i2c_transfer(client->adapter, msg, 2); +} + +/** + * mpu3050_read_xyz - get co-ordinates from device + * @client: i2c address of sensor + * @coords: co-ordinates to update + * + * Return the converted X Y and Z co-ordinates from the sensor device + */ +static void mpu3050_read_xyz(struct i2c_client *client, + struct axis_data *coords) +{ + u16 buffer[3]; + + mpu3050_xyz_read_reg(client, (u8 *)buffer, 6); + coords->x = be16_to_cpu(buffer[0]); + coords->y = be16_to_cpu(buffer[1]); + coords->z = be16_to_cpu(buffer[2]); + dev_dbg(&client->dev, "%s: x %d, y %d, z %d\n", __func__, + coords->x, coords->y, coords->z); +} + +/** + * mpu3050_set_power_mode - set the power mode + * @client: i2c client for the sensor + * @val: value to switch on/off of power, 1: normal power, 0: low power + * + * Put device to normal-power mode or low-power mode. + */ +static void mpu3050_set_power_mode(struct i2c_client *client, u8 val) +{ + u8 value; + + value = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM); + value = (value & ~MPU3050_PWR_MGM_MASK) | + (((val << MPU3050_PWR_MGM_POS) & MPU3050_PWR_MGM_MASK) ^ + MPU3050_PWR_MGM_MASK); + i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, value); +} + +/** + * mpu3050_input_open - called on input event open + * @input: input dev of opened device + * + * The input layer calls this function when input event is opened. The + * function will push the device to resume. Then, the device is ready + * to provide data. + */ +static int mpu3050_input_open(struct input_dev *input) +{ + struct mpu3050_sensor *sensor = input_get_drvdata(input); + + pm_runtime_get(sensor->dev); + + return 0; +} + +/** + * mpu3050_input_close - called on input event close + * @input: input dev of closed device + * + * The input layer calls this function when input event is closed. The + * function will push the device to suspend. + */ +static void mpu3050_input_close(struct input_dev *input) +{ + struct mpu3050_sensor *sensor = input_get_drvdata(input); + + pm_runtime_put(sensor->dev); +} + +/** + * mpu3050_interrupt_thread - handle an IRQ + * @irq: interrupt numner + * @data: the sensor + * + * Called by the kernel single threaded after an interrupt occurs. Read + * the sensor data and generate an input event for it. 
+ */ +static irqreturn_t mpu3050_interrupt_thread(int irq, void *data) +{ + struct mpu3050_sensor *sensor = data; + struct axis_data axis; + + mpu3050_read_xyz(sensor->client, &axis); + + input_report_abs(sensor->idev, ABS_X, axis.x); + input_report_abs(sensor->idev, ABS_Y, axis.y); + input_report_abs(sensor->idev, ABS_Z, axis.z); + input_sync(sensor->idev); + + return IRQ_HANDLED; +} + +/** + * mpu3050_probe - device detection callback + * @client: i2c client of found device + * @id: id match information + * + * The I2C layer calls us when it believes a sensor is present at this + * address. Probe to see if this is correct and to validate the device. + * + * If present install the relevant sysfs interfaces and input device. + */ +static int __devinit mpu3050_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct mpu3050_sensor *sensor; + struct input_dev *idev; + int ret; + int error; + + sensor = kzalloc(sizeof(struct mpu3050_sensor), GFP_KERNEL); + idev = input_allocate_device(); + if (!sensor || !idev) { + dev_err(&client->dev, "failed to allocate driver data\n"); + error = -ENOMEM; + goto err_free_mem; + } + + sensor->client = client; + sensor->dev = &client->dev; + sensor->idev = idev; + + mpu3050_set_power_mode(client, 1); + msleep(10); + + ret = i2c_smbus_read_byte_data(client, MPU3050_CHIP_ID_REG); + if (ret < 0) { + dev_err(&client->dev, "failed to detect device\n"); + error = -ENXIO; + goto err_free_mem; + } + + if (ret != MPU3050_CHIP_ID) { + dev_err(&client->dev, "unsupported chip id\n"); + error = -ENXIO; + goto err_free_mem; + } + + idev->name = "MPU3050"; + idev->id.bustype = BUS_I2C; + idev->dev.parent = &client->dev; + + idev->open = mpu3050_input_open; + idev->close = mpu3050_input_close; + + __set_bit(EV_ABS, idev->evbit); + input_set_abs_params(idev, ABS_X, + MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0); + input_set_abs_params(idev, ABS_Y, + MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0); + input_set_abs_params(idev, ABS_Z, + MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0); + + input_set_drvdata(idev, sensor); + + pm_runtime_set_active(&client->dev); + + error = request_threaded_irq(client->irq, + NULL, mpu3050_interrupt_thread, + IRQF_TRIGGER_RISING, + "mpu_int", sensor); + if (error) { + dev_err(&client->dev, + "can't get IRQ %d, error %d\n", client->irq, error); + goto err_pm_set_suspended; + } + + error = input_register_device(idev); + if (error) { + dev_err(&client->dev, "failed to register input device\n"); + goto err_free_irq; + } + + pm_runtime_enable(&client->dev); + pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY); + + return 0; + +err_free_irq: + free_irq(client->irq, sensor); +err_pm_set_suspended: + pm_runtime_set_suspended(&client->dev); +err_free_mem: + input_unregister_device(idev); + kfree(sensor); + return error; +} + +/** + * mpu3050_remove - remove a sensor + * @client: i2c client of sensor being removed + * + * Our sensor is going away, clean up the resources. + */ +static int __devexit mpu3050_remove(struct i2c_client *client) +{ + struct mpu3050_sensor *sensor = i2c_get_clientdata(client); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + + free_irq(client->irq, sensor); + input_unregister_device(sensor->idev); + kfree(sensor); + + return 0; +} + +#ifdef CONFIG_PM +/** + * mpu3050_suspend - called on device suspend + * @dev: device being suspended + * + * Put the device into sleep mode before we suspend the machine. 
+ */ +static int mpu3050_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + mpu3050_set_power_mode(client, 0); + + return 0; +} + +/** + * mpu3050_resume - called on device resume + * @dev: device being resumed + * + * Put the device into powered mode on resume. + */ +static int mpu3050_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + mpu3050_set_power_mode(client, 1); + msleep(100); /* wait for gyro chip resume */ + + return 0; +} +#endif + +static UNIVERSAL_DEV_PM_OPS(mpu3050_pm, mpu3050_suspend, mpu3050_resume, NULL); + +static const struct i2c_device_id mpu3050_ids[] = { + { "mpu3050", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, mpu3050_ids); + +static struct i2c_driver mpu3050_i2c_driver = { + .driver = { + .name = "mpu3050", + .owner = THIS_MODULE, + .pm = &mpu3050_pm, + }, + .probe = mpu3050_probe, + .remove = __devexit_p(mpu3050_remove), + .id_table = mpu3050_ids, +}; + +static int __init mpu3050_init(void) +{ + return i2c_add_driver(&mpu3050_i2c_driver); +} +module_init(mpu3050_init); + +static void __exit mpu3050_exit(void) +{ + i2c_del_driver(&mpu3050_i2c_driver); +} +module_exit(mpu3050_exit); + +MODULE_AUTHOR("Wistron Corp."); +MODULE_DESCRIPTION("MPU3050 Tri-axis gyroscope driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From c7b4a5d58bffdf3aa7f923319643af0ebf925515 Mon Sep 17 00:00:00 2001 From: Jiejing Zhang Date: Wed, 29 Jun 2011 01:28:21 -0700 Subject: Input: mpr121 - improve sensibility of touch key The Quick Charge bit in Electrode conf register should be set in init function. This bit was missed in chip's document, which may cause touch controller charge too slow to generate an interrupt. Also, adjust the default vlaue of touch and release threshold to make touch key more sensitive, this fix touch may not sensitive after setup with plastic case. Signed-off-by: Jiejing Zhang Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/mpr121_touchkey.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c index 0a9e81194888..1c1615d9a7f9 100644 --- a/drivers/input/keyboard/mpr121_touchkey.c +++ b/drivers/input/keyboard/mpr121_touchkey.c @@ -43,14 +43,15 @@ * enabled capacitance sensing inputs and its run/suspend mode. 
*/ #define ELECTRODE_CONF_ADDR 0x5e +#define ELECTRODE_CONF_QUICK_CHARGE 0x80 #define AUTO_CONFIG_CTRL_ADDR 0x7b #define AUTO_CONFIG_USL_ADDR 0x7d #define AUTO_CONFIG_LSL_ADDR 0x7e #define AUTO_CONFIG_TL_ADDR 0x7f /* Threshold of touch/release trigger */ -#define TOUCH_THRESHOLD 0x0f -#define RELEASE_THRESHOLD 0x0a +#define TOUCH_THRESHOLD 0x08 +#define RELEASE_THRESHOLD 0x05 /* Masks for touch and release triggers */ #define TOUCH_STATUS_MASK 0xfff /* MPR121 has 12 keys */ @@ -127,7 +128,7 @@ static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata, struct i2c_client *client) { const struct mpr121_init_register *reg; - unsigned char usl, lsl, tl; + unsigned char usl, lsl, tl, eleconf; int i, t, vdd, ret; /* Set up touch/release threshold for ele0-ele11 */ @@ -163,8 +164,15 @@ static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata, ret = i2c_smbus_write_byte_data(client, AUTO_CONFIG_USL_ADDR, usl); ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_LSL_ADDR, lsl); ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_TL_ADDR, tl); + + /* + * Quick charge bit will let the capacitive charge to ready + * state quickly, or the buttons may not function after system + * boot. + */ + eleconf = mpr121->keycount | ELECTRODE_CONF_QUICK_CHARGE; ret |= i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR, - mpr121->keycount); + eleconf); if (ret != 0) goto err_i2c_write; -- cgit v1.2.3 From 3e0dc6b01f5301d63046f6deddde2c7f5c57d67a Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 29 Jun 2011 10:26:42 -0700 Subject: drm/i915: hangcheck disable parameter Provide a parameter to disable hanghcheck. This is useful mostly for developers trying to debug known problems, and probably should not be touched by normal users. 
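Because the parameter is registered with mode 0644, it can be changed at runtime as well as at load time. Assuming the i915 driver is loaded, usage would look roughly like:

    # disable GPU hang checking at runtime (root, debug use only)
    echo 0 > /sys/module/i915/parameters/enable_hangcheck

    # or at boot / module load time
    i915.enable_hangcheck=0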
Reviewed-by: Chris Wilson Signed-off-by: Ben Widawsky Signed-off-by: Keith Packard --- drivers/gpu/drm/i915/i915_drv.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 7 +++++-- drivers/gpu/drm/i915/i915_irq.c | 13 +++++++++---- 4 files changed, 18 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 609358faaa90..b54f7d9b173a 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -70,6 +70,9 @@ module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); static bool i915_try_reset = true; module_param_named(reset, i915_try_reset, bool, 0600); +bool i915_enable_hangcheck = true; +module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644); + static struct drm_driver driver; extern int intel_agp_enabled; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 01affb63be29..e0f9ca3e5ff8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -994,6 +994,7 @@ extern unsigned int i915_panel_use_ssc; extern int i915_vbt_sdvo_panel_type; extern unsigned int i915_enable_rc6; extern unsigned int i915_enable_fbc; +extern bool i915_enable_hangcheck; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 71cc8a353a78..8b670e7ee404 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1779,8 +1779,11 @@ i915_add_request(struct intel_ring_buffer *ring, ring->outstanding_lazy_request = false; if (!dev_priv->mm.suspended) { - mod_timer(&dev_priv->hangcheck_timer, - jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); + if (i915_enable_hangcheck) { + mod_timer(&dev_priv->hangcheck_timer, + jiffies + + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); + } if (was_empty) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ae2b49969b99..0b0de5239ad5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -361,10 +361,12 @@ static void notify_ring(struct drm_device *dev, ring->irq_seqno = seqno; wake_up_all(&ring->irq_queue); - - dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, - jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); + if (i915_enable_hangcheck) { + dev_priv->hangcheck_count = 0; + mod_timer(&dev_priv->hangcheck_timer, + jiffies + + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); + } } static void gen6_pm_rps_work(struct work_struct *work) @@ -1664,6 +1666,9 @@ void i915_hangcheck_elapsed(unsigned long data) uint32_t acthd, instdone, instdone1; bool err = false; + if (!i915_enable_hangcheck) + return; + /* If all work is done then ACTHD clearly hasn't advanced. */ if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && -- cgit v1.2.3 From 1c70c0cebd1295a42fec75045b8a6b4419cedef3 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 29 Jun 2011 13:34:36 -0700 Subject: drm/i915: enable ring freq scaling, RC6 and graphics turbo on Ivy Bridge v3 They use the same register interfaces, so we can simply enable the existing code on IVB. 
v2: - resolve conflict with ring freq scaling, we can enable it too v3: - resolve conflict again, this time on drm-intel-next Signed-off-by: Jesse Barnes Signed-off-by: Keith Packard --- drivers/gpu/drm/i915/i915_debugfs.c | 4 ++-- drivers/gpu/drm/i915/intel_display.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 8a5a032ec696..ed627307ab51 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) MEMSTAT_VID_SHIFT); seq_printf(m, "Current P-state: %d\n", (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); - } else if (IS_GEN6(dev)) { + } else if (IS_GEN6(dev) || IS_GEN7(dev)) { u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); @@ -1131,7 +1131,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) int ret; int gpu_freq, ia_freq; - if (!IS_GEN6(dev)) { + if (!(IS_GEN6(dev) || IS_GEN7(dev))) { seq_printf(m, "unsupported on this chipset\n"); return 0; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 804ac4d6cb48..823b8d99d9e6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7970,7 +7970,7 @@ void intel_modeset_init(struct drm_device *dev) intel_init_emon(dev); } - if (IS_GEN6(dev)) { + if (IS_GEN6(dev) || IS_GEN7(dev)) { gen6_enable_rps(dev_priv); gen6_update_ring_freq(dev_priv); } @@ -8014,7 +8014,7 @@ void intel_modeset_cleanup(struct drm_device *dev) if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); - if (IS_GEN6(dev)) + if (IS_GEN6(dev) || IS_GEN7(dev)) gen6_disable_rps(dev); if (IS_IRONLAKE_M(dev)) -- cgit v1.2.3 From 275d3ba6b40d0f098693b9089c6fee9bd4e55d74 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 29 Jun 2011 21:44:45 -0400 Subject: ext4: remove loop around bio_alloc() These days, bio_alloc() is guaranteed to never fail (as long as nvecs is less than BIO_MAX_PAGES), so we don't need the loop around the struct bio allocation. Signed-off-by: "Theodore Ts'o" --- fs/ext4/page-io.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 7bb8f76d470a..430c401d0895 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -285,11 +285,7 @@ static int io_submit_init(struct ext4_io_submit *io, io_end = ext4_init_io_end(inode, GFP_NOFS); if (!io_end) return -ENOMEM; - do { - bio = bio_alloc(GFP_NOIO, nvecs); - nvecs >>= 1; - } while (bio == NULL); - + bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_bdev = bh->b_bdev; bio->bi_private = io->io_end = io_end; -- cgit v1.2.3 From 7986cf28bc5050967a7056d6eadda7f16f84eaab Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 29 Jun 2011 13:07:52 +0900 Subject: TOMOYO: Fix build error with CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER=y . I forgot to add #ifndef in commit 0e4ae0e0 "TOMOYO: Make several options configurable.", resulting security/built-in.o: In function `tomoyo_bprm_set_creds': tomoyo.c:(.text+0x4698e): undefined reference to `tomoyo_load_policy' error. 
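An alternative way to avoid this class of build error, shown here only as a sketch and not as what the tree actually does, is to provide a no-op stub when the loader is compiled out so that call sites need no #ifdef:

    /* Hypothetical header stub; the actual fix below guards the call
     * site in tomoyo_bprm_set_creds() instead. */
    #ifdef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
    static inline void tomoyo_load_policy(const char *filename)
    {
    }
    #else
    void tomoyo_load_policy(const char *filename);
    #endif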
Reported-by: Stephen Rothwell Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/tomoyo.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c index 2615c7d43960..d6f68a0ec2dc 100644 --- a/security/tomoyo/tomoyo.c +++ b/security/tomoyo/tomoyo.c @@ -51,12 +51,14 @@ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm) */ if (bprm->cred_prepared) return 0; +#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER /* * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested * for the first time. */ if (!tomoyo_policy_loaded) tomoyo_load_policy(bprm->filename); +#endif /* * Release reference to "struct tomoyo_domain_info" stored inside * "bprm->cred->security". New reference to "struct tomoyo_domain_info" -- cgit v1.2.3 From 3a6297abf3b179ae19b849e429841a7646711b70 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 29 Jun 2011 14:17:31 +0900 Subject: TOMOYO: Update MAINTAINERS file. Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index d2dcef7cd9b2..bb179a861a38 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6207,7 +6207,7 @@ L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for users in English) L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese) L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese) W: http://tomoyo.sourceforge.jp/ -T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.3.x/tomoyo-lsm/patches/ +T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.4.x/tomoyo-lsm/patches/ S: Maintained F: security/tomoyo/ -- cgit v1.2.3 From 3ddf17f08cf2f0d7ff06858eb07d1cc3db8994de Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 29 Jun 2011 14:22:37 +0900 Subject: TOMOYO: Cleanup header file. Sort by alphabetic order. Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/common.h | 213 +++++++++++++++++++++-------------------------- 1 file changed, 96 insertions(+), 117 deletions(-) diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index a15fe29740a4..465e34bd4eb9 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -585,165 +585,144 @@ struct tomoyo_policy_namespace { /********** Function prototypes. 
**********/ -void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns); -bool tomoyo_str_starts(char **src, const char *find); -const char *tomoyo_get_exe(void); -void tomoyo_normalize_line(unsigned char *buffer); -void tomoyo_check_profile(void); -int tomoyo_open_control(const u8 type, struct file *file); -int tomoyo_close_control(struct tomoyo_io_buffer *head); -int tomoyo_poll_control(struct file *file, poll_table *wait); -ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, - const int buffer_len); -ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, - const char __user *buffer, const int buffer_len); -bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r); -void tomoyo_warn_oom(const char *function); -const struct tomoyo_path_info * -tomoyo_compare_name_union(const struct tomoyo_path_info *name, - const struct tomoyo_name_union *ptr); bool tomoyo_compare_number_union(const unsigned long value, const struct tomoyo_number_union *ptr); -int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile, - const u8 index); -void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) - __attribute__ ((format(printf, 2, 3))); bool tomoyo_correct_domain(const unsigned char *domainname); bool tomoyo_correct_path(const char *filename); bool tomoyo_correct_word(const char *string); bool tomoyo_domain_def(const unsigned char *buffer); -bool tomoyo_parse_name_union(struct tomoyo_acl_param *param, - struct tomoyo_name_union *ptr); -const struct tomoyo_path_info * -tomoyo_path_matches_group(const struct tomoyo_path_info *pathname, - const struct tomoyo_group *group); +bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r); +bool tomoyo_memory_ok(void *ptr); bool tomoyo_number_matches_group(const unsigned long min, const unsigned long max, const struct tomoyo_group *group); -bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename, - const struct tomoyo_path_info *pattern); +bool tomoyo_parse_name_union(struct tomoyo_acl_param *param, + struct tomoyo_name_union *ptr); bool tomoyo_parse_number_union(struct tomoyo_acl_param *param, struct tomoyo_number_union *ptr); -bool tomoyo_tokenize(char *buffer, char *w[], size_t size); -bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain); +bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename, + const struct tomoyo_path_info *pattern); +bool tomoyo_permstr(const char *string, const char *keyword); +bool tomoyo_str_starts(char **src, const char *find); +char *tomoyo_encode(const char *str); +char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt, + va_list args); +char *tomoyo_read_token(struct tomoyo_acl_param *param); +char *tomoyo_realpath_from_path(struct path *path); +char *tomoyo_realpath_nofollow(const char *pathname); +const char *tomoyo_get_exe(void); +const char *tomoyo_yesno(const unsigned int value); +const struct tomoyo_path_info *tomoyo_compare_name_union +(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr); +const struct tomoyo_path_info *tomoyo_get_name(const char *name); +const struct tomoyo_path_info *tomoyo_path_matches_group +(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group); +int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, + struct path *path, const int flag); +int tomoyo_close_control(struct tomoyo_io_buffer *head); +int tomoyo_find_next_domain(struct linux_binprm *bprm); +int tomoyo_get_mode(const struct 
tomoyo_policy_namespace *ns, const u8 profile, + const u8 index); int tomoyo_init_request_info(struct tomoyo_request_info *r, struct tomoyo_domain_info *domain, const u8 index); +int tomoyo_mkdev_perm(const u8 operation, struct path *path, + const unsigned int mode, unsigned int dev); int tomoyo_mount_permission(char *dev_name, struct path *path, const char *type, unsigned long flags, void *data_page); +int tomoyo_open_control(const u8 type, struct file *file); +int tomoyo_path2_perm(const u8 operation, struct path *path1, + struct path *path2); +int tomoyo_path_number_perm(const u8 operation, struct path *path, + unsigned long number); +int tomoyo_path_perm(const u8 operation, struct path *path); +int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, + const struct tomoyo_path_info *filename); +int tomoyo_poll_control(struct file *file, poll_table *wait); +int tomoyo_poll_log(struct file *file, poll_table *wait); +int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) + __printf(2, 3); +int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, + struct tomoyo_acl_param *param, + bool (*check_duplicate) + (const struct tomoyo_acl_info *, + const struct tomoyo_acl_info *), + bool (*merge_duplicate) + (struct tomoyo_acl_info *, struct tomoyo_acl_info *, + const bool)); +int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, + struct tomoyo_acl_param *param, + bool (*check_duplicate) + (const struct tomoyo_acl_head *, + const struct tomoyo_acl_head *)); int tomoyo_write_aggregator(struct tomoyo_acl_param *param); -int tomoyo_write_transition_control(struct tomoyo_acl_param *param, - const u8 type); int tomoyo_write_file(struct tomoyo_acl_param *param); int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type); -int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) 
- __attribute__ ((format(printf, 2, 3))); -struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname); +int tomoyo_write_transition_control(struct tomoyo_acl_param *param, + const u8 type); +ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, + const int buffer_len); +ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, + const char __user *buffer, const int buffer_len); struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname, const bool transit); -struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns, - const u8 profile); -struct tomoyo_policy_namespace *tomoyo_assign_namespace -(const char *domainname); +struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname); struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param, const u8 idx); +struct tomoyo_policy_namespace *tomoyo_assign_namespace +(const char *domainname); +struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns, + const u8 profile); unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain, const u8 index); -void tomoyo_fill_path_info(struct tomoyo_path_info *ptr); -void tomoyo_load_policy(const char *filename); -void tomoyo_put_number_union(struct tomoyo_number_union *ptr); -char *tomoyo_encode(const char *str); -char *tomoyo_realpath_nofollow(const char *pathname); -char *tomoyo_realpath_from_path(struct path *path); -bool tomoyo_memory_ok(void *ptr); void *tomoyo_commit_ok(void *data, const unsigned int size); -const struct tomoyo_path_info *tomoyo_get_name(const char *name); -void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp); -void tomoyo_update_stat(const u8 index); -void __init tomoyo_mm_init(void); void __init tomoyo_load_builtin_policy(void); -int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, - const struct tomoyo_path_info *filename); -int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, - struct path *path, const int flag); -int tomoyo_path_number_perm(const u8 operation, struct path *path, - unsigned long number); -int tomoyo_mkdev_perm(const u8 operation, struct path *path, - const unsigned int mode, unsigned int dev); -int tomoyo_path_perm(const u8 operation, struct path *path); -int tomoyo_path2_perm(const u8 operation, struct path *path1, - struct path *path2); -int tomoyo_find_next_domain(struct linux_binprm *bprm); -void tomoyo_print_ulong(char *buffer, const int buffer_len, - const unsigned long value, const u8 type); -void tomoyo_put_name_union(struct tomoyo_name_union *ptr); -void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register); -void tomoyo_memory_free(void *ptr); -int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, - struct tomoyo_acl_param *param, - bool (*check_duplicate) (const struct tomoyo_acl_info - *, - const struct tomoyo_acl_info - *), - bool (*merge_duplicate) (struct tomoyo_acl_info *, - struct tomoyo_acl_info *, - const bool)); -int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, - struct tomoyo_acl_param *param, - bool (*check_duplicate) (const struct tomoyo_acl_head - *, - const struct tomoyo_acl_head - *)); +void __init tomoyo_mm_init(void); void tomoyo_check_acl(struct tomoyo_request_info *r, bool (*check_entry) (struct tomoyo_request_info *, const struct tomoyo_acl_info *)); -char *tomoyo_read_token(struct tomoyo_acl_param *param); -bool tomoyo_permstr(const char *string, const char *keyword); - -const char 
*tomoyo_yesno(const unsigned int value); +void tomoyo_check_profile(void); +void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp); +void tomoyo_fill_path_info(struct tomoyo_path_info *ptr); +void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns); +void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) + __printf(2, 3); +void tomoyo_load_policy(const char *filename); +void tomoyo_memory_free(void *ptr); +void tomoyo_normalize_line(unsigned char *buffer); +void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register); +void tomoyo_print_ulong(char *buffer, const int buffer_len, + const unsigned long value, const u8 type); +void tomoyo_put_name_union(struct tomoyo_name_union *ptr); +void tomoyo_put_number_union(struct tomoyo_number_union *ptr); +void tomoyo_read_log(struct tomoyo_io_buffer *head); +void tomoyo_update_stat(const u8 index); +void tomoyo_warn_oom(const char *function); void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...) - __attribute__ ((format(printf, 2, 3))); + __printf(2, 3); void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt, va_list args); -void tomoyo_read_log(struct tomoyo_io_buffer *head); -int tomoyo_poll_log(struct file *file, poll_table *wait); -char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt, - va_list args); /********** External variable definitions. **********/ -/* Lock for GC. */ -extern struct srcu_struct tomoyo_ss; - -/* The list for "struct tomoyo_domain_info". */ -extern struct list_head tomoyo_domain_list; - -extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH]; - -/* Lock for protecting policy. */ -extern struct mutex tomoyo_policy_lock; - -/* Has /sbin/init started? */ extern bool tomoyo_policy_loaded; - -/* The kernel's domain. 
*/ -extern struct tomoyo_domain_info tomoyo_kernel_domain; -extern struct tomoyo_policy_namespace tomoyo_kernel_namespace; -extern struct list_head tomoyo_namespace_list; - -extern const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX + - TOMOYO_MAX_MAC_CATEGORY_INDEX]; +extern const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS]; +extern const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX + + TOMOYO_MAX_MAC_CATEGORY_INDEX]; +extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE]; extern const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION]; extern const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX]; - - +extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION]; extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION]; extern const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION]; -extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION]; - -extern const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS]; -extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE]; +extern struct list_head tomoyo_domain_list; +extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH]; +extern struct list_head tomoyo_namespace_list; +extern struct mutex tomoyo_policy_lock; +extern struct srcu_struct tomoyo_ss; +extern struct tomoyo_domain_info tomoyo_kernel_domain; +extern struct tomoyo_policy_namespace tomoyo_kernel_namespace; extern unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT]; extern unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT]; -- cgit v1.2.3 From ad599f9cf0187e823bc92bc83f3867a38fa266b9 Mon Sep 17 00:00:00 2001 From: Mimi Zohar Date: Wed, 29 Jun 2011 14:53:56 -0400 Subject: encrypted-keys: move ecryptfs documentation to proper location Move keys-ecryptfs.txt to Documentation/security. Signed-off-by: Mimi Zohar Signed-off-by: James Morris --- Documentation/keys-ecryptfs.txt | 68 -------------------------------- Documentation/security/keys-ecryptfs.txt | 68 ++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 68 deletions(-) delete mode 100644 Documentation/keys-ecryptfs.txt create mode 100644 Documentation/security/keys-ecryptfs.txt diff --git a/Documentation/keys-ecryptfs.txt b/Documentation/keys-ecryptfs.txt deleted file mode 100644 index c3bbeba63562..000000000000 --- a/Documentation/keys-ecryptfs.txt +++ /dev/null @@ -1,68 +0,0 @@ - Encrypted keys for the eCryptfs filesystem - -ECryptfs is a stacked filesystem which transparently encrypts and decrypts each -file using a randomly generated File Encryption Key (FEK). - -Each FEK is in turn encrypted with a File Encryption Key Encryption Key (FEFEK) -either in kernel space or in user space with a daemon called 'ecryptfsd'. In -the former case the operation is performed directly by the kernel CryptoAPI -using a key, the FEFEK, derived from a user prompted passphrase; in the latter -the FEK is encrypted by 'ecryptfsd' with the help of external libraries in order -to support other mechanisms like public key cryptography, PKCS#11 and TPM based -operations. - -The data structure defined by eCryptfs to contain information required for the -FEK decryption is called authentication token and, currently, can be stored in a -kernel key of the 'user' type, inserted in the user's session specific keyring -by the userspace utility 'mount.ecryptfs' shipped with the package -'ecryptfs-utils'. - -The 'encrypted' key type has been extended with the introduction of the new -format 'ecryptfs' in order to be used in conjunction with the eCryptfs -filesystem. 
Encrypted keys of the newly introduced format store an -authentication token in its payload with a FEFEK randomly generated by the -kernel and protected by the parent master key. - -In order to avoid known-plaintext attacks, the datablob obtained through -commands 'keyctl print' or 'keyctl pipe' does not contain the overall -authentication token, which content is well known, but only the FEFEK in -encrypted form. - -The eCryptfs filesystem may really benefit from using encrypted keys in that the -required key can be securely generated by an Administrator and provided at boot -time after the unsealing of a 'trusted' key in order to perform the mount in a -controlled environment. Another advantage is that the key is not exposed to -threats of malicious software, because it is available in clear form only at -kernel level. - -Usage: - keyctl add encrypted name "new ecryptfs key-type:master-key-name keylen" ring - keyctl add encrypted name "load hex_blob" ring - keyctl update keyid "update key-type:master-key-name" - -name:= '<16 hexadecimal characters>' -key-type:= 'trusted' | 'user' -keylen:= 64 - - -Example of encrypted key usage with the eCryptfs filesystem: - -Create an encrypted key "1000100010001000" of length 64 bytes with format -'ecryptfs' and save it using a previously loaded user key "test": - - $ keyctl add encrypted 1000100010001000 "new ecryptfs user:test 64" @u - 19184530 - - $ keyctl print 19184530 - ecryptfs user:test 64 490045d4bfe48c99f0d465fbbbb79e7500da954178e2de0697 - dd85091f5450a0511219e9f7cd70dcd498038181466f78ac8d4c19504fcc72402bfc41c2 - f253a41b7507ccaa4b2b03fff19a69d1cc0b16e71746473f023a95488b6edfd86f7fdd40 - 9d292e4bacded1258880122dd553a661 - - $ keyctl pipe 19184530 > ecryptfs.blob - -Mount an eCryptfs filesystem using the created encrypted key "1000100010001000" -into the '/secret' directory: - - $ mount -i -t ecryptfs -oecryptfs_sig=1000100010001000,\ - ecryptfs_cipher=aes,ecryptfs_key_bytes=32 /secret /secret diff --git a/Documentation/security/keys-ecryptfs.txt b/Documentation/security/keys-ecryptfs.txt new file mode 100644 index 000000000000..c3bbeba63562 --- /dev/null +++ b/Documentation/security/keys-ecryptfs.txt @@ -0,0 +1,68 @@ + Encrypted keys for the eCryptfs filesystem + +ECryptfs is a stacked filesystem which transparently encrypts and decrypts each +file using a randomly generated File Encryption Key (FEK). + +Each FEK is in turn encrypted with a File Encryption Key Encryption Key (FEFEK) +either in kernel space or in user space with a daemon called 'ecryptfsd'. In +the former case the operation is performed directly by the kernel CryptoAPI +using a key, the FEFEK, derived from a user prompted passphrase; in the latter +the FEK is encrypted by 'ecryptfsd' with the help of external libraries in order +to support other mechanisms like public key cryptography, PKCS#11 and TPM based +operations. + +The data structure defined by eCryptfs to contain information required for the +FEK decryption is called authentication token and, currently, can be stored in a +kernel key of the 'user' type, inserted in the user's session specific keyring +by the userspace utility 'mount.ecryptfs' shipped with the package +'ecryptfs-utils'. + +The 'encrypted' key type has been extended with the introduction of the new +format 'ecryptfs' in order to be used in conjunction with the eCryptfs +filesystem. 
Encrypted keys of the newly introduced format store an +authentication token in its payload with a FEFEK randomly generated by the +kernel and protected by the parent master key. + +In order to avoid known-plaintext attacks, the datablob obtained through +commands 'keyctl print' or 'keyctl pipe' does not contain the overall +authentication token, which content is well known, but only the FEFEK in +encrypted form. + +The eCryptfs filesystem may really benefit from using encrypted keys in that the +required key can be securely generated by an Administrator and provided at boot +time after the unsealing of a 'trusted' key in order to perform the mount in a +controlled environment. Another advantage is that the key is not exposed to +threats of malicious software, because it is available in clear form only at +kernel level. + +Usage: + keyctl add encrypted name "new ecryptfs key-type:master-key-name keylen" ring + keyctl add encrypted name "load hex_blob" ring + keyctl update keyid "update key-type:master-key-name" + +name:= '<16 hexadecimal characters>' +key-type:= 'trusted' | 'user' +keylen:= 64 + + +Example of encrypted key usage with the eCryptfs filesystem: + +Create an encrypted key "1000100010001000" of length 64 bytes with format +'ecryptfs' and save it using a previously loaded user key "test": + + $ keyctl add encrypted 1000100010001000 "new ecryptfs user:test 64" @u + 19184530 + + $ keyctl print 19184530 + ecryptfs user:test 64 490045d4bfe48c99f0d465fbbbb79e7500da954178e2de0697 + dd85091f5450a0511219e9f7cd70dcd498038181466f78ac8d4c19504fcc72402bfc41c2 + f253a41b7507ccaa4b2b03fff19a69d1cc0b16e71746473f023a95488b6edfd86f7fdd40 + 9d292e4bacded1258880122dd553a661 + + $ keyctl pipe 19184530 > ecryptfs.blob + +Mount an eCryptfs filesystem using the created encrypted key "1000100010001000" +into the '/secret' directory: + + $ mount -i -t ecryptfs -oecryptfs_sig=1000100010001000,\ + ecryptfs_cipher=aes,ecryptfs_key_bytes=32 /secret /secret -- cgit v1.2.3 From ea504819122a76a236f8b95d1556f807a0a41397 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Thu, 30 Jun 2011 17:32:30 +0900 Subject: TOMOYO: Fix wrong domainname in tomoyo_init_log(). Commit eadd99cc "TOMOYO: Add auditing interface." by error replaced "struct tomoyo_request_info"->domain with tomoyo_domain(). Signed-off-by: Tetsuo Handa Signed-off-by: James Morris --- security/tomoyo/audit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c index f2c869767d79..967b5648dce3 100644 --- a/security/tomoyo/audit.c +++ b/security/tomoyo/audit.c @@ -69,7 +69,7 @@ char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt, char *buf = NULL; const char *header = NULL; int pos; - const char *domainname = tomoyo_domain()->domainname->name; + const char *domainname = r->domain->domainname->name; header = tomoyo_print_header(r); if (!header) return NULL; -- cgit v1.2.3 From 24e6289c029b0cf5b4f75e12c1b66000d441c9ed Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Mon, 23 May 2011 11:51:18 +0300 Subject: OMAP: DSS2: remove extra includes from include/video/omapdss.h omapdss.h included platform_device.h and atomic.h, neither of which is needed by omapdss.h. Remove those includes from omapdss.h, and fix the affected .c files which did not include platform_device.h even though they should. 
Signed-off-by: Tomi Valkeinen --- drivers/video/omap2/dss/dispc.c | 1 + drivers/video/omap2/dss/dss.c | 1 + drivers/video/omap2/dss/hdmi.c | 1 + drivers/video/omap2/dss/rfbi.c | 1 + include/video/omapdss.h | 2 -- 5 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 7a9a2e7d9685..a9eebd8e79fe 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -33,6 +33,7 @@ #include #include #include +#include <linux/platform_device.h> #include #include diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index d9489d5c4f08..d0b3f81b3724 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -28,6 +28,7 @@ #include #include #include +#include <linux/platform_device.h> #include