From 354e57f3a0a26120af3bcd6c92c355ad00a057c1 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 7 Nov 2013 10:25:55 +0100 Subject: ARM/serial: at91: switch atmel serial to use gpiolib MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This passes the errata fix using a GPIO to control the RTS pin on one of the AT91 chips to use gpiolib instead of the AT91-specific interfaces. Also remove the reliance on compile-time #defines and the cpu_* check and rely on the platform passing down the proper GPIO pin through platform data. This is a prerequisite for getting rid of the local GPIO implementation in the AT91 platform and move toward multiplatform. The patch also adds device tree support for getting the RTS GPIO pin from the device tree on DT boot paths. Signed-off-by: Nicolas Ferre Acked-by: Jean-Christophe PLAGNIOL-VILLARD Signed-off-by: Linus Walleij Acked-by: Greg Kroah-Hartman Signed-off-by: Uwe Kleine-König --- include/linux/platform_data/atmel.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h index cea9f70133c5..e26b0c14edea 100644 --- a/include/linux/platform_data/atmel.h +++ b/include/linux/platform_data/atmel.h @@ -84,6 +84,7 @@ struct atmel_uart_data { short use_dma_rx; /* use receive DMA? */ void __iomem *regs; /* virt. base address, if any */ struct serial_rs485 rs485; /* rs485 settings */ + int rts_gpio; /* optional RTS GPIO */ }; /* Touchscreen Controller */ -- cgit v1.2.3 From ee1e0994ab1bd302fd03432de79c07751df47ffa Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 24 Jan 2014 14:37:57 +0100 Subject: regulator: s5m8767: Use GPIO for controlling Buck9/eMMC Add support for GPIO control (enable/disable) over Buck9. The Buck9 Converter is used as a supply for eMMC Host Controller. BUCK9EN GPIO of S5M8767 chip may be used by application processor to enable or disable the Buck9. This has two benefits: - It is faster than toggling it over I2C bus. - It allows disabling the regulator during suspend to RAM; The AP will enable it during resume; Without the patch the regulator supplying eMMC must be defined as fixed-regulator. Signed-off-by: Krzysztof Kozlowski Acked-by: Lee Jones Signed-off-by: Mark Brown --- drivers/regulator/s5m8767.c | 87 +++++++++++++++++++++++++++++++++++-- include/linux/mfd/samsung/core.h | 3 +- include/linux/mfd/samsung/s5m8767.h | 7 +++ 3 files changed, 93 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index d7164bb75d3e..6850a25a41c4 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -11,11 +11,8 @@ * */ -#include #include -#include #include -#include #include #include #include @@ -483,6 +480,65 @@ static struct regulator_desc regulators[] = { s5m8767_regulator_desc(BUCK9), }; +/* + * Enable GPIO control over BUCK9 in regulator_config for that regulator. 
+ */ +static void s5m8767_regulator_config_ext_control(struct s5m8767_info *s5m8767, + struct sec_regulator_data *rdata, + struct regulator_config *config) +{ + int i, mode = 0; + + if (rdata->id != S5M8767_BUCK9) + return; + + /* Check if opmode for regulator matches S5M8767_ENCTRL_USE_GPIO */ + for (i = 0; i < s5m8767->num_regulators; i++) { + const struct sec_opmode_data *opmode = &s5m8767->opmode[i]; + if (opmode->id == rdata->id) { + mode = s5m8767_opmode_reg[rdata->id][opmode->mode]; + break; + } + } + if (mode != S5M8767_ENCTRL_USE_GPIO) { + dev_warn(s5m8767->dev, + "ext-control for %s: mismatched op_mode (%x), ignoring\n", + rdata->reg_node->name, mode); + return; + } + + if (!gpio_is_valid(rdata->ext_control_gpio)) { + dev_warn(s5m8767->dev, + "ext-control for %s: GPIO not valid, ignoring\n", + rdata->reg_node->name); + return; + } + + config->ena_gpio = rdata->ext_control_gpio; + config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH; +} + +/* + * Turn on GPIO control over BUCK9. + */ +static int s5m8767_enable_ext_control(struct s5m8767_info *s5m8767, + struct regulator_dev *rdev) +{ + int ret, reg, enable_ctrl; + + if (rdev_get_id(rdev) != S5M8767_BUCK9) + return -EINVAL; + + ret = s5m8767_get_register(rdev, ®, &enable_ctrl); + if (ret) + return ret; + + return regmap_update_bits(s5m8767->iodev->regmap_pmic, + reg, S5M8767_ENCTRL_MASK, + S5M8767_ENCTRL_USE_GPIO << S5M8767_ENCTRL_SHIFT); +} + + #ifdef CONFIG_OF static int s5m8767_pmic_dt_parse_dvs_gpio(struct sec_pmic_dev *iodev, struct sec_platform_data *pdata, @@ -520,6 +576,16 @@ static int s5m8767_pmic_dt_parse_ds_gpio(struct sec_pmic_dev *iodev, return 0; } +static void s5m8767_pmic_dt_parse_ext_control_gpio(struct sec_pmic_dev *iodev, + struct sec_regulator_data *rdata, + struct device_node *reg_np) +{ + rdata->ext_control_gpio = of_get_named_gpio(reg_np, + "s5m8767,pmic-ext-control-gpios", 0); + if (!gpio_is_valid(rdata->ext_control_gpio)) + rdata->ext_control_gpio = 0; +} + static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, struct sec_platform_data *pdata) { @@ -574,6 +640,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, continue; } + s5m8767_pmic_dt_parse_ext_control_gpio(iodev, rdata, reg_np); + rdata->id = i; rdata->initdata = of_get_regulator_init_data( &pdev->dev, reg_np); @@ -940,6 +1008,9 @@ static int s5m8767_pmic_probe(struct platform_device *pdev) config.driver_data = s5m8767; config.regmap = iodev->regmap_pmic; config.of_node = pdata->regulators[i].reg_node; + if (pdata->regulators[i].ext_control_gpio) + s5m8767_regulator_config_ext_control(s5m8767, + &pdata->regulators[i], &config); rdev[i] = devm_regulator_register(&pdev->dev, ®ulators[id], &config); @@ -949,6 +1020,16 @@ static int s5m8767_pmic_probe(struct platform_device *pdev) id); return ret; } + + if (pdata->regulators[i].ext_control_gpio) { + ret = s5m8767_enable_ext_control(s5m8767, rdev[i]); + if (ret < 0) { + dev_err(s5m8767->dev, + "failed to enable gpio control over %s: %d\n", + rdev[i]->desc->name, ret); + return ret; + } + } } return 0; diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 41c9bde410c5..55510444b9fd 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -119,7 +119,8 @@ struct sec_platform_data { struct sec_regulator_data { int id; struct regulator_init_data *initdata; - struct device_node *reg_node; + struct device_node *reg_node; + int ext_control_gpio; }; /* diff --git a/include/linux/mfd/samsung/s5m8767.h 
b/include/linux/mfd/samsung/s5m8767.h index 2ab0b0f03641..243b58fec33d 100644 --- a/include/linux/mfd/samsung/s5m8767.h +++ b/include/linux/mfd/samsung/s5m8767.h @@ -183,9 +183,16 @@ enum s5m8767_regulators { S5M8767_REG_MAX, }; +/* LDO_EN/BUCK_EN field in registers */ #define S5M8767_ENCTRL_SHIFT 6 #define S5M8767_ENCTRL_MASK (0x3 << S5M8767_ENCTRL_SHIFT) +/* + * LDO_EN/BUCK_EN register value for controlling this Buck or LDO + * by GPIO (PWREN, BUCKEN). + */ +#define S5M8767_ENCTRL_USE_GPIO 0x1 + /* * Values for BUCK_RAMP field in DVS_RAMP register, matching raw values * in mV/us. -- cgit v1.2.3 From cd4dc0821bc97947f25c8483a4aa0711bff8619a Mon Sep 17 00:00:00 2001 From: Frank Praznik Date: Wed, 22 Jan 2014 13:49:41 -0500 Subject: HID: Add transport-driver callbacks to the hid_ll_driver struct Add raw_request and output_report callbacks to the hid_ll_driver struct. Signed-off-by: Frank Praznik Acked-by: David Herrmann Signed-off-by: Jiri Kosina --- include/linux/hid.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/hid.h b/include/linux/hid.h index 31b9d299ef6c..003cc8e89831 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -700,8 +700,14 @@ struct hid_ll_driver { struct hid_report *report, int reqtype); int (*wait)(struct hid_device *hdev); - int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype); + int (*raw_request) (struct hid_device *hdev, unsigned char reportnum, + __u8 *buf, size_t len, unsigned char rtype, + int reqtype); + + int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len); + + int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype); }; #define PM_HINT_FULLON 1<<5 -- cgit v1.2.3 From c05c4186bbe4e99d64e8a36f7ca7f480da5d109f Mon Sep 17 00:00:00 2001 From: Jens Freimann Date: Mon, 7 Oct 2013 16:13:45 +0200 Subject: KVM: s390: add floating irq controller This patch adds a floating irq controller as a kvm_device. It will be necessary for migration of floating interrupts as well as for hardening the reset code by allowing user space to explicitly remove all pending floating interrupts. Signed-off-by: Jens Freimann Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- Documentation/virtual/kvm/devices/s390_flic.txt | 36 +++ arch/s390/include/asm/kvm_host.h | 1 + arch/s390/include/uapi/asm/kvm.h | 14 ++ arch/s390/kvm/interrupt.c | 304 ++++++++++++++++++++---- arch/s390/kvm/kvm-s390.c | 1 + include/linux/kvm_host.h | 1 + include/uapi/linux/kvm.h | 1 + virt/kvm/kvm_main.c | 5 + 8 files changed, 312 insertions(+), 51 deletions(-) create mode 100644 Documentation/virtual/kvm/devices/s390_flic.txt (limited to 'include/linux') diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt new file mode 100644 index 000000000000..6b557953066a --- /dev/null +++ b/Documentation/virtual/kvm/devices/s390_flic.txt @@ -0,0 +1,36 @@ +FLIC (floating interrupt controller) +==================================== + +FLIC handles floating (non per-cpu) interrupts, i.e. I/O, service and some +machine check interruptions. All interrupts are stored in a per-vm list of +pending interrupts. FLIC performs operations on this list. + +Only one FLIC instance may be instantiated. 
+ +FLIC provides support to +- add interrupts (KVM_DEV_FLIC_ENQUEUE) +- inspect currently pending interrupts (KVM_FLIC_GET_ALL_IRQS) +- purge all pending floating interrupts (KVM_DEV_FLIC_CLEAR_IRQS) + +Groups: + KVM_DEV_FLIC_ENQUEUE + Passes a buffer and length into the kernel which are then injected into + the list of pending interrupts. + attr->addr contains the pointer to the buffer and attr->attr contains + the length of the buffer. + The format of the data structure kvm_s390_irq as it is copied from userspace + is defined in usr/include/linux/kvm.h. + + KVM_DEV_FLIC_GET_ALL_IRQS + Copies all floating interrupts into a buffer provided by userspace. + When the buffer is too small it returns -ENOMEM, which is the indication + for userspace to try again with a bigger buffer. + All interrupts remain pending, i.e. are not deleted from the list of + currently pending interrupts. + attr->addr contains the userspace address of the buffer into which all + interrupt data will be copied. + attr->attr contains the size of the buffer in bytes. + + KVM_DEV_FLIC_CLEAR_IRQS + Simply deletes all elements from the list of currently pending floating + interrupts. No interrupts are injected into the guest. diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3ffc9646e742..59635b5c59a6 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -243,6 +243,7 @@ struct kvm_arch{ struct sca_block *sca; debug_info_t *dbf; struct kvm_s390_float_interrupt float_int; + struct kvm_device *flic; struct gmap *gmap; int css_support; }; diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index d25da598ec62..38d5f98552bb 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -16,6 +16,20 @@ #define __KVM_S390 +/* Device control API: s390-specific devices */ +#define KVM_DEV_FLIC_GET_ALL_IRQS 1 +#define KVM_DEV_FLIC_ENQUEUE 2 +#define KVM_DEV_FLIC_CLEAR_IRQS 3 +/* + * We can have up to 4*64k pending subchannels + 8 adapter interrupts, + * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. + * There are also sclp and machine checks. This gives us + * sizeof(kvm_s390_irq)*(4*65536+8+64*64+1+1) = 72 * 266250 = 19170000 + * Lets round up to 8192 pages. + */ + +#define KVM_S390_FLIC_MAX_BUFFER 0x2000000 + /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { /* general purpose regs for s390 */ diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 5f79d2d79ca7..a5f18babed4c 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -659,53 +659,86 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, return inti; } -int kvm_s390_inject_vm(struct kvm *kvm, - struct kvm_s390_interrupt *s390int) +static void __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) { struct kvm_s390_local_interrupt *li; struct kvm_s390_float_interrupt *fi; - struct kvm_s390_interrupt_info *inti, *iter; + struct kvm_s390_interrupt_info *iter; int sigcpu; + mutex_lock(&kvm->lock); + fi = &kvm->arch.float_int; + spin_lock(&fi->lock); + if (!is_ioint(inti->type)) { + list_add_tail(&inti->list, &fi->list); + } else { + u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word); + + /* Keep I/O interrupts sorted in isc order. 
*/ + list_for_each_entry(iter, &fi->list, list) { + if (!is_ioint(iter->type)) + continue; + if (int_word_to_isc_bits(iter->io.io_int_word) + <= isc_bits) + continue; + break; + } + list_add_tail(&inti->list, &iter->list); + } + atomic_set(&fi->active, 1); + sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); + if (sigcpu == KVM_MAX_VCPUS) { + do { + sigcpu = fi->next_rr_cpu++; + if (sigcpu == KVM_MAX_VCPUS) + sigcpu = fi->next_rr_cpu = 0; + } while (fi->local_int[sigcpu] == NULL); + } + li = fi->local_int[sigcpu]; + spin_lock_bh(&li->lock); + atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + if (waitqueue_active(li->wq)) + wake_up_interruptible(li->wq); + spin_unlock_bh(&li->lock); + spin_unlock(&fi->lock); + mutex_unlock(&kvm->lock); +} + +int kvm_s390_inject_vm(struct kvm *kvm, + struct kvm_s390_interrupt *s390int) +{ + struct kvm_s390_interrupt_info *inti; + inti = kzalloc(sizeof(*inti), GFP_KERNEL); if (!inti) return -ENOMEM; - switch (s390int->type) { + inti->type = s390int->type; + switch (inti->type) { case KVM_S390_INT_VIRTIO: VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx", s390int->parm, s390int->parm64); - inti->type = s390int->type; inti->ext.ext_params = s390int->parm; inti->ext.ext_params2 = s390int->parm64; break; case KVM_S390_INT_SERVICE: VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm); - inti->type = s390int->type; inti->ext.ext_params = s390int->parm; break; - case KVM_S390_PROGRAM_INT: - case KVM_S390_SIGP_STOP: - case KVM_S390_INT_EXTERNAL_CALL: - case KVM_S390_INT_EMERGENCY: - kfree(inti); - return -EINVAL; case KVM_S390_MCHK: VM_EVENT(kvm, 5, "inject: machine check parm64:%llx", s390int->parm64); - inti->type = s390int->type; inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ inti->mchk.mcic = s390int->parm64; break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - if (s390int->type & IOINT_AI_MASK) + if (inti->type & IOINT_AI_MASK) VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)"); else VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x", s390int->type & IOINT_CSSID_MASK, s390int->type & IOINT_SSID_MASK, s390int->type & IOINT_SCHID_MASK); - inti->type = s390int->type; inti->io.subchannel_id = s390int->parm >> 16; inti->io.subchannel_nr = s390int->parm & 0x0000ffffu; inti->io.io_int_parm = s390int->parm64 >> 32; @@ -718,42 +751,7 @@ int kvm_s390_inject_vm(struct kvm *kvm, trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64, 2); - mutex_lock(&kvm->lock); - fi = &kvm->arch.float_int; - spin_lock(&fi->lock); - if (!is_ioint(inti->type)) - list_add_tail(&inti->list, &fi->list); - else { - u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word); - - /* Keep I/O interrupts sorted in isc order. 
*/ - list_for_each_entry(iter, &fi->list, list) { - if (!is_ioint(iter->type)) - continue; - if (int_word_to_isc_bits(iter->io.io_int_word) - <= isc_bits) - continue; - break; - } - list_add_tail(&inti->list, &iter->list); - } - atomic_set(&fi->active, 1); - sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); - if (sigcpu == KVM_MAX_VCPUS) { - do { - sigcpu = fi->next_rr_cpu++; - if (sigcpu == KVM_MAX_VCPUS) - sigcpu = fi->next_rr_cpu = 0; - } while (fi->local_int[sigcpu] == NULL); - } - li = fi->local_int[sigcpu]; - spin_lock_bh(&li->lock); - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); - if (waitqueue_active(li->wq)) - wake_up_interruptible(li->wq); - spin_unlock_bh(&li->lock); - spin_unlock(&fi->lock); - mutex_unlock(&kvm->lock); + __inject_vm(kvm, inti); return 0; } @@ -841,3 +839,207 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, mutex_unlock(&vcpu->kvm->lock); return 0; } + +static void clear_floating_interrupts(struct kvm *kvm) +{ + struct kvm_s390_float_interrupt *fi; + struct kvm_s390_interrupt_info *n, *inti = NULL; + + mutex_lock(&kvm->lock); + fi = &kvm->arch.float_int; + spin_lock(&fi->lock); + list_for_each_entry_safe(inti, n, &fi->list, list) { + list_del(&inti->list); + kfree(inti); + } + atomic_set(&fi->active, 0); + spin_unlock(&fi->lock); + mutex_unlock(&kvm->lock); +} + +static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti, + u8 *addr) +{ + struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; + struct kvm_s390_irq irq = {0}; + + irq.type = inti->type; + switch (inti->type) { + case KVM_S390_INT_VIRTIO: + case KVM_S390_INT_SERVICE: + irq.u.ext = inti->ext; + break; + case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: + irq.u.io = inti->io; + break; + case KVM_S390_MCHK: + irq.u.mchk = inti->mchk; + break; + default: + return -EINVAL; + } + + if (copy_to_user(uptr, &irq, sizeof(irq))) + return -EFAULT; + + return 0; +} + +static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len) +{ + struct kvm_s390_interrupt_info *inti; + struct kvm_s390_float_interrupt *fi; + int ret = 0; + int n = 0; + + mutex_lock(&kvm->lock); + fi = &kvm->arch.float_int; + spin_lock(&fi->lock); + + list_for_each_entry(inti, &fi->list, list) { + if (len < sizeof(struct kvm_s390_irq)) { + /* signal userspace to try again */ + ret = -ENOMEM; + break; + } + ret = copy_irq_to_user(inti, buf); + if (ret) + break; + buf += sizeof(struct kvm_s390_irq); + len -= sizeof(struct kvm_s390_irq); + n++; + } + + spin_unlock(&fi->lock); + mutex_unlock(&kvm->lock); + + return ret < 0 ? 
ret : n; +} + +static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + int r; + + switch (attr->group) { + case KVM_DEV_FLIC_GET_ALL_IRQS: + r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr, + attr->attr); + break; + default: + r = -EINVAL; + } + + return r; +} + +static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti, + u64 addr) +{ + struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; + void *target = NULL; + void __user *source; + u64 size; + + if (get_user(inti->type, (u64 __user *)addr)) + return -EFAULT; + + switch (inti->type) { + case KVM_S390_INT_VIRTIO: + case KVM_S390_INT_SERVICE: + target = (void *) &inti->ext; + source = &uptr->u.ext; + size = sizeof(inti->ext); + break; + case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: + target = (void *) &inti->io; + source = &uptr->u.io; + size = sizeof(inti->io); + break; + case KVM_S390_MCHK: + target = (void *) &inti->mchk; + source = &uptr->u.mchk; + size = sizeof(inti->mchk); + break; + default: + return -EINVAL; + } + + if (copy_from_user(target, source, size)) + return -EFAULT; + + return 0; +} + +static int enqueue_floating_irq(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + struct kvm_s390_interrupt_info *inti = NULL; + int r = 0; + int len = attr->attr; + + if (len % sizeof(struct kvm_s390_irq) != 0) + return -EINVAL; + else if (len > KVM_S390_FLIC_MAX_BUFFER) + return -EINVAL; + + while (len >= sizeof(struct kvm_s390_irq)) { + inti = kzalloc(sizeof(*inti), GFP_KERNEL); + if (!inti) + return -ENOMEM; + + r = copy_irq_from_user(inti, attr->addr); + if (r) { + kfree(inti); + return r; + } + __inject_vm(dev->kvm, inti); + len -= sizeof(struct kvm_s390_irq); + attr->addr += sizeof(struct kvm_s390_irq); + } + + return r; +} + +static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + int r = 0; + + switch (attr->group) { + case KVM_DEV_FLIC_ENQUEUE: + r = enqueue_floating_irq(dev, attr); + break; + case KVM_DEV_FLIC_CLEAR_IRQS: + r = 0; + clear_floating_interrupts(dev->kvm); + break; + default: + r = -EINVAL; + } + + return r; +} + +static int flic_create(struct kvm_device *dev, u32 type) +{ + if (!dev) + return -EINVAL; + if (dev->kvm->arch.flic) + return -EINVAL; + dev->kvm->arch.flic = dev; + return 0; +} + +static void flic_destroy(struct kvm_device *dev) +{ + dev->kvm->arch.flic = NULL; + kfree(dev); +} + +/* s390 floating irq controller (flic) */ +struct kvm_device_ops kvm_flic_ops = { + .name = "kvm-flic", + .get_attr = flic_get_attr, + .set_attr = flic_set_attr, + .create = flic_create, + .destroy = flic_destroy, +}; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index e0676f390d57..782420f3c4d5 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -157,6 +157,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_ENABLE_CAP: case KVM_CAP_S390_CSS_SUPPORT: case KVM_CAP_IOEVENTFD: + case KVM_CAP_DEVICE_CTRL: r = 1; break; case KVM_CAP_NR_VCPUS: diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index b8e9a43e501a..c0102ef2de48 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1064,6 +1064,7 @@ extern struct kvm_device_ops kvm_mpic_ops; extern struct kvm_device_ops kvm_xics_ops; extern struct kvm_device_ops kvm_vfio_ops; extern struct kvm_device_ops kvm_arm_vgic_v2_ops; +extern struct kvm_device_ops kvm_flic_ops; #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 
86faf47ae494..19f717b15297 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -918,6 +918,7 @@ struct kvm_device_attr { #define KVM_DEV_VFIO_GROUP_ADD 1 #define KVM_DEV_VFIO_GROUP_DEL 2 #define KVM_DEV_TYPE_ARM_VGIC_V2 5 +#define KVM_DEV_TYPE_FLIC 6 /* * ioctls for VM fds diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 03a0381b1cb7..a9e999a48e43 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2283,6 +2283,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm, case KVM_DEV_TYPE_ARM_VGIC_V2: ops = &kvm_arm_vgic_v2_ops; break; +#endif +#ifdef CONFIG_S390 + case KVM_DEV_TYPE_FLIC: + ops = &kvm_flic_ops; + break; #endif default: return -ENODEV; -- cgit v1.2.3 From e0ead41a6dac09f86675ce07a66e4b253a9b7bd5 Mon Sep 17 00:00:00 2001 From: Dominik Dingel Date: Thu, 6 Jun 2013 15:32:37 +0200 Subject: KVM: async_pf: Provide additional direct page notification By setting a Kconfig option, the architecture can control when guest notifications will be presented by the apf backend. There is the default batch mechanism, working as before, where the vcpu thread should pull in this information. Opposite to this, there is now the direct mechanism, that will push the information to the guest. This way s390 can use an already existing architecture interface. Still the vcpu thread should call check_completion to cleanup leftovers. Signed-off-by: Dominik Dingel Signed-off-by: Christian Borntraeger --- arch/x86/kvm/mmu.c | 2 +- include/linux/kvm_host.h | 2 +- virt/kvm/Kconfig | 4 ++++ virt/kvm/async_pf.c | 20 ++++++++++++++++++-- 4 files changed, 24 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index e50425d0f5f7..aaa60f347b73 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3328,7 +3328,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) arch.direct_map = vcpu->arch.mmu.direct_map; arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); - return kvm_setup_async_pf(vcpu, gva, gfn, &arch); + return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch); } static bool can_do_async_pf(struct kvm_vcpu *vcpu) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c0102ef2de48..f5937b8188b4 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -192,7 +192,7 @@ struct kvm_async_pf { void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); -int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, +int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, struct kvm_arch_async_pf *arch); int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index fbe1a48bd629..13f2d19793e3 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -22,6 +22,10 @@ config KVM_MMIO config KVM_ASYNC_PF bool +# Toggle to switch between direct notification and batch job +config KVM_ASYNC_PF_SYNC + bool + config HAVE_KVM_MSI bool diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 8631d9c14320..00980ab02c45 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -28,6 +28,21 @@ #include "async_pf.h" #include +static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work) +{ +#ifdef CONFIG_KVM_ASYNC_PF_SYNC + kvm_arch_async_page_present(vcpu, work); +#endif +} +static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work) +{ 
+#ifndef CONFIG_KVM_ASYNC_PF_SYNC + kvm_arch_async_page_present(vcpu, work); +#endif +} + static struct kmem_cache *async_pf_cache; int kvm_async_pf_init(void) @@ -69,6 +84,7 @@ static void async_pf_execute(struct work_struct *work) down_read(&mm->mmap_sem); get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL); up_read(&mm->mmap_sem); + kvm_async_page_present_sync(vcpu, apf); unuse_mm(mm); spin_lock(&vcpu->async_pf.lock); @@ -138,7 +154,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) } } -int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, +int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, struct kvm_arch_async_pf *arch) { struct kvm_async_pf *work; @@ -159,7 +175,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, work->wakeup_all = false; work->vcpu = vcpu; work->gva = gva; - work->addr = gfn_to_hva(vcpu->kvm, gfn); + work->addr = hva; work->arch = *arch; work->mm = current->mm; atomic_inc(&work->mm->mm_count); -- cgit v1.2.3 From 8cc7212a036118fcb5cfbbdb013c5032677bbd23 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 30 Jan 2014 19:57:54 +0400 Subject: idr: remove unused prototype of idr_free() There is no such function. Remove the redundant prototype. Signed-off-by: Vladimir Davydov Acked-by: Tejun Heo Signed-off-by: Jiri Kosina --- include/linux/idr.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/idr.h b/include/linux/idr.h index 871a213a8477..ff94a4875e1c 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -82,7 +82,6 @@ int idr_for_each(struct idr *idp, void *idr_get_next(struct idr *idp, int *nextid); void *idr_replace(struct idr *idp, void *ptr, int id); void idr_remove(struct idr *idp, int id); -void idr_free(struct idr *idp, int id); void idr_destroy(struct idr *idp); void idr_init(struct idr *idp); -- cgit v1.2.3 From 81993e81a994504f4c8b97d3410c9a052cdbcc9d Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Sat, 1 Feb 2014 18:54:11 -0800 Subject: compat: Get rid of (get|put)_compat_time(val|spec) We have two APIs for compatiblity timespec/val, with confusingly similar names. compat_(get|put)_time(val|spec) *do* handle the case where COMPAT_USE_64BIT_TIME is set, whereas (get|put)_compat_time(val|spec) do not. This is an accident waiting to happen. Clean it up by favoring the full-service version; the limited version is replaced with double-underscore versions static to kernel/compat.c. A common pattern is to convert a struct timespec to kernel format in an allocation on the user stack. Unfortunately it is open-coded in several places. Since this allocation isn't actually needed if COMPAT_USE_64BIT_TIME is true (since user format == kernel format) encapsulate that whole pattern into the function compat_convert_timespec(). An equivalent function should be written for struct timeval if it is needed in the future. Finally, get rid of compat_(get|put)_timeval_convert(): each was only used once, and the latter was not even doing what the function said (no conversion actually was being done.) Moving the conversion into compat_sys_settimeofday() itself makes the code much more similar to sys_settimeofday() itself. v3: Remove unused compat_convert_timeval(). v2: Drop bogus "const" in the destination argument for compat_convert_time*(). 
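As a rough sketch of the resulting caller pattern (the syscall name below is made up for illustration and is not one of the call sites touched by this patch), a compat entry point that takes an optional timespec reduces to:

	asmlinkage long compat_sys_foo_timedwait(int fd,
			const struct compat_timespec __user *utime)
	{
		struct timespec __user *uts;

		/* Passes the pointer straight through when
		 * COMPAT_USE_64BIT_TIME is set (or utime is NULL),
		 * otherwise converts into a compat_alloc_user_space()
		 * allocation on the user stack. */
		if (compat_convert_timespec(&uts, utime))
			return -EFAULT;

		return sys_foo_timedwait(fd, uts); /* hypothetical native syscall */
	}

The semtimedop and mq_timed* conversions in the diff below follow exactly this shape.
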
Cc: Mauro Carvalho Chehab Cc: Alexander Viro Cc: Hans Verkuil Cc: Andrew Morton Cc: Heiko Carstens Cc: Manfred Spraul Cc: Mateusz Guzik Cc: Rafael Aquini Cc: Davidlohr Bueso Cc: Stephen Rothwell Cc: Dan Carpenter Cc: Arnd Bergmann Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Linus Torvalds Cc: Catalin Marinas Cc: Will Deacon Tested-by: H.J. Lu Signed-off-by: H. Peter Anvin --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 2 +- fs/compat.c | 6 +- include/linux/compat.h | 23 +++--- ipc/compat.c | 12 +-- ipc/compat_mq.c | 19 +---- kernel/compat.c | 108 +++++++++++++------------- kernel/futex_compat.c | 2 +- 7 files changed, 75 insertions(+), 97 deletions(-) (limited to 'include/linux') diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 8f7a6a454a4c..6191968db8fa 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -733,7 +733,7 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || put_user(kp->pending, &up->pending) || put_user(kp->sequence, &up->sequence) || - put_compat_timespec(&kp->timestamp, &up->timestamp) || + compat_put_timespec(&kp->timestamp, &up->timestamp) || put_user(kp->id, &up->id) || copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) return -EFAULT; diff --git a/fs/compat.c b/fs/compat.c index 6af20de2c1a3..62092f433759 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -92,8 +92,8 @@ asmlinkage long compat_sys_utimensat(unsigned int dfd, const char __user *filena struct timespec tv[2]; if (t) { - if (get_compat_timespec(&tv[0], &t[0]) || - get_compat_timespec(&tv[1], &t[1])) + if (compat_get_timespec(&tv[0], &t[0]) || + compat_get_timespec(&tv[1], &t[1])) return -EFAULT; if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT) @@ -512,7 +512,7 @@ compat_sys_io_getevents(aio_context_t ctx_id, nr * sizeof(struct io_event)))) goto out; if (timeout) { - if (get_compat_timespec(&t, timeout)) + if (compat_get_timespec(&t, timeout)) goto out; ut = compat_alloc_user_space(sizeof(*ut)); diff --git a/include/linux/compat.h b/include/linux/compat.h index 3f448c65511b..f7d5bc0a239f 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -140,27 +140,24 @@ struct compat_sigaction { compat_sigset_t sa_mask __packed; }; -/* - * These functions operate strictly on struct compat_time* - */ -extern int get_compat_timespec(struct timespec *, - const struct compat_timespec __user *); -extern int put_compat_timespec(const struct timespec *, - struct compat_timespec __user *); -extern int get_compat_timeval(struct timeval *, - const struct compat_timeval __user *); -extern int put_compat_timeval(const struct timeval *, - struct compat_timeval __user *); /* * These functions operate on 32- or 64-bit specs depending on - * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments and the - * naming as compat_get/put_ rather than get/put_compat_. + * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments. */ extern int compat_get_timespec(struct timespec *, const void __user *); extern int compat_put_timespec(const struct timespec *, void __user *); extern int compat_get_timeval(struct timeval *, const void __user *); extern int compat_put_timeval(const struct timeval *, void __user *); +/* + * This function convert a timespec if necessary and returns a *user + * space* pointer. If no conversion is necessary, it returns the + * initial pointer. 
NULL is a legitimate argument and will always + * output NULL. + */ +extern int compat_convert_timespec(struct timespec __user **, + const void __user *); + struct compat_iovec { compat_uptr_t iov_base; compat_size_t iov_len; diff --git a/ipc/compat.c b/ipc/compat.c index f486b0096a67..1048522479e5 100644 --- a/ipc/compat.c +++ b/ipc/compat.c @@ -752,14 +752,8 @@ long compat_sys_shmctl(int first, int second, void __user *uptr) long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, unsigned nsops, const struct compat_timespec __user *timeout) { - struct timespec __user *ts64 = NULL; - if (timeout) { - struct timespec ts; - ts64 = compat_alloc_user_space(sizeof(*ts64)); - if (get_compat_timespec(&ts, timeout)) - return -EFAULT; - if (copy_to_user(ts64, &ts, sizeof(ts))) - return -EFAULT; - } + struct timespec __user *ts64; + if (compat_convert_timespec(&ts64, timeout)) + return -EFAULT; return sys_semtimedop(semid, tsems, nsops, ts64); } diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c index 63d7c6de335b..a9cf16378d7a 100644 --- a/ipc/compat_mq.c +++ b/ipc/compat_mq.c @@ -64,20 +64,6 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name, return sys_mq_open(u_name, oflag, mode, p); } -static int compat_prepare_timeout(struct timespec __user **p, - const struct compat_timespec __user *u) -{ - struct timespec ts; - if (!u) { - *p = NULL; - return 0; - } - *p = compat_alloc_user_space(sizeof(ts)); - if (get_compat_timespec(&ts, u) || copy_to_user(*p, &ts, sizeof(ts))) - return -EFAULT; - return 0; -} - asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, size_t msg_len, unsigned int msg_prio, @@ -85,7 +71,7 @@ asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, { struct timespec __user *u_ts; - if (compat_prepare_timeout(&u_ts, u_abs_timeout)) + if (compat_convert_timespec(&u_ts, u_abs_timeout)) return -EFAULT; return sys_mq_timedsend(mqdes, u_msg_ptr, msg_len, @@ -98,7 +84,8 @@ asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, const struct compat_timespec __user *u_abs_timeout) { struct timespec __user *u_ts; - if (compat_prepare_timeout(&u_ts, u_abs_timeout)) + + if (compat_convert_timespec(&u_ts, u_abs_timeout)) return -EFAULT; return sys_mq_timedreceive(mqdes, u_msg_ptr, msg_len, diff --git a/kernel/compat.c b/kernel/compat.c index 0a09e481b70b..3afc524a57ad 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -30,28 +30,6 @@ #include -/* - * Get/set struct timeval with struct timespec on the native side - */ -static int compat_get_timeval_convert(struct timespec *o, - struct compat_timeval __user *i) -{ - long usec; - - if (get_user(o->tv_sec, &i->tv_sec) || - get_user(usec, &i->tv_usec)) - return -EFAULT; - o->tv_nsec = usec * 1000; - return 0; -} - -static int compat_put_timeval_convert(struct compat_timeval __user *o, - struct timeval *i) -{ - return (put_user(i->tv_sec, &o->tv_sec) || - put_user(i->tv_usec, &o->tv_usec)) ? 
-EFAULT : 0; -} - static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp) { memset(txc, 0, sizeof(struct timex)); @@ -116,7 +94,7 @@ asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, if (tv) { struct timeval ktv; do_gettimeofday(&ktv); - if (compat_put_timeval_convert(tv, &ktv)) + if (compat_put_timeval(&ktv, tv)) return -EFAULT; } if (tz) { @@ -130,59 +108,58 @@ asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) { - struct timespec kts; - struct timezone ktz; + struct timeval user_tv; + struct timespec new_ts; + struct timezone new_tz; if (tv) { - if (compat_get_timeval_convert(&kts, tv)) + if (compat_get_timeval(&user_tv, tv)) return -EFAULT; + new_ts.tv_sec = user_tv.tv_sec; + new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC; } if (tz) { - if (copy_from_user(&ktz, tz, sizeof(ktz))) + if (copy_from_user(&new_tz, tz, sizeof(*tz))) return -EFAULT; } - return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); + return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL); } -int get_compat_timeval(struct timeval *tv, const struct compat_timeval __user *ctv) +static int __compat_get_timeval(struct timeval *tv, const struct compat_timeval __user *ctv) { return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) || __get_user(tv->tv_sec, &ctv->tv_sec) || __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; } -EXPORT_SYMBOL_GPL(get_compat_timeval); -int put_compat_timeval(const struct timeval *tv, struct compat_timeval __user *ctv) +static int __compat_put_timeval(const struct timeval *tv, struct compat_timeval __user *ctv) { return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) || __put_user(tv->tv_sec, &ctv->tv_sec) || __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; } -EXPORT_SYMBOL_GPL(put_compat_timeval); -int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts) +static int __compat_get_timespec(struct timespec *ts, const struct compat_timespec __user *cts) { return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || __get_user(ts->tv_sec, &cts->tv_sec) || __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; } -EXPORT_SYMBOL_GPL(get_compat_timespec); -int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts) +static int __compat_put_timespec(const struct timespec *ts, struct compat_timespec __user *cts) { return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || __put_user(ts->tv_sec, &cts->tv_sec) || __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; } -EXPORT_SYMBOL_GPL(put_compat_timespec); int compat_get_timeval(struct timeval *tv, const void __user *utv) { if (COMPAT_USE_64BIT_TIME) return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0; else - return get_compat_timeval(tv, utv); + return __compat_get_timeval(tv, utv); } EXPORT_SYMBOL_GPL(compat_get_timeval); @@ -191,7 +168,7 @@ int compat_put_timeval(const struct timeval *tv, void __user *utv) if (COMPAT_USE_64BIT_TIME) return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0; else - return put_compat_timeval(tv, utv); + return __compat_put_timeval(tv, utv); } EXPORT_SYMBOL_GPL(compat_put_timeval); @@ -200,7 +177,7 @@ int compat_get_timespec(struct timespec *ts, const void __user *uts) if (COMPAT_USE_64BIT_TIME) return copy_from_user(ts, uts, sizeof *ts) ? 
-EFAULT : 0; else - return get_compat_timespec(ts, uts); + return __compat_get_timespec(ts, uts); } EXPORT_SYMBOL_GPL(compat_get_timespec); @@ -209,10 +186,33 @@ int compat_put_timespec(const struct timespec *ts, void __user *uts) if (COMPAT_USE_64BIT_TIME) return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0; else - return put_compat_timespec(ts, uts); + return __compat_put_timespec(ts, uts); } EXPORT_SYMBOL_GPL(compat_put_timespec); +int compat_convert_timespec(struct timespec __user **kts, + const void __user *cts) +{ + struct timespec ts; + struct timespec __user *uts; + + if (!cts || COMPAT_USE_64BIT_TIME) { + *kts = (struct timespec __user *)cts; + return 0; + } + + uts = compat_alloc_user_space(sizeof(ts)); + if (!uts) + return -EFAULT; + if (compat_get_timespec(&ts, cts)) + return -EFAULT; + if (copy_to_user(uts, &ts, sizeof(ts))) + return -EFAULT; + + *kts = uts; + return 0; +} + static long compat_nanosleep_restart(struct restart_block *restart) { struct compat_timespec __user *rmtp; @@ -229,7 +229,7 @@ static long compat_nanosleep_restart(struct restart_block *restart) if (ret) { rmtp = restart->nanosleep.compat_rmtp; - if (rmtp && put_compat_timespec(&rmt, rmtp)) + if (rmtp && compat_put_timespec(&rmt, rmtp)) return -EFAULT; } @@ -243,7 +243,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, mm_segment_t oldfs; long ret; - if (get_compat_timespec(&tu, rqtp)) + if (compat_get_timespec(&tu, rqtp)) return -EFAULT; if (!timespec_valid(&tu)) @@ -263,7 +263,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, restart->fn = compat_nanosleep_restart; restart->nanosleep.compat_rmtp = rmtp; - if (rmtp && put_compat_timespec(&rmt, rmtp)) + if (rmtp && compat_put_timespec(&rmt, rmtp)) return -EFAULT; } @@ -647,8 +647,8 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, int get_compat_itimerspec(struct itimerspec *dst, const struct compat_itimerspec __user *src) { - if (get_compat_timespec(&dst->it_interval, &src->it_interval) || - get_compat_timespec(&dst->it_value, &src->it_value)) + if (__compat_get_timespec(&dst->it_interval, &src->it_interval) || + __compat_get_timespec(&dst->it_value, &src->it_value)) return -EFAULT; return 0; } @@ -656,8 +656,8 @@ int get_compat_itimerspec(struct itimerspec *dst, int put_compat_itimerspec(struct compat_itimerspec __user *dst, const struct itimerspec *src) { - if (put_compat_timespec(&src->it_interval, &dst->it_interval) || - put_compat_timespec(&src->it_value, &dst->it_value)) + if (__compat_put_timespec(&src->it_interval, &dst->it_interval) || + __compat_put_timespec(&src->it_value, &dst->it_value)) return -EFAULT; return 0; } @@ -727,7 +727,7 @@ long compat_sys_clock_settime(clockid_t which_clock, mm_segment_t oldfs; struct timespec ts; - if (get_compat_timespec(&ts, tp)) + if (compat_get_timespec(&ts, tp)) return -EFAULT; oldfs = get_fs(); set_fs(KERNEL_DS); @@ -749,7 +749,7 @@ long compat_sys_clock_gettime(clockid_t which_clock, err = sys_clock_gettime(which_clock, (struct timespec __user *) &ts); set_fs(oldfs); - if (!err && put_compat_timespec(&ts, tp)) + if (!err && compat_put_timespec(&ts, tp)) return -EFAULT; return err; } @@ -789,7 +789,7 @@ long compat_sys_clock_getres(clockid_t which_clock, err = sys_clock_getres(which_clock, (struct timespec __user *) &ts); set_fs(oldfs); - if (!err && tp && put_compat_timespec(&ts, tp)) + if (!err && tp && compat_put_timespec(&ts, tp)) return -EFAULT; return err; } @@ -808,7 +808,7 @@ static long 
compat_clock_nanosleep_restart(struct restart_block *restart) set_fs(oldfs); if ((err == -ERESTART_RESTARTBLOCK) && rmtp && - put_compat_timespec(&tu, rmtp)) + compat_put_timespec(&tu, rmtp)) return -EFAULT; if (err == -ERESTART_RESTARTBLOCK) { @@ -827,7 +827,7 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, struct timespec in, out; struct restart_block *restart; - if (get_compat_timespec(&in, rqtp)) + if (compat_get_timespec(&in, rqtp)) return -EFAULT; oldfs = get_fs(); @@ -838,7 +838,7 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, set_fs(oldfs); if ((err == -ERESTART_RESTARTBLOCK) && rmtp && - put_compat_timespec(&out, rmtp)) + compat_put_timespec(&out, rmtp)) return -EFAULT; if (err == -ERESTART_RESTARTBLOCK) { @@ -1130,7 +1130,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval, set_fs(KERNEL_DS); ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); set_fs(old_fs); - if (put_compat_timespec(&t, interval)) + if (compat_put_timespec(&t, interval)) return -EFAULT; return ret; } diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index f9f44fd4d34d..55c8c9349cfe 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -183,7 +183,7 @@ COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || cmd == FUTEX_WAIT_BITSET || cmd == FUTEX_WAIT_REQUEUE_PI)) { - if (get_compat_timespec(&ts, utime)) + if (compat_get_timespec(&ts, utime)) return -EFAULT; if (!timespec_valid(&ts)) return -EINVAL; -- cgit v1.2.3 From fc0a5921561c71be2c334a335c1680f7930434d7 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Fri, 20 Dec 2013 22:41:07 +0100 Subject: reset: Add of_reset_control_get In some cases, you might need to deassert from reset an hardware block that doesn't associated to a struct device (CPUs, timers, etc.). Add a small helper to retrieve the reset controller from the device tree without the need to pass a struct device. Signed-off-by: Maxime Ripard Signed-off-by: Philipp Zabel --- drivers/reset/core.c | 39 ++++++++++++++++++++++++++++++--------- include/linux/reset.h | 4 ++++ 2 files changed, 34 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 217d2fa4fd95..baeaf82d40d9 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -126,15 +126,16 @@ int reset_control_deassert(struct reset_control *rstc) EXPORT_SYMBOL_GPL(reset_control_deassert); /** - * reset_control_get - Lookup and obtain a reference to a reset controller. - * @dev: device to be reset by the controller + * of_reset_control_get - Lookup and obtain a reference to a reset controller. + * @node: device to be reset by the controller * @id: reset line name * * Returns a struct reset_control or IS_ERR() condition containing errno. * * Use of id names is optional. 
*/ -struct reset_control *reset_control_get(struct device *dev, const char *id) +struct reset_control *of_reset_control_get(struct device_node *node, + const char *id) { struct reset_control *rstc = ERR_PTR(-EPROBE_DEFER); struct reset_controller_dev *r, *rcdev; @@ -143,13 +144,10 @@ struct reset_control *reset_control_get(struct device *dev, const char *id) int rstc_id; int ret; - if (!dev) - return ERR_PTR(-EINVAL); - if (id) - index = of_property_match_string(dev->of_node, + index = of_property_match_string(node, "reset-names", id); - ret = of_parse_phandle_with_args(dev->of_node, "resets", "#reset-cells", + ret = of_parse_phandle_with_args(node, "resets", "#reset-cells", index, &args); if (ret) return ERR_PTR(ret); @@ -184,12 +182,35 @@ struct reset_control *reset_control_get(struct device *dev, const char *id) return ERR_PTR(-ENOMEM); } - rstc->dev = dev; rstc->rcdev = rcdev; rstc->id = rstc_id; return rstc; } +EXPORT_SYMBOL_GPL(of_reset_control_get); + +/** + * reset_control_get - Lookup and obtain a reference to a reset controller. + * @dev: device to be reset by the controller + * @id: reset line name + * + * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * Use of id names is optional. + */ +struct reset_control *reset_control_get(struct device *dev, const char *id) +{ + struct reset_control *rstc; + + if (!dev) + return ERR_PTR(-EINVAL); + + rstc = of_reset_control_get(dev->of_node, id); + if (!IS_ERR(rstc)) + rstc->dev = dev; + + return rstc; +} EXPORT_SYMBOL_GPL(reset_control_get); /** diff --git a/include/linux/reset.h b/include/linux/reset.h index 6082247feab1..a398025d1138 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -1,6 +1,8 @@ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ +#include + struct device; struct reset_control; @@ -8,6 +10,8 @@ int reset_control_reset(struct reset_control *rstc); int reset_control_assert(struct reset_control *rstc); int reset_control_deassert(struct reset_control *rstc); +struct reset_control *of_reset_control_get(struct device_node *node, + const char *id); struct reset_control *reset_control_get(struct device *dev, const char *id); void reset_control_put(struct reset_control *rstc); struct reset_control *devm_reset_control_get(struct device *dev, const char *id); -- cgit v1.2.3 From 99adef310f682d6343cb40c1f6c9c25a4b3a450d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 16 Jan 2014 12:22:43 +0000 Subject: spi: Provide core support for DMA mapping transfers The process of DMA mapping buffers for SPI transfers does not vary between devices so in order to save duplication of code in drivers this can be factored out into the core, allowing it to be integrated with the work that is being done on factoring out the common elements from the data path including more sharing of dmaengine code. In order to use this masters need to provide a can_dma() operation and while the hardware is prepared they should ensure that DMA channels are provided in tx_dma and rx_dma. The core will then ensure that the buffers are mapped for DMA prior to calling transfer_one_message(). Currently the cleanup on error is not complete, this needs to be improved. 
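As a rough sketch of what a master driver is then expected to provide (the foo_ names, the struct layout and the 16-byte threshold are illustrative assumptions, not taken from any in-tree driver):

	#include <linux/dmaengine.h>
	#include <linux/spi/spi.h>

	struct foo_spi {
		struct dma_chan *tx_chan;	/* requested at probe time */
		struct dma_chan *rx_chan;
	};

	static bool foo_spi_can_dma(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
	{
		/* Only map transfers large enough to be worth the
		 * dma_map_single() overhead; short ones stay PIO. */
		return xfer->len > 16;
	}

	static int foo_spi_prepare_transfer_hardware(struct spi_master *master)
	{
		struct foo_spi *foo = spi_master_get_devdata(master);

		/* Publish the dmaengine channels so the core knows
		 * which devices to map the buffers against. */
		master->dma_tx = foo->tx_chan;
		master->dma_rx = foo->rx_chan;
		return 0;
	}

With master->can_dma set to foo_spi_can_dma at probe time, the core maps the buffers in spi_map_msg() before calling transfer_one_message() and unmaps them again when the message is finalized.
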
Signed-off-by: Mark Brown --- drivers/spi/spi.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/spi/spi.h | 18 +++++++++++ 2 files changed, 100 insertions(+) (limited to 'include/linux') diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 23756b0f9036..bcdaa74f1c8e 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include #include @@ -580,6 +582,77 @@ static void spi_set_cs(struct spi_device *spi, bool enable) spi->master->set_cs(spi, !enable); } +static int spi_map_msg(struct spi_master *master, struct spi_message *msg) +{ + struct device *dev = master->dev.parent; + struct device *tx_dev, *rx_dev; + struct spi_transfer *xfer; + + if (msg->is_dma_mapped || !master->can_dma) + return 0; + + tx_dev = &master->dma_tx->dev->device; + rx_dev = &master->dma_rx->dev->device; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (!master->can_dma(master, msg->spi, xfer)) + continue; + + if (xfer->tx_buf != NULL) { + xfer->tx_dma = dma_map_single(tx_dev, + (void *)xfer->tx_buf, + xfer->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, xfer->tx_dma)) { + dev_err(dev, "dma_map_single Tx failed\n"); + return -ENOMEM; + } + } + + if (xfer->rx_buf != NULL) { + xfer->rx_dma = dma_map_single(rx_dev, + xfer->rx_buf, xfer->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, xfer->rx_dma)) { + dev_err(dev, "dma_map_single Rx failed\n"); + dma_unmap_single(tx_dev, xfer->tx_dma, + xfer->len, DMA_TO_DEVICE); + return -ENOMEM; + } + } + } + + master->cur_msg_mapped = true; + + return 0; +} + +static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) +{ + struct spi_transfer *xfer; + struct device *tx_dev, *rx_dev; + + if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma) + return 0; + + tx_dev = &master->dma_tx->dev->device; + rx_dev = &master->dma_rx->dev->device; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (!master->can_dma(master, msg->spi, xfer)) + continue; + + if (xfer->rx_buf) + dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len, + DMA_FROM_DEVICE); + if (xfer->tx_buf) + dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len, + DMA_TO_DEVICE); + } + + return 0; +} + /* * spi_transfer_one_message - Default implementation of transfer_one_message() * @@ -752,6 +825,13 @@ static void spi_pump_messages(struct kthread_work *work) master->cur_msg_prepared = true; } + ret = spi_map_msg(master, master->cur_msg); + if (ret) { + master->cur_msg->status = ret; + spi_finalize_current_message(master); + return; + } + ret = master->transfer_one_message(master, master->cur_msg); if (ret) { dev_err(&master->dev, @@ -841,6 +921,8 @@ void spi_finalize_current_message(struct spi_master *master) queue_kthread_work(&master->kworker, &master->pump_messages); spin_unlock_irqrestore(&master->queue_lock, flags); + spi_unmap_msg(master, mesg); + if (master->cur_msg_prepared && master->unprepare_message) { ret = master->unprepare_message(master, mesg); if (ret) { diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index a1d4ca290862..b354dcbed55b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -25,6 +25,8 @@ #include #include +struct dma_chan; + /* * INTERFACES between SPI master-side drivers and SPI infrastructure. * (There's no SPI slave support for Linux yet...) 
@@ -386,6 +388,17 @@ struct spi_master { /* called on release() to free memory provided by spi_master */ void (*cleanup)(struct spi_device *spi); + /* + * Used to enable core support for DMA handling, if can_dma() + * exists and returns true then the transfer will be mapped + * prior to transfer_one() being called. The driver should + * not modify or store xfer and dma_tx and dma_rx must be set + * while the device is prepared. + */ + bool (*can_dma)(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *xfer); + /* * These hooks are for drivers that want to use the generic * master transfer queueing mechanism. If these are used, the @@ -404,6 +417,7 @@ struct spi_master { bool rt; bool auto_runtime_pm; bool cur_msg_prepared; + bool cur_msg_mapped; struct completion xfer_completion; int (*prepare_transfer_hardware)(struct spi_master *master); @@ -425,6 +439,10 @@ struct spi_master { /* gpio chip select */ int *cs_gpios; + + /* DMA channels for use with core dmaengine helpers */ + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; }; static inline void *spi_master_get_devdata(struct spi_master *master) -- cgit v1.2.3 From 588453c69dace39351129a038dd2796608f74bb3 Mon Sep 17 00:00:00 2001 From: Pantelis Antoniou Date: Fri, 8 Nov 2013 17:03:56 +0200 Subject: of: Introduce device tree node flag helpers. Helper functions for working with device node flags. Signed-off-by: Pantelis Antoniou Signed-off-by: Grant Likely --- include/linux/of.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include/linux') diff --git a/include/linux/of.h b/include/linux/of.h index 70c64ba17fa5..3d0593943f47 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -114,6 +114,26 @@ static inline void of_node_set_flag(struct device_node *n, unsigned long flag) set_bit(flag, &n->_flags); } +static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) +{ + clear_bit(flag, &n->_flags); +} + +static inline int of_property_check_flag(struct property *p, unsigned long flag) +{ + return test_bit(flag, &p->_flags); +} + +static inline void of_property_set_flag(struct property *p, unsigned long flag) +{ + set_bit(flag, &p->_flags); +} + +static inline void of_property_clear_flag(struct property *p, unsigned long flag) +{ + clear_bit(flag, &p->_flags); +} + extern struct device_node *of_find_all_nodes(struct device_node *prev); /* -- cgit v1.2.3 From 486c79b5002e4a75c1c3614180702196ec6d904d Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 24 Jan 2014 15:25:10 -0700 Subject: ACPI / numa: Fix acpi_get_node() prototype acpi_get_node() takes an acpi_handle, not an "acpi_handle *". This fixes the prototype and the definitions. Signed-off-by: Bjorn Helgaas Acked-by: Rafael J. 
Wysocki --- drivers/acpi/numa.c | 2 +- include/linux/acpi.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 9e6816ef280a..dab7dac6b1a8 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -331,7 +331,7 @@ int acpi_get_pxm(acpi_handle h) return -1; } -int acpi_get_node(acpi_handle *handle) +int acpi_get_node(acpi_handle handle) { int pxm, node = NUMA_NO_NODE; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 1151a1dcfe41..17bee650a0cb 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -260,13 +260,13 @@ extern void acpi_osi_setup(char *str); #ifdef CONFIG_ACPI_NUMA int acpi_get_pxm(acpi_handle handle); -int acpi_get_node(acpi_handle *handle); +int acpi_get_node(acpi_handle handle); #else static inline int acpi_get_pxm(acpi_handle handle) { return 0; } -static inline int acpi_get_node(acpi_handle *handle) +static inline int acpi_get_node(acpi_handle handle) { return 0; } -- cgit v1.2.3 From d79ed248d923f219053760376a33371894a6d47c Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 24 Jan 2014 15:48:01 -0700 Subject: ACPI / numa: Make __acpi_map_pxm_to_node(), acpi_get_pxm() static __acpi_map_pxm_to_node() and acpi_get_pxm() are only used within drivers/acpi/numa.c. This makes them static and removes their declarations. Signed-off-by: Bjorn Helgaas Acked-by: Rafael J. Wysocki --- drivers/acpi/numa.c | 4 ++-- include/acpi/acpi_numa.h | 1 - include/linux/acpi.h | 5 ----- 3 files changed, 2 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index a7e779bfa2f6..359a43bdd948 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -60,7 +60,7 @@ int node_to_pxm(int node) return node_to_pxm_map[node]; } -void __acpi_map_pxm_to_node(int pxm, int node) +static void __acpi_map_pxm_to_node(int pxm, int node) { if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm]) pxm_to_node_map[pxm] = node; @@ -314,7 +314,7 @@ int __init acpi_numa_init(void) return 0; } -int acpi_get_pxm(acpi_handle h) +static int acpi_get_pxm(acpi_handle h) { unsigned long long pxm; acpi_status status; diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h index 451823cb8837..94a37cd7fbda 100644 --- a/include/acpi/acpi_numa.h +++ b/include/acpi/acpi_numa.h @@ -13,7 +13,6 @@ extern int pxm_to_node(int); extern int node_to_pxm(int); -extern void __acpi_map_pxm_to_node(int, int); extern int acpi_map_pxm_to_node(int); extern unsigned char acpi_srat_revision; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 17bee650a0cb..6c29abbefd41 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -259,13 +259,8 @@ extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); extern void acpi_osi_setup(char *str); #ifdef CONFIG_ACPI_NUMA -int acpi_get_pxm(acpi_handle handle); int acpi_get_node(acpi_handle handle); #else -static inline int acpi_get_pxm(acpi_handle handle) -{ - return 0; -} static inline int acpi_get_node(acpi_handle handle) { return 0; -- cgit v1.2.3 From d1debafc381cb1fa340b5d0dc79637ad1d523770 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 3 Feb 2014 14:51:51 +0200 Subject: ASoC: davinci-mcasp: Rename platform data struct Rename the struct for the platform data: snd_platform_data -> davinci_mcasp_pdata Since we have users under arch/arm/mach-davinci/ for this struct add temporary define to avoid breakage. 
The arch code can be updated later to use the new struct name. Signed-off-by: Peter Ujfalusi Signed-off-by: Mark Brown --- include/linux/platform_data/davinci_asp.h | 4 +++- sound/soc/davinci/davinci-mcasp.c | 16 ++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 5245992b0367..85ad68f9206a 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -18,7 +18,7 @@ #include -struct snd_platform_data { +struct davinci_mcasp_pdata { u32 tx_dma_offset; u32 rx_dma_offset; int asp_chan_q; /* event queue number for ASP channel */ @@ -87,6 +87,8 @@ struct snd_platform_data { int tx_dma_channel; int rx_dma_channel; }; +/* TODO: Fix arch/arm/mach-davinci/ users and remove this define */ +#define snd_platform_data davinci_mcasp_pdata enum { MCASP_VERSION_1 = 0, /* DM646x */ diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 63b1ecc97cb1..e5fce2ed4dc6 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c @@ -823,28 +823,28 @@ static const struct snd_soc_component_driver davinci_mcasp_component = { }; /* Some HW specific values and defaults. The rest is filled in from DT. */ -static struct snd_platform_data dm646x_mcasp_pdata = { +static struct davinci_mcasp_pdata dm646x_mcasp_pdata = { .tx_dma_offset = 0x400, .rx_dma_offset = 0x400, .asp_chan_q = EVENTQ_0, .version = MCASP_VERSION_1, }; -static struct snd_platform_data da830_mcasp_pdata = { +static struct davinci_mcasp_pdata da830_mcasp_pdata = { .tx_dma_offset = 0x2000, .rx_dma_offset = 0x2000, .asp_chan_q = EVENTQ_0, .version = MCASP_VERSION_2, }; -static struct snd_platform_data am33xx_mcasp_pdata = { +static struct davinci_mcasp_pdata am33xx_mcasp_pdata = { .tx_dma_offset = 0, .rx_dma_offset = 0, .asp_chan_q = EVENTQ_0, .version = MCASP_VERSION_3, }; -static struct snd_platform_data dra7_mcasp_pdata = { +static struct davinci_mcasp_pdata dra7_mcasp_pdata = { .tx_dma_offset = 0x200, .rx_dma_offset = 0x284, .asp_chan_q = EVENTQ_0, @@ -912,11 +912,11 @@ err1: return ret; } -static struct snd_platform_data *davinci_mcasp_set_pdata_from_of( +static struct davinci_mcasp_pdata *davinci_mcasp_set_pdata_from_of( struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - struct snd_platform_data *pdata = NULL; + struct davinci_mcasp_pdata *pdata = NULL; const struct of_device_id *match = of_match_device(mcasp_dt_ids, &pdev->dev); struct of_phandle_args dma_spec; @@ -929,7 +929,7 @@ static struct snd_platform_data *davinci_mcasp_set_pdata_from_of( pdata = pdev->dev.platform_data; return pdata; } else if (match) { - pdata = (struct snd_platform_data *) match->data; + pdata = (struct davinci_mcasp_pdata*) match->data; } else { /* control shouldn't reach here. 
something is wrong */ ret = -EINVAL; @@ -1023,7 +1023,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev) { struct davinci_pcm_dma_params *dma_data; struct resource *mem, *ioarea, *res, *dat; - struct snd_platform_data *pdata; + struct davinci_mcasp_pdata *pdata; struct davinci_mcasp *mcasp; int ret; -- cgit v1.2.3 From a028c6da34d434e35ba8322568c756ea97ff3c18 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Sat, 14 Dec 2013 16:23:51 +0100 Subject: ARM: shmobile: wait for MSTP clock status to toggle, when enabling it On r-/sh-mobile SoCs MSTP clocks are used by the runtime PM to dynamically enable and disable peripheral clocks. To make sure the clock has really started we have to read back its status register until it confirms success. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Simon Horman --- drivers/sh/clk/cpg.c | 38 ++++++++++++++++++++++++++++++++++++++ include/linux/sh_clk.h | 19 ++++++++++++------- 2 files changed, 50 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index 1ebe67cd1833..7442bc130055 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c @@ -36,9 +36,47 @@ static void sh_clk_write(int value, struct clk *clk) iowrite32(value, clk->mapped_reg); } +static unsigned int r8(const void __iomem *addr) +{ + return ioread8(addr); +} + +static unsigned int r16(const void __iomem *addr) +{ + return ioread16(addr); +} + +static unsigned int r32(const void __iomem *addr) +{ + return ioread32(addr); +} + static int sh_clk_mstp_enable(struct clk *clk) { sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk); + if (clk->status_reg) { + unsigned int (*read)(const void __iomem *addr); + int i; + void __iomem *mapped_status = (phys_addr_t)clk->status_reg - + (phys_addr_t)clk->enable_reg + clk->mapped_reg; + + if (clk->flags & CLK_ENABLE_REG_8BIT) + read = r8; + else if (clk->flags & CLK_ENABLE_REG_16BIT) + read = r16; + else + read = r32; + + for (i = 1000; + (read(mapped_status) & (1 << clk->enable_bit)) && i; + i--) + cpu_relax(); + if (!i) { + pr_err("cpg: failed to enable %p[%d]\n", + clk->enable_reg, clk->enable_bit); + return -ETIMEDOUT; + } + } return 0; } diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h index 60c72395ec6b..1f208b2a1ed6 100644 --- a/include/linux/sh_clk.h +++ b/include/linux/sh_clk.h @@ -52,6 +52,7 @@ struct clk { unsigned long flags; void __iomem *enable_reg; + void __iomem *status_reg; unsigned int enable_bit; void __iomem *mapped_reg; @@ -116,22 +117,26 @@ long clk_round_parent(struct clk *clk, unsigned long target, unsigned long *best_freq, unsigned long *parent_freq, unsigned int div_min, unsigned int div_max); -#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _flags) \ +#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _status_reg, _flags) \ { \ .parent = _parent, \ .enable_reg = (void __iomem *)_enable_reg, \ .enable_bit = _enable_bit, \ + .status_reg = _status_reg, \ .flags = _flags, \ } -#define SH_CLK_MSTP32(_p, _r, _b, _f) \ - SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_32BIT) +#define SH_CLK_MSTP32(_p, _r, _b, _f) \ + SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_32BIT) -#define SH_CLK_MSTP16(_p, _r, _b, _f) \ - SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_16BIT) +#define SH_CLK_MSTP32_STS(_p, _r, _b, _s, _f) \ + SH_CLK_MSTP(_p, _r, _b, _s, _f | CLK_ENABLE_REG_32BIT) -#define SH_CLK_MSTP8(_p, _r, _b, _f) \ - SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_8BIT) +#define SH_CLK_MSTP16(_p, _r, _b, _f) \ + SH_CLK_MSTP(_p, _r, 
_b, 0, _f | CLK_ENABLE_REG_16BIT) + +#define SH_CLK_MSTP8(_p, _r, _b, _f) \ + SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_8BIT) int sh_clk_mstp_register(struct clk *clks, int nr); -- cgit v1.2.3 From cb2518ca9f06dfcfa3d175773631bfb1e461bdc7 Mon Sep 17 00:00:00 2001 From: Stephane Grosjean Date: Wed, 15 Jan 2014 09:50:13 +0100 Subject: can: add ability to allocate CANFD frame in skb data This patch adds the ability of allocating a CANFD frame data structure in the skb data area. Signed-off-by: Stephane Grosjean Acked-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 24 ++++++++++++++++++++++++ include/linux/can/dev.h | 2 ++ 2 files changed, 26 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 13a909822e25..cb584ea00331 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -521,6 +521,30 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) } EXPORT_SYMBOL_GPL(alloc_can_skb); +struct sk_buff *alloc_canfd_skb(struct net_device *dev, + struct canfd_frame **cfd) +{ + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + + sizeof(struct canfd_frame)); + if (unlikely(!skb)) + return NULL; + + skb->protocol = htons(ETH_P_CANFD); + skb->pkt_type = PACKET_BROADCAST; + skb->ip_summed = CHECKSUM_UNNECESSARY; + + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = dev->ifindex; + + *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame)); + memset(*cfd, 0, sizeof(struct canfd_frame)); + + return skb; +} +EXPORT_SYMBOL_GPL(alloc_canfd_skb); + struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf) { struct sk_buff *skb; diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index fb0ab651a041..dc5f9026b67f 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -124,6 +124,8 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); void can_free_echo_skb(struct net_device *dev, unsigned int idx); struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); +struct sk_buff *alloc_canfd_skb(struct net_device *dev, + struct canfd_frame **cfd); struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf); -- cgit v1.2.3 From 3a2eba9bd0a6447dfbc01635e4cd0689f5f2bdad Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 28 Jan 2014 20:17:03 +0000 Subject: spi: Provide core support for full duplex devices It is fairly common for SPI devices to require that one or both transfer directions is always active. Currently drivers open code this in various ways with varying degrees of efficiency. Start factoring this out by providing flags SPI_MASTER_MUST_TX and SPI_MASTER_MUST_RX. These will cause the core to provide buffers for the requested direction if none are specified in the underlying transfer. Currently this is fairly inefficient since we actually allocate a data buffer which may get large, support for mapping transfers using a scatterlist will allow us to avoid this for DMA based transfers. 
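As an illustration only (not part of this patch), a controller driver whose hardware always clocks both data lines could request the core-provided dummy buffers by setting the new flags at probe time; the foo_* names below are hypothetical:

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
	if (!master)
		return -ENOMEM;

	/* The controller shifts data in and out on every clock, so let
	 * the core substitute zeroed dummy buffers for any transfer that
	 * is missing tx_buf or rx_buf instead of open coding it here. */
	master->flags = SPI_MASTER_MUST_TX | SPI_MASTER_MUST_RX;

	return spi_register_master(master);
}

The core then sizes master->dummy_tx/dummy_rx for the largest such transfer in the message and releases them once the queue goes idle, as in the hunks below.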
Signed-off-by: Mark Brown --- drivers/spi/spi.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/spi/spi.h | 6 ++++++ 2 files changed, 53 insertions(+) (limited to 'include/linux') diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index bcdaa74f1c8e..bb7cf561c311 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -587,6 +587,49 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg) struct device *dev = master->dev.parent; struct device *tx_dev, *rx_dev; struct spi_transfer *xfer; + void *tmp; + size_t max_tx, max_rx; + + if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { + max_tx = 0; + max_rx = 0; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if ((master->flags & SPI_MASTER_MUST_TX) && + !xfer->tx_buf) + max_tx = max(xfer->len, max_tx); + if ((master->flags & SPI_MASTER_MUST_RX) && + !xfer->rx_buf) + max_rx = max(xfer->len, max_rx); + } + + if (max_tx) { + tmp = krealloc(master->dummy_tx, max_tx, + GFP_KERNEL | GFP_DMA); + if (!tmp) + return -ENOMEM; + master->dummy_tx = tmp; + memset(tmp, 0, max_tx); + } + + if (max_rx) { + tmp = krealloc(master->dummy_rx, max_rx, + GFP_KERNEL | GFP_DMA); + if (!tmp) + return -ENOMEM; + master->dummy_rx = tmp; + } + + if (max_tx || max_rx) { + list_for_each_entry(xfer, &msg->transfers, + transfer_list) { + if (!xfer->tx_buf) + xfer->tx_buf = master->dummy_tx; + if (!xfer->rx_buf) + xfer->rx_buf = master->dummy_rx; + } + } + } if (msg->is_dma_mapped || !master->can_dma) return 0; @@ -759,6 +802,10 @@ static void spi_pump_messages(struct kthread_work *work) } master->busy = false; spin_unlock_irqrestore(&master->queue_lock, flags); + kfree(master->dummy_rx); + master->dummy_rx = NULL; + kfree(master->dummy_tx); + master->dummy_tx = NULL; if (master->unprepare_transfer_hardware && master->unprepare_transfer_hardware(master)) dev_err(&master->dev, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index b354dcbed55b..31a5b0ee93ec 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -347,6 +347,8 @@ struct spi_master { #define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ #define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ #define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ +#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ +#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ /* lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; @@ -443,6 +445,10 @@ struct spi_master { /* DMA channels for use with core dmaengine helpers */ struct dma_chan *dma_tx; struct dma_chan *dma_rx; + + /* dummy data for full duplex devices */ + void *dummy_rx; + void *dummy_tx; }; static inline void *spi_master_get_devdata(struct spi_master *master) -- cgit v1.2.3 From 6ad45a27cbe343ec8d7888e5edf6335499a4b555 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sun, 2 Feb 2014 13:47:47 +0000 Subject: spi: Make core DMA mapping functions generate scatterlists We cannot unconditionally use dma_map_single() to map data for use with SPI since transfers may exceed a page and virtual addresses may not be provided with physically contiguous pages. Further, addresses allocated using vmalloc() need to be mapped differently to other addresses. Currently only the MXS driver handles all this, a few drivers do handle the possibility that buffers may not be physically contiguous which is the main potential problem but many don't even do that. 
Factoring this out into the core will make it easier for drivers to do a good job so if the driver is using the core DMA code then generate a scatterlist instead of mapping to a single address so do that. This code is mainly based on a combination of the existing code in the MXS and PXA2xx drivers. In future we should be able to extend it to allow the core to concatenate adjacent transfers if they are compatible, improving performance. Currently for simplicity clients are not allowed to use the scatterlist when they do DMA mapping, in the future the existing single address mappings will be replaced with use of the scatterlist most likely as part of pre-verifying transfers. This change makes it mandatory to use scatterlists when using the core DMA mapping so update the s3c64xx driver to do this when used with dmaengine. Doing so makes the code more ugly but it is expected that the old s3c-dma code can be removed very soon. Signed-off-by: Mark Brown --- drivers/spi/spi-s3c64xx.c | 14 +++++-- drivers/spi/spi.c | 101 ++++++++++++++++++++++++++++++++++------------ include/linux/spi/spi.h | 7 ++++ 3 files changed, 94 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index e515b8a6f590..25c9bd409a87 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -381,7 +381,7 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd, #else static void prepare_dma(struct s3c64xx_spi_dma_data *dma, - unsigned len, dma_addr_t buf) + struct sg_table *sgt) { struct s3c64xx_spi_driver_data *sdd; struct dma_slave_config config; @@ -407,8 +407,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma, dmaengine_slave_config(dma->ch, &config); } - desc = dmaengine_prep_slave_single(dma->ch, buf, len, - dma->direction, DMA_PREP_INTERRUPT); + desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents, + dma->direction, DMA_PREP_INTERRUPT); desc->callback = s3c64xx_spi_dmacb; desc->callback_param = dma; @@ -515,7 +515,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, chcfg |= S3C64XX_SPI_CH_TXCH_ON; if (dma_mode) { modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; +#ifndef CONFIG_S3C_DMA + prepare_dma(&sdd->tx_dma, &xfer->tx_sg); +#else prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma); +#endif } else { switch (sdd->cur_bpw) { case 32: @@ -547,7 +551,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) | S3C64XX_SPI_PACKET_CNT_EN, regs + S3C64XX_SPI_PACKET_CNT); +#ifndef CONFIG_S3C_DMA + prepare_dma(&sdd->rx_dma, &xfer->rx_sg); +#else prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma); +#endif } } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index bb7cf561c311..49313dd0a144 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -582,13 +582,70 @@ static void spi_set_cs(struct spi_device *spi, bool enable) spi->master->set_cs(spi, !enable); } +static int spi_map_buf(struct spi_master *master, struct device *dev, + struct sg_table *sgt, void *buf, size_t len, + enum dma_data_direction dir) +{ + const bool vmalloced_buf = is_vmalloc_addr(buf); + const int desc_len = vmalloced_buf ? 
PAGE_SIZE : master->max_dma_len; + const int sgs = DIV_ROUND_UP(len, desc_len); + struct page *vm_page; + void *sg_buf; + size_t min; + int i, ret; + + ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); + if (ret != 0) + return ret; + + for (i = 0; i < sgs; i++) { + min = min_t(size_t, len, desc_len); + + if (vmalloced_buf) { + vm_page = vmalloc_to_page(buf); + if (!vm_page) { + sg_free_table(sgt); + return -ENOMEM; + } + sg_buf = page_address(vm_page) + + ((size_t)buf & ~PAGE_MASK); + } else { + sg_buf = buf; + } + + sg_set_buf(&sgt->sgl[i], sg_buf, min); + + buf += min; + len -= min; + } + + ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); + if (ret < 0) { + sg_free_table(sgt); + return ret; + } + + sgt->nents = ret; + + return 0; +} + +static void spi_unmap_buf(struct spi_master *master, struct device *dev, + struct sg_table *sgt, enum dma_data_direction dir) +{ + if (sgt->orig_nents) { + dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); + sg_free_table(sgt); + } +} + static int spi_map_msg(struct spi_master *master, struct spi_message *msg) { - struct device *dev = master->dev.parent; struct device *tx_dev, *rx_dev; struct spi_transfer *xfer; void *tmp; size_t max_tx, max_rx; + int ret; if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { max_tx = 0; @@ -631,7 +688,7 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg) } } - if (msg->is_dma_mapped || !master->can_dma) + if (!master->can_dma) return 0; tx_dev = &master->dma_tx->dev->device; @@ -642,25 +699,21 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg) continue; if (xfer->tx_buf != NULL) { - xfer->tx_dma = dma_map_single(tx_dev, - (void *)xfer->tx_buf, - xfer->len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, xfer->tx_dma)) { - dev_err(dev, "dma_map_single Tx failed\n"); - return -ENOMEM; - } + ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, + (void *)xfer->tx_buf, xfer->len, + DMA_TO_DEVICE); + if (ret != 0) + return ret; } if (xfer->rx_buf != NULL) { - xfer->rx_dma = dma_map_single(rx_dev, - xfer->rx_buf, xfer->len, - DMA_FROM_DEVICE); - if (dma_mapping_error(dev, xfer->rx_dma)) { - dev_err(dev, "dma_map_single Rx failed\n"); - dma_unmap_single(tx_dev, xfer->tx_dma, - xfer->len, DMA_TO_DEVICE); - return -ENOMEM; + ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, + xfer->rx_buf, xfer->len, + DMA_FROM_DEVICE); + if (ret != 0) { + spi_unmap_buf(master, tx_dev, &xfer->tx_sg, + DMA_TO_DEVICE); + return ret; } } } @@ -675,7 +728,7 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) struct spi_transfer *xfer; struct device *tx_dev, *rx_dev; - if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma) + if (!master->cur_msg_mapped || !master->can_dma) return 0; tx_dev = &master->dma_tx->dev->device; @@ -685,12 +738,8 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) if (!master->can_dma(master, msg->spi, xfer)) continue; - if (xfer->rx_buf) - dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len, - DMA_FROM_DEVICE); - if (xfer->tx_buf) - dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len, - DMA_TO_DEVICE); + spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); + spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); } return 0; @@ -1503,6 +1552,8 @@ int spi_register_master(struct spi_master *master) mutex_init(&master->bus_lock_mutex); master->bus_lock_flag = 0; init_completion(&master->xfer_completion); + if (!master->max_dma_len) + master->max_dma_len = INT_MAX; /* register the device, then 
userspace will see it. * registration fails if the bus ID is in use. diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 31a5b0ee93ec..0c23c835d48b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -24,6 +24,7 @@ #include #include #include +#include struct dma_chan; @@ -268,6 +269,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @auto_runtime_pm: the core should ensure a runtime PM reference is held * while the hardware is prepared, using the parent * device for the spidev + * @max_dma_len: Maximum length of a DMA transfer for the device. * @prepare_transfer_hardware: a message will soon arrive from the queue * so the subsystem requests the driver to prepare the transfer hardware * by issuing this call @@ -421,6 +423,7 @@ struct spi_master { bool cur_msg_prepared; bool cur_msg_mapped; struct completion xfer_completion; + size_t max_dma_len; int (*prepare_transfer_hardware)(struct spi_master *master); int (*transfer_one_message)(struct spi_master *master, @@ -533,6 +536,8 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum); * (optionally) changing the chipselect status, then starting * the next transfer or completing this @spi_message. * @transfer_list: transfers are sequenced through @spi_message.transfers + * @tx_sg: Scatterlist for transmit, currently not for client use + * @rx_sg: Scatterlist for receive, currently not for client use * * SPI transfers always write the same number of bytes as they read. * Protocol drivers should always provide @rx_buf and/or @tx_buf. @@ -600,6 +605,8 @@ struct spi_transfer { dma_addr_t tx_dma; dma_addr_t rx_dma; + struct sg_table tx_sg; + struct sg_table rx_sg; unsigned cs_change:1; unsigned tx_nbits:3; -- cgit v1.2.3 From d8ca16db6bb23d03fcb794df44bae64ae976f27c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 23 Jan 2014 16:20:29 +0100 Subject: mac80211: add length check in ieee80211_is_robust_mgmt_frame() A few places weren't checking that the frame passed to the function actually has enough data even though the function clearly documents it must have a payload byte. Make this safer by changing the function to take an skb and checking the length inside. The old version is preserved for now as the rtl* drivers use it and don't have a correct skb. 
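As a minimal sketch of the intended calling convention (condensed from the hunks below, not new code): callers that hold a complete skb use the length-checked helper, while drivers such as rtl* that only have a header pointer keep the underscore-prefixed variant and must guarantee the payload byte themselves.

	/* mac80211-style caller: the helper now validates skb->len itself. */
	if (ieee80211_is_robust_mgmt_frame(rx->skb) && !rx->key)
		return -EACCES;

	/* Driver with only a struct ieee80211_hdr pointer: keep the
	 * unchecked variant, as the rtl* rx paths below do. */
	if (_ieee80211_is_robust_mgmt_frame(hdr) &&
	    ieee80211_has_protected(hdr->frame_control))
		rx_status->flag &= ~RX_FLAG_DECRYPTED;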
Signed-off-by: Johannes Berg --- drivers/net/wireless/rtlwifi/rtl8188ee/trx.c | 2 +- drivers/net/wireless/rtlwifi/rtl8192ce/trx.c | 2 +- drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 2 +- drivers/net/wireless/rtlwifi/rtl8723ae/trx.c | 2 +- include/linux/ieee80211.h | 15 +++++++++++++-- net/mac80211/rx.c | 13 ++++++------- net/mac80211/tx.c | 9 ++++----- net/mac80211/wpa.c | 2 +- 8 files changed, 28 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c index aece6c9cccf1..27ace3054d56 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c @@ -452,7 +452,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, /* During testing, hdr was NULL */ return false; } - if ((ieee80211_is_robust_mgmt_frame(hdr)) && + if ((_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag &= ~RX_FLAG_DECRYPTED; else diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c index 52abf0a862fa..114858d46158 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c @@ -393,7 +393,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, /* In testing, hdr was NULL here */ return false; } - if ((ieee80211_is_robust_mgmt_frame(hdr)) && + if ((_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag &= ~RX_FLAG_DECRYPTED; else diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index 27efbcdac6a9..163a681962c6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -310,7 +310,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, /* during testing, hdr was NULL here */ return false; } - if ((ieee80211_is_robust_mgmt_frame(hdr)) && + if ((_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag &= ~RX_FLAG_DECRYPTED; else diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c index 50b7be3f3a60..721162cacc3a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c @@ -334,7 +334,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw, /* during testing, hdr could be NULL here */ return false; } - if ((ieee80211_is_robust_mgmt_frame(hdr)) && + if ((_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag &= ~RX_FLAG_DECRYPTED; else diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index e526a8cecb70..923c478030a3 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2192,10 +2192,10 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) } /** - * ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame + * _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame * @hdr: the frame (buffer must include at least the first octet of payload) */ -static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) +static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) { if (ieee80211_is_disassoc(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control)) @@ -2223,6 +2223,17 @@ static inline bool 
ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) return false; } +/** + * ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame + * @skb: the skb containing the frame, length will be checked + */ +static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb) +{ + if (skb->len < 25) + return false; + return _ieee80211_is_robust_mgmt_frame((void *)skb->data); +} + /** * ieee80211_is_public_action - check if frame is a public action frame * @hdr: the frame diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c24ca0d0f469..3b7a750ebc70 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -599,10 +599,10 @@ static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1)) + if (is_multicast_ether_addr(hdr->addr1)) return 0; - return ieee80211_is_robust_mgmt_frame(hdr); + return ieee80211_is_robust_mgmt_frame(skb); } @@ -610,10 +610,10 @@ static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1)) + if (!is_multicast_ether_addr(hdr->addr1)) return 0; - return ieee80211_is_robust_mgmt_frame(hdr); + return ieee80211_is_robust_mgmt_frame(skb); } @@ -626,7 +626,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da)) return -1; - if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr)) + if (!ieee80211_is_robust_mgmt_frame(skb)) return -1; /* not a robust management frame */ mmie = (struct ieee80211_mmie *) @@ -1845,8 +1845,7 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) * having configured keys. 
*/ if (unlikely(ieee80211_is_action(fc) && !rx->key && - ieee80211_is_robust_mgmt_frame( - (struct ieee80211_hdr *) rx->skb->data))) + ieee80211_is_robust_mgmt_frame(rx->skb))) return -EACCES; } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index bb990ecfa655..07a7f38dc348 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -452,8 +452,7 @@ static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta, if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP)) return 0; - if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) - skb->data)) + if (!ieee80211_is_robust_mgmt_frame(skb)) return 0; return 1; @@ -567,7 +566,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) tx->key = key; else if (ieee80211_is_mgmt(hdr->frame_control) && is_multicast_ether_addr(hdr->addr1) && - ieee80211_is_robust_mgmt_frame(hdr) && + ieee80211_is_robust_mgmt_frame(tx->skb) && (key = rcu_dereference(tx->sdata->default_mgmt_key))) tx->key = key; else if (is_multicast_ether_addr(hdr->addr1) && @@ -582,12 +581,12 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) tx->key = NULL; else if (tx->skb->protocol == tx->sdata->control_port_protocol) tx->key = NULL; - else if (ieee80211_is_robust_mgmt_frame(hdr) && + else if (ieee80211_is_robust_mgmt_frame(tx->skb) && !(ieee80211_is_action(hdr->frame_control) && tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP))) tx->key = NULL; else if (ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_robust_mgmt_frame(hdr)) + !ieee80211_is_robust_mgmt_frame(tx->skb)) tx->key = NULL; else { I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 4aed45c8ee3b..b8600e3c29c8 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -494,7 +494,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control) && - !ieee80211_is_robust_mgmt_frame(hdr)) + !ieee80211_is_robust_mgmt_frame(skb)) return RX_CONTINUE; data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - -- cgit v1.2.3 From b4ba544c8c1349afd44e10aebec03c90e9b71d98 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 24 Jan 2014 14:41:44 +0100 Subject: mac80211: fix bufferable MMPDU RX handling Action, disassoc and deauth frames are bufferable, and as such don't have the PM bit in the frame control field reserved which means we need to react to the bit when receiving in such a frame. Fix this by introducing a new helper ieee80211_is_bufferable_mmpdu() and using it for the RX path that currently ignores the PM bit in any non-data frames for doze->wake transitions, but listens to it in all frames for wake->doze transitions, both of which are wrong. Also use the new helper in the TX path to clean up the code. 
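For illustration, a condensed sketch of how the new helper is meant to be used on both paths (simplified from the hunks below; ps_transition() is only a placeholder for the existing sta_ps_start/sta_ps_end handling):

	/* TX: only bufferable MMPDUs may be power-save buffered. */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
		return TX_CONTINUE;
	}

	/* RX: react to the PM bit only in frames where it is not reserved,
	 * i.e. data frames and bufferable management frames. */
	if (!ieee80211_is_mgmt(hdr->frame_control) ||
	    ieee80211_is_bufferable_mmpdu(hdr->frame_control))
		ps_transition(sta, ieee80211_has_pm(hdr->frame_control));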
Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 14 ++++++++++++++ net/mac80211/rx.c | 19 ++++++++----------- net/mac80211/tx.c | 5 +---- 3 files changed, 23 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 923c478030a3..1e3912d1b029 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -596,6 +596,20 @@ static inline int ieee80211_is_qos_nullfunc(__le16 fc) cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); } +/** + * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU + * @fc: frame control field in little-endian byteorder + */ +static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc) +{ + /* IEEE 802.11-2012, definition of "bufferable management frame"; + * note that this ignores the IBSS special case. */ + return ieee80211_is_mgmt(fc) && + (ieee80211_is_action(fc) || + ieee80211_is_disassoc(fc) || + ieee80211_is_deauth(fc)); +} + /** * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set * @seq_ctrl: frame sequence control bytes in little-endian byteorder diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 3b7a750ebc70..79a89fe9d616 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1311,18 +1311,15 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) !ieee80211_has_morefrags(hdr->frame_control) && !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && (rx->sdata->vif.type == NL80211_IFTYPE_AP || - rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { + rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && + /* PM bit is only checked in frames where it isn't reserved, + * in AP mode it's reserved in non-bufferable management frames + * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) + */ + (!ieee80211_is_mgmt(hdr->frame_control) || + ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { if (test_sta_flag(sta, WLAN_STA_PS_STA)) { - /* - * Ignore doze->wake transitions that are - * indicated by non-data frames, the standard - * is unclear here, but for example going to - * PS mode and then scanning would cause a - * doze->wake transition for the probe request, - * and that is clearly undesirable. - */ - if (ieee80211_is_data(hdr->frame_control) && - !ieee80211_has_pm(hdr->frame_control)) + if (!ieee80211_has_pm(hdr->frame_control)) sta_ps_end(sta); } else { if (ieee80211_has_pm(hdr->frame_control)) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 07a7f38dc348..5476a69b45c9 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -522,11 +522,8 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) return TX_CONTINUE; - /* only deauth, disassoc and action are bufferable MMPDUs */ if (ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_deauth(hdr->frame_control) && - !ieee80211_is_disassoc(hdr->frame_control) && - !ieee80211_is_action(hdr->frame_control)) { + !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) { if (tx->flags & IEEE80211_TX_UNICAST) info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; return TX_CONTINUE; -- cgit v1.2.3 From b66548e2a9baf65ccebeb3750f0ab9ddbef500f6 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Fri, 17 Jan 2014 12:08:30 +0100 Subject: of: Increase MAX_PHANDLE_ARGS arm-smmu driver uses of_parse_phandle_with_args when parsing DT information to determine stream IDs for a master device. Thus the number of stream IDs per master device is bound by MAX_PHANDLE_ARGS. 
To support Calxeda ECX-2000 hardware arm-smmu driver requires a slightly higher value for MAX_PHANDLE_ARGS as this hardware has 10 stream IDs for one master device. Increasing it to 16 seems a reasonable choice. Cc: Grant Likely Cc: Rob Herring Cc: devicetree@vger.kernel.org Cc: Andreas Herrmann Acked-by: Rob Herring Signed-off-by: Andreas Herrmann Signed-off-by: Grant Likely --- include/linux/of.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/of.h b/include/linux/of.h index 3d0593943f47..0ea516ed22c0 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -67,7 +67,7 @@ struct device_node { #endif }; -#define MAX_PHANDLE_ARGS 8 +#define MAX_PHANDLE_ARGS 16 struct of_phandle_args { struct device_node *np; int args_count; -- cgit v1.2.3 From 8c78e38025060a00155a73bf722152c156242490 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 4 Feb 2014 09:41:04 +0100 Subject: wireless: sort and extend element ID list The element ID list is currently almost sorted by amendment or similar topic, but the order is difficult to maintain and not very transparent. Sort the list by ID instead, and add a lot of missing IDs. Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 170 +++++++++++++++++++++++++++++----------------- 1 file changed, 106 insertions(+), 64 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 1e3912d1b029..5f349355ee54 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1650,51 +1650,22 @@ enum ieee80211_reasoncode { enum ieee80211_eid { WLAN_EID_SSID = 0, WLAN_EID_SUPP_RATES = 1, - WLAN_EID_FH_PARAMS = 2, + WLAN_EID_FH_PARAMS = 2, /* reserved now */ WLAN_EID_DS_PARAMS = 3, WLAN_EID_CF_PARAMS = 4, WLAN_EID_TIM = 5, WLAN_EID_IBSS_PARAMS = 6, - WLAN_EID_CHALLENGE = 16, - WLAN_EID_COUNTRY = 7, WLAN_EID_HP_PARAMS = 8, WLAN_EID_HP_TABLE = 9, WLAN_EID_REQUEST = 10, - WLAN_EID_QBSS_LOAD = 11, WLAN_EID_EDCA_PARAM_SET = 12, WLAN_EID_TSPEC = 13, WLAN_EID_TCLAS = 14, WLAN_EID_SCHEDULE = 15, - WLAN_EID_TS_DELAY = 43, - WLAN_EID_TCLAS_PROCESSING = 44, - WLAN_EID_QOS_CAPA = 46, - /* 802.11z */ - WLAN_EID_LINK_ID = 101, - /* 802.11s */ - WLAN_EID_MESH_CONFIG = 113, - WLAN_EID_MESH_ID = 114, - WLAN_EID_LINK_METRIC_REPORT = 115, - WLAN_EID_CONGESTION_NOTIFICATION = 116, - WLAN_EID_PEER_MGMT = 117, - WLAN_EID_CHAN_SWITCH_PARAM = 118, - WLAN_EID_MESH_AWAKE_WINDOW = 119, - WLAN_EID_BEACON_TIMING = 120, - WLAN_EID_MCCAOP_SETUP_REQ = 121, - WLAN_EID_MCCAOP_SETUP_RESP = 122, - WLAN_EID_MCCAOP_ADVERT = 123, - WLAN_EID_MCCAOP_TEARDOWN = 124, - WLAN_EID_GANN = 125, - WLAN_EID_RANN = 126, - WLAN_EID_PREQ = 130, - WLAN_EID_PREP = 131, - WLAN_EID_PERR = 132, - WLAN_EID_PXU = 137, - WLAN_EID_PXUC = 138, - WLAN_EID_AUTH_MESH_PEER_EXCH = 139, - WLAN_EID_MIC = 140, - + WLAN_EID_CHALLENGE = 16, + /* 17-31 reserved for challenge text extension */ WLAN_EID_PWR_CONSTRAINT = 32, WLAN_EID_PWR_CAPABILITY = 33, WLAN_EID_TPC_REQUEST = 34, @@ -1705,66 +1676,114 @@ enum ieee80211_eid { WLAN_EID_MEASURE_REPORT = 39, WLAN_EID_QUIET = 40, WLAN_EID_IBSS_DFS = 41, - WLAN_EID_ERP_INFO = 42, - WLAN_EID_EXT_SUPP_RATES = 50, - + WLAN_EID_TS_DELAY = 43, + WLAN_EID_TCLAS_PROCESSING = 44, WLAN_EID_HT_CAPABILITY = 45, - WLAN_EID_HT_OPERATION = 61, - WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62, - + WLAN_EID_QOS_CAPA = 46, + /* 47 reserved for Broadcom */ WLAN_EID_RSN = 48, - WLAN_EID_MMIE = 76, - WLAN_EID_VENDOR_SPECIFIC = 221, - WLAN_EID_QOS_PARAMETER = 222, - + WLAN_EID_802_15_COEX = 
49, + WLAN_EID_EXT_SUPP_RATES = 50, WLAN_EID_AP_CHAN_REPORT = 51, WLAN_EID_NEIGHBOR_REPORT = 52, WLAN_EID_RCPI = 53, + WLAN_EID_MOBILITY_DOMAIN = 54, + WLAN_EID_FAST_BSS_TRANSITION = 55, + WLAN_EID_TIMEOUT_INTERVAL = 56, + WLAN_EID_RIC_DATA = 57, + WLAN_EID_DSE_REGISTERED_LOCATION = 58, + WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59, + WLAN_EID_EXT_CHANSWITCH_ANN = 60, + WLAN_EID_HT_OPERATION = 61, + WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62, WLAN_EID_BSS_AVG_ACCESS_DELAY = 63, WLAN_EID_ANTENNA_INFO = 64, WLAN_EID_RSNI = 65, WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66, WLAN_EID_BSS_AVAILABLE_CAPACITY = 67, WLAN_EID_BSS_AC_ACCESS_DELAY = 68, + WLAN_EID_TIME_ADVERTISEMENT = 69, WLAN_EID_RRM_ENABLED_CAPABILITIES = 70, WLAN_EID_MULTIPLE_BSSID = 71, WLAN_EID_BSS_COEX_2040 = 72, WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74, - WLAN_EID_EXT_CAPABILITY = 127, - - WLAN_EID_MOBILITY_DOMAIN = 54, - WLAN_EID_FAST_BSS_TRANSITION = 55, - WLAN_EID_TIMEOUT_INTERVAL = 56, - WLAN_EID_RIC_DATA = 57, WLAN_EID_RIC_DESCRIPTOR = 75, - - WLAN_EID_DSE_REGISTERED_LOCATION = 58, - WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59, - WLAN_EID_EXT_CHANSWITCH_ANN = 60, - - WLAN_EID_VHT_CAPABILITY = 191, - WLAN_EID_VHT_OPERATION = 192, - WLAN_EID_OPMODE_NOTIF = 199, - WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, - WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, - WLAN_EID_EXTENDED_BSS_LOAD = 193, - WLAN_EID_VHT_TX_POWER_ENVELOPE = 195, - WLAN_EID_AID = 197, - WLAN_EID_QUIET_CHANNEL = 198, - - /* 802.11ad */ + WLAN_EID_MMIE = 76, + WLAN_EID_ASSOC_COMEBACK_TIME = 77, + WLAN_EID_EVENT_REQUEST = 78, + WLAN_EID_EVENT_REPORT = 79, + WLAN_EID_DIAGNOSTIC_REQUEST = 80, + WLAN_EID_DIAGNOSTIC_REPORT = 81, + WLAN_EID_LOCATION_PARAMS = 82, WLAN_EID_NON_TX_BSSID_CAP = 83, + WLAN_EID_SSID_LIST = 84, + WLAN_EID_MULTI_BSSID_IDX = 85, + WLAN_EID_FMS_DESCRIPTOR = 86, + WLAN_EID_FMS_REQUEST = 87, + WLAN_EID_FMS_RESPONSE = 88, + WLAN_EID_QOS_TRAFFIC_CAPA = 89, + WLAN_EID_BSS_MAX_IDLE_PERIOD = 90, + WLAN_EID_TSF_REQUEST = 91, + WLAN_EID_TSF_RESPOSNE = 92, + WLAN_EID_WNM_SLEEP_MODE = 93, + WLAN_EID_TIM_BCAST_REQ = 94, + WLAN_EID_TIM_BCAST_RESP = 95, + WLAN_EID_COLL_IF_REPORT = 96, + WLAN_EID_CHANNEL_USAGE = 97, + WLAN_EID_TIME_ZONE = 98, + WLAN_EID_DMS_REQUEST = 99, + WLAN_EID_DMS_RESPONSE = 100, + WLAN_EID_LINK_ID = 101, + WLAN_EID_WAKEUP_SCHEDUL = 102, + /* 103 reserved */ + WLAN_EID_CHAN_SWITCH_TIMING = 104, + WLAN_EID_PTI_CONTROL = 105, + WLAN_EID_PU_BUFFER_STATUS = 106, + WLAN_EID_INTERWORKING = 107, + WLAN_EID_ADVERTISEMENT_PROTOCOL = 108, + WLAN_EID_EXPEDITED_BW_REQ = 109, + WLAN_EID_QOS_MAP_SET = 110, + WLAN_EID_ROAMING_CONSORTIUM = 111, + WLAN_EID_EMERGENCY_ALERT = 112, + WLAN_EID_MESH_CONFIG = 113, + WLAN_EID_MESH_ID = 114, + WLAN_EID_LINK_METRIC_REPORT = 115, + WLAN_EID_CONGESTION_NOTIFICATION = 116, + WLAN_EID_PEER_MGMT = 117, + WLAN_EID_CHAN_SWITCH_PARAM = 118, + WLAN_EID_MESH_AWAKE_WINDOW = 119, + WLAN_EID_BEACON_TIMING = 120, + WLAN_EID_MCCAOP_SETUP_REQ = 121, + WLAN_EID_MCCAOP_SETUP_RESP = 122, + WLAN_EID_MCCAOP_ADVERT = 123, + WLAN_EID_MCCAOP_TEARDOWN = 124, + WLAN_EID_GANN = 125, + WLAN_EID_RANN = 126, + WLAN_EID_EXT_CAPABILITY = 127, + /* 128, 129 reserved for Agere */ + WLAN_EID_PREQ = 130, + WLAN_EID_PREP = 131, + WLAN_EID_PERR = 132, + /* 133-136 reserved for Cisco */ + WLAN_EID_PXU = 137, + WLAN_EID_PXUC = 138, + WLAN_EID_AUTH_MESH_PEER_EXCH = 139, + WLAN_EID_MIC = 140, + WLAN_EID_DESTINATION_URI = 141, + WLAN_EID_UAPSD_COEX = 142, WLAN_EID_WAKEUP_SCHEDULE = 143, WLAN_EID_EXT_SCHEDULE = 144, WLAN_EID_STA_AVAILABILITY = 145, WLAN_EID_DMG_TSPEC = 146, 
WLAN_EID_DMG_AT = 147, WLAN_EID_DMG_CAP = 148, + /* 149-150 reserved for Cisco */ WLAN_EID_DMG_OPERATION = 151, WLAN_EID_DMG_BSS_PARAM_CHANGE = 152, WLAN_EID_DMG_BEAM_REFINEMENT = 153, WLAN_EID_CHANNEL_MEASURE_FEEDBACK = 154, + /* 155-156 reserved for Cisco */ WLAN_EID_AWAKE_WINDOW = 157, WLAN_EID_MULTI_BAND = 158, WLAN_EID_ADDBA_EXT = 159, @@ -1781,11 +1800,34 @@ enum ieee80211_eid { WLAN_EID_MULTIPLE_MAC_ADDR = 170, WLAN_EID_U_PID = 171, WLAN_EID_DMG_LINK_ADAPT_ACK = 172, + /* 173 reserved for Symbol */ + WLAN_EID_MCCAOP_ADV_OVERVIEW = 174, WLAN_EID_QUIET_PERIOD_REQ = 175, + /* 176 reserved for Symbol */ WLAN_EID_QUIET_PERIOD_RESP = 177, + /* 178-179 reserved for Symbol */ + /* 180 reserved for ISO/IEC 20011 */ WLAN_EID_EPAC_POLICY = 182, WLAN_EID_CLISTER_TIME_OFF = 183, + WLAN_EID_INTER_AC_PRIO = 184, + WLAN_EID_SCS_DESCRIPTOR = 185, + WLAN_EID_QLOAD_REPORT = 186, + WLAN_EID_HCCA_TXOP_UPDATE_COUNT = 187, + WLAN_EID_HL_STREAM_ID = 188, + WLAN_EID_GCR_GROUP_ADDR = 189, WLAN_EID_ANTENNA_SECTOR_ID_PATTERN = 190, + WLAN_EID_VHT_CAPABILITY = 191, + WLAN_EID_VHT_OPERATION = 192, + WLAN_EID_EXTENDED_BSS_LOAD = 193, + WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, + WLAN_EID_VHT_TX_POWER_ENVELOPE = 195, + WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, + WLAN_EID_AID = 197, + WLAN_EID_QUIET_CHANNEL = 198, + WLAN_EID_OPMODE_NOTIF = 199, + + WLAN_EID_VENDOR_SPECIFIC = 221, + WLAN_EID_QOS_PARAMETER = 222, }; /* Action category code */ -- cgit v1.2.3 From 006e983bbc805431c44e2135e13841f66059a045 Mon Sep 17 00:00:00 2001 From: Sricharan R Date: Tue, 3 Dec 2013 15:57:22 +0530 Subject: DRIVERS: IRQCHIP: IRQ-GIC: Add support for routable irqs In some socs the gic can be preceded by a crossbar IP which routes the peripheral interrupts to the gic inputs. The peripheral interrupts are associated with a fixed crossbar input line and the crossbar routes that to one of the free gic input line. The DT entries for peripherals provides the fixed crossbar input line as its interrupt number and the mapping code should associate this with a free gic input line. This patch adds the support inside the gic irqchip to handle such routable irqs. The routable irqs are registered in a linear domain. The registered routable domain's callback should be implemented to get a free irq and to configure the IP to route it. Cc: Thomas Gleixner Cc: Linus Walleij Cc: Santosh Shilimkar Cc: Russell King Cc: Tony Lindgren Cc: Rajendra Nayak Cc: Marc Zyngier Cc: Grant Likely Cc: Rob Herring Signed-off-by: Sricharan R Reviewed-by: Thomas Gleixner Acked-by: Santosh Shilimkar Acked-by: Linus Walleij --- Documentation/devicetree/bindings/arm/gic.txt | 6 ++ drivers/irqchip/irq-gic.c | 82 +++++++++++++++++++++++---- include/linux/irqchip/arm-gic.h | 7 ++- 3 files changed, 84 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt index bae0d87a38b2..5573c08d3180 100644 --- a/Documentation/devicetree/bindings/arm/gic.txt +++ b/Documentation/devicetree/bindings/arm/gic.txt @@ -50,6 +50,11 @@ Optional regions, used when the GIC doesn't have banked registers. The offset is cpu-offset * cpu-nr. +- arm,routable-irqs : Total number of gic irq inputs which are not directly + connected from the peripherals, but are routed dynamically + by a crossbar/multiplexer preceding the GIC. The GIC irq + input line is assigned dynamically when the corresponding + peripheral's crossbar line is mapped. 
Example: intc: interrupt-controller@fff11000 { @@ -57,6 +62,7 @@ Example: #interrupt-cells = <3>; #address-cells = <1>; interrupt-controller; + arm,routable-irqs = <160>; reg = <0xfff11000 0x1000>, <0xfff10100 0x100>; }; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 341c6016812d..07a7050841ec 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -824,16 +824,25 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_set_chip_and_handler(irq, &gic_chip, handle_fasteoi_irq); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + + gic_routable_irq_domain_ops->map(d, irq, hw); } irq_set_chip_data(irq, d->host_data); return 0; } +static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq) +{ + gic_routable_irq_domain_ops->unmap(d, irq); +} + static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *controller, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { + unsigned long ret = 0; + if (d->of_node != controller) return -EINVAL; if (intsize < 3) @@ -843,11 +852,20 @@ static int gic_irq_domain_xlate(struct irq_domain *d, *out_hwirq = intspec[1] + 16; /* For SPIs, we need to add 16 more to get the GIC irq ID number */ - if (!intspec[0]) - *out_hwirq += 16; + if (!intspec[0]) { + ret = gic_routable_irq_domain_ops->xlate(d, controller, + intspec, + intsize, + out_hwirq, + out_type); + + if (IS_ERR_VALUE(ret)) + return ret; + } *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; - return 0; + + return ret; } #ifdef CONFIG_SMP @@ -871,9 +889,41 @@ static struct notifier_block gic_cpu_notifier = { const struct irq_domain_ops gic_irq_domain_ops = { .map = gic_irq_domain_map, + .unmap = gic_irq_domain_unmap, .xlate = gic_irq_domain_xlate, }; +/* Default functions for routable irq domain */ +static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + return 0; +} + +static void gic_routable_irq_domain_unmap(struct irq_domain *d, + unsigned int irq) +{ +} + +static int gic_routable_irq_domain_xlate(struct irq_domain *d, + struct device_node *controller, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + *out_hwirq += 16; + return 0; +} + +const struct irq_domain_ops gic_default_routable_irq_domain_ops = { + .map = gic_routable_irq_domain_map, + .unmap = gic_routable_irq_domain_unmap, + .xlate = gic_routable_irq_domain_xlate, +}; + +const struct irq_domain_ops *gic_routable_irq_domain_ops = + &gic_default_routable_irq_domain_ops; + void __init gic_init_bases(unsigned int gic_nr, int irq_start, void __iomem *dist_base, void __iomem *cpu_base, u32 percpu_offset, struct device_node *node) @@ -881,6 +931,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, irq_hw_number_t hwirq_base; struct gic_chip_data *gic; int gic_irqs, irq_base, i; + int nr_routable_irqs; BUG_ON(gic_nr >= MAX_GIC_NR); @@ -946,14 +997,25 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, gic->gic_irqs = gic_irqs; gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ - irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id()); - if (IS_ERR_VALUE(irq_base)) { - WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", - irq_start); - irq_base = irq_start; + + if (of_property_read_u32(node, "arm,routable-irqs", + &nr_routable_irqs)) { + irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, + numa_node_id()); + if (IS_ERR_VALUE(irq_base)) { + WARN(1, "Cannot 
allocate irq_descs @ IRQ%d, assuming pre-allocated\n", + irq_start); + irq_base = irq_start; + } + + gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, + hwirq_base, &gic_irq_domain_ops, gic); + } else { + gic->domain = irq_domain_add_linear(node, nr_routable_irqs, + &gic_irq_domain_ops, + gic); } - gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, - hwirq_base, &gic_irq_domain_ops, gic); + if (WARN_ON(!gic->domain)) return; diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 0ceb389dba6c..7ed92d0560d5 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -93,6 +93,11 @@ int gic_get_cpu_id(unsigned int cpu); void gic_migrate_target(unsigned int new_cpu_id); unsigned long gic_get_sgir_physaddr(void); +extern const struct irq_domain_ops *gic_routable_irq_domain_ops; +static inline void __init register_routable_domain_ops + (const struct irq_domain_ops *ops) +{ + gic_routable_irq_domain_ops = ops; +} #endif /* __ASSEMBLY */ - #endif -- cgit v1.2.3 From 96ca848ef7ea1be7e92d1cceb34ef3aa86053828 Mon Sep 17 00:00:00 2001 From: Sricharan R Date: Tue, 3 Dec 2013 15:57:23 +0530 Subject: DRIVERS: IRQCHIP: CROSSBAR: Add support for Crossbar IP Some socs have a large number of interrupts requests to service the needs of its many peripherals and subsystems. All of the interrupt lines from the subsystems are not needed at the same time, so they have to be muxed to the irq-controller appropriately. In such places a interrupt controllers are preceded by an CROSSBAR that provides flexibility in muxing the device requests to the controller inputs. This driver takes care a allocating a free irq and then configuring the crossbar IP as a part of the mpu's irqchip callbacks. crossbar_init should be called right before the irqchip_init, so that it is setup to handle the irqchip callbacks. Cc: Thomas Gleixner Cc: Linus Walleij Cc: Santosh Shilimkar Cc: Russell King Cc: Tony Lindgren Cc: Rajendra Nayak Cc: Marc Zyngier Cc: Grant Likely Cc: Rob Herring Signed-off-by: Sricharan R Acked-by: Kumar Gala (for DT binding portion) Acked-by: Santosh Shilimkar Acked-by: Linus Walleij Acked-by: Thomas Gleixner --- .../devicetree/bindings/arm/omap/crossbar.txt | 27 +++ drivers/irqchip/Kconfig | 8 + drivers/irqchip/Makefile | 1 + drivers/irqchip/irq-crossbar.c | 208 +++++++++++++++++++++ include/linux/irqchip/irq-crossbar.h | 11 ++ 5 files changed, 255 insertions(+) create mode 100644 Documentation/devicetree/bindings/arm/omap/crossbar.txt create mode 100644 drivers/irqchip/irq-crossbar.c create mode 100644 include/linux/irqchip/irq-crossbar.h (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/arm/omap/crossbar.txt b/Documentation/devicetree/bindings/arm/omap/crossbar.txt new file mode 100644 index 000000000000..fb88585cfb93 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/omap/crossbar.txt @@ -0,0 +1,27 @@ +Some socs have a large number of interrupts requests to service +the needs of its many peripherals and subsystems. All of the +interrupt lines from the subsystems are not needed at the same +time, so they have to be muxed to the irq-controller appropriately. +In such places a interrupt controllers are preceded by an CROSSBAR +that provides flexibility in muxing the device requests to the controller +inputs. + +Required properties: +- compatible : Should be "ti,irq-crossbar" +- reg: Base address and the size of the crossbar registers. 
+- ti,max-irqs: Total number of irqs available at the interrupt controller. +- ti,reg-size: Size of a individual register in bytes. Every individual + register is assumed to be of same size. Valid sizes are 1, 2, 4. +- ti,irqs-reserved: List of the reserved irq lines that are not muxed using + crossbar. These interrupt lines are reserved in the soc, + so crossbar bar driver should not consider them as free + lines. + +Examples: + crossbar_mpu: @4a020000 { + compatible = "ti,irq-crossbar"; + reg = <0x4a002a48 0x130>; + ti,max-irqs = <160>; + ti,reg-size = <2>; + ti,irqs-reserved = <0 1 2 3 5 6 131 132 139 140>; + }; diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 61ffdca96e25..111068782da4 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -69,3 +69,11 @@ config VERSATILE_FPGA_IRQ_NR config XTENSA_MX bool select IRQ_DOMAIN + +config IRQ_CROSSBAR + bool + help + Support for a CROSSBAR ip that preceeds the main interrupt controller. + The primary irqchip invokes the crossbar's callback which inturn allocates + a free irq and configures the IP. Thus the peripheral interrupts are + routed to one of the free irqchip interrupt lines. diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 86b484cb3ec2..3e776cb8dd46 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -25,3 +25,4 @@ obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o +obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c new file mode 100644 index 000000000000..fc817d28d1fe --- /dev/null +++ b/drivers/irqchip/irq-crossbar.c @@ -0,0 +1,208 @@ +/* + * drivers/irqchip/irq-crossbar.c + * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com + * Author: Sricharan R + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ +#include +#include +#include +#include +#include +#include + +#define IRQ_FREE -1 +#define GIC_IRQ_START 32 + +/* + * @int_max: maximum number of supported interrupts + * @irq_map: array of interrupts to crossbar number mapping + * @crossbar_base: crossbar base address + * @register_offsets: offsets for each irq number + */ +struct crossbar_device { + uint int_max; + uint *irq_map; + void __iomem *crossbar_base; + int *register_offsets; + void (*write) (int, int); +}; + +static struct crossbar_device *cb; + +static inline void crossbar_writel(int irq_no, int cb_no) +{ + writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); +} + +static inline void crossbar_writew(int irq_no, int cb_no) +{ + writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); +} + +static inline void crossbar_writeb(int irq_no, int cb_no) +{ + writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); +} + +static inline int allocate_free_irq(int cb_no) +{ + int i; + + for (i = 0; i < cb->int_max; i++) { + if (cb->irq_map[i] == IRQ_FREE) { + cb->irq_map[i] = cb_no; + return i; + } + } + + return -ENODEV; +} + +static int crossbar_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + cb->write(hw - GIC_IRQ_START, cb->irq_map[hw - GIC_IRQ_START]); + return 0; +} + +static void crossbar_domain_unmap(struct irq_domain *d, unsigned int irq) +{ + irq_hw_number_t hw = irq_get_irq_data(irq)->hwirq; + + if (hw > GIC_IRQ_START) + cb->irq_map[hw - GIC_IRQ_START] = IRQ_FREE; +} + +static int crossbar_domain_xlate(struct irq_domain *d, + struct device_node *controller, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + unsigned long ret; + + ret = allocate_free_irq(intspec[1]); + + if (IS_ERR_VALUE(ret)) + return ret; + + *out_hwirq = ret + GIC_IRQ_START; + return 0; +} + +const struct irq_domain_ops routable_irq_domain_ops = { + .map = crossbar_domain_map, + .unmap = crossbar_domain_unmap, + .xlate = crossbar_domain_xlate +}; + +static int __init crossbar_of_init(struct device_node *node) +{ + int i, size, max, reserved = 0, entry; + const __be32 *irqsr; + + cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL); + + if (!cb) + return -ENOMEM; + + cb->crossbar_base = of_iomap(node, 0); + if (!cb->crossbar_base) + goto err1; + + of_property_read_u32(node, "ti,max-irqs", &max); + cb->irq_map = kzalloc(max * sizeof(int), GFP_KERNEL); + if (!cb->irq_map) + goto err2; + + cb->int_max = max; + + for (i = 0; i < max; i++) + cb->irq_map[i] = IRQ_FREE; + + /* Get and mark reserved irqs */ + irqsr = of_get_property(node, "ti,irqs-reserved", &size); + if (irqsr) { + size /= sizeof(__be32); + + for (i = 0; i < size; i++) { + of_property_read_u32_index(node, + "ti,irqs-reserved", + i, &entry); + if (entry > max) { + pr_err("Invalid reserved entry\n"); + goto err3; + } + cb->irq_map[entry] = 0; + } + } + + cb->register_offsets = kzalloc(max * sizeof(int), GFP_KERNEL); + if (!cb->register_offsets) + goto err3; + + of_property_read_u32(node, "ti,reg-size", &size); + + switch (size) { + case 1: + cb->write = crossbar_writeb; + break; + case 2: + cb->write = crossbar_writew; + break; + case 4: + cb->write = crossbar_writel; + break; + default: + pr_err("Invalid reg-size property\n"); + goto err4; + break; + } + + /* + * Register offsets are not linear because of the + * reserved irqs. so find and store the offsets once. 
+ */ + for (i = 0; i < max; i++) { + if (!cb->irq_map[i]) + continue; + + cb->register_offsets[i] = reserved; + reserved += size; + } + + register_routable_domain_ops(&routable_irq_domain_ops); + return 0; + +err4: + kfree(cb->register_offsets); +err3: + kfree(cb->irq_map); +err2: + iounmap(cb->crossbar_base); +err1: + kfree(cb); + return -ENOMEM; +} + +static const struct of_device_id crossbar_match[] __initconst = { + { .compatible = "ti,irq-crossbar" }, + {} +}; + +int __init irqcrossbar_init(void) +{ + struct device_node *np; + np = of_find_matching_node(NULL, crossbar_match); + if (!np) + return -ENODEV; + + crossbar_of_init(np); + return 0; +} diff --git a/include/linux/irqchip/irq-crossbar.h b/include/linux/irqchip/irq-crossbar.h new file mode 100644 index 000000000000..e5537b81df8d --- /dev/null +++ b/include/linux/irqchip/irq-crossbar.h @@ -0,0 +1,11 @@ +/* + * drivers/irqchip/irq-crossbar.h + * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +int irqcrossbar_init(void); -- cgit v1.2.3 From 781f6d710d4482eab05cfaad50060a0ea8c0e4e0 Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Thu, 30 Jan 2014 13:18:57 +0000 Subject: gpio: generic: Add label to platform data When registering more than one platform device, it is useful to set the gpio chip label in the platform data. Signed-off-by: Pawel Moll Signed-off-by: Linus Walleij --- drivers/gpio/gpio-generic.c | 2 ++ include/linux/basic_mmio_gpio.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include/linux') diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index d2196bf73847..8c778afdf49f 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c @@ -531,6 +531,8 @@ static int bgpio_pdev_probe(struct platform_device *pdev) return err; if (pdata) { + if (pdata->label) + bgc->gc.label = pdata->label; bgc->gc.base = pdata->base; if (pdata->ngpio > 0) bgc->gc.ngpio = pdata->ngpio; diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h index d8a97ec0e2b8..0e97856b2cff 100644 --- a/include/linux/basic_mmio_gpio.h +++ b/include/linux/basic_mmio_gpio.h @@ -19,6 +19,7 @@ #include struct bgpio_pdata { + const char *label; int base; int ngpio; }; -- cgit v1.2.3 From 1f7c164b6f2a8a028bfc36097fc42bf061c5212e Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 4 Feb 2014 00:45:13 +0100 Subject: ACPI / hotplug / PCI: Rework acpiphp_check_host_bridge() Since the only existing caller of acpiphp_check_host_bridge(), which is acpi_pci_root_scan_dependent(), already has a struct acpi_device pointer needed to obtain the ACPIPHP context, it doesn't make sense to execute acpi_bus_get_device() on its handle in acpiphp_handle_to_bridge() just in order to get that pointer back. For this reason, modify acpiphp_check_host_bridge() to take a struct acpi_device pointer as its argument and rearrange the code accordingly. Signed-off-by: Rafael J. 
Wysocki Tested-by: Mika Westerberg --- drivers/acpi/pci_root.c | 2 +- drivers/pci/hotplug/acpiphp_glue.c | 11 +++-------- include/linux/pci-acpi.h | 4 ++-- 3 files changed, 6 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index c1c4102e6478..c288ff3c6998 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -51,7 +51,7 @@ static void acpi_pci_root_remove(struct acpi_device *device); static int acpi_pci_root_scan_dependent(struct acpi_device *adev) { - acpiphp_check_host_bridge(adev->handle); + acpiphp_check_host_bridge(adev); return 0; } diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index b7342d2e819b..11a6117fb358 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -382,15 +382,11 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data, return AE_OK; } -static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle) +static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev) { - struct acpi_device *adev = acpi_bus_get_acpi_device(handle); struct acpiphp_context *context; struct acpiphp_bridge *bridge = NULL; - if (!adev) - return NULL; - acpi_lock_hp_context(); context = acpiphp_get_context(adev); if (context) { @@ -401,7 +397,6 @@ static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle) acpiphp_put_context(context); } acpi_unlock_hp_context(); - acpi_bus_put_acpi_device(adev); return bridge; } @@ -768,11 +763,11 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus) * ACPI event handlers */ -void acpiphp_check_host_bridge(acpi_handle handle) +void acpiphp_check_host_bridge(struct acpi_device *adev) { struct acpiphp_bridge *bridge; - bridge = acpiphp_handle_to_bridge(handle); + bridge = acpiphp_dev_to_bridge(adev); if (bridge) { pci_lock_rescan_remove(); diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 5a462c4e5009..637a608ded0b 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -59,12 +59,12 @@ static inline void acpi_pci_slot_remove(struct pci_bus *bus) { } void acpiphp_init(void); void acpiphp_enumerate_slots(struct pci_bus *bus); void acpiphp_remove_slots(struct pci_bus *bus); -void acpiphp_check_host_bridge(acpi_handle handle); +void acpiphp_check_host_bridge(struct acpi_device *adev); #else static inline void acpiphp_init(void) { } static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { } static inline void acpiphp_remove_slots(struct pci_bus *bus) { } -static inline void acpiphp_check_host_bridge(acpi_handle handle) { } +static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { } #endif #else /* CONFIG_ACPI */ -- cgit v1.2.3 From ef70bbe1aaa612f75360e5df5952fddec50b7ca9 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Tue, 7 Jan 2014 12:34:11 +0100 Subject: gpio: make gpiod_direction_output take a logical value The documentation was not clear about whether gpio_direction_output should take a logical value or the physical level on the output line, i.e. whether the ACTIVE_LOW status would be taken into account. This converts gpiod_direction_output to use the logical level and adds a new gpiod_direction_output_raw for the raw value. 
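A minimal consumer sketch of the resulting semantics (an editorial illustration, not part of the patch; the "enable" con_id, the helper name and the error handling are assumptions for the example only):

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int example_set_direction(struct device *dev)
	{
		struct gpio_desc *desc;

		/* assumed to be mapped ACTIVE_LOW in DT or in a gpiod lookup table */
		desc = devm_gpiod_get(dev, "enable");
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		/* logical value: 1 means "active", so the physical line is driven low here */
		gpiod_direction_output(desc, 1);

		/* raw value: 1 means physical high, the ACTIVE_LOW status is not applied */
		return gpiod_direction_output_raw(desc, 1);
	}

With the logical variant a consumer no longer needs to know whether the line is wired active-low; only code that genuinely cares about the wire level should use the _raw call.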
Signed-off-by: Philipp Zabel Reviewed-by: Alexandre Courbot Signed-off-by: Linus Walleij --- Documentation/gpio/consumer.txt | 1 + drivers/gpio/gpiolib.c | 67 +++++++++++++++++++++++++++++------------ include/asm-generic/gpio.h | 2 +- include/linux/gpio/consumer.h | 7 +++++ 4 files changed, 57 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt index e42f77d8d4ca..09854fe59307 100644 --- a/Documentation/gpio/consumer.txt +++ b/Documentation/gpio/consumer.txt @@ -154,6 +154,7 @@ raw line value: void gpiod_set_raw_value(struct gpio_desc *desc, int value) int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value) + int gpiod_direction_output_raw(struct gpio_desc *desc, int value) The active-low state of a GPIO can also be queried using the following call: diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 50c4922fe53a..80da9f1940c9 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -350,9 +350,9 @@ static ssize_t gpio_direction_store(struct device *dev, if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else if (sysfs_streq(buf, "high")) - status = gpiod_direction_output(desc, 1); + status = gpiod_direction_output_raw(desc, 1); else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low")) - status = gpiod_direction_output(desc, 0); + status = gpiod_direction_output_raw(desc, 0); else if (sysfs_streq(buf, "in")) status = gpiod_direction_input(desc); else @@ -1590,7 +1590,7 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) if (flags & GPIOF_DIR_IN) err = gpiod_direction_input(desc); else - err = gpiod_direction_output(desc, + err = gpiod_direction_output_raw(desc, (flags & GPIOF_INIT_HIGH) ? 1 : 0); if (err) @@ -1756,28 +1756,13 @@ fail: } EXPORT_SYMBOL_GPL(gpiod_direction_input); -/** - * gpiod_direction_output - set the GPIO direction to input - * @desc: GPIO to set to output - * @value: initial output value of the GPIO - * - * Set the direction of the passed GPIO to output, such as gpiod_set_value() can - * be called safely on it. The initial value of the output must be specified. - * - * Return 0 in case of success, else an error code. - */ -int gpiod_direction_output(struct gpio_desc *desc, int value) +static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value) { unsigned long flags; struct gpio_chip *chip; int status = -EINVAL; int offset; - if (!desc || !desc->chip) { - pr_warn("%s: invalid GPIO\n", __func__); - return -EINVAL; - } - /* GPIOs used for IRQs shall not be set as output */ if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) { gpiod_err(desc, @@ -1840,6 +1825,50 @@ fail: gpiod_dbg(desc, "%s: gpio status %d\n", __func__, status); return status; } + +/** + * gpiod_direction_output_raw - set the GPIO direction to output + * @desc: GPIO to set to output + * @value: initial output value of the GPIO + * + * Set the direction of the passed GPIO to output, such as gpiod_set_value() can + * be called safely on it. The initial value of the output must be specified + * as raw value on the physical line without regard for the ACTIVE_LOW status. + * + * Return 0 in case of success, else an error code. 
+ */ +int gpiod_direction_output_raw(struct gpio_desc *desc, int value) +{ + if (!desc || !desc->chip) { + pr_warn("%s: invalid GPIO\n", __func__); + return -EINVAL; + } + return _gpiod_direction_output_raw(desc, value); +} +EXPORT_SYMBOL_GPL(gpiod_direction_output_raw); + +/** + * gpiod_direction_output - set the GPIO direction to input + * @desc: GPIO to set to output + * @value: initial output value of the GPIO + * + * Set the direction of the passed GPIO to output, such as gpiod_set_value() can + * be called safely on it. The initial value of the output must be specified + * as the logical value of the GPIO, i.e. taking its ACTIVE_LOW status into + * account. + * + * Return 0 in case of success, else an error code. + */ +int gpiod_direction_output(struct gpio_desc *desc, int value) +{ + if (!desc || !desc->chip) { + pr_warn("%s: invalid GPIO\n", __func__); + return -EINVAL; + } + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) + value = !value; + return _gpiod_direction_output_raw(desc, value); +} EXPORT_SYMBOL_GPL(gpiod_direction_output); /** diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index a5f56a0213a7..23e364538ab5 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -69,7 +69,7 @@ static inline int gpio_direction_input(unsigned gpio) } static inline int gpio_direction_output(unsigned gpio, int value) { - return gpiod_direction_output(gpio_to_desc(gpio), value); + return gpiod_direction_output_raw(gpio_to_desc(gpio), value); } static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 4d34dbbbad4d..387994325122 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -36,6 +36,7 @@ void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); int gpiod_get_direction(const struct gpio_desc *desc); int gpiod_direction_input(struct gpio_desc *desc); int gpiod_direction_output(struct gpio_desc *desc, int value); +int gpiod_direction_output_raw(struct gpio_desc *desc, int value); /* Value get/set from non-sleeping context */ int gpiod_get_value(const struct gpio_desc *desc); @@ -121,6 +122,12 @@ static inline int gpiod_direction_output(struct gpio_desc *desc, int value) WARN_ON(1); return -ENOSYS; } +static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return -ENOSYS; +} static inline int gpiod_get_value(const struct gpio_desc *desc) -- cgit v1.2.3 From 91eef3e2fee581b00f027bbb0d144788a3c609a9 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Tue, 21 Jan 2014 21:56:27 +0100 Subject: staging/bluetooth: Add hci_h4p driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add hci_h4p bluetooth driver to staging tree. This device is used for example on Nokia N900 cell phone. 
Signed-off-by: Pali Rohár Signed-off-by: Pavel Machek Thanks-to: Sebastian Reichel Thanks-to: Joe Perches Signed-off-by: Greg Kroah-Hartman --- drivers/staging/Kconfig | 2 + drivers/staging/Makefile | 1 + drivers/staging/nokia_h4p/Kconfig | 9 + drivers/staging/nokia_h4p/Makefile | 6 + drivers/staging/nokia_h4p/TODO | 140 ++++ drivers/staging/nokia_h4p/hci_h4p.h | 228 +++++ drivers/staging/nokia_h4p/nokia_core.c | 1205 +++++++++++++++++++++++++++ drivers/staging/nokia_h4p/nokia_fw-bcm.c | 147 ++++ drivers/staging/nokia_h4p/nokia_fw-csr.c | 150 ++++ drivers/staging/nokia_h4p/nokia_fw-ti1273.c | 110 +++ drivers/staging/nokia_h4p/nokia_fw.c | 195 +++++ drivers/staging/nokia_h4p/nokia_uart.c | 199 +++++ include/linux/platform_data/bt-nokia-h4p.h | 38 + 13 files changed, 2430 insertions(+) create mode 100644 drivers/staging/nokia_h4p/Kconfig create mode 100644 drivers/staging/nokia_h4p/Makefile create mode 100644 drivers/staging/nokia_h4p/TODO create mode 100644 drivers/staging/nokia_h4p/hci_h4p.h create mode 100644 drivers/staging/nokia_h4p/nokia_core.c create mode 100644 drivers/staging/nokia_h4p/nokia_fw-bcm.c create mode 100644 drivers/staging/nokia_h4p/nokia_fw-csr.c create mode 100644 drivers/staging/nokia_h4p/nokia_fw-ti1273.c create mode 100644 drivers/staging/nokia_h4p/nokia_fw.c create mode 100644 drivers/staging/nokia_h4p/nokia_uart.c create mode 100644 include/linux/platform_data/bt-nokia-h4p.h (limited to 'include/linux') diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 93a7ecf936ac..babb1cf67ce7 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -148,4 +148,6 @@ source "drivers/staging/dgap/Kconfig" source "drivers/staging/gs_fpgaboot/Kconfig" +source "drivers/staging/nokia_h4p/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 399f9fe07014..2c4949a9bd9b 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -66,3 +66,4 @@ obj-$(CONFIG_DGNC) += dgnc/ obj-$(CONFIG_DGAP) += dgap/ obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/ obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/ +obj-$(CONFIG_BT_NOKIA_H4P) += nokia_h4p/ diff --git a/drivers/staging/nokia_h4p/Kconfig b/drivers/staging/nokia_h4p/Kconfig new file mode 100644 index 000000000000..4336c0ad065b --- /dev/null +++ b/drivers/staging/nokia_h4p/Kconfig @@ -0,0 +1,9 @@ +config BT_NOKIA_H4P + tristate "HCI driver with H4 Nokia extensions" + depends on BT && ARCH_OMAP + help + Bluetooth HCI driver with H4 extensions. This driver provides + support for H4+ Bluetooth chip with vendor-specific H4 extensions. + + Say Y here to compile support for h4 extended devices into the kernel + or say M to compile it as module (btnokia_h4p). 
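For context, the driver added below binds as a platform driver named "hci_h4p" and reads its configuration from struct hci_h4p_platform_data (see the probe function in nokia_core.c further down). A minimal board-side sketch (editorial, not part of the patch; the GPIO numbers, IRQ, UART base, clock names and chip_type value are placeholder assumptions):

	#include <linux/platform_device.h>
	#include <linux/platform_data/bt-nokia-h4p.h>

	static struct hci_h4p_platform_data bt_pdata = {
		.chip_type		= 3,		/* placeholder chip id */
		.bt_sysclk		= 2,		/* 2 selects 38.4 MHz in the driver */
		.bt_wakeup_gpio		= 37,		/* placeholder GPIO numbers */
		.host_wakeup_gpio	= 101,
		.reset_gpio		= 91,
		.reset_gpio_shared	= 0,
		.uart_irq		= 73,		/* placeholder UART IRQ */
		.uart_base		= 0x4806c000,	/* placeholder UART physical base */
		.uart_iclk		= "uart2_ick",	/* placeholder clock names */
		.uart_fclk		= "uart2_fck",
	};

	static struct platform_device bt_device = {
		.name	= "hci_h4p",
		.id	= -1,
		.dev	= {
			.platform_data = &bt_pdata,
		},
	};

A board file would then call platform_device_register(&bt_device) so that the driver's probe finds the platform data.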
diff --git a/drivers/staging/nokia_h4p/Makefile b/drivers/staging/nokia_h4p/Makefile new file mode 100644 index 000000000000..9625db4a9af3 --- /dev/null +++ b/drivers/staging/nokia_h4p/Makefile @@ -0,0 +1,6 @@ + +obj-$(CONFIG_BT_NOKIA_H4P) += btnokia_h4p.o +btnokia_h4p-objs := nokia_core.o nokia_fw.o nokia_uart.o nokia_fw-csr.o \ + nokia_fw-bcm.o nokia_fw-ti1273.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/staging/nokia_h4p/TODO b/drivers/staging/nokia_h4p/TODO new file mode 100644 index 000000000000..d997afe14173 --- /dev/null +++ b/drivers/staging/nokia_h4p/TODO @@ -0,0 +1,140 @@ +Few attempts to submission have been made, last review comments were received in + +Date: Wed, 15 Jan 2014 19:01:51 -0800 +From: Marcel Holtmann +Subject: Re: [PATCH v6] Bluetooth: Add hci_h4p driver + +Some code refactoring is still needed. + +TODO: + +> +++ b/drivers/bluetooth/hci_h4p.h + +can we please get the naming straight. File names do not start with +hci_ anymore. We moved away from it since that term is too generic. + +> +#define FW_NAME_TI1271_LE "ti1273_le.bin" +> +#define FW_NAME_TI1271 "ti1273.bin" +> +#define FW_NAME_BCM2048 "bcmfw.bin" +> +#define FW_NAME_CSR "bc4fw.bin" + +We do these have to be global in a header file. This should be +confined to the specific firmware part. + +> +struct hci_h4p_info { + +Can we please get rid of the hci_ prefix for everything. Copying from +drivers that are over 10 years old is not a good idea. Please look at +recent ones. + +> + struct timer_list lazy_release; + +Timer? Not delayed work? + +> +void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val); +> +u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset); +> +void hci_h4p_set_rts(struct hci_h4p_info *info, int active); +> +int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active, int timeout_ms); +> +void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which); +> +void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which); +> +void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed); +> +int hci_h4p_reset_uart(struct hci_h4p_info *info); +> +void hci_h4p_init_uart(struct hci_h4p_info *info); +> +void hci_h4p_enable_tx(struct hci_h4p_info *info); +> +void hci_h4p_store_regs(struct hci_h4p_info *info); +> +void hci_h4p_restore_regs(struct hci_h4p_info *info); +> +void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable); + +These are a lot of public functions. Are they all really needed or can +the code be done smart. + +> +static ssize_t hci_h4p_store_bdaddr(struct device *dev, +> + struct device_attribute *attr, +> + const char *buf, size_t count) +> +{ +> + struct hci_h4p_info *info = dev_get_drvdata(dev); + +Since none of these devices can function without having a valid +address, the way this should work is that we should not register the +HCI device when probing the platform device. + +The HCI device should be registered once a valid address has been +written into the sysfs file. I do not want to play the tricks with +bringing up the device without a valid address. + +> + hdev->close = hci_h4p_hci_close; +> + hdev->flush = hci_h4p_hci_flush; +> + hdev->send = hci_h4p_hci_send_frame; + +It needs to use hdev->setup to load the firmware. I assume the +firmware only needs to be loaded once. That is exactly what +hdev->setup does. It gets executed once. + +> + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + +Is this quirk really needed? Normally only Bluetooth 1.1 and early +devices qualify for it. 
+ +> +static int hci_h4p_bcm_set_bdaddr(struct hci_h4p_info *info, struct sk_buff *skb) +> +{ +> + int i; +> + static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf}; +> + int not_valid; + +Has this actually been confirmed that we can just randomly set an +address out of the Nokia range. I do not think so. This is a pretty +bad idea. + +I have no interest in merging a driver with such a hack. + +> + not_valid = 1; +> + for (i = 0; i < 6; i++) { +> + if (info->bd_addr[i] != 0x00) { +> + not_valid = 0; +> + break; +> + } +> + } + +Anybody every heard of memcmp or bacmp and BDADDR_ANY? + +> + if (not_valid) { +> + dev_info(info->dev, "Valid bluetooth address not found," +> + " setting some random\n"); +> + /* When address is not valid, use some random */ +> + memcpy(info->bd_addr, nokia_oui, 3); +> + get_random_bytes(info->bd_addr + 3, 3); +> + } + + +And why does every single chip firmware does this differently. Seriously, this is a mess. + +> +void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb) +> +{ +> + switch (info->man_id) { +> + case H4P_ID_CSR: +> + hci_h4p_bc4_parse_fw_event(info, skb); +> + break; +... +> +} + +We have proper HCI sync command handling in recent kernels. I really +do not know why this is hand coded these days. Check how the Intel +firmware loading inside btusb.c does it. + +> +inline u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset) +> +{ +> + return __raw_readb(info->uart_base + (offset << 2)); +> +} + +Inline in a *.c file for a non-static function. Makes no sense to me. + +> +/** +> + * struct hci_h4p_platform data - hci_h4p Platform data structure +> + */ +> +struct hci_h4p_platform_data { + +please have a proper name here. For example +btnokia_h4p_platform_data. + +Please send patches to Greg Kroah-Hartman and Cc: +Pavel Machek diff --git a/drivers/staging/nokia_h4p/hci_h4p.h b/drivers/staging/nokia_h4p/hci_h4p.h new file mode 100644 index 000000000000..fd7a6407f20c --- /dev/null +++ b/drivers/staging/nokia_h4p/hci_h4p.h @@ -0,0 +1,228 @@ +/* + * This file is part of Nokia H4P bluetooth driver + * + * Copyright (C) 2005-2008 Nokia Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __DRIVERS_BLUETOOTH_HCI_H4P_H +#define __DRIVERS_BLUETOOTH_HCI_H4P_H + +#include +#include +#include + +#define FW_NAME_TI1271_PRELE "ti1273_prele.bin" +#define FW_NAME_TI1271_LE "ti1273_le.bin" +#define FW_NAME_TI1271 "ti1273.bin" +#define FW_NAME_BCM2048 "bcmfw.bin" +#define FW_NAME_CSR "bc4fw.bin" + +#define UART_SYSC_OMAP_RESET 0x03 +#define UART_SYSS_RESETDONE 0x01 +#define UART_OMAP_SCR_EMPTY_THR 0x08 +#define UART_OMAP_SCR_WAKEUP 0x10 +#define UART_OMAP_SSR_WAKEUP 0x02 +#define UART_OMAP_SSR_TXFULL 0x01 + +#define UART_OMAP_SYSC_IDLEMODE 0x03 +#define UART_OMAP_SYSC_IDLEMASK (3 << UART_OMAP_SYSC_IDLEMODE) + +#define UART_OMAP_SYSC_FORCE_IDLE (0 << UART_OMAP_SYSC_IDLEMODE) +#define UART_OMAP_SYSC_NO_IDLE (1 << UART_OMAP_SYSC_IDLEMODE) +#define UART_OMAP_SYSC_SMART_IDLE (2 << UART_OMAP_SYSC_IDLEMODE) + +#define H4P_TRANSFER_MODE 1 +#define H4P_SCHED_TRANSFER_MODE 2 +#define H4P_ACTIVE_MODE 3 + +struct hci_h4p_info { + struct timer_list lazy_release; + struct hci_dev *hdev; + spinlock_t lock; + + void __iomem *uart_base; + unsigned long uart_phys_base; + int irq; + struct device *dev; + u8 chip_type; + u8 bt_wakeup_gpio; + u8 host_wakeup_gpio; + u8 reset_gpio; + u8 reset_gpio_shared; + u8 bt_sysclk; + u8 man_id; + u8 ver_id; + + struct sk_buff_head fw_queue; + struct sk_buff *alive_cmd_skb; + struct completion init_completion; + struct completion fw_completion; + struct completion test_completion; + int fw_error; + int init_error; + + struct sk_buff_head txq; + + struct sk_buff *rx_skb; + long rx_count; + unsigned long rx_state; + unsigned long garbage_bytes; + + u8 bd_addr[6]; + struct sk_buff_head *fw_q; + + int pm_enabled; + int tx_enabled; + int autorts; + int rx_enabled; + unsigned long pm_flags; + + int tx_clocks_en; + int rx_clocks_en; + spinlock_t clocks_lock; + struct clk *uart_iclk; + struct clk *uart_fclk; + atomic_t clk_users; + u16 dll; + u16 dlh; + u16 ier; + u16 mdr1; + u16 efr; +}; + +struct hci_h4p_radio_hdr { + __u8 evt; + __u8 dlen; +} __attribute__ ((packed)); + +struct hci_h4p_neg_hdr { + __u8 dlen; +} __attribute__ ((packed)); +#define H4P_NEG_HDR_SIZE 1 + +#define H4P_NEG_REQ 0x00 +#define H4P_NEG_ACK 0x20 +#define H4P_NEG_NAK 0x40 + +#define H4P_PROTO_PKT 0x44 +#define H4P_PROTO_BYTE 0x4c + +#define H4P_ID_CSR 0x02 +#define H4P_ID_BCM2048 0x04 +#define H4P_ID_TI1271 0x31 + +struct hci_h4p_neg_cmd { + __u8 ack; + __u16 baud; + __u16 unused1; + __u8 proto; + __u16 sys_clk; + __u16 unused2; +} __attribute__ ((packed)); + +struct hci_h4p_neg_evt { + __u8 ack; + __u16 baud; + __u16 unused1; + __u8 proto; + __u16 sys_clk; + __u16 unused2; + __u8 man_id; + __u8 ver_id; +} __attribute__ ((packed)); + +#define H4P_ALIVE_REQ 0x55 +#define H4P_ALIVE_RESP 0xcc + +struct hci_h4p_alive_hdr { + __u8 dlen; +} __attribute__ ((packed)); +#define H4P_ALIVE_HDR_SIZE 1 + +struct hci_h4p_alive_pkt { + __u8 mid; + __u8 unused; +} __attribute__ ((packed)); + +#define MAX_BAUD_RATE 921600 +#define BC4_MAX_BAUD_RATE 3692300 +#define UART_CLOCK 48000000 +#define BT_INIT_DIVIDER 320 +#define BT_BAUDRATE_DIVIDER 384000000 +#define BT_SYSCLK_DIV 1000 +#define INIT_SPEED 120000 + +#define H4_TYPE_SIZE 1 +#define H4_RADIO_HDR_SIZE 2 + +/* H4+ packet types */ +#define H4_CMD_PKT 0x01 +#define H4_ACL_PKT 0x02 +#define H4_SCO_PKT 0x03 +#define H4_EVT_PKT 0x04 
+#define H4_NEG_PKT 0x06 +#define H4_ALIVE_PKT 0x07 +#define H4_RADIO_PKT 0x08 + +/* TX states */ +#define WAIT_FOR_PKT_TYPE 1 +#define WAIT_FOR_HEADER 2 +#define WAIT_FOR_DATA 3 + +struct hci_fw_event { + struct hci_event_hdr hev; + struct hci_ev_cmd_complete cmd; + u8 status; +} __attribute__ ((packed)); + +int hci_h4p_send_alive_packet(struct hci_h4p_info *info); + +void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info, + struct sk_buff *skb); +int hci_h4p_bcm_send_fw(struct hci_h4p_info *info, + struct sk_buff_head *fw_queue); + +void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info, + struct sk_buff *skb); +int hci_h4p_bc4_send_fw(struct hci_h4p_info *info, + struct sk_buff_head *fw_queue); + +void hci_h4p_ti1273_parse_fw_event(struct hci_h4p_info *info, + struct sk_buff *skb); +int hci_h4p_ti1273_send_fw(struct hci_h4p_info *info, + struct sk_buff_head *fw_queue); + +int hci_h4p_read_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue); +int hci_h4p_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue); +void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb); + +void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val); +u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset); +void hci_h4p_set_rts(struct hci_h4p_info *info, int active); +int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active, int timeout_ms); +void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which); +void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which); +void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed); +int hci_h4p_reset_uart(struct hci_h4p_info *info); +void hci_h4p_init_uart(struct hci_h4p_info *info); +void hci_h4p_enable_tx(struct hci_h4p_info *info); +void hci_h4p_store_regs(struct hci_h4p_info *info); +void hci_h4p_restore_regs(struct hci_h4p_info *info); +void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable); + +#endif /* __DRIVERS_BLUETOOTH_HCI_H4P_H */ diff --git a/drivers/staging/nokia_h4p/nokia_core.c b/drivers/staging/nokia_h4p/nokia_core.c new file mode 100644 index 000000000000..5da84b06eff3 --- /dev/null +++ b/drivers/staging/nokia_h4p/nokia_core.c @@ -0,0 +1,1205 @@ +/* + * This file is part of Nokia H4P bluetooth driver + * + * Copyright (C) 2005-2008 Nokia Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * Thanks to all the Nokia people that helped with this driver, + * including Ville Tervo and Roger Quadros. + * + * Power saving functionality was removed from this driver to make + * merging easier. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "hci_h4p.h" + +/* This should be used in function that cannot release clocks */ +static void hci_h4p_set_clk(struct hci_h4p_info *info, int *clock, int enable) +{ + unsigned long flags; + + spin_lock_irqsave(&info->clocks_lock, flags); + if (enable && !*clock) { + BT_DBG("Enabling %p", clock); + clk_prepare_enable(info->uart_fclk); + clk_prepare_enable(info->uart_iclk); + if (atomic_read(&info->clk_users) == 0) + hci_h4p_restore_regs(info); + atomic_inc(&info->clk_users); + } + + if (!enable && *clock) { + BT_DBG("Disabling %p", clock); + if (atomic_dec_and_test(&info->clk_users)) + hci_h4p_store_regs(info); + clk_disable_unprepare(info->uart_fclk); + clk_disable_unprepare(info->uart_iclk); + } + + *clock = enable; + spin_unlock_irqrestore(&info->clocks_lock, flags); +} + +static void hci_h4p_lazy_clock_release(unsigned long data) +{ + struct hci_h4p_info *info = (struct hci_h4p_info *)data; + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + if (!info->tx_enabled) + hci_h4p_set_clk(info, &info->tx_clocks_en, 0); + spin_unlock_irqrestore(&info->lock, flags); +} + +/* Power management functions */ +void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable) +{ + u8 v; + + v = hci_h4p_inb(info, UART_OMAP_SYSC); + v &= ~(UART_OMAP_SYSC_IDLEMASK); + + if (enable) + v |= UART_OMAP_SYSC_SMART_IDLE; + else + v |= UART_OMAP_SYSC_NO_IDLE; + + hci_h4p_outb(info, UART_OMAP_SYSC, v); +} + +static inline void h4p_schedule_pm(struct hci_h4p_info *info) +{ +} + +static void hci_h4p_disable_tx(struct hci_h4p_info *info) +{ + if (!info->pm_enabled) + return; + + /* Re-enable smart-idle */ + hci_h4p_smart_idle(info, 1); + + gpio_set_value(info->bt_wakeup_gpio, 0); + mod_timer(&info->lazy_release, jiffies + msecs_to_jiffies(100)); + info->tx_enabled = 0; +} + +void hci_h4p_enable_tx(struct hci_h4p_info *info) +{ + unsigned long flags; + + if (!info->pm_enabled) + return; + + h4p_schedule_pm(info); + + spin_lock_irqsave(&info->lock, flags); + del_timer(&info->lazy_release); + hci_h4p_set_clk(info, &info->tx_clocks_en, 1); + info->tx_enabled = 1; + gpio_set_value(info->bt_wakeup_gpio, 1); + hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | + UART_IER_THRI); + /* + * Disable smart-idle as UART TX interrupts + * are not wake-up capable + */ + hci_h4p_smart_idle(info, 0); + + spin_unlock_irqrestore(&info->lock, flags); +} + +static void hci_h4p_disable_rx(struct hci_h4p_info *info) +{ + if (!info->pm_enabled) + return; + + info->rx_enabled = 0; + + if (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR) + return; + + if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) + return; + + __hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); + info->autorts = 0; + hci_h4p_set_clk(info, &info->rx_clocks_en, 0); +} + +static void hci_h4p_enable_rx(struct hci_h4p_info *info) +{ + if (!info->pm_enabled) + return; + + h4p_schedule_pm(info); + + hci_h4p_set_clk(info, &info->rx_clocks_en, 1); + info->rx_enabled = 1; + + if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) + return; + + __hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); + info->autorts = 1; +} + +/* Negotiation functions */ +int hci_h4p_send_alive_packet(struct hci_h4p_info *info) +{ + struct hci_h4p_alive_hdr *hdr; + struct hci_h4p_alive_pkt *pkt; + struct sk_buff *skb; + unsigned long flags; + int len; + + 
BT_DBG("Sending alive packet"); + + len = H4_TYPE_SIZE + sizeof(*hdr) + sizeof(*pkt); + skb = bt_skb_alloc(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + memset(skb->data, 0x00, len); + *skb_put(skb, 1) = H4_ALIVE_PKT; + hdr = (struct hci_h4p_alive_hdr *)skb_put(skb, sizeof(*hdr)); + hdr->dlen = sizeof(*pkt); + pkt = (struct hci_h4p_alive_pkt *)skb_put(skb, sizeof(*pkt)); + pkt->mid = H4P_ALIVE_REQ; + + skb_queue_tail(&info->txq, skb); + spin_lock_irqsave(&info->lock, flags); + hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | + UART_IER_THRI); + spin_unlock_irqrestore(&info->lock, flags); + + BT_DBG("Alive packet sent"); + + return 0; +} + +static void hci_h4p_alive_packet(struct hci_h4p_info *info, + struct sk_buff *skb) +{ + struct hci_h4p_alive_hdr *hdr; + struct hci_h4p_alive_pkt *pkt; + + BT_DBG("Received alive packet"); + hdr = (struct hci_h4p_alive_hdr *)skb->data; + if (hdr->dlen != sizeof(*pkt)) { + dev_err(info->dev, "Corrupted alive message\n"); + info->init_error = -EIO; + goto finish_alive; + } + + pkt = (struct hci_h4p_alive_pkt *)skb_pull(skb, sizeof(*hdr)); + if (pkt->mid != H4P_ALIVE_RESP) { + dev_err(info->dev, "Could not negotiate hci_h4p settings\n"); + info->init_error = -EINVAL; + } + +finish_alive: + complete(&info->init_completion); + kfree_skb(skb); +} + +static int hci_h4p_send_negotiation(struct hci_h4p_info *info) +{ + struct hci_h4p_neg_cmd *neg_cmd; + struct hci_h4p_neg_hdr *neg_hdr; + struct sk_buff *skb; + unsigned long flags; + int err, len; + u16 sysclk; + + BT_DBG("Sending negotiation.."); + + switch (info->bt_sysclk) { + case 1: + sysclk = 12000; + break; + case 2: + sysclk = 38400; + break; + default: + return -EINVAL; + } + + len = sizeof(*neg_cmd) + sizeof(*neg_hdr) + H4_TYPE_SIZE; + skb = bt_skb_alloc(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + memset(skb->data, 0x00, len); + *skb_put(skb, 1) = H4_NEG_PKT; + neg_hdr = (struct hci_h4p_neg_hdr *)skb_put(skb, sizeof(*neg_hdr)); + neg_cmd = (struct hci_h4p_neg_cmd *)skb_put(skb, sizeof(*neg_cmd)); + + neg_hdr->dlen = sizeof(*neg_cmd); + neg_cmd->ack = H4P_NEG_REQ; + neg_cmd->baud = cpu_to_le16(BT_BAUDRATE_DIVIDER/MAX_BAUD_RATE); + neg_cmd->proto = H4P_PROTO_BYTE; + neg_cmd->sys_clk = cpu_to_le16(sysclk); + + hci_h4p_change_speed(info, INIT_SPEED); + + hci_h4p_set_rts(info, 1); + info->init_error = 0; + init_completion(&info->init_completion); + skb_queue_tail(&info->txq, skb); + spin_lock_irqsave(&info->lock, flags); + hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | + UART_IER_THRI); + spin_unlock_irqrestore(&info->lock, flags); + + if (!wait_for_completion_interruptible_timeout(&info->init_completion, + msecs_to_jiffies(1000))) + return -ETIMEDOUT; + + if (info->init_error < 0) + return info->init_error; + + /* Change to operational settings */ + hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); + hci_h4p_set_rts(info, 0); + hci_h4p_change_speed(info, MAX_BAUD_RATE); + + err = hci_h4p_wait_for_cts(info, 1, 100); + if (err < 0) + return err; + + hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); + init_completion(&info->init_completion); + err = hci_h4p_send_alive_packet(info); + + if (err < 0) + return err; + + if (!wait_for_completion_interruptible_timeout(&info->init_completion, + msecs_to_jiffies(1000))) + return -ETIMEDOUT; + + if (info->init_error < 0) + return info->init_error; + + BT_DBG("Negotiation successful"); + return 0; +} + +static void hci_h4p_negotiation_packet(struct hci_h4p_info *info, + struct sk_buff *skb) +{ + struct hci_h4p_neg_hdr *hdr; + struct 
hci_h4p_neg_evt *evt; + + hdr = (struct hci_h4p_neg_hdr *)skb->data; + if (hdr->dlen != sizeof(*evt)) { + info->init_error = -EIO; + goto finish_neg; + } + + evt = (struct hci_h4p_neg_evt *)skb_pull(skb, sizeof(*hdr)); + + if (evt->ack != H4P_NEG_ACK) { + dev_err(info->dev, "Could not negotiate hci_h4p settings\n"); + info->init_error = -EINVAL; + } + + info->man_id = evt->man_id; + info->ver_id = evt->ver_id; + +finish_neg: + + complete(&info->init_completion); + kfree_skb(skb); +} + +/* H4 packet handling functions */ +static int hci_h4p_get_hdr_len(struct hci_h4p_info *info, u8 pkt_type) +{ + long retval; + + switch (pkt_type) { + case H4_EVT_PKT: + retval = HCI_EVENT_HDR_SIZE; + break; + case H4_ACL_PKT: + retval = HCI_ACL_HDR_SIZE; + break; + case H4_SCO_PKT: + retval = HCI_SCO_HDR_SIZE; + break; + case H4_NEG_PKT: + retval = H4P_NEG_HDR_SIZE; + break; + case H4_ALIVE_PKT: + retval = H4P_ALIVE_HDR_SIZE; + break; + case H4_RADIO_PKT: + retval = H4_RADIO_HDR_SIZE; + break; + default: + dev_err(info->dev, "Unknown H4 packet type 0x%.2x\n", pkt_type); + retval = -1; + break; + } + + return retval; +} + +static unsigned int hci_h4p_get_data_len(struct hci_h4p_info *info, + struct sk_buff *skb) +{ + long retval = -1; + struct hci_acl_hdr *acl_hdr; + struct hci_sco_hdr *sco_hdr; + struct hci_event_hdr *evt_hdr; + struct hci_h4p_neg_hdr *neg_hdr; + struct hci_h4p_alive_hdr *alive_hdr; + struct hci_h4p_radio_hdr *radio_hdr; + + switch (bt_cb(skb)->pkt_type) { + case H4_EVT_PKT: + evt_hdr = (struct hci_event_hdr *)skb->data; + retval = evt_hdr->plen; + break; + case H4_ACL_PKT: + acl_hdr = (struct hci_acl_hdr *)skb->data; + retval = le16_to_cpu(acl_hdr->dlen); + break; + case H4_SCO_PKT: + sco_hdr = (struct hci_sco_hdr *)skb->data; + retval = sco_hdr->dlen; + break; + case H4_RADIO_PKT: + radio_hdr = (struct hci_h4p_radio_hdr *)skb->data; + retval = radio_hdr->dlen; + break; + case H4_NEG_PKT: + neg_hdr = (struct hci_h4p_neg_hdr *)skb->data; + retval = neg_hdr->dlen; + break; + case H4_ALIVE_PKT: + alive_hdr = (struct hci_h4p_alive_hdr *)skb->data; + retval = alive_hdr->dlen; + break; + } + + return retval; +} + +static inline void hci_h4p_recv_frame(struct hci_h4p_info *info, + struct sk_buff *skb) +{ + if (unlikely(!test_bit(HCI_RUNNING, &info->hdev->flags))) { + switch (bt_cb(skb)->pkt_type) { + case H4_NEG_PKT: + hci_h4p_negotiation_packet(info, skb); + info->rx_state = WAIT_FOR_PKT_TYPE; + return; + case H4_ALIVE_PKT: + hci_h4p_alive_packet(info, skb); + info->rx_state = WAIT_FOR_PKT_TYPE; + return; + } + + if (!test_bit(HCI_UP, &info->hdev->flags)) { + BT_DBG("fw_event"); + hci_h4p_parse_fw_event(info, skb); + return; + } + } + + hci_recv_frame(info->hdev, skb); + BT_DBG("Frame sent to upper layer"); +} + +static inline void hci_h4p_handle_byte(struct hci_h4p_info *info, u8 byte) +{ + switch (info->rx_state) { + case WAIT_FOR_PKT_TYPE: + bt_cb(info->rx_skb)->pkt_type = byte; + info->rx_count = hci_h4p_get_hdr_len(info, byte); + if (info->rx_count < 0) { + info->hdev->stat.err_rx++; + kfree_skb(info->rx_skb); + info->rx_skb = NULL; + } else { + info->rx_state = WAIT_FOR_HEADER; + } + break; + case WAIT_FOR_HEADER: + info->rx_count--; + *skb_put(info->rx_skb, 1) = byte; + if (info->rx_count != 0) + break; + info->rx_count = hci_h4p_get_data_len(info, info->rx_skb); + if (info->rx_count > skb_tailroom(info->rx_skb)) { + dev_err(info->dev, "frame too long\n"); + info->garbage_bytes = info->rx_count + - skb_tailroom(info->rx_skb); + kfree_skb(info->rx_skb); + info->rx_skb = NULL; + break; + } 
+ info->rx_state = WAIT_FOR_DATA; + break; + case WAIT_FOR_DATA: + info->rx_count--; + *skb_put(info->rx_skb, 1) = byte; + break; + default: + WARN_ON(1); + break; + } + + if (info->rx_count == 0) { + /* H4+ devices should always send word aligned packets */ + if (!(info->rx_skb->len % 2)) + info->garbage_bytes++; + hci_h4p_recv_frame(info, info->rx_skb); + info->rx_skb = NULL; + } +} + +static void hci_h4p_rx_tasklet(unsigned long data) +{ + u8 byte; + struct hci_h4p_info *info = (struct hci_h4p_info *)data; + + BT_DBG("tasklet woke up"); + BT_DBG("rx_tasklet woke up"); + + while (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR) { + byte = hci_h4p_inb(info, UART_RX); + if (info->garbage_bytes) { + info->garbage_bytes--; + continue; + } + if (info->rx_skb == NULL) { + info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, + GFP_ATOMIC | GFP_DMA); + if (!info->rx_skb) { + dev_err(info->dev, + "No memory for new packet\n"); + goto finish_rx; + } + info->rx_state = WAIT_FOR_PKT_TYPE; + info->rx_skb->dev = (void *)info->hdev; + } + info->hdev->stat.byte_rx++; + hci_h4p_handle_byte(info, byte); + } + + if (!info->rx_enabled) { + if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT && + info->autorts) { + __hci_h4p_set_auto_ctsrts(info, 0 , UART_EFR_RTS); + info->autorts = 0; + } + /* Flush posted write to avoid spurious interrupts */ + hci_h4p_inb(info, UART_OMAP_SCR); + hci_h4p_set_clk(info, &info->rx_clocks_en, 0); + } + +finish_rx: + BT_DBG("rx_ended"); +} + +static void hci_h4p_tx_tasklet(unsigned long data) +{ + unsigned int sent = 0; + struct sk_buff *skb; + struct hci_h4p_info *info = (struct hci_h4p_info *)data; + + BT_DBG("tasklet woke up"); + BT_DBG("tx_tasklet woke up"); + + if (info->autorts != info->rx_enabled) { + if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) { + if (info->autorts && !info->rx_enabled) { + __hci_h4p_set_auto_ctsrts(info, 0, + UART_EFR_RTS); + info->autorts = 0; + } + if (!info->autorts && info->rx_enabled) { + __hci_h4p_set_auto_ctsrts(info, 1, + UART_EFR_RTS); + info->autorts = 1; + } + } else { + hci_h4p_outb(info, UART_OMAP_SCR, + hci_h4p_inb(info, UART_OMAP_SCR) | + UART_OMAP_SCR_EMPTY_THR); + goto finish_tx; + } + } + + skb = skb_dequeue(&info->txq); + if (!skb) { + /* No data in buffer */ + BT_DBG("skb ready"); + if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) { + hci_h4p_outb(info, UART_IER, + hci_h4p_inb(info, UART_IER) & + ~UART_IER_THRI); + hci_h4p_inb(info, UART_OMAP_SCR); + hci_h4p_disable_tx(info); + return; + } + hci_h4p_outb(info, UART_OMAP_SCR, + hci_h4p_inb(info, UART_OMAP_SCR) | + UART_OMAP_SCR_EMPTY_THR); + goto finish_tx; + } + + /* Copy data to tx fifo */ + while (!(hci_h4p_inb(info, UART_OMAP_SSR) & UART_OMAP_SSR_TXFULL) && + (sent < skb->len)) { + hci_h4p_outb(info, UART_TX, skb->data[sent]); + sent++; + } + + info->hdev->stat.byte_tx += sent; + if (skb->len == sent) { + kfree_skb(skb); + } else { + skb_pull(skb, sent); + skb_queue_head(&info->txq, skb); + } + + hci_h4p_outb(info, UART_OMAP_SCR, hci_h4p_inb(info, UART_OMAP_SCR) & + ~UART_OMAP_SCR_EMPTY_THR); + hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | + UART_IER_THRI); + +finish_tx: + /* Flush posted write to avoid spurious interrupts */ + hci_h4p_inb(info, UART_OMAP_SCR); + +} + +static irqreturn_t hci_h4p_interrupt(int irq, void *data) +{ + struct hci_h4p_info *info = (struct hci_h4p_info *)data; + u8 iir, msr; + int ret; + + ret = IRQ_NONE; + + iir = hci_h4p_inb(info, UART_IIR); + if (iir & UART_IIR_NO_INT) + return IRQ_HANDLED; + + BT_DBG("In interrupt handler iir 0x%.2x", iir); + + 
iir &= UART_IIR_ID; + + if (iir == UART_IIR_MSI) { + msr = hci_h4p_inb(info, UART_MSR); + ret = IRQ_HANDLED; + } + if (iir == UART_IIR_RLSI) { + hci_h4p_inb(info, UART_RX); + hci_h4p_inb(info, UART_LSR); + ret = IRQ_HANDLED; + } + + if (iir == UART_IIR_RDI) { + hci_h4p_rx_tasklet((unsigned long)data); + ret = IRQ_HANDLED; + } + + if (iir == UART_IIR_THRI) { + hci_h4p_tx_tasklet((unsigned long)data); + ret = IRQ_HANDLED; + } + + return ret; +} + +static irqreturn_t hci_h4p_wakeup_interrupt(int irq, void *dev_inst) +{ + struct hci_h4p_info *info = dev_inst; + int should_wakeup; + struct hci_dev *hdev; + + if (!info->hdev) + return IRQ_HANDLED; + + should_wakeup = gpio_get_value(info->host_wakeup_gpio); + hdev = info->hdev; + + if (!test_bit(HCI_RUNNING, &hdev->flags)) { + if (should_wakeup == 1) + complete_all(&info->test_completion); + + return IRQ_HANDLED; + } + + BT_DBG("gpio interrupt %d", should_wakeup); + + /* Check if wee have missed some interrupts */ + if (info->rx_enabled == should_wakeup) + return IRQ_HANDLED; + + if (should_wakeup) + hci_h4p_enable_rx(info); + else + hci_h4p_disable_rx(info); + + return IRQ_HANDLED; +} + +static inline void hci_h4p_set_pm_limits(struct hci_h4p_info *info, bool set) +{ + struct hci_h4p_platform_data *bt_plat_data = info->dev->platform_data; + const char *sset = set ? "set" : "clear"; + + if (unlikely(!bt_plat_data || !bt_plat_data->set_pm_limits)) + return; + + if (set != !!test_bit(H4P_ACTIVE_MODE, &info->pm_flags)) { + bt_plat_data->set_pm_limits(info->dev, set); + if (set) + set_bit(H4P_ACTIVE_MODE, &info->pm_flags); + else + clear_bit(H4P_ACTIVE_MODE, &info->pm_flags); + BT_DBG("Change pm constraints to: %s", sset); + return; + } + + BT_DBG("pm constraints remains: %s", sset); +} + +static int hci_h4p_reset(struct hci_h4p_info *info) +{ + int err; + + err = hci_h4p_reset_uart(info); + if (err < 0) { + dev_err(info->dev, "Uart reset failed\n"); + return err; + } + hci_h4p_init_uart(info); + hci_h4p_set_rts(info, 0); + + gpio_set_value(info->reset_gpio, 0); + gpio_set_value(info->bt_wakeup_gpio, 1); + msleep(10); + + if (gpio_get_value(info->host_wakeup_gpio) == 1) { + dev_err(info->dev, "host_wakeup_gpio not low\n"); + return -EPROTO; + } + + init_completion(&info->test_completion); + gpio_set_value(info->reset_gpio, 1); + + if (!wait_for_completion_interruptible_timeout(&info->test_completion, + msecs_to_jiffies(100))) { + dev_err(info->dev, "wakeup test timed out\n"); + complete_all(&info->test_completion); + return -EPROTO; + } + + err = hci_h4p_wait_for_cts(info, 1, 100); + if (err < 0) { + dev_err(info->dev, "No cts from bt chip\n"); + return err; + } + + hci_h4p_set_rts(info, 1); + + return 0; +} + +/* hci callback functions */ +static int hci_h4p_hci_flush(struct hci_dev *hdev) +{ + struct hci_h4p_info *info = hci_get_drvdata(hdev); + skb_queue_purge(&info->txq); + + return 0; +} + +static int hci_h4p_bt_wakeup_test(struct hci_h4p_info *info) +{ + /* + * Test Sequence: + * Host de-asserts the BT_WAKE_UP line. + * Host polls the UART_CTS line, waiting for it to be de-asserted. + * Host asserts the BT_WAKE_UP line. + * Host polls the UART_CTS line, waiting for it to be asserted. + * Host de-asserts the BT_WAKE_UP line (allow the Bluetooth device to + * sleep). + * Host polls the UART_CTS line, waiting for it to be de-asserted. 
+ */ + int err; + int ret = -ECOMM; + + if (!info) + return -EINVAL; + + /* Disable wakeup interrupts */ + disable_irq(gpio_to_irq(info->host_wakeup_gpio)); + + gpio_set_value(info->bt_wakeup_gpio, 0); + err = hci_h4p_wait_for_cts(info, 0, 100); + if (err) { + dev_warn(info->dev, "bt_wakeup_test: fail: " + "CTS low timed out: %d\n", err); + goto out; + } + + gpio_set_value(info->bt_wakeup_gpio, 1); + err = hci_h4p_wait_for_cts(info, 1, 100); + if (err) { + dev_warn(info->dev, "bt_wakeup_test: fail: " + "CTS high timed out: %d\n", err); + goto out; + } + + gpio_set_value(info->bt_wakeup_gpio, 0); + err = hci_h4p_wait_for_cts(info, 0, 100); + if (err) { + dev_warn(info->dev, "bt_wakeup_test: fail: " + "CTS re-low timed out: %d\n", err); + goto out; + } + + ret = 0; + +out: + + /* Re-enable wakeup interrupts */ + enable_irq(gpio_to_irq(info->host_wakeup_gpio)); + + return ret; +} + +static int hci_h4p_hci_open(struct hci_dev *hdev) +{ + struct hci_h4p_info *info; + int err, retries = 0; + struct sk_buff_head fw_queue; + unsigned long flags; + + info = hci_get_drvdata(hdev); + + if (test_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + /* TI1271 has HW bug and boot up might fail. Retry up to three times */ +again: + + info->rx_enabled = 1; + info->rx_state = WAIT_FOR_PKT_TYPE; + info->rx_count = 0; + info->garbage_bytes = 0; + info->rx_skb = NULL; + info->pm_enabled = 0; + init_completion(&info->fw_completion); + hci_h4p_set_clk(info, &info->tx_clocks_en, 1); + hci_h4p_set_clk(info, &info->rx_clocks_en, 1); + skb_queue_head_init(&fw_queue); + + err = hci_h4p_reset(info); + if (err < 0) + goto err_clean; + + hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_CTS | UART_EFR_RTS); + info->autorts = 1; + + err = hci_h4p_send_negotiation(info); + + err = hci_h4p_read_fw(info, &fw_queue); + if (err < 0) { + dev_err(info->dev, "Cannot read firmware\n"); + goto err_clean; + } + + err = hci_h4p_send_fw(info, &fw_queue); + if (err < 0) { + dev_err(info->dev, "Sending firmware failed.\n"); + goto err_clean; + } + + info->pm_enabled = 1; + + err = hci_h4p_bt_wakeup_test(info); + if (err < 0) { + dev_err(info->dev, "BT wakeup test failed.\n"); + goto err_clean; + } + + spin_lock_irqsave(&info->lock, flags); + info->rx_enabled = gpio_get_value(info->host_wakeup_gpio); + hci_h4p_set_clk(info, &info->rx_clocks_en, info->rx_enabled); + spin_unlock_irqrestore(&info->lock, flags); + + hci_h4p_set_clk(info, &info->tx_clocks_en, 0); + + kfree_skb(info->alive_cmd_skb); + info->alive_cmd_skb = NULL; + set_bit(HCI_RUNNING, &hdev->flags); + + BT_DBG("hci up and running"); + return 0; + +err_clean: + hci_h4p_hci_flush(hdev); + hci_h4p_reset_uart(info); + del_timer_sync(&info->lazy_release); + hci_h4p_set_clk(info, &info->tx_clocks_en, 0); + hci_h4p_set_clk(info, &info->rx_clocks_en, 0); + gpio_set_value(info->reset_gpio, 0); + gpio_set_value(info->bt_wakeup_gpio, 0); + skb_queue_purge(&fw_queue); + kfree_skb(info->alive_cmd_skb); + info->alive_cmd_skb = NULL; + kfree_skb(info->rx_skb); + info->rx_skb = NULL; + + if (retries++ < 3) { + dev_err(info->dev, "FW loading try %d fail. 
Retry.\n", retries); + goto again; + } + + return err; +} + +static int hci_h4p_hci_close(struct hci_dev *hdev) +{ + struct hci_h4p_info *info = hci_get_drvdata(hdev); + + if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + hci_h4p_hci_flush(hdev); + hci_h4p_set_clk(info, &info->tx_clocks_en, 1); + hci_h4p_set_clk(info, &info->rx_clocks_en, 1); + hci_h4p_reset_uart(info); + del_timer_sync(&info->lazy_release); + hci_h4p_set_clk(info, &info->tx_clocks_en, 0); + hci_h4p_set_clk(info, &info->rx_clocks_en, 0); + gpio_set_value(info->reset_gpio, 0); + gpio_set_value(info->bt_wakeup_gpio, 0); + kfree_skb(info->rx_skb); + + return 0; +} + +static int hci_h4p_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_h4p_info *info; + int err = 0; + + BT_DBG("dev %p, skb %p", hdev, skb); + + info = hci_get_drvdata(hdev); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) { + dev_warn(info->dev, "Frame for non-running device\n"); + return -EIO; + } + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + /* Push frame type to skb */ + *skb_push(skb, 1) = (bt_cb(skb)->pkt_type); + /* We should allways send word aligned data to h4+ devices */ + if (skb->len % 2) { + err = skb_pad(skb, 1); + if (!err) + *skb_put(skb, 1) = 0x00; + } + if (err) + return err; + + skb_queue_tail(&info->txq, skb); + hci_h4p_enable_tx(info); + + return 0; +} + +static ssize_t hci_h4p_store_bdaddr(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hci_h4p_info *info = dev_get_drvdata(dev); + unsigned int bdaddr[6]; + int ret, i; + + ret = sscanf(buf, "%2x:%2x:%2x:%2x:%2x:%2x\n", + &bdaddr[0], &bdaddr[1], &bdaddr[2], + &bdaddr[3], &bdaddr[4], &bdaddr[5]); + + if (ret != 6) + return -EINVAL; + + for (i = 0; i < 6; i++) { + if (bdaddr[i] > 0xff) + return -EINVAL; + info->bd_addr[i] = bdaddr[i] & 0xff; + } + + return count; +} + +static ssize_t hci_h4p_show_bdaddr(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hci_h4p_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "%pMR\n", info->bd_addr); +} + +static DEVICE_ATTR(bdaddr, S_IRUGO | S_IWUSR, hci_h4p_show_bdaddr, + hci_h4p_store_bdaddr); + +static int hci_h4p_sysfs_create_files(struct device *dev) +{ + return device_create_file(dev, &dev_attr_bdaddr); +} + +static void hci_h4p_sysfs_remove_files(struct device *dev) +{ + device_remove_file(dev, &dev_attr_bdaddr); +} + +static int hci_h4p_register_hdev(struct hci_h4p_info *info) +{ + struct hci_dev *hdev; + + /* Initialize and register HCI device */ + + hdev = hci_alloc_dev(); + if (!hdev) { + dev_err(info->dev, "Can't allocate memory for device\n"); + return -ENOMEM; + } + info->hdev = hdev; + + hdev->bus = HCI_UART; + hci_set_drvdata(hdev, info); + + hdev->open = hci_h4p_hci_open; + hdev->close = hci_h4p_hci_close; + hdev->flush = hci_h4p_hci_flush; + hdev->send = hci_h4p_hci_send_frame; + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + + SET_HCIDEV_DEV(hdev, info->dev); + + if (hci_h4p_sysfs_create_files(info->dev) < 0) { + dev_err(info->dev, "failed to create sysfs files\n"); + goto free; + } + + if (hci_register_dev(hdev) >= 0) + return 0; + + dev_err(info->dev, "hci_register failed %s.\n", hdev->name); + hci_h4p_sysfs_remove_files(info->dev); +free: + hci_free_dev(info->hdev); + return -ENODEV; +} + +static int hci_h4p_probe(struct platform_device *pdev) +{ + 
struct hci_h4p_platform_data *bt_plat_data; + struct hci_h4p_info *info; + int err; + + dev_info(&pdev->dev, "Registering HCI H4P device\n"); + info = devm_kzalloc(&pdev->dev, sizeof(struct hci_h4p_info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->dev = &pdev->dev; + info->tx_enabled = 1; + info->rx_enabled = 1; + spin_lock_init(&info->lock); + spin_lock_init(&info->clocks_lock); + skb_queue_head_init(&info->txq); + + if (pdev->dev.platform_data == NULL) { + dev_err(&pdev->dev, "Could not get Bluetooth config data\n"); + return -ENODATA; + } + + bt_plat_data = pdev->dev.platform_data; + info->chip_type = bt_plat_data->chip_type; + info->bt_wakeup_gpio = bt_plat_data->bt_wakeup_gpio; + info->host_wakeup_gpio = bt_plat_data->host_wakeup_gpio; + info->reset_gpio = bt_plat_data->reset_gpio; + info->reset_gpio_shared = bt_plat_data->reset_gpio_shared; + info->bt_sysclk = bt_plat_data->bt_sysclk; + + BT_DBG("RESET gpio: %d", info->reset_gpio); + BT_DBG("BTWU gpio: %d", info->bt_wakeup_gpio); + BT_DBG("HOSTWU gpio: %d", info->host_wakeup_gpio); + BT_DBG("sysclk: %d", info->bt_sysclk); + + init_completion(&info->test_completion); + complete_all(&info->test_completion); + + if (!info->reset_gpio_shared) { + err = devm_gpio_request_one(&pdev->dev, info->reset_gpio, + GPIOF_OUT_INIT_LOW, "bt_reset"); + if (err < 0) { + dev_err(&pdev->dev, "Cannot get GPIO line %d\n", + info->reset_gpio); + return err; + } + } + + err = devm_gpio_request_one(&pdev->dev, info->bt_wakeup_gpio, + GPIOF_OUT_INIT_LOW, "bt_wakeup"); + + if (err < 0) { + dev_err(info->dev, "Cannot get GPIO line 0x%d", + info->bt_wakeup_gpio); + return err; + } + + err = devm_gpio_request_one(&pdev->dev, info->host_wakeup_gpio, + GPIOF_DIR_IN, "host_wakeup"); + if (err < 0) { + dev_err(info->dev, "Cannot get GPIO line %d", + info->host_wakeup_gpio); + return err; + } + + info->irq = bt_plat_data->uart_irq; + info->uart_base = devm_ioremap(&pdev->dev, bt_plat_data->uart_base, SZ_2K); + info->uart_iclk = devm_clk_get(&pdev->dev, bt_plat_data->uart_iclk); + info->uart_fclk = devm_clk_get(&pdev->dev, bt_plat_data->uart_fclk); + + err = devm_request_irq(&pdev->dev, info->irq, hci_h4p_interrupt, IRQF_DISABLED, + "hci_h4p", info); + if (err < 0) { + dev_err(info->dev, "hci_h4p: unable to get IRQ %d\n", info->irq); + return err; + } + + err = devm_request_irq(&pdev->dev, gpio_to_irq(info->host_wakeup_gpio), + hci_h4p_wakeup_interrupt, IRQF_TRIGGER_FALLING | + IRQF_TRIGGER_RISING | IRQF_DISABLED, + "hci_h4p_wkup", info); + if (err < 0) { + dev_err(info->dev, "hci_h4p: unable to get wakeup IRQ %d\n", + gpio_to_irq(info->host_wakeup_gpio)); + return err; + } + + err = irq_set_irq_wake(gpio_to_irq(info->host_wakeup_gpio), 1); + if (err < 0) { + dev_err(info->dev, "hci_h4p: unable to set wakeup for IRQ %d\n", + gpio_to_irq(info->host_wakeup_gpio)); + return err; + } + + init_timer_deferrable(&info->lazy_release); + info->lazy_release.function = hci_h4p_lazy_clock_release; + info->lazy_release.data = (unsigned long)info; + hci_h4p_set_clk(info, &info->tx_clocks_en, 1); + err = hci_h4p_reset_uart(info); + if (err < 0) + return err; + gpio_set_value(info->reset_gpio, 0); + hci_h4p_set_clk(info, &info->tx_clocks_en, 0); + + platform_set_drvdata(pdev, info); + + if (hci_h4p_register_hdev(info) < 0) { + dev_err(info->dev, "failed to register hci_h4p hci device\n"); + return -EINVAL; + } + + return 0; +} + +static int hci_h4p_remove(struct platform_device *pdev) +{ + struct hci_h4p_info *info; + + info = platform_get_drvdata(pdev); + + 
hci_h4p_sysfs_remove_files(info->dev); + hci_h4p_hci_close(info->hdev); + hci_unregister_dev(info->hdev); + hci_free_dev(info->hdev); + + return 0; +} + +static struct platform_driver hci_h4p_driver = { + .probe = hci_h4p_probe, + .remove = hci_h4p_remove, + .driver = { + .name = "hci_h4p", + }, +}; + +module_platform_driver(hci_h4p_driver); + +MODULE_ALIAS("platform:hci_h4p"); +MODULE_DESCRIPTION("Bluetooth h4 driver with nokia extensions"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ville Tervo"); +MODULE_FIRMWARE(FW_NAME_TI1271_PRELE); +MODULE_FIRMWARE(FW_NAME_TI1271_LE); +MODULE_FIRMWARE(FW_NAME_TI1271); +MODULE_FIRMWARE(FW_NAME_BCM2048); +MODULE_FIRMWARE(FW_NAME_CSR); diff --git a/drivers/staging/nokia_h4p/nokia_fw-bcm.c b/drivers/staging/nokia_h4p/nokia_fw-bcm.c new file mode 100644 index 000000000000..e8912bfc0a91 --- /dev/null +++ b/drivers/staging/nokia_h4p/nokia_fw-bcm.c @@ -0,0 +1,147 @@ +/* + * This file is part of Nokia H4P bluetooth driver + * + * Copyright (C) 2005-2008 Nokia Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include + +#include "hci_h4p.h" + +static int hci_h4p_bcm_set_bdaddr(struct hci_h4p_info *info, struct sk_buff *skb) +{ + int i; + static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf}; + int not_valid; + + not_valid = 1; + for (i = 0; i < 6; i++) { + if (info->bd_addr[i] != 0x00) { + not_valid = 0; + break; + } + } + + if (not_valid) { + dev_info(info->dev, "Valid bluetooth address not found, setting some random\n"); + /* When address is not valid, use some random but Nokia MAC */ + memcpy(info->bd_addr, nokia_oui, 3); + get_random_bytes(info->bd_addr + 3, 3); + } + + for (i = 0; i < 6; i++) + skb->data[9 - i] = info->bd_addr[i]; + + return 0; +} + +void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb) +{ + struct sk_buff *fw_skb; + int err; + unsigned long flags; + + if (skb->data[5] != 0x00) { + dev_err(info->dev, "Firmware sending command failed 0x%.2x\n", + skb->data[5]); + info->fw_error = -EPROTO; + } + + kfree_skb(skb); + + fw_skb = skb_dequeue(info->fw_q); + if (fw_skb == NULL || info->fw_error) { + complete(&info->fw_completion); + return; + } + + if (fw_skb->data[1] == 0x01 && fw_skb->data[2] == 0xfc && fw_skb->len >= 10) { + BT_DBG("Setting bluetooth address"); + err = hci_h4p_bcm_set_bdaddr(info, fw_skb); + if (err < 0) { + kfree_skb(fw_skb); + info->fw_error = err; + complete(&info->fw_completion); + return; + } + } + + skb_queue_tail(&info->txq, fw_skb); + spin_lock_irqsave(&info->lock, flags); + hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | + UART_IER_THRI); + spin_unlock_irqrestore(&info->lock, flags); +} + + +int hci_h4p_bcm_send_fw(struct hci_h4p_info *info, + struct sk_buff_head *fw_queue) +{ + struct sk_buff *skb; + unsigned long flags, time; + + info->fw_error = 0; + + BT_DBG("Sending firmware"); + + time = jiffies; + + info->fw_q = fw_queue; + skb = 
skb_dequeue(fw_queue); + if (!skb) + return -ENODATA; + + BT_DBG("Sending commands"); + + /* + * Disable smart-idle as UART TX interrupts + * are not wake-up capable + */ + hci_h4p_smart_idle(info, 0); + + /* Check if this is bd_address packet */ + init_completion(&info->fw_completion); + skb_queue_tail(&info->txq, skb); + spin_lock_irqsave(&info->lock, flags); + hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | + UART_IER_THRI); + spin_unlock_irqrestore(&info->lock, flags); + + if (!wait_for_completion_timeout(&info->fw_completion, + msecs_to_jiffies(2000))) { + dev_err(info->dev, "No reply to fw command\n"); + return -ETIMEDOUT; + } + + if (info->fw_error) { + dev_err(info->dev, "FW error\n"); + return -EPROTO; + } + + BT_DBG("Firmware sent in %d msecs", + jiffies_to_msecs(jiffies-time)); + + hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); + hci_h4p_set_rts(info, 0); + hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE); + hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); + + return 0; +} diff --git a/drivers/staging/nokia_h4p/nokia_fw-csr.c b/drivers/staging/nokia_h4p/nokia_fw-csr.c new file mode 100644 index 000000000000..e39c4a31a879 --- /dev/null +++ b/drivers/staging/nokia_h4p/nokia_fw-csr.c @@ -0,0 +1,150 @@ +/* + * This file is part of Nokia H4P bluetooth driver + * + * Copyright (C) 2005-2008 Nokia Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include + +#include "hci_h4p.h" + +void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb) +{ + /* Check if this is fw packet */ + if (skb->data[0] != 0xff) { + hci_recv_frame(info->hdev, skb); + return; + } + + if (skb->data[11] || skb->data[12]) { + dev_err(info->dev, "Firmware sending command failed\n"); + info->fw_error = -EPROTO; + } + + kfree_skb(skb); + complete(&info->fw_completion); +} + +int hci_h4p_bc4_send_fw(struct hci_h4p_info *info, + struct sk_buff_head *fw_queue) +{ + static const u8 nokia_oui[3] = {0x00, 0x19, 0x4F}; + struct sk_buff *skb; + unsigned int offset; + int retries, count, i, not_valid; + unsigned long flags; + + info->fw_error = 0; + + BT_DBG("Sending firmware"); + skb = skb_dequeue(fw_queue); + + if (!skb) + return -ENOMSG; + + /* Check if this is bd_address packet */ + if (skb->data[15] == 0x01 && skb->data[16] == 0x00) { + offset = 21; + skb->data[offset + 1] = 0x00; + skb->data[offset + 5] = 0x00; + + not_valid = 1; + for (i = 0; i < 6; i++) { + if (info->bd_addr[i] != 0x00) { + not_valid = 0; + break; + } + } + + if (not_valid) { + dev_info(info->dev, "Valid bluetooth address not found," + " setting some random\n"); + /* When address is not valid, use some random */ + memcpy(info->bd_addr, nokia_oui, 3); + get_random_bytes(info->bd_addr + 3, 3); + } + + skb->data[offset + 7] = info->bd_addr[0]; + skb->data[offset + 6] = info->bd_addr[1]; + skb->data[offset + 4] = info->bd_addr[2]; + skb->data[offset + 0] = info->bd_addr[3]; + skb->data[offset + 3] = info->bd_addr[4]; + skb->data[offset + 2] = info->bd_addr[5]; + } + + for (count = 1; ; count++) { + BT_DBG("Sending firmware command %d", count); + init_completion(&info->fw_completion); + skb_queue_tail(&info->txq, skb); + spin_lock_irqsave(&info->lock, flags); + hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | + UART_IER_THRI); + spin_unlock_irqrestore(&info->lock, flags); + + skb = skb_dequeue(fw_queue); + if (!skb) + break; + + if (!wait_for_completion_timeout(&info->fw_completion, + msecs_to_jiffies(1000))) { + dev_err(info->dev, "No reply to fw command\n"); + return -ETIMEDOUT; + } + + if (info->fw_error) { + dev_err(info->dev, "FW error\n"); + return -EPROTO; + } + }; + + /* Wait for chip warm reset */ + retries = 100; + while ((!skb_queue_empty(&info->txq) || + !(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) && + retries--) { + msleep(10); + } + if (!retries) { + dev_err(info->dev, "Transmitter not empty\n"); + return -ETIMEDOUT; + } + + hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE); + + if (hci_h4p_wait_for_cts(info, 1, 100)) { + dev_err(info->dev, "cts didn't deassert after final speed\n"); + return -ETIMEDOUT; + } + + retries = 100; + do { + init_completion(&info->init_completion); + hci_h4p_send_alive_packet(info); + retries--; + } while (!wait_for_completion_timeout(&info->init_completion, 100) && + retries > 0); + + if (!retries) { + dev_err(info->dev, "No alive reply after speed change\n"); + return -ETIMEDOUT; + } + + return 0; +} diff --git a/drivers/staging/nokia_h4p/nokia_fw-ti1273.c b/drivers/staging/nokia_h4p/nokia_fw-ti1273.c new file mode 100644 index 000000000000..f5500f71c839 --- /dev/null +++ b/drivers/staging/nokia_h4p/nokia_fw-ti1273.c @@ -0,0 +1,110 @@ +/* + * This file is part of Nokia H4P bluetooth 
driver + * + * Copyright (C) 2009 Nokia Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include + +#include "hci_h4p.h" + +static struct sk_buff_head *fw_q; + +void hci_h4p_ti1273_parse_fw_event(struct hci_h4p_info *info, + struct sk_buff *skb) +{ + struct sk_buff *fw_skb; + unsigned long flags; + + if (skb->data[5] != 0x00) { + dev_err(info->dev, "Firmware sending command failed 0x%.2x\n", + skb->data[5]); + info->fw_error = -EPROTO; + } + + kfree_skb(skb); + + fw_skb = skb_dequeue(fw_q); + if (fw_skb == NULL || info->fw_error) { + complete(&info->fw_completion); + return; + } + + skb_queue_tail(&info->txq, fw_skb); + spin_lock_irqsave(&info->lock, flags); + hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | + UART_IER_THRI); + spin_unlock_irqrestore(&info->lock, flags); +} + + +int hci_h4p_ti1273_send_fw(struct hci_h4p_info *info, + struct sk_buff_head *fw_queue) +{ + struct sk_buff *skb; + unsigned long flags, time; + + info->fw_error = 0; + + BT_DBG("Sending firmware"); + + time = jiffies; + + fw_q = fw_queue; + skb = skb_dequeue(fw_queue); + if (!skb) + return -ENODATA; + + BT_DBG("Sending commands"); + /* Check if this is bd_address packet */ + init_completion(&info->fw_completion); + hci_h4p_smart_idle(info, 0); + skb_queue_tail(&info->txq, skb); + spin_lock_irqsave(&info->lock, flags); + hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | + UART_IER_THRI); + spin_unlock_irqrestore(&info->lock, flags); + + if (!wait_for_completion_timeout(&info->fw_completion, + msecs_to_jiffies(2000))) { + dev_err(info->dev, "No reply to fw command\n"); + return -ETIMEDOUT; + } + + if (info->fw_error) { + dev_err(info->dev, "FW error\n"); + return -EPROTO; + } + + BT_DBG("Firmware sent in %d msecs", + jiffies_to_msecs(jiffies-time)); + + hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); + hci_h4p_set_rts(info, 0); + hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE); + if (hci_h4p_wait_for_cts(info, 1, 100)) { + dev_err(info->dev, + "cts didn't go down after final speed change\n"); + return -ETIMEDOUT; + } + hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); + + return 0; +} diff --git a/drivers/staging/nokia_h4p/nokia_fw.c b/drivers/staging/nokia_h4p/nokia_fw.c new file mode 100644 index 000000000000..cfea61cd59e9 --- /dev/null +++ b/drivers/staging/nokia_h4p/nokia_fw.c @@ -0,0 +1,195 @@ +/* + * This file is part of hci_h4p bluetooth driver + * + * Copyright (C) 2005, 2006 Nokia Corporation. + * + * Contact: Ville Tervo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include + +#include + +#include "hci_h4p.h" + +static int fw_pos; + +/* Firmware handling */ +static int hci_h4p_open_firmware(struct hci_h4p_info *info, + const struct firmware **fw_entry) +{ + int err; + + fw_pos = 0; + BT_DBG("Opening firmware man_id 0x%.2x ver_id 0x%.2x", + info->man_id, info->ver_id); + switch (info->man_id) { + case H4P_ID_TI1271: + switch (info->ver_id) { + case 0xe1: + err = request_firmware(fw_entry, FW_NAME_TI1271_PRELE, + info->dev); + break; + case 0xd1: + case 0xf1: + err = request_firmware(fw_entry, FW_NAME_TI1271_LE, + info->dev); + break; + default: + err = request_firmware(fw_entry, FW_NAME_TI1271, + info->dev); + } + break; + case H4P_ID_CSR: + err = request_firmware(fw_entry, FW_NAME_CSR, info->dev); + break; + case H4P_ID_BCM2048: + err = request_firmware(fw_entry, FW_NAME_BCM2048, info->dev); + break; + default: + dev_err(info->dev, "Invalid chip type\n"); + *fw_entry = NULL; + err = -EINVAL; + } + + return err; +} + +static void hci_h4p_close_firmware(const struct firmware *fw_entry) +{ + release_firmware(fw_entry); +} + +/* Read fw. Return length of the command. If no more commands in + * fw 0 is returned. In error case return value is negative. + */ +static int hci_h4p_read_fw_cmd(struct hci_h4p_info *info, struct sk_buff **skb, + const struct firmware *fw_entry, gfp_t how) +{ + unsigned int cmd_len; + + if (fw_pos >= fw_entry->size) + return 0; + + if (fw_pos + 2 > fw_entry->size) { + dev_err(info->dev, "Corrupted firmware image 1\n"); + return -EMSGSIZE; + } + + cmd_len = fw_entry->data[fw_pos++]; + cmd_len += fw_entry->data[fw_pos++] << 8; + if (cmd_len == 0) + return 0; + + if (fw_pos + cmd_len > fw_entry->size) { + dev_err(info->dev, "Corrupted firmware image 2\n"); + return -EMSGSIZE; + } + + *skb = bt_skb_alloc(cmd_len, how); + if (!*skb) { + dev_err(info->dev, "Cannot reserve memory for buffer\n"); + return -ENOMEM; + } + memcpy(skb_put(*skb, cmd_len), &fw_entry->data[fw_pos], cmd_len); + + fw_pos += cmd_len; + + return (*skb)->len; +} + +int hci_h4p_read_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue) +{ + const struct firmware *fw_entry = NULL; + struct sk_buff *skb = NULL; + int err; + + err = hci_h4p_open_firmware(info, &fw_entry); + if (err < 0 || !fw_entry) + goto err_clean; + + while ((err = hci_h4p_read_fw_cmd(info, &skb, fw_entry, GFP_KERNEL))) { + if (err < 0 || !skb) + goto err_clean; + + skb_queue_tail(fw_queue, skb); + } + + /* Chip detection code does neg and alive stuff + * discard two first skbs */ + skb = skb_dequeue(fw_queue); + if (!skb) { + err = -EMSGSIZE; + goto err_clean; + } + kfree_skb(skb); + skb = skb_dequeue(fw_queue); + if (!skb) { + err = -EMSGSIZE; + goto err_clean; + } + kfree_skb(skb); + +err_clean: + hci_h4p_close_firmware(fw_entry); + return err; +} + +int hci_h4p_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue) +{ + int err; + + switch (info->man_id) { + case H4P_ID_CSR: + err = hci_h4p_bc4_send_fw(info, fw_queue); + break; + case H4P_ID_TI1271: + err = hci_h4p_ti1273_send_fw(info, fw_queue); + break; + case H4P_ID_BCM2048: + err = hci_h4p_bcm_send_fw(info, fw_queue); + break; + default: + dev_err(info->dev, "Don't know how to send firmware\n"); + err = -EINVAL; + } + + 
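+	/*
+	 * Each chip-specific loader above queues its firmware packets to
+	 * info->txq and waits on info->fw_completion, which is completed
+	 * from the matching *_parse_fw_event() handler in the RX path.
+	 */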
return err; +} + +void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb) +{ + switch (info->man_id) { + case H4P_ID_CSR: + hci_h4p_bc4_parse_fw_event(info, skb); + break; + case H4P_ID_TI1271: + hci_h4p_ti1273_parse_fw_event(info, skb); + break; + case H4P_ID_BCM2048: + hci_h4p_bcm_parse_fw_event(info, skb); + break; + default: + dev_err(info->dev, "Don't know how to parse fw event\n"); + info->fw_error = -EINVAL; + } + + return; +} diff --git a/drivers/staging/nokia_h4p/nokia_uart.c b/drivers/staging/nokia_h4p/nokia_uart.c new file mode 100644 index 000000000000..0fb57de4b750 --- /dev/null +++ b/drivers/staging/nokia_h4p/nokia_uart.c @@ -0,0 +1,199 @@ +/* + * This file is part of Nokia H4P bluetooth driver + * + * Copyright (C) 2005, 2006 Nokia Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include + +#include + +#include "hci_h4p.h" + +inline void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val) +{ + __raw_writeb(val, info->uart_base + (offset << 2)); +} + +inline u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset) +{ + return __raw_readb(info->uart_base + (offset << 2)); +} + +void hci_h4p_set_rts(struct hci_h4p_info *info, int active) +{ + u8 b; + + b = hci_h4p_inb(info, UART_MCR); + if (active) + b |= UART_MCR_RTS; + else + b &= ~UART_MCR_RTS; + hci_h4p_outb(info, UART_MCR, b); +} + +int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active, + int timeout_ms) +{ + unsigned long timeout; + int state; + + timeout = jiffies + msecs_to_jiffies(timeout_ms); + for (;;) { + state = hci_h4p_inb(info, UART_MSR) & UART_MSR_CTS; + if (active) { + if (state) + return 0; + } else { + if (!state) + return 0; + } + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + msleep(1); + } +} + +void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which) +{ + u8 lcr, b; + + lcr = hci_h4p_inb(info, UART_LCR); + hci_h4p_outb(info, UART_LCR, 0xbf); + b = hci_h4p_inb(info, UART_EFR); + if (on) + b |= which; + else + b &= ~which; + hci_h4p_outb(info, UART_EFR, b); + hci_h4p_outb(info, UART_LCR, lcr); +} + +void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which) +{ + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + __hci_h4p_set_auto_ctsrts(info, on, which); + spin_unlock_irqrestore(&info->lock, flags); +} + +void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed) +{ + unsigned int divisor; + u8 lcr, mdr1; + + BT_DBG("Setting speed %lu", speed); + + if (speed >= 460800) { + divisor = UART_CLOCK / 13 / speed; + mdr1 = 3; + } else { + divisor = UART_CLOCK / 16 / speed; + mdr1 = 0; + } + + /* Make sure UART mode is disabled */ + hci_h4p_outb(info, UART_OMAP_MDR1, 7); + + lcr = hci_h4p_inb(info, UART_LCR); + hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB); /* Set DLAB */ + hci_h4p_outb(info, UART_DLL, divisor & 0xff); /* Set speed */ + 
hci_h4p_outb(info, UART_DLM, divisor >> 8); + hci_h4p_outb(info, UART_LCR, lcr); + + /* Make sure UART mode is enabled */ + hci_h4p_outb(info, UART_OMAP_MDR1, mdr1); +} + +int hci_h4p_reset_uart(struct hci_h4p_info *info) +{ + int count = 0; + + /* Reset the UART */ + hci_h4p_outb(info, UART_OMAP_SYSC, UART_SYSC_OMAP_RESET); + while (!(hci_h4p_inb(info, UART_OMAP_SYSS) & UART_SYSS_RESETDONE)) { + if (count++ > 100) { + dev_err(info->dev, "hci_h4p: UART reset timeout\n"); + return -ENODEV; + } + udelay(1); + } + + return 0; +} + +void hci_h4p_store_regs(struct hci_h4p_info *info) +{ + u16 lcr = 0; + + lcr = hci_h4p_inb(info, UART_LCR); + hci_h4p_outb(info, UART_LCR, 0xBF); + info->dll = hci_h4p_inb(info, UART_DLL); + info->dlh = hci_h4p_inb(info, UART_DLM); + info->efr = hci_h4p_inb(info, UART_EFR); + hci_h4p_outb(info, UART_LCR, lcr); + info->mdr1 = hci_h4p_inb(info, UART_OMAP_MDR1); + info->ier = hci_h4p_inb(info, UART_IER); +} + +void hci_h4p_restore_regs(struct hci_h4p_info *info) +{ + u16 lcr = 0; + + hci_h4p_init_uart(info); + + hci_h4p_outb(info, UART_OMAP_MDR1, 7); + lcr = hci_h4p_inb(info, UART_LCR); + hci_h4p_outb(info, UART_LCR, 0xBF); + hci_h4p_outb(info, UART_DLL, info->dll); /* Set speed */ + hci_h4p_outb(info, UART_DLM, info->dlh); + hci_h4p_outb(info, UART_EFR, info->efr); + hci_h4p_outb(info, UART_LCR, lcr); + hci_h4p_outb(info, UART_OMAP_MDR1, info->mdr1); + hci_h4p_outb(info, UART_IER, info->ier); +} + +void hci_h4p_init_uart(struct hci_h4p_info *info) +{ + u8 mcr, efr; + + /* Enable and setup FIFO */ + hci_h4p_outb(info, UART_OMAP_MDR1, 0x00); + + hci_h4p_outb(info, UART_LCR, 0xbf); + efr = hci_h4p_inb(info, UART_EFR); + hci_h4p_outb(info, UART_EFR, UART_EFR_ECB); + hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB); + mcr = hci_h4p_inb(info, UART_MCR); + hci_h4p_outb(info, UART_MCR, UART_MCR_TCRTLR); + hci_h4p_outb(info, UART_FCR, UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT | + (3 << 6) | (0 << 4)); + hci_h4p_outb(info, UART_LCR, 0xbf); + hci_h4p_outb(info, UART_TI752_TLR, 0xed); + hci_h4p_outb(info, UART_TI752_TCR, 0xef); + hci_h4p_outb(info, UART_EFR, efr); + hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB); + hci_h4p_outb(info, UART_MCR, 0x00); + hci_h4p_outb(info, UART_LCR, UART_LCR_WLEN8); + hci_h4p_outb(info, UART_IER, UART_IER_RDI); + hci_h4p_outb(info, UART_OMAP_SYSC, (1 << 0) | (1 << 2) | (2 << 3)); +} diff --git a/include/linux/platform_data/bt-nokia-h4p.h b/include/linux/platform_data/bt-nokia-h4p.h new file mode 100644 index 000000000000..30d169dfadf3 --- /dev/null +++ b/include/linux/platform_data/bt-nokia-h4p.h @@ -0,0 +1,38 @@ +/* + * This file is part of Nokia H4P bluetooth driver + * + * Copyright (C) 2010 Nokia Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + + +/** + * struct hci_h4p_platform data - hci_h4p Platform data structure + */ +struct hci_h4p_platform_data { + int chip_type; + int bt_sysclk; + unsigned int bt_wakeup_gpio; + unsigned int host_wakeup_gpio; + unsigned int reset_gpio; + int reset_gpio_shared; + unsigned int uart_irq; + phys_addr_t uart_base; + const char *uart_iclk; + const char *uart_fclk; + void (*set_pm_limits)(struct device *dev, bool set); +}; -- cgit v1.2.3 From b7945b77cd03094458f3624bc82a27e0d36e75d0 Mon Sep 17 00:00:00 2001 From: Valentina Manea Date: Thu, 23 Jan 2014 23:12:29 +0200 Subject: staging: usbip: convert usbip-host driver to usb_device_driver This driver was previously an interface driver. Since USB/IP exports a whole device, not just an interface, it would make sense to be a device driver. This patch also modifies the way userspace sees and uses a shared device: * the usbip_status file is no longer created for interface 0, but for the whole device (such as /sys/devices/pci0000:00/0000:00:01.2/usb1/1-1/usbip_status). * per interface information, such as interface class or protocol, is no longer sent/received; only device specific information is transmitted. * since the driver was moved one level below in the USB architecture, there is no need to bind/unbind each interface, just the device as a whole. Signed-off-by: Valentina Manea Signed-off-by: Greg Kroah-Hartman --- drivers/staging/usbip/stub.h | 2 +- drivers/staging/usbip/stub_dev.c | 150 ++++++++------------- drivers/staging/usbip/stub_main.c | 6 +- drivers/staging/usbip/stub_rx.c | 2 +- .../usbip/userspace/libsrc/usbip_host_driver.c | 45 +++---- drivers/staging/usbip/userspace/src/usbip_bind.c | 142 ++++++++----------- drivers/staging/usbip/userspace/src/usbip_list.c | 19 +-- drivers/staging/usbip/userspace/src/usbip_unbind.c | 51 ++----- drivers/staging/usbip/userspace/src/usbipd.c | 15 --- drivers/usb/core/generic.c | 1 + drivers/usb/core/message.c | 1 + include/linux/usb.h | 4 + 12 files changed, 150 insertions(+), 288 deletions(-) (limited to 'include/linux') diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h index a73e437ec215..82e539a4fcff 100644 --- a/drivers/staging/usbip/stub.h +++ b/drivers/staging/usbip/stub.h @@ -93,7 +93,7 @@ struct bus_id_priv { extern struct kmem_cache *stub_priv_cache; /* stub_dev.c */ -extern struct usb_driver stub_driver; +extern struct usb_device_driver stub_driver; /* stub_main.c */ struct bus_id_priv *get_busid_priv(const char *busid); diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c index 76a1ff0e6275..b0bfd3430d47 100644 --- a/drivers/staging/usbip/stub_dev.c +++ b/drivers/staging/usbip/stub_dev.c @@ -279,21 +279,19 @@ static void stub_device_unusable(struct usbip_device *ud) * * Allocates and initializes a new stub_device struct. 
*/ -static struct stub_device *stub_device_alloc(struct usb_device *udev, - struct usb_interface *interface) +static struct stub_device *stub_device_alloc(struct usb_device *udev) { struct stub_device *sdev; - int busnum = interface_to_busnum(interface); - int devnum = interface_to_devnum(interface); + int busnum = udev->bus->busnum; + int devnum = udev->devnum; - dev_dbg(&interface->dev, "allocating stub device"); + dev_dbg(&udev->dev, "allocating stub device"); /* yes, it's a new device */ sdev = kzalloc(sizeof(struct stub_device), GFP_KERNEL); if (!sdev) return NULL; - sdev->interface = usb_get_intf(interface); sdev->udev = usb_get_dev(udev); /* @@ -322,7 +320,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev, usbip_start_eh(&sdev->ud); - dev_dbg(&interface->dev, "register new interface\n"); + dev_dbg(&udev->dev, "register new device\n"); return sdev; } @@ -332,32 +330,20 @@ static void stub_device_free(struct stub_device *sdev) kfree(sdev); } -/* - * If a usb device has multiple active interfaces, this driver is bound to all - * the active interfaces. However, usbip exports *a* usb device (i.e., not *an* - * active interface). Currently, a userland program must ensure that it - * looks at the usbip's sysfs entries of only the first active interface. - * - * TODO: use "struct usb_device_driver" to bind a usb device. - * However, it seems it is not fully supported in mainline kernel yet - * (2.6.19.2). - */ -static int stub_probe(struct usb_interface *interface, - const struct usb_device_id *id) +static int stub_probe(struct usb_device *udev) { - struct usb_device *udev = interface_to_usbdev(interface); struct stub_device *sdev = NULL; - const char *udev_busid = dev_name(interface->dev.parent); - int err = 0; + const char *udev_busid = dev_name(&udev->dev); + int err = 0, config; struct bus_id_priv *busid_priv; - dev_dbg(&interface->dev, "Enter\n"); + dev_dbg(&udev->dev, "Enter\n"); /* check we should claim or not by busid_table */ busid_priv = get_busid_priv(udev_busid); if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) || (busid_priv->status == STUB_BUSID_OTHER)) { - dev_info(&interface->dev, + dev_info(&udev->dev, "%s is not in match_busid table... 
skip!\n", udev_busid); @@ -383,60 +369,36 @@ static int stub_probe(struct usb_interface *interface, return -ENODEV; } - if (busid_priv->status == STUB_BUSID_ALLOC) { - sdev = busid_priv->sdev; - if (!sdev) - return -ENODEV; - - busid_priv->interf_count++; - dev_info(&interface->dev, - "usbip-host: register new interface (bus %u dev %u ifn %u)\n", - udev->bus->busnum, udev->devnum, - interface->cur_altsetting->desc.bInterfaceNumber); - - /* set private data to usb_interface */ - usb_set_intfdata(interface, sdev); - - err = stub_add_files(&interface->dev); - if (err) { - dev_err(&interface->dev, "stub_add_files for %s\n", - udev_busid); - usb_set_intfdata(interface, NULL); - busid_priv->interf_count--; - return err; - } - - usb_get_intf(interface); - return 0; - } - /* ok, this is my device */ - sdev = stub_device_alloc(udev, interface); + sdev = stub_device_alloc(udev); if (!sdev) return -ENOMEM; - dev_info(&interface->dev, - "usbip-host: register new device (bus %u dev %u ifn %u)\n", - udev->bus->busnum, udev->devnum, - interface->cur_altsetting->desc.bInterfaceNumber); + dev_info(&udev->dev, + "usbip-host: register new device (bus %u dev %u)\n", + udev->bus->busnum, udev->devnum); - busid_priv->interf_count = 0; busid_priv->shutdown_busid = 0; - /* set private data to usb_interface */ - usb_set_intfdata(interface, sdev); - busid_priv->interf_count++; + config = usb_choose_configuration(udev); + if (config >= 0) { + err = usb_set_configuration(udev, config); + if (err && err != -ENODEV) + dev_err(&udev->dev, "can't set config #%d, error %d\n", + config, err); + } + + /* set private data to usb_device */ + dev_set_drvdata(&udev->dev, sdev); busid_priv->sdev = sdev; - err = stub_add_files(&interface->dev); + err = stub_add_files(&udev->dev); if (err) { - dev_err(&interface->dev, "stub_add_files for %s\n", udev_busid); - usb_set_intfdata(interface, NULL); - usb_put_intf(interface); + dev_err(&udev->dev, "stub_add_files for %s\n", udev_busid); + dev_set_drvdata(&udev->dev, NULL); usb_put_dev(udev); kthread_stop_put(sdev->ud.eh); - busid_priv->interf_count = 0; busid_priv->sdev = NULL; stub_device_free(sdev); return err; @@ -461,13 +423,13 @@ static void shutdown_busid(struct bus_id_priv *busid_priv) * called in usb_disconnect() or usb_deregister() * but only if actconfig(active configuration) exists */ -static void stub_disconnect(struct usb_interface *interface) +static void stub_disconnect(struct usb_device *udev) { struct stub_device *sdev; - const char *udev_busid = dev_name(interface->dev.parent); + const char *udev_busid = dev_name(&udev->dev); struct bus_id_priv *busid_priv; - dev_dbg(&interface->dev, "Enter\n"); + dev_dbg(&udev->dev, "Enter\n"); busid_priv = get_busid_priv(udev_busid); if (!busid_priv) { @@ -475,41 +437,29 @@ static void stub_disconnect(struct usb_interface *interface) return; } - sdev = usb_get_intfdata(interface); + sdev = dev_get_drvdata(&udev->dev); /* get stub_device */ if (!sdev) { - dev_err(&interface->dev, "could not get device"); + dev_err(&udev->dev, "could not get device"); return; } - usb_set_intfdata(interface, NULL); + dev_set_drvdata(&udev->dev, NULL); /* * NOTE: rx/tx threads are invoked for each usb_device. 
*/ - stub_remove_files(&interface->dev); + stub_remove_files(&udev->dev); /* If usb reset is called from event handler */ - if (busid_priv->sdev->ud.eh == current) { - busid_priv->interf_count--; + if (busid_priv->sdev->ud.eh == current) return; - } - - if (busid_priv->interf_count > 1) { - busid_priv->interf_count--; - shutdown_busid(busid_priv); - usb_put_intf(interface); - return; - } - - busid_priv->interf_count = 0; /* shutdown the current connection */ shutdown_busid(busid_priv); usb_put_dev(sdev->udev); - usb_put_intf(interface); /* free sdev */ busid_priv->sdev = NULL; @@ -523,28 +473,34 @@ static void stub_disconnect(struct usb_interface *interface) } } -/* - * Presence of pre_reset and post_reset prevents the driver from being unbound - * when the device is being reset - */ +#ifdef CONFIG_PM -static int stub_pre_reset(struct usb_interface *interface) +/* These functions need usb_port_suspend and usb_port_resume, + * which reside in drivers/usb/core/usb.h. Skip for now. */ + +static int stub_suspend(struct usb_device *udev, pm_message_t message) { - dev_dbg(&interface->dev, "pre_reset\n"); + dev_dbg(&udev->dev, "stub_suspend\n"); + return 0; } -static int stub_post_reset(struct usb_interface *interface) +static int stub_resume(struct usb_device *udev, pm_message_t message) { - dev_dbg(&interface->dev, "post_reset\n"); + dev_dbg(&udev->dev, "stub_resume\n"); + return 0; } -struct usb_driver stub_driver = { +#endif /* CONFIG_PM */ + +struct usb_device_driver stub_driver = { .name = "usbip-host", .probe = stub_probe, .disconnect = stub_disconnect, - .id_table = stub_table, - .pre_reset = stub_pre_reset, - .post_reset = stub_post_reset, +#ifdef CONFIG_PM + .suspend = stub_suspend, + .resume = stub_resume, +#endif + .supports_autosuspend = 0, }; diff --git a/drivers/staging/usbip/stub_main.c b/drivers/staging/usbip/stub_main.c index baf857f7cc88..bd7b83a9d758 100644 --- a/drivers/staging/usbip/stub_main.c +++ b/drivers/staging/usbip/stub_main.c @@ -254,7 +254,7 @@ static int __init usbip_host_init(void) return -ENOMEM; } - ret = usb_register(&stub_driver); + ret = usb_register_device_driver(&stub_driver, THIS_MODULE); if (ret) { pr_err("usb_register failed %d\n", ret); goto err_usb_register; @@ -271,7 +271,7 @@ static int __init usbip_host_init(void) return ret; err_create_file: - usb_deregister(&stub_driver); + usb_deregister_device_driver(&stub_driver); err_usb_register: kmem_cache_destroy(stub_priv_cache); return ret; @@ -286,7 +286,7 @@ static void __exit usbip_host_exit(void) * deregister() calls stub_disconnect() for all devices. Device * specific data is cleared in stub_disconnect(). 
*/ - usb_deregister(&stub_driver); + usb_deregister_device_driver(&stub_driver); kmem_cache_destroy(stub_priv_cache); } diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c index 5d1d4a183300..76e44d949232 100644 --- a/drivers/staging/usbip/stub_rx.c +++ b/drivers/staging/usbip/stub_rx.c @@ -550,7 +550,7 @@ static void stub_rx_pdu(struct usbip_device *ud) int ret; struct usbip_header pdu; struct stub_device *sdev = container_of(ud, struct stub_device, ud); - struct device *dev = &sdev->interface->dev; + struct device *dev = &sdev->udev->dev; usbip_dbg_stub_rx("Enter\n"); diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c index 71a449cf50db..86a867582de6 100644 --- a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c +++ b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c @@ -32,7 +32,6 @@ struct usbip_host_driver *host_driver; #define SYSFS_OPEN_RETRIES 100 -/* only the first interface value is true! */ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) { char attrpath[SYSFS_PATH_MAX]; @@ -56,8 +55,8 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) * usbip_status to reappear. */ - snprintf(attrpath, SYSFS_PATH_MAX, "%s/%s:%d.%d/usbip_status", - udev->path, udev->busid, udev->bConfigurationValue, 0); + snprintf(attrpath, SYSFS_PATH_MAX, "%s/usbip_status", + udev->path); while (retries > 0) { if (stat(attrpath, &s) == 0) @@ -168,19 +167,18 @@ static void delete_nothing(void *unused_data) static int refresh_exported_devices(void) { - /* sysfs_device of usb_interface */ - struct sysfs_device *suintf; - struct dlist *suintf_list; /* sysfs_device of usb_device */ struct sysfs_device *sudev; struct dlist *sudev_list; + struct dlist *sudev_unique_list; struct usbip_exported_device *edev; - sudev_list = dlist_new_with_delete(sizeof(struct sysfs_device), - delete_nothing); + sudev_unique_list = dlist_new_with_delete(sizeof(struct sysfs_device), + delete_nothing); - suintf_list = sysfs_get_driver_devices(host_driver->sysfs_driver); - if (!suintf_list) { + sudev_list = sysfs_get_driver_devices(host_driver->sysfs_driver); + + if (!sudev_list) { /* * Not an error condition. There are simply no devices bound to * the driver yet. 
@@ -190,23 +188,13 @@ static int refresh_exported_devices(void) return 0; } - /* collect unique USB devices (not interfaces) */ - dlist_for_each_data(suintf_list, suintf, struct sysfs_device) { - /* get usb device of this usb interface */ - sudev = sysfs_get_device_parent(suintf); - if (!sudev) { - dbg("sysfs_get_device_parent failed: %s", suintf->name); - continue; - } + dlist_for_each_data(sudev_list, sudev, struct sysfs_device) + if (check_new(sudev_unique_list, sudev)) + dlist_unshift(sudev_unique_list, sudev); - if (check_new(sudev_list, sudev)) { - /* insert item at head of list */ - dlist_unshift(sudev_list, sudev); - } - } - - dlist_for_each_data(sudev_list, sudev, struct sysfs_device) { + dlist_for_each_data(sudev_unique_list, sudev, struct sysfs_device) { edev = usbip_exported_device_new(sudev->path); + if (!edev) { dbg("usbip_exported_device_new failed"); continue; @@ -216,7 +204,7 @@ static int refresh_exported_devices(void) host_driver->ndevs++; } - dlist_destroy(sudev_list); + dlist_destroy(sudev_unique_list); return 0; } @@ -356,9 +344,8 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd) } /* only the first interface is true */ - snprintf(attr_path, sizeof(attr_path), "%s/%s:%d.%d/%s", - edev->udev.path, edev->udev.busid, - edev->udev.bConfigurationValue, 0, attr_name); + snprintf(attr_path, sizeof(attr_path), "%s/%s", + edev->udev.path, attr_name); attr = sysfs_open_attribute(attr_path); if (!attr) { diff --git a/drivers/staging/usbip/userspace/src/usbip_bind.c b/drivers/staging/usbip/userspace/src/usbip_bind.c index 9ecaf6e574df..8cfd2dbd9510 100644 --- a/drivers/staging/usbip/userspace/src/usbip_bind.c +++ b/drivers/staging/usbip/userspace/src/usbip_bind.c @@ -52,12 +52,8 @@ static int bind_usbip(char *busid) char attr_name[] = "bind"; char sysfs_mntpath[SYSFS_PATH_MAX]; char bind_attr_path[SYSFS_PATH_MAX]; - char intf_busid[SYSFS_BUS_ID_SIZE]; - struct sysfs_device *busid_dev; struct sysfs_attribute *bind_attr; - struct sysfs_attribute *bConfValue; - struct sysfs_attribute *bNumIntfs; - int i, failed = 0; + int failed = 0; int rc, ret = -1; rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX); @@ -76,39 +72,15 @@ static int bind_usbip(char *busid) return -1; } - busid_dev = sysfs_open_device(bus_type, busid); - if (!busid_dev) { - dbg("sysfs_open_device %s failed: %s", busid, strerror(errno)); - goto err_close_bind_attr; - } - - bConfValue = sysfs_get_device_attr(busid_dev, "bConfigurationValue"); - bNumIntfs = sysfs_get_device_attr(busid_dev, "bNumInterfaces"); - - if (!bConfValue || !bNumIntfs) { - dbg("problem getting device attributes: %s", - strerror(errno)); - goto err_close_busid_dev; - } - - for (i = 0; i < atoi(bNumIntfs->value); i++) { - snprintf(intf_busid, SYSFS_BUS_ID_SIZE, "%s:%.1s.%d", busid, - bConfValue->value, i); - - rc = sysfs_write_attribute(bind_attr, intf_busid, - SYSFS_BUS_ID_SIZE); - if (rc < 0) { - dbg("bind driver at %s failed", intf_busid); - failed = 1; - } + rc = sysfs_write_attribute(bind_attr, busid, SYSFS_BUS_ID_SIZE); + if (rc < 0) { + dbg("bind driver at %s failed", busid); + failed = 1; } if (!failed) ret = 0; -err_close_busid_dev: - sysfs_close_device(busid_dev); -err_close_bind_attr: sysfs_close_attribute(bind_attr); return ret; @@ -118,15 +90,12 @@ err_close_bind_attr: static int unbind_other(char *busid) { char bus_type[] = "usb"; - char intf_busid[SYSFS_BUS_ID_SIZE]; struct sysfs_device *busid_dev; - struct sysfs_device *intf_dev; - struct sysfs_driver *intf_drv; + struct sysfs_device *dev; + struct 
sysfs_driver *drv; struct sysfs_attribute *unbind_attr; - struct sysfs_attribute *bConfValue; struct sysfs_attribute *bDevClass; - struct sysfs_attribute *bNumIntfs; - int i, rc; + int rc; enum unbind_status status = UNBIND_ST_OK; busid_dev = sysfs_open_device(bus_type, busid); @@ -134,12 +103,11 @@ static int unbind_other(char *busid) dbg("sysfs_open_device %s failed: %s", busid, strerror(errno)); return -1; } + dbg("busid path: %s", busid_dev->path); - bConfValue = sysfs_get_device_attr(busid_dev, "bConfigurationValue"); bDevClass = sysfs_get_device_attr(busid_dev, "bDeviceClass"); - bNumIntfs = sysfs_get_device_attr(busid_dev, "bNumInterfaces"); - if (!bConfValue || !bDevClass || !bNumIntfs) { - dbg("problem getting device attributes: %s", + if (!bDevClass) { + dbg("problem getting device attribute: %s", strerror(errno)); goto err_close_busid_dev; } @@ -149,62 +117,62 @@ static int unbind_other(char *busid) goto err_close_busid_dev; } - for (i = 0; i < atoi(bNumIntfs->value); i++) { - snprintf(intf_busid, SYSFS_BUS_ID_SIZE, "%s:%.1s.%d", busid, - bConfValue->value, i); - intf_dev = sysfs_open_device(bus_type, intf_busid); - if (!intf_dev) { - dbg("could not open interface device: %s", - strerror(errno)); - goto err_close_busid_dev; - } - - dbg("%s -> %s", intf_dev->name, intf_dev->driver_name); + dev = sysfs_open_device(bus_type, busid); + if (!dev) { + dbg("could not open device: %s", + strerror(errno)); + goto err_close_busid_dev; + } - if (!strncmp("unknown", intf_dev->driver_name, SYSFS_NAME_LEN)) - /* unbound interface */ - continue; + dbg("%s -> %s", dev->name, dev->driver_name); - if (!strncmp(USBIP_HOST_DRV_NAME, intf_dev->driver_name, - SYSFS_NAME_LEN)) { - /* already bound to usbip-host */ - status = UNBIND_ST_USBIP_HOST; - continue; - } + if (!strncmp("unknown", dev->driver_name, SYSFS_NAME_LEN)) { + /* unbound interface */ + sysfs_close_device(dev); + goto out; + } - /* unbinding */ - intf_drv = sysfs_open_driver(bus_type, intf_dev->driver_name); - if (!intf_drv) { - dbg("could not open interface driver on %s: %s", - intf_dev->name, strerror(errno)); - goto err_close_intf_dev; - } + if (!strncmp(USBIP_HOST_DRV_NAME, dev->driver_name, + SYSFS_NAME_LEN)) { + /* already bound to usbip-host */ + status = UNBIND_ST_USBIP_HOST; + sysfs_close_device(dev); + goto out; + } - unbind_attr = sysfs_get_driver_attr(intf_drv, "unbind"); - if (!unbind_attr) { - dbg("problem getting interface driver attribute: %s", - strerror(errno)); - goto err_close_intf_drv; - } + /* unbinding */ + drv = sysfs_open_driver(bus_type, dev->driver_name); + if (!drv) { + dbg("could not open device driver on %s: %s", + dev->name, strerror(errno)); + goto err_close_intf_dev; + } + dbg("device driver: %s", drv->path); - rc = sysfs_write_attribute(unbind_attr, intf_dev->bus_id, - SYSFS_BUS_ID_SIZE); - if (rc < 0) { - /* NOTE: why keep unbinding other interfaces? */ - dbg("unbind driver at %s failed", intf_dev->bus_id); - status = UNBIND_ST_FAILED; - } + unbind_attr = sysfs_get_driver_attr(drv, "unbind"); + if (!unbind_attr) { + dbg("problem getting device driver attribute: %s", + strerror(errno)); + goto err_close_intf_drv; + } - sysfs_close_driver(intf_drv); - sysfs_close_device(intf_dev); + rc = sysfs_write_attribute(unbind_attr, dev->bus_id, + SYSFS_BUS_ID_SIZE); + if (rc < 0) { + /* NOTE: why keep unbinding other interfaces? 
*/ + dbg("unbind driver at %s failed", dev->bus_id); + status = UNBIND_ST_FAILED; } + sysfs_close_driver(drv); + sysfs_close_device(dev); + goto out; err_close_intf_drv: - sysfs_close_driver(intf_drv); + sysfs_close_driver(drv); err_close_intf_dev: - sysfs_close_device(intf_dev); + sysfs_close_device(dev); err_close_busid_dev: status = UNBIND_ST_FAILED; out: diff --git a/drivers/staging/usbip/userspace/src/usbip_list.c b/drivers/staging/usbip/userspace/src/usbip_list.c index 237e099337a1..8864fa2a7f0b 100644 --- a/drivers/staging/usbip/userspace/src/usbip_list.c +++ b/drivers/staging/usbip/userspace/src/usbip_list.c @@ -52,9 +52,8 @@ static int get_exported_devices(char *host, int sockfd) struct op_devlist_reply reply; uint16_t code = OP_REP_DEVLIST; struct usbip_usb_device udev; - struct usbip_usb_interface uintf; unsigned int i; - int j, rc; + int rc; rc = usbip_net_send_op_common(sockfd, OP_REQ_DEVLIST, 0); if (rc < 0) { @@ -104,22 +103,6 @@ static int get_exported_devices(char *host, int sockfd) printf("%11s: %s\n", "", udev.path); printf("%11s: %s\n", "", class_name); - for (j = 0; j < udev.bNumInterfaces; j++) { - rc = usbip_net_recv(sockfd, &uintf, sizeof(uintf)); - if (rc < 0) { - dbg("usbip_net_recv failed: usbip_usb_intf[%d]", - j); - - return -1; - } - usbip_net_pack_usb_interface(0, &uintf); - - usbip_names_get_class(class_name, sizeof(class_name), - uintf.bInterfaceClass, - uintf.bInterfaceSubClass, - uintf.bInterfaceProtocol); - printf("%11s: %2d - %s\n", "", j, class_name); - } printf("\n"); } diff --git a/drivers/staging/usbip/userspace/src/usbip_unbind.c b/drivers/staging/usbip/userspace/src/usbip_unbind.c index d5a9ab6af2a6..cace87838c24 100644 --- a/drivers/staging/usbip/userspace/src/usbip_unbind.c +++ b/drivers/staging/usbip/userspace/src/usbip_unbind.c @@ -47,12 +47,10 @@ static int unbind_device(char *busid) int verified = 0; int rc, ret = -1; - char attr_name[] = "bConfigurationValue"; + char attr_name[] = "unbind"; char sysfs_mntpath[SYSFS_PATH_MAX]; - char busid_attr_path[SYSFS_PATH_MAX]; - struct sysfs_attribute *busid_attr; - char *val = NULL; - int len; + char unbind_attr_path[SYSFS_PATH_MAX]; + struct sysfs_attribute *unbind_attr; /* verify the busid device is using usbip-host */ usbip_host_drv = sysfs_open_driver(bus_type, USBIP_HOST_DRV_NAME); @@ -99,55 +97,34 @@ static int unbind_device(char *busid) return -1; } - snprintf(busid_attr_path, sizeof(busid_attr_path), "%s/%s/%s/%s/%s/%s", - sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DEVICES_NAME, - busid, attr_name); + snprintf(unbind_attr_path, sizeof(unbind_attr_path), "%s/%s/%s/%s/%s/%s", + sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME, + USBIP_HOST_DRV_NAME, attr_name); /* read a device attribute */ - busid_attr = sysfs_open_attribute(busid_attr_path); - if (!busid_attr) { + unbind_attr = sysfs_open_attribute(unbind_attr_path); + if (!unbind_attr) { err("could not open %s/%s: %s", busid, attr_name, strerror(errno)); return -1; } - if (sysfs_read_attribute(busid_attr) < 0) { - err("problem reading attribute: %s", strerror(errno)); - goto err_out; - } - - len = busid_attr->len; - val = malloc(len); - *val = *busid_attr->value; - sysfs_close_attribute(busid_attr); - /* notify driver of unbind */ rc = modify_match_busid(busid, 0); if (rc < 0) { err("unable to unbind device on %s", busid); - goto err_out; - } - - /* write the device attribute */ - busid_attr = sysfs_open_attribute(busid_attr_path); - if (!busid_attr) { - err("could not open %s/%s: %s", busid, attr_name, - strerror(errno)); - 
return -1; } - rc = sysfs_write_attribute(busid_attr, val, len); - if (rc < 0) { - err("problem writing attribute: %s", strerror(errno)); - goto err_out; - } - sysfs_close_attribute(busid_attr); + rc = sysfs_write_attribute(unbind_attr, busid, + SYSFS_BUS_ID_SIZE); + if (rc < 0) { + dbg("bind driver at %s failed", busid); + } + sysfs_close_attribute(unbind_attr); ret = 0; printf("unbind device on busid %s: complete\n", busid); -err_out: - free(val); err_close_usbip_host_drv: sysfs_close_driver(usbip_host_drv); diff --git a/drivers/staging/usbip/userspace/src/usbipd.c b/drivers/staging/usbip/userspace/src/usbipd.c index 7980f8b5517b..c2b3ced9ca6e 100644 --- a/drivers/staging/usbip/userspace/src/usbipd.c +++ b/drivers/staging/usbip/userspace/src/usbipd.c @@ -159,9 +159,7 @@ static int send_reply_devlist(int connfd) { struct usbip_exported_device *edev; struct usbip_usb_device pdu_udev; - struct usbip_usb_interface pdu_uinf; struct op_devlist_reply reply; - int i; int rc; reply.ndev = 0; @@ -196,19 +194,6 @@ static int send_reply_devlist(int connfd) dbg("usbip_net_send failed: pdu_udev"); return -1; } - - for (i = 0; i < edev->udev.bNumInterfaces; i++) { - dump_usb_interface(&edev->uinf[i]); - memcpy(&pdu_uinf, &edev->uinf[i], sizeof(pdu_uinf)); - usbip_net_pack_usb_interface(1, &pdu_uinf); - - rc = usbip_net_send(connfd, &pdu_uinf, - sizeof(pdu_uinf)); - if (rc < 0) { - dbg("usbip_net_send failed: pdu_uinf"); - return -1; - } - } } return 0; diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index acbfeb0a0119..358ca8dd784f 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -155,6 +155,7 @@ int usb_choose_configuration(struct usb_device *udev) } return i; } +EXPORT_SYMBOL_GPL(usb_choose_configuration); static int generic_probe(struct usb_device *udev) { diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index f829a1aad1c3..08d95e9d56c2 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1920,6 +1920,7 @@ free_interfaces: usb_autosuspend_device(dev); return 0; } +EXPORT_SYMBOL_GPL(usb_set_configuration); static LIST_HEAD(set_config_list); static DEFINE_SPINLOCK(set_config_lock); diff --git a/include/linux/usb.h b/include/linux/usb.h index c716da18c668..f434619f3975 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1668,6 +1668,10 @@ extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr); /* this request isn't really synchronous, but it belongs with the others */ extern int usb_driver_set_configuration(struct usb_device *udev, int config); +/* choose and set configuration for device */ +extern int usb_choose_configuration(struct usb_device *udev); +extern int usb_set_configuration(struct usb_device *dev, int configuration); + /* * timeouts, in milliseconds, used for sending/receiving control messages * they typically complete within a few frames (msec) after they're issued -- cgit v1.2.3 From 5267cf02c7794953d89e9593a0d497bf43e3790d Mon Sep 17 00:00:00 2001 From: Bjarke Istrup Pedersen Date: Wed, 22 Jan 2014 09:16:58 +0000 Subject: hv: Add hyperv.h to uapi headers This patch adds the hyperv.h header to the uapi folder, and adds it to the Kbuild file. Doing this enables compiling userspace Hyper-V tools using the installed headers. Version 2: Split UAPI parts into new header, instead of duplicating. 
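As a minimal sketch of what this enables (illustrative only, not part of the patch): once the header is exported with "make headers_install", a plain userspace tool can build against the KVP/VSS definitions directly, e.g.:

	#include <stdio.h>
	#include <linux/types.h>	/* __u8/__u32 types used by the header */
	#include <linux/hyperv.h>	/* the installed UAPI header */

	int main(void)
	{
		/* Touch a few of the exported definitions to show they
		 * compile in userspace without __KERNEL__-only dependencies. */
		struct hv_kvp_msg msg = { 0 };

		msg.kvp_hdr.operation = KVP_OP_ENUMERATE;
		msg.kvp_hdr.pool = KVP_POOL_AUTO;

		printf("util fw version 0x%x, kvp op %u\n",
		       UTIL_FW_VERSION, (unsigned int)msg.kvp_hdr.operation);
		return 0;
	}
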
Signed-off-by: Bjarke Istrup Pedersen Signed-off-by: Greg Kroah-Hartman --- include/linux/hyperv.h | 321 +---------------------------------------- include/uapi/linux/Kbuild | 1 + include/uapi/linux/hyperv.h | 344 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 347 insertions(+), 319 deletions(-) create mode 100644 include/uapi/linux/hyperv.h (limited to 'include/linux') diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 15da677478dd..167ef47e3d6e 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -25,325 +25,9 @@ #ifndef _HYPERV_H #define _HYPERV_H -#include - -/* - * Framework version for util services. - */ -#define UTIL_FW_MINOR 0 - -#define UTIL_WS2K8_FW_MAJOR 1 -#define UTIL_WS2K8_FW_VERSION (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR) - -#define UTIL_FW_MAJOR 3 -#define UTIL_FW_VERSION (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR) - - -/* - * Implementation of host controlled snapshot of the guest. - */ - -#define VSS_OP_REGISTER 128 - -enum hv_vss_op { - VSS_OP_CREATE = 0, - VSS_OP_DELETE, - VSS_OP_HOT_BACKUP, - VSS_OP_GET_DM_INFO, - VSS_OP_BU_COMPLETE, - /* - * Following operations are only supported with IC version >= 5.0 - */ - VSS_OP_FREEZE, /* Freeze the file systems in the VM */ - VSS_OP_THAW, /* Unfreeze the file systems */ - VSS_OP_AUTO_RECOVER, - VSS_OP_COUNT /* Number of operations, must be last */ -}; - - -/* - * Header for all VSS messages. - */ -struct hv_vss_hdr { - __u8 operation; - __u8 reserved[7]; -} __attribute__((packed)); - - -/* - * Flag values for the hv_vss_check_feature. Linux supports only - * one value. - */ -#define VSS_HBU_NO_AUTO_RECOVERY 0x00000005 - -struct hv_vss_check_feature { - __u32 flags; -} __attribute__((packed)); - -struct hv_vss_check_dm_info { - __u32 flags; -} __attribute__((packed)); - -struct hv_vss_msg { - union { - struct hv_vss_hdr vss_hdr; - int error; - }; - union { - struct hv_vss_check_feature vss_cf; - struct hv_vss_check_dm_info dm_info; - }; -} __attribute__((packed)); - -/* - * An implementation of HyperV key value pair (KVP) functionality for Linux. - * - * - * Copyright (C) 2010, Novell, Inc. - * Author : K. Y. Srinivasan - * - */ - -/* - * Maximum value size - used for both key names and value data, and includes - * any applicable NULL terminators. - * - * Note: This limit is somewhat arbitrary, but falls easily within what is - * supported for all native guests (back to Win 2000) and what is reasonable - * for the IC KVP exchange functionality. Note that Windows Me/98/95 are - * limited to 255 character key names. - * - * MSDN recommends not storing data values larger than 2048 bytes in the - * registry. - * - * Note: This value is used in defining the KVP exchange message - this value - * cannot be modified without affecting the message size and compatibility. - */ - -/* - * bytes, including any null terminators - */ -#define HV_KVP_EXCHANGE_MAX_VALUE_SIZE (2048) - - -/* - * Maximum key size - the registry limit for the length of an entry name - * is 256 characters, including the null terminator - */ - -#define HV_KVP_EXCHANGE_MAX_KEY_SIZE (512) +#include -/* - * In Linux, we implement the KVP functionality in two components: - * 1) The kernel component which is packaged as part of the hv_utils driver - * is responsible for communicating with the host and responsible for - * implementing the host/guest protocol. 2) A user level daemon that is - * responsible for data gathering. 
- * - * Host/Guest Protocol: The host iterates over an index and expects the guest - * to assign a key name to the index and also return the value corresponding to - * the key. The host will have atmost one KVP transaction outstanding at any - * given point in time. The host side iteration stops when the guest returns - * an error. Microsoft has specified the following mapping of key names to - * host specified index: - * - * Index Key Name - * 0 FullyQualifiedDomainName - * 1 IntegrationServicesVersion - * 2 NetworkAddressIPv4 - * 3 NetworkAddressIPv6 - * 4 OSBuildNumber - * 5 OSName - * 6 OSMajorVersion - * 7 OSMinorVersion - * 8 OSVersion - * 9 ProcessorArchitecture - * - * The Windows host expects the Key Name and Key Value to be encoded in utf16. - * - * Guest Kernel/KVP Daemon Protocol: As noted earlier, we implement all of the - * data gathering functionality in a user mode daemon. The user level daemon - * is also responsible for binding the key name to the index as well. The - * kernel and user-level daemon communicate using a connector channel. - * - * The user mode component first registers with the - * the kernel component. Subsequently, the kernel component requests, data - * for the specified keys. In response to this message the user mode component - * fills in the value corresponding to the specified key. We overload the - * sequence field in the cn_msg header to define our KVP message types. - * - * - * The kernel component simply acts as a conduit for communication between the - * Windows host and the user-level daemon. The kernel component passes up the - * index received from the Host to the user-level daemon. If the index is - * valid (supported), the corresponding key as well as its - * value (both are strings) is returned. If the index is invalid - * (not supported), a NULL key string is returned. - */ - - -/* - * Registry value types. - */ - -#define REG_SZ 1 -#define REG_U32 4 -#define REG_U64 8 - -/* - * As we look at expanding the KVP functionality to include - * IP injection functionality, we need to maintain binary - * compatibility with older daemons. - * - * The KVP opcodes are defined by the host and it was unfortunate - * that I chose to treat the registration operation as part of the - * KVP operations defined by the host. - * Here is the level of compatibility - * (between the user level daemon and the kernel KVP driver) that we - * will implement: - * - * An older daemon will always be supported on a newer driver. - * A given user level daemon will require a minimal version of the - * kernel driver. - * If we cannot handle the version differences, we will fail gracefully - * (this can happen when we have a user level daemon that is more - * advanced than the KVP driver. - * - * We will use values used in this handshake for determining if we have - * workable user level daemon and the kernel driver. We begin by taking the - * registration opcode out of the KVP opcode namespace. We will however, - * maintain compatibility with the existing user-level daemon code. - */ - -/* - * Daemon code not supporting IP injection (legacy daemon). - */ - -#define KVP_OP_REGISTER 4 - -/* - * Daemon code supporting IP injection. - * The KVP opcode field is used to communicate the - * registration information; so define a namespace that - * will be distinct from the host defined KVP opcode. 
- */ - -#define KVP_OP_REGISTER1 100 - -enum hv_kvp_exchg_op { - KVP_OP_GET = 0, - KVP_OP_SET, - KVP_OP_DELETE, - KVP_OP_ENUMERATE, - KVP_OP_GET_IP_INFO, - KVP_OP_SET_IP_INFO, - KVP_OP_COUNT /* Number of operations, must be last. */ -}; - -enum hv_kvp_exchg_pool { - KVP_POOL_EXTERNAL = 0, - KVP_POOL_GUEST, - KVP_POOL_AUTO, - KVP_POOL_AUTO_EXTERNAL, - KVP_POOL_AUTO_INTERNAL, - KVP_POOL_COUNT /* Number of pools, must be last. */ -}; - -/* - * Some Hyper-V status codes. - */ - -#define HV_S_OK 0x00000000 -#define HV_E_FAIL 0x80004005 -#define HV_S_CONT 0x80070103 -#define HV_ERROR_NOT_SUPPORTED 0x80070032 -#define HV_ERROR_MACHINE_LOCKED 0x800704F7 -#define HV_ERROR_DEVICE_NOT_CONNECTED 0x8007048F -#define HV_INVALIDARG 0x80070057 -#define HV_GUID_NOTFOUND 0x80041002 - -#define ADDR_FAMILY_NONE 0x00 -#define ADDR_FAMILY_IPV4 0x01 -#define ADDR_FAMILY_IPV6 0x02 - -#define MAX_ADAPTER_ID_SIZE 128 -#define MAX_IP_ADDR_SIZE 1024 -#define MAX_GATEWAY_SIZE 512 - - -struct hv_kvp_ipaddr_value { - __u16 adapter_id[MAX_ADAPTER_ID_SIZE]; - __u8 addr_family; - __u8 dhcp_enabled; - __u16 ip_addr[MAX_IP_ADDR_SIZE]; - __u16 sub_net[MAX_IP_ADDR_SIZE]; - __u16 gate_way[MAX_GATEWAY_SIZE]; - __u16 dns_addr[MAX_IP_ADDR_SIZE]; -} __attribute__((packed)); - - -struct hv_kvp_hdr { - __u8 operation; - __u8 pool; - __u16 pad; -} __attribute__((packed)); - -struct hv_kvp_exchg_msg_value { - __u32 value_type; - __u32 key_size; - __u32 value_size; - __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; - union { - __u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; - __u32 value_u32; - __u64 value_u64; - }; -} __attribute__((packed)); - -struct hv_kvp_msg_enumerate { - __u32 index; - struct hv_kvp_exchg_msg_value data; -} __attribute__((packed)); - -struct hv_kvp_msg_get { - struct hv_kvp_exchg_msg_value data; -}; - -struct hv_kvp_msg_set { - struct hv_kvp_exchg_msg_value data; -}; - -struct hv_kvp_msg_delete { - __u32 key_size; - __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; -}; - -struct hv_kvp_register { - __u8 version[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; -}; - -struct hv_kvp_msg { - union { - struct hv_kvp_hdr kvp_hdr; - int error; - }; - union { - struct hv_kvp_msg_get kvp_get; - struct hv_kvp_msg_set kvp_set; - struct hv_kvp_msg_delete kvp_delete; - struct hv_kvp_msg_enumerate kvp_enum_data; - struct hv_kvp_ipaddr_value kvp_ip_val; - struct hv_kvp_register kvp_register; - } body; -} __attribute__((packed)); - -struct hv_kvp_ip_msg { - __u8 operation; - __u8 pool; - struct hv_kvp_ipaddr_value kvp_ip_val; -} __attribute__((packed)); - -#ifdef __KERNEL__ +#include #include #include #include @@ -1465,5 +1149,4 @@ void hv_vss_onchannelcallback(void *); extern __u32 vmbus_proto_version; -#endif /* __KERNEL__ */ #endif /* _HYPERV_H */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 3ce25b5d75a9..6929571b79b0 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -139,6 +139,7 @@ header-y += hid.h header-y += hiddev.h header-y += hidraw.h header-y += hpet.h +header-y += hyperv.h header-y += hysdn_if.h header-y += i2c-dev.h header-y += i2c.h diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h new file mode 100644 index 000000000000..1861f8e2ce2b --- /dev/null +++ b/include/uapi/linux/hyperv.h @@ -0,0 +1,344 @@ +/* + * + * Copyright (c) 2011, Microsoft Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + * Authors: + * Haiyang Zhang + * Hank Janssen + * K. Y. Srinivasan + * + */ + +#ifndef _UAPI_HYPERV_H +#define _UAPI_HYPERV_H + +/* + * Framework version for util services. + */ +#define UTIL_FW_MINOR 0 + +#define UTIL_WS2K8_FW_MAJOR 1 +#define UTIL_WS2K8_FW_VERSION (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR) + +#define UTIL_FW_MAJOR 3 +#define UTIL_FW_VERSION (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR) + + +/* + * Implementation of host controlled snapshot of the guest. + */ + +#define VSS_OP_REGISTER 128 + +enum hv_vss_op { + VSS_OP_CREATE = 0, + VSS_OP_DELETE, + VSS_OP_HOT_BACKUP, + VSS_OP_GET_DM_INFO, + VSS_OP_BU_COMPLETE, + /* + * Following operations are only supported with IC version >= 5.0 + */ + VSS_OP_FREEZE, /* Freeze the file systems in the VM */ + VSS_OP_THAW, /* Unfreeze the file systems */ + VSS_OP_AUTO_RECOVER, + VSS_OP_COUNT /* Number of operations, must be last */ +}; + + +/* + * Header for all VSS messages. + */ +struct hv_vss_hdr { + __u8 operation; + __u8 reserved[7]; +} __attribute__((packed)); + + +/* + * Flag values for the hv_vss_check_feature. Linux supports only + * one value. + */ +#define VSS_HBU_NO_AUTO_RECOVERY 0x00000005 + +struct hv_vss_check_feature { + __u32 flags; +} __attribute__((packed)); + +struct hv_vss_check_dm_info { + __u32 flags; +} __attribute__((packed)); + +struct hv_vss_msg { + union { + struct hv_vss_hdr vss_hdr; + int error; + }; + union { + struct hv_vss_check_feature vss_cf; + struct hv_vss_check_dm_info dm_info; + }; +} __attribute__((packed)); + +/* + * An implementation of HyperV key value pair (KVP) functionality for Linux. + * + * + * Copyright (C) 2010, Novell, Inc. + * Author : K. Y. Srinivasan + * + */ + +/* + * Maximum value size - used for both key names and value data, and includes + * any applicable NULL terminators. + * + * Note: This limit is somewhat arbitrary, but falls easily within what is + * supported for all native guests (back to Win 2000) and what is reasonable + * for the IC KVP exchange functionality. Note that Windows Me/98/95 are + * limited to 255 character key names. + * + * MSDN recommends not storing data values larger than 2048 bytes in the + * registry. + * + * Note: This value is used in defining the KVP exchange message - this value + * cannot be modified without affecting the message size and compatibility. + */ + +/* + * bytes, including any null terminators + */ +#define HV_KVP_EXCHANGE_MAX_VALUE_SIZE (2048) + + +/* + * Maximum key size - the registry limit for the length of an entry name + * is 256 characters, including the null terminator + */ + +#define HV_KVP_EXCHANGE_MAX_KEY_SIZE (512) + +/* + * In Linux, we implement the KVP functionality in two components: + * 1) The kernel component which is packaged as part of the hv_utils driver + * is responsible for communicating with the host and responsible for + * implementing the host/guest protocol. 2) A user level daemon that is + * responsible for data gathering. 
+ * + * Host/Guest Protocol: The host iterates over an index and expects the guest + * to assign a key name to the index and also return the value corresponding to + * the key. The host will have atmost one KVP transaction outstanding at any + * given point in time. The host side iteration stops when the guest returns + * an error. Microsoft has specified the following mapping of key names to + * host specified index: + * + * Index Key Name + * 0 FullyQualifiedDomainName + * 1 IntegrationServicesVersion + * 2 NetworkAddressIPv4 + * 3 NetworkAddressIPv6 + * 4 OSBuildNumber + * 5 OSName + * 6 OSMajorVersion + * 7 OSMinorVersion + * 8 OSVersion + * 9 ProcessorArchitecture + * + * The Windows host expects the Key Name and Key Value to be encoded in utf16. + * + * Guest Kernel/KVP Daemon Protocol: As noted earlier, we implement all of the + * data gathering functionality in a user mode daemon. The user level daemon + * is also responsible for binding the key name to the index as well. The + * kernel and user-level daemon communicate using a connector channel. + * + * The user mode component first registers with the + * the kernel component. Subsequently, the kernel component requests, data + * for the specified keys. In response to this message the user mode component + * fills in the value corresponding to the specified key. We overload the + * sequence field in the cn_msg header to define our KVP message types. + * + * + * The kernel component simply acts as a conduit for communication between the + * Windows host and the user-level daemon. The kernel component passes up the + * index received from the Host to the user-level daemon. If the index is + * valid (supported), the corresponding key as well as its + * value (both are strings) is returned. If the index is invalid + * (not supported), a NULL key string is returned. + */ + + +/* + * Registry value types. + */ + +#define REG_SZ 1 +#define REG_U32 4 +#define REG_U64 8 + +/* + * As we look at expanding the KVP functionality to include + * IP injection functionality, we need to maintain binary + * compatibility with older daemons. + * + * The KVP opcodes are defined by the host and it was unfortunate + * that I chose to treat the registration operation as part of the + * KVP operations defined by the host. + * Here is the level of compatibility + * (between the user level daemon and the kernel KVP driver) that we + * will implement: + * + * An older daemon will always be supported on a newer driver. + * A given user level daemon will require a minimal version of the + * kernel driver. + * If we cannot handle the version differences, we will fail gracefully + * (this can happen when we have a user level daemon that is more + * advanced than the KVP driver. + * + * We will use values used in this handshake for determining if we have + * workable user level daemon and the kernel driver. We begin by taking the + * registration opcode out of the KVP opcode namespace. We will however, + * maintain compatibility with the existing user-level daemon code. + */ + +/* + * Daemon code not supporting IP injection (legacy daemon). + */ + +#define KVP_OP_REGISTER 4 + +/* + * Daemon code supporting IP injection. + * The KVP opcode field is used to communicate the + * registration information; so define a namespace that + * will be distinct from the host defined KVP opcode. 
+ */ + +#define KVP_OP_REGISTER1 100 + +enum hv_kvp_exchg_op { + KVP_OP_GET = 0, + KVP_OP_SET, + KVP_OP_DELETE, + KVP_OP_ENUMERATE, + KVP_OP_GET_IP_INFO, + KVP_OP_SET_IP_INFO, + KVP_OP_COUNT /* Number of operations, must be last. */ +}; + +enum hv_kvp_exchg_pool { + KVP_POOL_EXTERNAL = 0, + KVP_POOL_GUEST, + KVP_POOL_AUTO, + KVP_POOL_AUTO_EXTERNAL, + KVP_POOL_AUTO_INTERNAL, + KVP_POOL_COUNT /* Number of pools, must be last. */ +}; + +/* + * Some Hyper-V status codes. + */ + +#define HV_S_OK 0x00000000 +#define HV_E_FAIL 0x80004005 +#define HV_S_CONT 0x80070103 +#define HV_ERROR_NOT_SUPPORTED 0x80070032 +#define HV_ERROR_MACHINE_LOCKED 0x800704F7 +#define HV_ERROR_DEVICE_NOT_CONNECTED 0x8007048F +#define HV_INVALIDARG 0x80070057 +#define HV_GUID_NOTFOUND 0x80041002 + +#define ADDR_FAMILY_NONE 0x00 +#define ADDR_FAMILY_IPV4 0x01 +#define ADDR_FAMILY_IPV6 0x02 + +#define MAX_ADAPTER_ID_SIZE 128 +#define MAX_IP_ADDR_SIZE 1024 +#define MAX_GATEWAY_SIZE 512 + + +struct hv_kvp_ipaddr_value { + __u16 adapter_id[MAX_ADAPTER_ID_SIZE]; + __u8 addr_family; + __u8 dhcp_enabled; + __u16 ip_addr[MAX_IP_ADDR_SIZE]; + __u16 sub_net[MAX_IP_ADDR_SIZE]; + __u16 gate_way[MAX_GATEWAY_SIZE]; + __u16 dns_addr[MAX_IP_ADDR_SIZE]; +} __attribute__((packed)); + + +struct hv_kvp_hdr { + __u8 operation; + __u8 pool; + __u16 pad; +} __attribute__((packed)); + +struct hv_kvp_exchg_msg_value { + __u32 value_type; + __u32 key_size; + __u32 value_size; + __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; + union { + __u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; + __u32 value_u32; + __u64 value_u64; + }; +} __attribute__((packed)); + +struct hv_kvp_msg_enumerate { + __u32 index; + struct hv_kvp_exchg_msg_value data; +} __attribute__((packed)); + +struct hv_kvp_msg_get { + struct hv_kvp_exchg_msg_value data; +}; + +struct hv_kvp_msg_set { + struct hv_kvp_exchg_msg_value data; +}; + +struct hv_kvp_msg_delete { + __u32 key_size; + __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; +}; + +struct hv_kvp_register { + __u8 version[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; +}; + +struct hv_kvp_msg { + union { + struct hv_kvp_hdr kvp_hdr; + int error; + }; + union { + struct hv_kvp_msg_get kvp_get; + struct hv_kvp_msg_set kvp_set; + struct hv_kvp_msg_delete kvp_delete; + struct hv_kvp_msg_enumerate kvp_enum_data; + struct hv_kvp_ipaddr_value kvp_ip_val; + struct hv_kvp_register kvp_register; + } body; +} __attribute__((packed)); + +struct hv_kvp_ip_msg { + __u8 operation; + __u8 pool; + struct hv_kvp_ipaddr_value kvp_ip_val; +} __attribute__((packed)); + +#endif /* _UAPI_HYPERV_H */ -- cgit v1.2.3 From 90f3453585479d5beb75058da46eb573ced0e6ac Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Wed, 29 Jan 2014 18:14:39 -0800 Subject: Drivers: hv: vmbus: Extract the mmio information from DSDT On Gen2 firmware, Hyper-V does not emulate the PCI bus. However, the MMIO information is packaged up in DSDT. Extract this information and export it for use by the synthetic framebuffer driver. This is the only driver that needs this currently. In this version of the patch mmio, I have updated the hyperv header file (linux/hyperv.h) with mmio definitions. Signed-off-by: K. Y. 
Srinivasan Signed-off-by: Greg Kroah-Hartman --- drivers/hv/vmbus_drv.c | 45 ++++++++++++++++++++++++++++++++------------- include/linux/hyperv.h | 3 +++ 2 files changed, 35 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 077bb1bdac34..b37c91b6ba80 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -43,6 +43,10 @@ static struct acpi_device *hv_acpi_dev; static struct tasklet_struct msg_dpc; static struct completion probe_event; static int irq; +u64 hyperv_mmio_start; +EXPORT_SYMBOL_GPL(hyperv_mmio_start); +u64 hyperv_mmio_size; +EXPORT_SYMBOL_GPL(hyperv_mmio_size); static int vmbus_exists(void) { @@ -886,18 +890,19 @@ void vmbus_device_unregister(struct hv_device *device_obj) /* - * VMBUS is an acpi enumerated device. Get the the IRQ information - * from DSDT. + * VMBUS is an acpi enumerated device. Get the the information we + * need from DSDT. */ -static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq) +static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx) { + switch (res->type) { + case ACPI_RESOURCE_TYPE_IRQ: + irq = res->data.irq.interrupts[0]; - if (res->type == ACPI_RESOURCE_TYPE_IRQ) { - struct acpi_resource_irq *irqp; - irqp = &res->data.irq; - - *((unsigned int *)irq) = irqp->interrupts[0]; + case ACPI_RESOURCE_TYPE_ADDRESS64: + hyperv_mmio_start = res->data.address64.minimum; + hyperv_mmio_size = res->data.address64.address_length; } return AE_OK; @@ -906,18 +911,32 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq) static int vmbus_acpi_add(struct acpi_device *device) { acpi_status result; + int ret_val = -ENODEV; hv_acpi_dev = device; result = acpi_walk_resources(device->handle, METHOD_NAME__CRS, - vmbus_walk_resources, &irq); + vmbus_walk_resources, NULL); - if (ACPI_FAILURE(result)) { - complete(&probe_event); - return -ENODEV; + if (ACPI_FAILURE(result)) + goto acpi_walk_err; + /* + * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that + * has the mmio ranges. Get that. + */ + if (device->parent) { + result = acpi_walk_resources(device->parent->handle, + METHOD_NAME__CRS, + vmbus_walk_resources, NULL); + + if (ACPI_FAILURE(result)) + goto acpi_walk_err; } + ret_val = 0; + +acpi_walk_err: complete(&probe_event); - return 0; + return ret_val; } static const struct acpi_device_id vmbus_acpi_device_ids[] = { diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 167ef47e3d6e..6b862dadbb7a 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1143,6 +1143,9 @@ int hv_vss_init(struct hv_util_service *); void hv_vss_deinit(void); void hv_vss_onchannelcallback(void *); +extern u64 hyperv_mmio_start; +extern u64 hyperv_mmio_size; + /* * Negotiated version with the Host. */ -- cgit v1.2.3 From 011a7c3cc3aa60c7ea6bb49d847e80a299ba7b36 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Sat, 1 Feb 2014 19:02:20 -0800 Subject: Drivers: hv: vmbus: Cleanup the packet send path The current channel code is using scatterlist abstraction to pass data to the ringbuffer API on the send path. This causes unnecessary translations between virtual and physical addresses. Fix this. Signed-off-by: K. Y. 
Srinivasan Signed-off-by: Greg Kroah-Hartman --- drivers/hv/channel.c | 42 +++++++++++++++++++++++------------------- drivers/hv/hyperv_vmbus.h | 4 ++-- drivers/hv/ring_buffer.c | 17 +++++++---------- include/linux/hyperv.h | 2 +- 4 files changed, 33 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 69ea36f07b4d..602ca86a6488 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "hyperv_vmbus.h" @@ -554,14 +555,14 @@ EXPORT_SYMBOL_GPL(vmbus_close); * * Mainly used by Hyper-V drivers. */ -int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer, +int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, u32 bufferlen, u64 requestid, enum vmbus_packet_type type, u32 flags) { struct vmpacket_descriptor desc; u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen; u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64)); - struct scatterlist bufferlist[3]; + struct kvec bufferlist[3]; u64 aligned_data = 0; int ret; bool signal = false; @@ -575,11 +576,12 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer, desc.len8 = (u16)(packetlen_aligned >> 3); desc.trans_id = requestid; - sg_init_table(bufferlist, 3); - sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor)); - sg_set_buf(&bufferlist[1], buffer, bufferlen); - sg_set_buf(&bufferlist[2], &aligned_data, - packetlen_aligned - packetlen); + bufferlist[0].iov_base = &desc; + bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor); + bufferlist[1].iov_base = buffer; + bufferlist[1].iov_len = bufferlen; + bufferlist[2].iov_base = &aligned_data; + bufferlist[2].iov_len = (packetlen_aligned - packetlen); ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); @@ -605,7 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, u32 descsize; u32 packetlen; u32 packetlen_aligned; - struct scatterlist bufferlist[3]; + struct kvec bufferlist[3]; u64 aligned_data = 0; bool signal = false; @@ -637,11 +639,12 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, desc.range[i].pfn = pagebuffers[i].pfn; } - sg_init_table(bufferlist, 3); - sg_set_buf(&bufferlist[0], &desc, descsize); - sg_set_buf(&bufferlist[1], buffer, bufferlen); - sg_set_buf(&bufferlist[2], &aligned_data, - packetlen_aligned - packetlen); + bufferlist[0].iov_base = &desc; + bufferlist[0].iov_len = descsize; + bufferlist[1].iov_base = buffer; + bufferlist[1].iov_len = bufferlen; + bufferlist[2].iov_base = &aligned_data; + bufferlist[2].iov_len = (packetlen_aligned - packetlen); ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); @@ -665,7 +668,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, u32 descsize; u32 packetlen; u32 packetlen_aligned; - struct scatterlist bufferlist[3]; + struct kvec bufferlist[3]; u64 aligned_data = 0; bool signal = false; u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, @@ -700,11 +703,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array, pfncount * sizeof(u64)); - sg_init_table(bufferlist, 3); - sg_set_buf(&bufferlist[0], &desc, descsize); - sg_set_buf(&bufferlist[1], buffer, bufferlen); - sg_set_buf(&bufferlist[2], &aligned_data, - packetlen_aligned - packetlen); + bufferlist[0].iov_base = &desc; + bufferlist[0].iov_len = descsize; + bufferlist[1].iov_base = buffer; + 
bufferlist[1].iov_len = bufferlen; + bufferlist[2].iov_base = &aligned_data; + bufferlist[2].iov_len = (packetlen_aligned - packetlen); ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index e05517616a06..1544609881fe 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -559,8 +559,8 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer, void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info); int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info, - struct scatterlist *sglist, - u32 sgcount, bool *signal); + struct kvec *kv_list, + u32 kv_count, bool *signal); int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer, u32 buflen); diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 26c93cf9f6be..15db66b74141 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "hyperv_vmbus.h" @@ -387,23 +388,20 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) * */ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, - struct scatterlist *sglist, u32 sgcount, bool *signal) + struct kvec *kv_list, u32 kv_count, bool *signal) { int i = 0; u32 bytes_avail_towrite; u32 bytes_avail_toread; u32 totalbytes_towrite = 0; - struct scatterlist *sg; u32 next_write_location; u32 old_write; u64 prev_indices = 0; unsigned long flags; - for_each_sg(sglist, sg, sgcount, i) - { - totalbytes_towrite += sg->length; - } + for (i = 0; i < kv_count; i++) + totalbytes_towrite += kv_list[i].iov_len; totalbytes_towrite += sizeof(u64); @@ -427,12 +425,11 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, old_write = next_write_location; - for_each_sg(sglist, sg, sgcount, i) - { + for (i = 0; i < kv_count; i++) { next_write_location = hv_copyto_ringbuffer(outring_info, next_write_location, - sg_virt(sg), - sg->length); + kv_list[i].iov_base, + kv_list[i].iov_len); } /* Set previous packet start */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6b862dadbb7a..9b07e1f070ac 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -802,7 +802,7 @@ extern int vmbus_open(struct vmbus_channel *channel, extern void vmbus_close(struct vmbus_channel *channel); extern int vmbus_sendpacket(struct vmbus_channel *channel, - const void *buffer, + void *buffer, u32 bufferLen, u64 requestid, enum vmbus_packet_type type, -- cgit v1.2.3 From 8a7206a89f85c8439561b0eff85bb9440a2e97a9 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Mon, 3 Feb 2014 12:42:45 -0800 Subject: Drivers: hv: vmbus: Support per-channel driver state As we implement Virtual Receive Side Scaling on the networking side (the VRSS patches are currently under review), it will be useful to have per-channel state that vmbus drivers can manage. Add support for managing per-channel state. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- include/linux/hyperv.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 9b07e1f070ac..d47bab9e7a18 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -727,6 +727,10 @@ struct vmbus_channel { * This will be NULL for the primary channel. */ struct vmbus_channel *primary_channel; + /* + * Support per-channel state for use by vmbus drivers. 
+ */ + void *per_channel_state; }; static inline void set_channel_read_state(struct vmbus_channel *c, bool state) @@ -734,6 +738,16 @@ static inline void set_channel_read_state(struct vmbus_channel *c, bool state) c->batched_reading = state; } +static inline void set_per_channel_state(struct vmbus_channel *c, void *s) +{ + c->per_channel_state = s; +} + +static inline void *get_per_channel_state(struct vmbus_channel *c) +{ + return c->per_channel_state; +} + void vmbus_onmessage(void *context); int vmbus_request_offers(void); -- cgit v1.2.3 From ac8f73305eea8a12fdcb6090417eb93a74efbcbd Mon Sep 17 00:00:00 2001 From: David Fries Date: Wed, 15 Jan 2014 22:29:19 -0600 Subject: connector: add portid to unicast in addition to broadcasting This allows replying only to the requestor portid while still supporting broadcasting. Pass 0 to portid for the previous behavior. Signed-off-by: David Fries Acked-by: Evgeniy Polyakov Signed-off-by: Greg Kroah-Hartman --- Documentation/connector/cn_test.c | 2 +- drivers/connector/cn_proc.c | 18 +++++++++--------- drivers/connector/connector.c | 20 +++++++++++++------- drivers/hv/hv_kvp.c | 4 ++-- drivers/hv/hv_snapshot.c | 2 +- drivers/md/dm-log-userspace-transfer.c | 2 +- drivers/video/uvesafb.c | 4 ++-- drivers/w1/w1_netlink.c | 14 +++++++------- include/linux/connector.h | 2 +- 9 files changed, 37 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/Documentation/connector/cn_test.c b/Documentation/connector/cn_test.c index adcca0368d60..d12cc944b696 100644 --- a/Documentation/connector/cn_test.c +++ b/Documentation/connector/cn_test.c @@ -145,7 +145,7 @@ static void cn_test_timer_func(unsigned long __data) memcpy(m + 1, data, m->len); - cn_netlink_send(m, 0, GFP_ATOMIC); + cn_netlink_send(m, 0, 0, GFP_ATOMIC); kfree(m); } diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 18c5b9b16645..148d707a1d43 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -95,7 +95,7 @@ void proc_fork_connector(struct task_struct *task) msg->len = sizeof(*ev); msg->flags = 0; /* not used */ /* If cn_netlink_send() failed, the data is not sent */ - cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); } void proc_exec_connector(struct task_struct *task) @@ -122,7 +122,7 @@ void proc_exec_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); } void proc_id_connector(struct task_struct *task, int which_id) @@ -163,7 +163,7 @@ void proc_id_connector(struct task_struct *task, int which_id) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); } void proc_sid_connector(struct task_struct *task) @@ -190,7 +190,7 @@ void proc_sid_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); } void proc_ptrace_connector(struct task_struct *task, int ptrace_id) @@ -225,7 +225,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); + cn_netlink_send(msg, 0, CN_IDX_PROC, 
GFP_KERNEL); } void proc_comm_connector(struct task_struct *task) @@ -253,7 +253,7 @@ void proc_comm_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); } void proc_coredump_connector(struct task_struct *task) @@ -280,7 +280,7 @@ void proc_coredump_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); } void proc_exit_connector(struct task_struct *task) @@ -309,7 +309,7 @@ void proc_exit_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); } /* @@ -343,7 +343,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) msg->ack = rcvd_ack + 1; msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); } /** diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index a36749f1e44a..77afe7487d34 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -50,7 +50,7 @@ static int cn_already_initialized; * * Sequence number is incremented with each message to be sent. * - * If we expect reply to our message then the sequence number in + * If we expect a reply to our message then the sequence number in * received message MUST be the same as in original message, and * acknowledge number MUST be the same + 1. * @@ -62,8 +62,11 @@ static int cn_already_initialized; * the acknowledgement number in the original message + 1, then it is * a new message. * + * The message is sent to, the portid if given, the group if given, both if + * both, or if both are zero then the group is looked up and sent there. 
*/ -int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask) +int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, + gfp_t gfp_mask) { struct cn_callback_entry *__cbq; unsigned int size; @@ -74,7 +77,9 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask) u32 group = 0; int found = 0; - if (!__group) { + if (portid || __group) { + group = __group; + } else { spin_lock_bh(&dev->cbdev->queue_lock); list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { @@ -88,11 +93,9 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask) if (!found) return -ENODEV; - } else { - group = __group; } - if (!netlink_has_listeners(dev->nls, group)) + if (!portid && !netlink_has_listeners(dev->nls, group)) return -ESRCH; size = sizeof(*msg) + msg->len; @@ -113,7 +116,10 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask) NETLINK_CB(skb).dst_group = group; - return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask); + if (group) + return netlink_broadcast(dev->nls, skb, portid, group, + gfp_mask); + return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT)); } EXPORT_SYMBOL_GPL(cn_netlink_send); diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index 09988b289622..ea852537307e 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -113,7 +113,7 @@ kvp_register(int reg_value) kvp_msg->kvp_hdr.operation = reg_value; strcpy(version, HV_DRV_VERSION); msg->len = sizeof(struct hv_kvp_msg); - cn_netlink_send(msg, 0, GFP_ATOMIC); + cn_netlink_send(msg, 0, 0, GFP_ATOMIC); kfree(msg); } } @@ -435,7 +435,7 @@ kvp_send_key(struct work_struct *dummy) } msg->len = sizeof(struct hv_kvp_msg); - cn_netlink_send(msg, 0, GFP_ATOMIC); + cn_netlink_send(msg, 0, 0, GFP_ATOMIC); kfree(msg); return; diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index 0c3546224376..34f14fddb666 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c @@ -98,7 +98,7 @@ static void vss_send_op(struct work_struct *dummy) vss_msg->vss_hdr.operation = op; msg->len = sizeof(struct hv_vss_msg); - cn_netlink_send(msg, 0, GFP_ATOMIC); + cn_netlink_send(msg, 0, 0, GFP_ATOMIC); kfree(msg); return; diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 08d9a207259a..b428c0ae63d5 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c @@ -66,7 +66,7 @@ static int dm_ulog_sendto_server(struct dm_ulog_request *tfr) msg->seq = tfr->seq; msg->len = sizeof(struct dm_ulog_request) + tfr->data_size; - r = cn_netlink_send(msg, 0, gfp_any()); + r = cn_netlink_send(msg, 0, 0, gfp_any()); return r; } diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index 256fba7f4641..1f38445014c1 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -190,7 +190,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task) uvfb_tasks[seq] = task; mutex_unlock(&uvfb_lock); - err = cn_netlink_send(m, 0, GFP_KERNEL); + err = cn_netlink_send(m, 0, 0, GFP_KERNEL); if (err == -ESRCH) { /* * Try to start the userspace helper if sending @@ -204,7 +204,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task) "helper is installed and executable\n"); } else { v86d_started = 1; - err = cn_netlink_send(m, 0, gfp_any()); + err = cn_netlink_send(m, 0, 0, gfp_any()); if (err == -ENOBUFS) err = 0; } diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c index 06d614af1166..b63109ada5a7 100644 --- a/drivers/w1/w1_netlink.c +++ 
b/drivers/w1/w1_netlink.c @@ -45,7 +45,7 @@ void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg) memcpy(w, msg, sizeof(struct w1_netlink_msg)); - cn_netlink_send(m, 0, GFP_KERNEL); + cn_netlink_send(m, 0, 0, GFP_KERNEL); } static void w1_send_slave(struct w1_master *dev, u64 rn) @@ -60,7 +60,7 @@ static void w1_send_slave(struct w1_master *dev, u64 rn) if (avail < 8) { msg->ack++; - cn_netlink_send(msg, 0, GFP_KERNEL); + cn_netlink_send(msg, 0, 0, GFP_KERNEL); msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd); @@ -131,7 +131,7 @@ static int w1_get_slaves(struct w1_master *dev, } msg->ack = 0; - cn_netlink_send(msg, 0, GFP_KERNEL); + cn_netlink_send(msg, 0, 0, GFP_KERNEL); dev->priv = NULL; dev->priv_size = 0; @@ -173,7 +173,7 @@ static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr, memcpy(c->data, cmd->data, c->len); - err = cn_netlink_send(cm, 0, GFP_KERNEL); + err = cn_netlink_send(cm, 0, 0, GFP_KERNEL); kfree(data); @@ -316,7 +316,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc mutex_lock(&w1_mlock); list_for_each_entry(m, &w1_masters, w1_master_entry) { if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) { - cn_netlink_send(cn, 0, GFP_KERNEL); + cn_netlink_send(cn, 0, 0, GFP_KERNEL); cn->ack++; cn->len = sizeof(struct w1_netlink_msg); w->len = 0; @@ -329,7 +329,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc id++; } cn->ack = 0; - cn_netlink_send(cn, 0, GFP_KERNEL); + cn_netlink_send(cn, 0, 0, GFP_KERNEL); mutex_unlock(&w1_mlock); kfree(cn); @@ -364,7 +364,7 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm cmsg->len += sizeof(*cmd); } - error = cn_netlink_send(cmsg, 0, GFP_KERNEL); + error = cn_netlink_send(cmsg, 0, 0, GFP_KERNEL); kfree(cmsg); return error; diff --git a/include/linux/connector.h b/include/linux/connector.h index b2b5a41b6a24..be9c4747d511 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -71,7 +71,7 @@ struct cn_dev { int cn_add_callback(struct cb_id *id, const char *name, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); void cn_del_callback(struct cb_id *); -int cn_netlink_send(struct cn_msg *, u32, gfp_t); +int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask); int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, struct cb_id *id, -- cgit v1.2.3 From abd54f028ec30976d6e797e7474ec91d96186a0c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:02:55 -0500 Subject: kernfs: replace kernfs_node->u.completion with kernfs_root->deactivate_waitq kernfs_node->u.completion is used to notify deactivation completion from kernfs_put_active() to kernfs_deactivate(). We now allow multiple racing removals of the same node and the current removal scheme is no longer correct - kernfs_remove() invocation may return before the node is properly deactivated if it races against another removal. The removal path will be restructured to address the issue. To help such restructure which requires supporting multiple waiters, this patch replaces kernfs_node->u.completion with kernfs_root->deactivate_waitq. This makes deactivation event notifications share a per-root waitqueue_head; however, the wait path is quite cold and this will also allow shaving one pointer off kernfs_node. v2: Refreshed on top of ("kernfs: make kernfs_deactivate() honor KERNFS_LOCKDEP flag"). 
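Condensed for readability (taken from the diff that follows, with lockdep annotations, NULL checks and early returns omitted), the new notification scheme is simply a shared per-root waitqueue instead of a per-node completion:

/* last active-ref dropper wakes every waiting remover on the per-root waitqueue */
void kernfs_put_active(struct kernfs_node *kn)
{
	if (atomic_dec_return(&kn->active) == KN_DEACTIVATED_BIAS)
		wake_up_all(&kernfs_root(kn)->deactivate_waitq);
}

/* removal path: deny new active refs, then wait for the existing ones to drain */
static void kernfs_deactivate(struct kernfs_node *kn)
{
	atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
	wait_event(kernfs_root(kn)->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
}

Because the waitqueue lives in kernfs_root, multiple concurrent removers of the same node can all sleep on it and are all woken together, which is what the restructured removal path relies on.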
Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/dir.c | 31 +++++++++++++------------------ include/linux/kernfs.h | 4 ++-- 2 files changed, 15 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index bd6e18be6e1a..2193d30156ef 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -8,6 +8,7 @@ * This file is released under the GPLv2. */ +#include #include #include #include @@ -151,6 +152,7 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn) */ void kernfs_put_active(struct kernfs_node *kn) { + struct kernfs_root *root = kernfs_root(kn); int v; if (unlikely(!kn)) @@ -162,11 +164,7 @@ void kernfs_put_active(struct kernfs_node *kn) if (likely(v != KN_DEACTIVATED_BIAS)) return; - /* - * atomic_dec_return() is a mb(), we'll always see the updated - * kn->u.completion. - */ - complete(kn->u.completion); + wake_up_all(&root->deactivate_waitq); } /** @@ -177,28 +175,24 @@ void kernfs_put_active(struct kernfs_node *kn) */ static void kernfs_deactivate(struct kernfs_node *kn) { - DECLARE_COMPLETION_ONSTACK(wait); - int v; + struct kernfs_root *root = kernfs_root(kn); BUG_ON(!(kn->flags & KERNFS_REMOVED)); if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF)) return; - kn->u.completion = (void *)&wait; - if (kn->flags & KERNFS_LOCKDEP) rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_); - /* atomic_add_return() is a mb(), put_active() will always see - * the updated kn->u.completion. - */ - v = atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active); - if (v != KN_DEACTIVATED_BIAS) { - if (kn->flags & KERNFS_LOCKDEP) - lock_contended(&kn->dep_map, _RET_IP_); - wait_for_completion(&wait); - } + atomic_add(KN_DEACTIVATED_BIAS, &kn->active); + + if ((kn->flags & KERNFS_LOCKDEP) && + atomic_read(&kn->active) != KN_DEACTIVATED_BIAS) + lock_contended(&kn->dep_map, _RET_IP_); + + wait_event(root->deactivate_waitq, + atomic_read(&kn->active) == KN_DEACTIVATED_BIAS); if (kn->flags & KERNFS_LOCKDEP) { lock_acquired(&kn->dep_map, _RET_IP_); @@ -630,6 +624,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv) root->dir_ops = kdops; root->kn = kn; + init_waitqueue_head(&root->deactivate_waitq); return root; } diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 5be9f0228a3b..295a3bf642ba 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -15,7 +15,7 @@ #include #include #include -#include +#include struct file; struct dentry; @@ -92,7 +92,6 @@ struct kernfs_node { struct rb_node rb; union { - struct completion *completion; struct kernfs_node *removed_list; } u; @@ -133,6 +132,7 @@ struct kernfs_root { /* private fields, do not use outside kernfs proper */ struct ida ino_ida; struct kernfs_dir_ops *dir_ops; + wait_queue_head_t deactivate_waitq; }; struct kernfs_open_file { -- cgit v1.2.3 From 988cd7afb3f37598891ca70b4c6eb914c338c46a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:02:58 -0500 Subject: kernfs: remove kernfs_addrm_cxt kernfs_addrm_cxt and the accompanying kernfs_addrm_start/finish() were added because there were operations which should be performed outside kernfs_mutex after adding and removing kernfs_nodes. The necessary operations were recorded in kernfs_addrm_cxt and performed by kernfs_addrm_finish(); however, after the recent changes which relocated deactivation and unmapping so that they're performed directly during removal, the only operation kernfs_addrm_finish() performs is kernfs_put(), which can be moved inside the removal path too. 
This patch moves the kernfs_put() of the base ref to __kernfs_remove() and remove kernfs_addrm_cxt and kernfs_addrm_start/finish(). * kernfs_add_one() is updated to grab and release kernfs_mutex itself. sysfs_addrm_start/finish() invocations around it are removed from all users. * __kernfs_remove() puts an unlinked node directly instead of chaining it to kernfs_addrm_cxt. Its callers are updated to grab and release kernfs_mutex instead of calling kernfs_addrm_start/finish() around it. v2: Rebased on top of "kernfs: associate a new kernfs_node with its parent on creation" which dropped @parent from kernfs_add_one(). Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/dir.c | 109 ++++++++++---------------------------------- fs/kernfs/file.c | 6 +-- fs/kernfs/kernfs-internal.h | 11 +---- fs/kernfs/symlink.c | 6 +-- include/linux/kernfs.h | 4 -- 5 files changed, 28 insertions(+), 108 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 9603c06550a0..948551d222b4 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -395,70 +395,45 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, return kn; } -/** - * kernfs_addrm_start - prepare for kernfs_node add/remove - * @acxt: pointer to kernfs_addrm_cxt to be used - * - * This function is called when the caller is about to add or remove - * kernfs_node. This function acquires kernfs_mutex. @acxt is used - * to keep and pass context to other addrm functions. - * - * LOCKING: - * Kernel thread context (may sleep). kernfs_mutex is locked on - * return. - */ -void kernfs_addrm_start(struct kernfs_addrm_cxt *acxt) - __acquires(kernfs_mutex) -{ - memset(acxt, 0, sizeof(*acxt)); - - mutex_lock(&kernfs_mutex); -} - /** * kernfs_add_one - add kernfs_node to parent without warning - * @acxt: addrm context to use * @kn: kernfs_node to be added * * The caller must already have initialized @kn->parent. This * function increments nlink of the parent's inode if @kn is a * directory and link into the children list of the parent. * - * This function should be called between calls to - * kernfs_addrm_start() and kernfs_addrm_finish() and should be passed - * the same @acxt as passed to kernfs_addrm_start(). - * - * LOCKING: - * Determined by kernfs_addrm_start(). - * * RETURNS: * 0 on success, -EEXIST if entry with the given name already * exists. */ -int kernfs_add_one(struct kernfs_addrm_cxt *acxt, struct kernfs_node *kn) +int kernfs_add_one(struct kernfs_node *kn) { struct kernfs_node *parent = kn->parent; - bool has_ns = kernfs_ns_enabled(parent); struct kernfs_iattrs *ps_iattr; + bool has_ns; int ret; - if (has_ns != (bool)kn->ns) { - WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n", - has_ns ? "required" : "invalid", parent->name, kn->name); - return -EINVAL; - } + mutex_lock(&kernfs_mutex); + + ret = -EINVAL; + has_ns = kernfs_ns_enabled(parent); + if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n", + has_ns ? 
"required" : "invalid", parent->name, kn->name)) + goto out_unlock; if (kernfs_type(parent) != KERNFS_DIR) - return -EINVAL; + goto out_unlock; + ret = -ENOENT; if (parent->flags & KERNFS_REMOVED) - return -ENOENT; + goto out_unlock; kn->hash = kernfs_name_hash(kn->name, kn->ns); ret = kernfs_link_sibling(kn); if (ret) - return ret; + goto out_unlock; /* Update timestamps on the parent */ ps_iattr = parent->iattr; @@ -469,35 +444,10 @@ int kernfs_add_one(struct kernfs_addrm_cxt *acxt, struct kernfs_node *kn) /* Mark the entry added into directory tree */ kn->flags &= ~KERNFS_REMOVED; - - return 0; -} - -/** - * kernfs_addrm_finish - finish up kernfs_node add/remove - * @acxt: addrm context to finish up - * - * Finish up kernfs_node add/remove. Resources acquired by - * kernfs_addrm_start() are released and removed kernfs_nodes are - * cleaned up. - * - * LOCKING: - * kernfs_mutex is released. - */ -void kernfs_addrm_finish(struct kernfs_addrm_cxt *acxt) - __releases(kernfs_mutex) -{ - /* release resources acquired by kernfs_addrm_start() */ + ret = 0; +out_unlock: mutex_unlock(&kernfs_mutex); - - /* kill removed kernfs_nodes */ - while (acxt->removed) { - struct kernfs_node *kn = acxt->removed; - - acxt->removed = kn->u.removed_list; - - kernfs_put(kn); - } + return ret; } /** @@ -630,7 +580,6 @@ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, umode_t mode, void *priv, const void *ns) { - struct kernfs_addrm_cxt acxt; struct kernfs_node *kn; int rc; @@ -644,10 +593,7 @@ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, kn->priv = priv; /* link in */ - kernfs_addrm_start(&acxt); - rc = kernfs_add_one(&acxt, kn); - kernfs_addrm_finish(&acxt); - + rc = kernfs_add_one(kn); if (!rc) return kn; @@ -800,8 +746,7 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos, return pos->parent; } -static void __kernfs_remove(struct kernfs_addrm_cxt *acxt, - struct kernfs_node *kn) +static void __kernfs_remove(struct kernfs_node *kn) { struct kernfs_node *pos; @@ -845,8 +790,7 @@ static void __kernfs_remove(struct kernfs_addrm_cxt *acxt, ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME; } - pos->u.removed_list = acxt->removed; - acxt->removed = pos; + kernfs_put(pos); } kernfs_put(pos); @@ -861,11 +805,9 @@ static void __kernfs_remove(struct kernfs_addrm_cxt *acxt, */ void kernfs_remove(struct kernfs_node *kn) { - struct kernfs_addrm_cxt acxt; - - kernfs_addrm_start(&acxt); - __kernfs_remove(&acxt, kn); - kernfs_addrm_finish(&acxt); + mutex_lock(&kernfs_mutex); + __kernfs_remove(kn); + mutex_unlock(&kernfs_mutex); } /** @@ -880,7 +822,6 @@ void kernfs_remove(struct kernfs_node *kn) int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, const void *ns) { - struct kernfs_addrm_cxt acxt; struct kernfs_node *kn; if (!parent) { @@ -889,13 +830,13 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, return -ENOENT; } - kernfs_addrm_start(&acxt); + mutex_lock(&kernfs_mutex); kn = kernfs_find_ns(parent, name, ns); if (kn) - __kernfs_remove(&acxt, kn); + __kernfs_remove(kn); - kernfs_addrm_finish(&acxt); + mutex_unlock(&kernfs_mutex); if (kn) return 0; diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index dbf397bfdff2..10a8c91c49d6 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -820,7 +820,6 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, bool name_is_static, struct lock_class_key *key) { - struct kernfs_addrm_cxt acxt; struct kernfs_node *kn; unsigned flags; 
int rc; @@ -855,10 +854,7 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, if (ops->mmap) kn->flags |= KERNFS_HAS_MMAP; - kernfs_addrm_start(&acxt); - rc = kernfs_add_one(&acxt, kn); - kernfs_addrm_finish(&acxt); - + rc = kernfs_add_one(kn); if (rc) { kernfs_put(kn); return ERR_PTR(rc); diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index eb536b76374a..46b58de794d6 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -44,13 +44,6 @@ static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn) return kn->dir.root; } -/* - * Context structure to be used while adding/removing nodes. - */ -struct kernfs_addrm_cxt { - struct kernfs_node *removed; -}; - /* * mount.c */ @@ -100,9 +93,7 @@ extern const struct inode_operations kernfs_dir_iops; struct kernfs_node *kernfs_get_active(struct kernfs_node *kn); void kernfs_put_active(struct kernfs_node *kn); -void kernfs_addrm_start(struct kernfs_addrm_cxt *acxt); -int kernfs_add_one(struct kernfs_addrm_cxt *acxt, struct kernfs_node *kn); -void kernfs_addrm_finish(struct kernfs_addrm_cxt *acxt); +int kernfs_add_one(struct kernfs_node *kn); struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, const char *name, umode_t mode, unsigned flags); diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c index 4d457055acb9..8a198898e39a 100644 --- a/fs/kernfs/symlink.c +++ b/fs/kernfs/symlink.c @@ -27,7 +27,6 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, struct kernfs_node *target) { struct kernfs_node *kn; - struct kernfs_addrm_cxt acxt; int error; kn = kernfs_new_node(parent, name, S_IFLNK|S_IRWXUGO, KERNFS_LINK); @@ -39,10 +38,7 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, kn->symlink.target_kn = target; kernfs_get(target); /* ref owned by symlink */ - kernfs_addrm_start(&acxt); - error = kernfs_add_one(&acxt, kn); - kernfs_addrm_finish(&acxt); - + error = kernfs_add_one(kn); if (!error) return kn; diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 295a3bf642ba..38646f6096bc 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -91,10 +91,6 @@ struct kernfs_node { struct rb_node rb; - union { - struct kernfs_node *removed_list; - } u; - const void *ns; /* namespace tag */ unsigned int hash; /* ns + name hash */ union { -- cgit v1.2.3 From 182fd64b66342219d6fcf2b84d337529d120d95c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:02:59 -0500 Subject: kernfs: remove KERNFS_ACTIVE_REF and add kernfs_lockdep() There currently are two mechanisms gating active ref lockdep annotations - KERNFS_LOCKDEP flag and KERNFS_ACTIVE_REF type mask. The former disables lockdep annotations in kernfs_get/put_active() while the latter disables all of kernfs_deactivate(). While KERNFS_ACTIVE_REF also behaves as an optimization to skip the deactivation step for non-file nodes, the benefit is marginal and it needlessly diverges code paths. Let's drop KERNFS_ACTIVE_REF. While at it, add a test helper kernfs_lockdep() to test KERNFS_LOCKDEP flag so that it's more convenient and the related code can be compiled out when not enabled. v2: Refreshed on top of ("kernfs: make kernfs_deactivate() honor KERNFS_LOCKDEP flag"). As the earlier patch already added KERNFS_LOCKDEP tests to kernfs_deactivate(), those additions are dropped from this patch and the existing ones are simply converted to kernfs_lockdep(). 
Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/dir.c | 20 +++++++++++++------- include/linux/kernfs.h | 1 - 2 files changed, 13 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 948551d222b4..5cf137b63db9 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -22,6 +22,15 @@ DEFINE_MUTEX(kernfs_mutex); #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb) +static bool kernfs_lockdep(struct kernfs_node *kn) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + return kn->flags & KERNFS_LOCKDEP; +#else + return false; +#endif +} + /** * kernfs_name_hash * @name: Null terminated string to hash @@ -144,7 +153,7 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn) if (!atomic_inc_unless_negative(&kn->active)) return NULL; - if (kn->flags & KERNFS_LOCKDEP) + if (kernfs_lockdep(kn)) rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_); return kn; } @@ -164,7 +173,7 @@ void kernfs_put_active(struct kernfs_node *kn) if (unlikely(!kn)) return; - if (kn->flags & KERNFS_LOCKDEP) + if (kernfs_lockdep(kn)) rwsem_release(&kn->dep_map, 1, _RET_IP_); v = atomic_dec_return(&kn->active); if (likely(v != KN_DEACTIVATED_BIAS)) @@ -190,16 +199,13 @@ static void kernfs_deactivate(struct kernfs_node *kn) lockdep_assert_held(&kernfs_mutex); BUG_ON(!(kn->flags & KERNFS_REMOVED)); - if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF)) - return; - /* only the first invocation on @kn should deactivate it */ if (atomic_read(&kn->active) >= 0) atomic_add(KN_DEACTIVATED_BIAS, &kn->active); mutex_unlock(&kernfs_mutex); - if (kn->flags & KERNFS_LOCKDEP) { + if (kernfs_lockdep(kn)) { rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_); if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS) lock_contended(&kn->dep_map, _RET_IP_); @@ -209,7 +215,7 @@ static void kernfs_deactivate(struct kernfs_node *kn) wait_event(root->deactivate_waitq, atomic_read(&kn->active) == KN_DEACTIVATED_BIAS); - if (kn->flags & KERNFS_LOCKDEP) { + if (kernfs_lockdep(kn)) { lock_acquired(&kn->dep_map, _RET_IP_); rwsem_release(&kn->dep_map, 1, _RET_IP_); } diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 38646f6096bc..dc4cd6c04236 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -35,7 +35,6 @@ enum kernfs_node_type { }; #define KERNFS_TYPE_MASK 0x000f -#define KERNFS_ACTIVE_REF KERNFS_FILE #define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK enum kernfs_node_flag { -- cgit v1.2.3 From 81c173cb5e87fbb47ccd80630faefe39bbf68449 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:03:00 -0500 Subject: kernfs: remove KERNFS_REMOVED KERNFS_REMOVED is used to mark half-initialized and dying nodes so that they don't show up in lookups and deny adding new nodes under or renaming it; however, its role overlaps that of deactivation. It's necessary to deny addition of new children while removal is in progress; however, this role considerably intersects with deactivation - KERNFS_REMOVED prevents new children while deactivation prevents new file operations. There's no reason to have them separate making things more complex than necessary. This patch removes KERNFS_REMOVED. * Instead of KERNFS_REMOVED, each node now starts its life deactivated. This means that we now use both atomic_add() and atomic_sub() on KN_DEACTIVATED_BIAS, which is INT_MIN. The compiler generates an overflow warnings when negating INT_MIN as the negation can't be represented as a positive number. 
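A minimal illustration of that warning (my own sketch, not part of the patch; it assumes 32-bit int and an atomic_sub() flavour that adds the negated operand):

#include <limits.h>

#define OLD_BIAS	INT_MIN
#define NEW_BIAS	(INT_MIN + 1)
/* some arch-specific atomic_sub() implementations negate the subtrahend like this */
#define SUB(v, b)	((v) += -(b))

void example(int *active)
{
	SUB(*active, OLD_BIAS);	/* -INT_MIN is not representable in int: compiler warns of overflow */
	SUB(*active, NEW_BIAS);	/* -(INT_MIN + 1) == INT_MAX is representable: no warning */
}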
Nothing is actually broken but let's bump BIAS by one to avoid the warnings for archs which negates the subtrahend.. * A new helper kernfs_active() which tests whether kn->active >= 0 is added for convenience and lockdep annotation. All KERNFS_REMOVED tests are replaced with negated kernfs_active() tests. * __kernfs_remove() is updated to deactivate, but not drain, all nodes in the subtree instead of setting KERNFS_REMOVED. This removes deactivation from kernfs_deactivate(), which is now renamed to kernfs_drain(). * Sanity check on KERNFS_REMOVED in kernfs_put() is replaced with checks on the active ref. * Some comment style updates in the affected area. v2: Reordered before removal path restructuring. kernfs_active() dropped and kernfs_get/put_active() used instead. RB_EMPTY_NODE() used in the lookup paths. v3: Reverted most of v2 except for creating a new node with KN_DEACTIVATED_BIAS. Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/dir.c | 66 ++++++++++++++++++++++++--------------------- fs/kernfs/kernfs-internal.h | 3 ++- include/linux/kernfs.h | 1 - 3 files changed, 37 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 5cf137b63db9..d0fd739bf82d 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -22,6 +22,12 @@ DEFINE_MUTEX(kernfs_mutex); #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb) +static bool kernfs_active(struct kernfs_node *kn) +{ + lockdep_assert_held(&kernfs_mutex); + return atomic_read(&kn->active) >= 0; +} + static bool kernfs_lockdep(struct kernfs_node *kn) { #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -183,25 +189,20 @@ void kernfs_put_active(struct kernfs_node *kn) } /** - * kernfs_deactivate - deactivate kernfs_node - * @kn: kernfs_node to deactivate + * kernfs_drain - drain kernfs_node + * @kn: kernfs_node to drain * - * Deny new active references, drain existing ones and nuke all - * existing mmaps. Mutiple removers may invoke this function - * concurrently on @kn and all will return after deactivation and - * draining are complete. + * Drain existing usages and nuke all existing mmaps of @kn. Mutiple + * removers may invoke this function concurrently on @kn and all will + * return after draining is complete. */ -static void kernfs_deactivate(struct kernfs_node *kn) +static void kernfs_drain(struct kernfs_node *kn) __releases(&kernfs_mutex) __acquires(&kernfs_mutex) { struct kernfs_root *root = kernfs_root(kn); lockdep_assert_held(&kernfs_mutex); - BUG_ON(!(kn->flags & KERNFS_REMOVED)); - - /* only the first invocation on @kn should deactivate it */ - if (atomic_read(&kn->active) >= 0) - atomic_add(KN_DEACTIVATED_BIAS, &kn->active); + WARN_ON_ONCE(kernfs_active(kn)); mutex_unlock(&kernfs_mutex); @@ -253,13 +254,15 @@ void kernfs_put(struct kernfs_node *kn) return; root = kernfs_root(kn); repeat: - /* Moving/renaming is always done while holding reference. + /* + * Moving/renaming is always done while holding reference. * kn->parent won't change beneath us. */ parent = kn->parent; - WARN(!(kn->flags & KERNFS_REMOVED), "kernfs: free using entry: %s/%s\n", - parent ? parent->name : "", kn->name); + WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS, + "kernfs_put: %s/%s: released with incorrect active_ref %d\n", + parent ? 
parent->name : "", kn->name, atomic_read(&kn->active)); if (kernfs_type(kn) == KERNFS_LINK) kernfs_put(kn->symlink.target_kn); @@ -301,8 +304,8 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags) kn = dentry->d_fsdata; mutex_lock(&kernfs_mutex); - /* The kernfs node has been deleted */ - if (kn->flags & KERNFS_REMOVED) + /* The kernfs node has been deactivated */ + if (!kernfs_active(kn)) goto out_bad; /* The kernfs node has been moved? */ @@ -371,12 +374,12 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, kn->ino = ret; atomic_set(&kn->count, 1); - atomic_set(&kn->active, 0); + atomic_set(&kn->active, KN_DEACTIVATED_BIAS); RB_CLEAR_NODE(&kn->rb); kn->name = name; kn->mode = mode; - kn->flags = flags | KERNFS_REMOVED; + kn->flags = flags; return kn; @@ -432,7 +435,7 @@ int kernfs_add_one(struct kernfs_node *kn) goto out_unlock; ret = -ENOENT; - if (parent->flags & KERNFS_REMOVED) + if (!kernfs_active(parent)) goto out_unlock; kn->hash = kernfs_name_hash(kn->name, kn->ns); @@ -449,7 +452,7 @@ int kernfs_add_one(struct kernfs_node *kn) } /* Mark the entry added into directory tree */ - kn->flags &= ~KERNFS_REMOVED; + atomic_sub(KN_DEACTIVATED_BIAS, &kn->active); ret = 0; out_unlock: mutex_unlock(&kernfs_mutex); @@ -549,7 +552,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv) return ERR_PTR(-ENOMEM); } - kn->flags &= ~KERNFS_REMOVED; + atomic_sub(KN_DEACTIVATED_BIAS, &kn->active); kn->priv = priv; kn->dir.root = root; @@ -763,24 +766,25 @@ static void __kernfs_remove(struct kernfs_node *kn) pr_debug("kernfs %s: removing\n", kn->name); - /* disable lookup and node creation under @kn */ + /* prevent any new usage under @kn by deactivating all nodes */ pos = NULL; while ((pos = kernfs_next_descendant_post(pos, kn))) - pos->flags |= KERNFS_REMOVED; + if (kernfs_active(pos)) + atomic_add(KN_DEACTIVATED_BIAS, &pos->active); /* deactivate and unlink the subtree node-by-node */ do { pos = kernfs_leftmost_descendant(kn); /* - * kernfs_deactivate() drops kernfs_mutex temporarily and - * @pos's base ref could have been put by someone else by - * the time the function returns. Make sure it doesn't go - * away underneath us. + * kernfs_drain() drops kernfs_mutex temporarily and @pos's + * base ref could have been put by someone else by the time + * the function returns. Make sure it doesn't go away + * underneath us. */ kernfs_get(pos); - kernfs_deactivate(pos); + kernfs_drain(pos); /* * kernfs_unlink_sibling() succeeds once per node. 
Use it @@ -865,7 +869,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, mutex_lock(&kernfs_mutex); error = -ENOENT; - if ((kn->flags | new_parent->flags) & KERNFS_REMOVED) + if (!kernfs_active(kn) || !kernfs_active(new_parent)) goto out; error = 0; @@ -925,7 +929,7 @@ static struct kernfs_node *kernfs_dir_pos(const void *ns, struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos) { if (pos) { - int valid = !(pos->flags & KERNFS_REMOVED) && + int valid = kernfs_active(pos) && pos->parent == parent && hash == pos->hash; kernfs_put(pos); if (!valid) diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index 46b58de794d6..a91d7a1113d9 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -26,7 +26,8 @@ struct kernfs_iattrs { struct simple_xattrs xattrs; }; -#define KN_DEACTIVATED_BIAS INT_MIN +/* +1 to avoid triggering overflow warning when negating it */ +#define KN_DEACTIVATED_BIAS (INT_MIN + 1) /* KERNFS_TYPE_MASK and types are defined in include/linux/kernfs.h */ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index dc4cd6c04236..917bc6c1eb04 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -38,7 +38,6 @@ enum kernfs_node_type { #define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK enum kernfs_node_flag { - KERNFS_REMOVED = 0x0010, KERNFS_NS = 0x0020, KERNFS_HAS_SEQ_SHOW = 0x0040, KERNFS_HAS_MMAP = 0x0080, -- cgit v1.2.3
From 6b0afc2a21726b2d6b6aa441af40cafaf5405cc8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:03:01 -0500 Subject: kernfs, sysfs, driver-core: implement kernfs_remove_self() and its wrappers
Sometimes it's necessary to implement a node which wants to delete nodes including itself. This isn't straightforward because of kernfs active reference. While a file operation is in progress, an active reference is held and kernfs_remove() waits for all such references to drain before completing. For a self-deleting node, this is a deadlock as kernfs_remove() ends up waiting for an active reference that itself is sitting on top of.
This currently is worked around in the sysfs layer using sysfs_schedule_callback() which makes such removals asynchronous. While it works, it's rather cumbersome and inherently breaks synchronicity of the operation - the file operation which triggered the operation may complete before the removal is finished (or even started) and the removal may fail asynchronously. If a removal operation is immediately followed by another operation which expects the specific name to be available (e.g. removal followed by rename onto the same name), there's no way to make the latter operation reliable.
The thing is there's no inherent reason for this to be asynchronous. All that's necessary to make this synchronous is a dedicated operation which drops its own active ref and deactivates self.
This patch implements kernfs_remove_self() and its wrappers in sysfs and driver core. kernfs_remove_self() is to be called from one of the file operations, drops the active ref the task is holding, removes the self node, and restores active ref to the dead node so that the ref is balanced afterwards. __kernfs_remove() is updated so that it takes an early exit if the target node is already fully removed so that the active ref restored by kernfs_remove_self() after removal doesn't confuse the deactivation path. This makes implementing self-deleting nodes very easy.
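For illustration only (this code is not in the patch), the device-level wrapper added below, device_remove_file_self(), can be used to build a self-deleting "delete" attribute; my_device_teardown() is a made-up placeholder for whatever driver-specific cleanup follows:

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	/* only the writer that wins the self-removal performs the teardown */
	if (device_remove_file_self(dev, attr))
		my_device_teardown(dev);	/* hypothetical driver cleanup */
	return count;
}
static DEVICE_ATTR(delete, S_IWUSR, NULL, delete_store);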
The normal removal path doesn't even need to be changed to use kernfs_remove_self() for the self-deleting node. The method can invoke kernfs_remove_self() on itself before proceeding the normal removal path. kernfs_remove() invoked on the node by the normal deletion path will simply be ignored. This will replace sysfs_schedule_callback(). A subtle feature of sysfs_schedule_callback() is that it collapses multiple invocations - even if multiple removals are triggered, the removal callback is run only once. An equivalent effect can be achieved by testing the return value of kernfs_remove_self() - only the one which gets %true return value should proceed with actual deletion. All other instances of kernfs_remove_self() will wait till the enclosing kernfs operation which invoked the winning instance of kernfs_remove_self() finishes and then return %false. This trivially makes all users of kernfs_remove_self() automatically show correct synchronous behavior even when there are multiple concurrent operations - all "echo 1 > delete" instances will finish only after the whole operation is completed by one of the instances. Note that manipulation of active ref is implemented in separate public functions - kernfs_[un]break_active_protection(). kernfs_remove_self() is the only user at the moment but this will be used to cater to more complex cases. v2: For !CONFIG_SYSFS, dummy version kernfs_remove_self() was missing and sysfs_remove_file_self() had incorrect return type. Fix it. Reported by kbuild test bot. v3: kernfs_[un]break_active_protection() separated out from kernfs_remove_self() and exposed as public API. Signed-off-by: Tejun Heo Cc: Alan Stern Cc: kbuild test robot Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 17 ++++++ fs/kernfs/dir.c | 138 ++++++++++++++++++++++++++++++++++++++++++++++++- fs/sysfs/file.c | 23 +++++++++ include/linux/device.h | 2 + include/linux/kernfs.h | 8 +++ include/linux/sysfs.h | 7 +++ 6 files changed, 194 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/base/core.c b/drivers/base/core.c index 2b567177ef78..9db57afcf81f 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -570,6 +570,23 @@ void device_remove_file(struct device *dev, } EXPORT_SYMBOL_GPL(device_remove_file); +/** + * device_remove_file_self - remove sysfs attribute file from its own method. + * @dev: device. + * @attr: device attribute descriptor. + * + * See kernfs_remove_self() for details. + */ +bool device_remove_file_self(struct device *dev, + const struct device_attribute *attr) +{ + if (dev) + return sysfs_remove_file_self(&dev->kobj, &attr->attr); + else + return false; +} +EXPORT_SYMBOL_GPL(device_remove_file_self); + /** * device_create_bin_file - create sysfs binary attribute file for device. * @dev: device. diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index d0fd739bf82d..8c63ae1bccb6 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -761,7 +761,12 @@ static void __kernfs_remove(struct kernfs_node *kn) lockdep_assert_held(&kernfs_mutex); - if (!kn) + /* + * Short-circuit if non-root @kn has already finished removal. + * This is for kernfs_remove_self() which plays with active ref + * after removal. 
+ */ + if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb))) return; pr_debug("kernfs %s: removing\n", kn->name); @@ -820,6 +825,137 @@ void kernfs_remove(struct kernfs_node *kn) mutex_unlock(&kernfs_mutex); } +/** + * kernfs_break_active_protection - break out of active protection + * @kn: the self kernfs_node + * + * The caller must be running off of a kernfs operation which is invoked + * with an active reference - e.g. one of kernfs_ops. Each invocation of + * this function must also be matched with an invocation of + * kernfs_unbreak_active_protection(). + * + * This function releases the active reference of @kn the caller is + * holding. Once this function is called, @kn may be removed at any point + * and the caller is solely responsible for ensuring that the objects it + * dereferences are accessible. + */ +void kernfs_break_active_protection(struct kernfs_node *kn) +{ + /* + * Take out ourself out of the active ref dependency chain. If + * we're called without an active ref, lockdep will complain. + */ + kernfs_put_active(kn); +} + +/** + * kernfs_unbreak_active_protection - undo kernfs_break_active_protection() + * @kn: the self kernfs_node + * + * If kernfs_break_active_protection() was called, this function must be + * invoked before finishing the kernfs operation. Note that while this + * function restores the active reference, it doesn't and can't actually + * restore the active protection - @kn may already or be in the process of + * being removed. Once kernfs_break_active_protection() is invoked, that + * protection is irreversibly gone for the kernfs operation instance. + * + * While this function may be called at any point after + * kernfs_break_active_protection() is invoked, its most useful location + * would be right before the enclosing kernfs operation returns. + */ +void kernfs_unbreak_active_protection(struct kernfs_node *kn) +{ + /* + * @kn->active could be in any state; however, the increment we do + * here will be undone as soon as the enclosing kernfs operation + * finishes and this temporary bump can't break anything. If @kn + * is alive, nothing changes. If @kn is being deactivated, the + * soon-to-follow put will either finish deactivation or restore + * deactivated state. If @kn is already removed, the temporary + * bump is guaranteed to be gone before @kn is released. + */ + atomic_inc(&kn->active); + if (kernfs_lockdep(kn)) + rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_); +} + +/** + * kernfs_remove_self - remove a kernfs_node from its own method + * @kn: the self kernfs_node to remove + * + * The caller must be running off of a kernfs operation which is invoked + * with an active reference - e.g. one of kernfs_ops. This can be used to + * implement a file operation which deletes itself. + * + * For example, the "delete" file for a sysfs device directory can be + * implemented by invoking kernfs_remove_self() on the "delete" file + * itself. This function breaks the circular dependency of trying to + * deactivate self while holding an active ref itself. It isn't necessary + * to modify the usual removal path to use kernfs_remove_self(). The + * "delete" implementation can simply invoke kernfs_remove_self() on self + * before proceeding with the usual removal path. kernfs will ignore later + * kernfs_remove() on self. + * + * kernfs_remove_self() can be called multiple times concurrently on the + * same kernfs_node. Only the first one actually performs removal and + * returns %true. 
All others will wait until the kernfs operation which + * won self-removal finishes and return %false. Note that the losers wait + * for the completion of not only the winning kernfs_remove_self() but also + * the whole kernfs_ops which won the arbitration. This can be used to + * guarantee, for example, all concurrent writes to a "delete" file to + * finish only after the whole operation is complete. + */ +bool kernfs_remove_self(struct kernfs_node *kn) +{ + bool ret; + + mutex_lock(&kernfs_mutex); + kernfs_break_active_protection(kn); + + /* + * SUICIDAL is used to arbitrate among competing invocations. Only + * the first one will actually perform removal. When the removal + * is complete, SUICIDED is set and the active ref is restored + * while holding kernfs_mutex. The ones which lost arbitration + * waits for SUICDED && drained which can happen only after the + * enclosing kernfs operation which executed the winning instance + * of kernfs_remove_self() finished. + */ + if (!(kn->flags & KERNFS_SUICIDAL)) { + kn->flags |= KERNFS_SUICIDAL; + __kernfs_remove(kn); + kn->flags |= KERNFS_SUICIDED; + ret = true; + } else { + wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq; + DEFINE_WAIT(wait); + + while (true) { + prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE); + + if ((kn->flags & KERNFS_SUICIDED) && + atomic_read(&kn->active) == KN_DEACTIVATED_BIAS) + break; + + mutex_unlock(&kernfs_mutex); + schedule(); + mutex_lock(&kernfs_mutex); + } + finish_wait(waitq, &wait); + WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb)); + ret = false; + } + + /* + * This must be done while holding kernfs_mutex; otherwise, waiting + * for SUICIDED && deactivated could finish prematurely. + */ + kernfs_unbreak_active_protection(kn); + + mutex_unlock(&kernfs_mutex); + return ret; +} + /** * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it * @parent: parent of the target diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 810cf6e613e5..1b8b91b67fdb 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -372,6 +372,29 @@ void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, } EXPORT_SYMBOL_GPL(sysfs_remove_file_ns); +/** + * sysfs_remove_file_self - remove an object attribute from its own method + * @kobj: object we're acting for + * @attr: attribute descriptor + * + * See kernfs_remove_self() for details. 
+ */ +bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr) +{ + struct kernfs_node *parent = kobj->sd; + struct kernfs_node *kn; + bool ret; + + kn = kernfs_find_and_get(parent, attr->name); + if (WARN_ON_ONCE(!kn)) + return false; + + ret = kernfs_remove_self(kn); + + kernfs_put(kn); + return ret; +} + void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr) { int i; diff --git a/include/linux/device.h b/include/linux/device.h index 952b01033c32..1ff3f1697513 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -560,6 +560,8 @@ extern int device_create_file(struct device *device, const struct device_attribute *entry); extern void device_remove_file(struct device *dev, const struct device_attribute *attr); +extern bool device_remove_file_self(struct device *dev, + const struct device_attribute *attr); extern int __must_check device_create_bin_file(struct device *dev, const struct bin_attribute *attr); extern void device_remove_bin_file(struct device *dev, diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 917bc6c1eb04..02ac33435808 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -43,6 +43,8 @@ enum kernfs_node_flag { KERNFS_HAS_MMAP = 0x0080, KERNFS_LOCKDEP = 0x0100, KERNFS_STATIC_NAME = 0x0200, + KERNFS_SUICIDAL = 0x0400, + KERNFS_SUICIDED = 0x0800, }; /* type-specific structures for kernfs_node union members */ @@ -234,6 +236,9 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, const char *name, struct kernfs_node *target); void kernfs_remove(struct kernfs_node *kn); +void kernfs_break_active_protection(struct kernfs_node *kn); +void kernfs_unbreak_active_protection(struct kernfs_node *kn); +bool kernfs_remove_self(struct kernfs_node *kn); int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, const void *ns); int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, @@ -291,6 +296,9 @@ kernfs_create_link(struct kernfs_node *parent, const char *name, static inline void kernfs_remove(struct kernfs_node *kn) { } +static inline bool kernfs_remove_self(struct kernfs_node *kn) +{ return false; } + static inline int kernfs_remove_by_name_ns(struct kernfs_node *kn, const char *name, const void *ns) { return -ENOSYS; } diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 30b2ebee6439..bd96c603ab6c 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -198,6 +198,7 @@ int __must_check sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode); void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); +bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); int __must_check sysfs_create_bin_file(struct kobject *kobj, @@ -301,6 +302,12 @@ static inline void sysfs_remove_file_ns(struct kobject *kobj, { } +static inline bool sysfs_remove_file_self(struct kobject *kobj, + const struct attribute *attr) +{ + return false; +} + static inline void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr) { -- cgit v1.2.3 From ce8b04aa6c9bdf211b921fdd18c040ea29516b97 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:03:05 -0500 Subject: sysfs, driver-core: remove unused {sysfs|device}_schedule_callback_owner() All device_schedule_callback_owner() users are converted to use device_remove_file_self(). 
Remove now unused {sysfs|device}_schedule_callback_owner(). Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 33 ------------------ fs/sysfs/file.c | 92 -------------------------------------------------- include/linux/device.h | 11 +----- include/linux/sysfs.h | 9 ----- 4 files changed, 1 insertion(+), 144 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/core.c b/drivers/base/core.c index 9db57afcf81f..4195364f9fdd 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -615,39 +615,6 @@ void device_remove_bin_file(struct device *dev, } EXPORT_SYMBOL_GPL(device_remove_bin_file); -/** - * device_schedule_callback_owner - helper to schedule a callback for a device - * @dev: device. - * @func: callback function to invoke later. - * @owner: module owning the callback routine - * - * Attribute methods must not unregister themselves or their parent device - * (which would amount to the same thing). Attempts to do so will deadlock, - * since unregistration is mutually exclusive with driver callbacks. - * - * Instead methods can call this routine, which will attempt to allocate - * and schedule a workqueue request to call back @func with @dev as its - * argument in the workqueue's process context. @dev will be pinned until - * @func returns. - * - * This routine is usually called via the inline device_schedule_callback(), - * which automatically sets @owner to THIS_MODULE. - * - * Returns 0 if the request was submitted, -ENOMEM if storage could not - * be allocated, -ENODEV if a reference to @owner isn't available. - * - * NOTE: This routine won't work if CONFIG_SYSFS isn't set! It uses an - * underlying sysfs routine (since it is intended for use by attribute - * methods), and if sysfs isn't available you'll get nothing but -ENOSYS. - */ -int device_schedule_callback_owner(struct device *dev, - void (*func)(struct device *), struct module *owner) -{ - return sysfs_schedule_callback(&dev->kobj, - (void (*)(void *)) func, dev, owner); -} -EXPORT_SYMBOL_GPL(device_schedule_callback_owner); - static void klist_children_get(struct klist_node *n) { struct device_private *p = to_device_private_parent(n); diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 1b8b91b67fdb..28cc1acd5439 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -453,95 +453,3 @@ void sysfs_remove_bin_file(struct kobject *kobj, kernfs_remove_by_name(kobj->sd, attr->attr.name); } EXPORT_SYMBOL_GPL(sysfs_remove_bin_file); - -struct sysfs_schedule_callback_struct { - struct list_head workq_list; - struct kobject *kobj; - void (*func)(void *); - void *data; - struct module *owner; - struct work_struct work; -}; - -static struct workqueue_struct *sysfs_workqueue; -static DEFINE_MUTEX(sysfs_workq_mutex); -static LIST_HEAD(sysfs_workq); -static void sysfs_schedule_callback_work(struct work_struct *work) -{ - struct sysfs_schedule_callback_struct *ss = container_of(work, - struct sysfs_schedule_callback_struct, work); - - (ss->func)(ss->data); - kobject_put(ss->kobj); - module_put(ss->owner); - mutex_lock(&sysfs_workq_mutex); - list_del(&ss->workq_list); - mutex_unlock(&sysfs_workq_mutex); - kfree(ss); -} - -/** - * sysfs_schedule_callback - helper to schedule a callback for a kobject - * @kobj: object we're acting for. - * @func: callback function to invoke later. - * @data: argument to pass to @func. - * @owner: module owning the callback code - * - * sysfs attribute methods must not unregister themselves or their parent - * kobject (which would amount to the same thing). 
Attempts to do so will - * deadlock, since unregistration is mutually exclusive with driver - * callbacks. - * - * Instead methods can call this routine, which will attempt to allocate - * and schedule a workqueue request to call back @func with @data as its - * argument in the workqueue's process context. @kobj will be pinned - * until @func returns. - * - * Returns 0 if the request was submitted, -ENOMEM if storage could not - * be allocated, -ENODEV if a reference to @owner isn't available, - * -EAGAIN if a callback has already been scheduled for @kobj. - */ -int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *), - void *data, struct module *owner) -{ - struct sysfs_schedule_callback_struct *ss, *tmp; - - if (!try_module_get(owner)) - return -ENODEV; - - mutex_lock(&sysfs_workq_mutex); - list_for_each_entry_safe(ss, tmp, &sysfs_workq, workq_list) - if (ss->kobj == kobj) { - module_put(owner); - mutex_unlock(&sysfs_workq_mutex); - return -EAGAIN; - } - mutex_unlock(&sysfs_workq_mutex); - - if (sysfs_workqueue == NULL) { - sysfs_workqueue = create_singlethread_workqueue("sysfsd"); - if (sysfs_workqueue == NULL) { - module_put(owner); - return -ENOMEM; - } - } - - ss = kmalloc(sizeof(*ss), GFP_KERNEL); - if (!ss) { - module_put(owner); - return -ENOMEM; - } - kobject_get(kobj); - ss->kobj = kobj; - ss->func = func; - ss->data = data; - ss->owner = owner; - INIT_WORK(&ss->work, sysfs_schedule_callback_work); - INIT_LIST_HEAD(&ss->workq_list); - mutex_lock(&sysfs_workq_mutex); - list_add_tail(&ss->workq_list, &sysfs_workq); - mutex_unlock(&sysfs_workq_mutex); - queue_work(sysfs_workqueue, &ss->work); - return 0; -} -EXPORT_SYMBOL_GPL(sysfs_schedule_callback); diff --git a/include/linux/device.h b/include/linux/device.h index 1ff3f1697513..fb1ba13f7665 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -566,12 +566,6 @@ extern int __must_check device_create_bin_file(struct device *dev, const struct bin_attribute *attr); extern void device_remove_bin_file(struct device *dev, const struct bin_attribute *attr); -extern int device_schedule_callback_owner(struct device *dev, - void (*func)(struct device *dev), struct module *owner); - -/* This is a macro to avoid include problems with THIS_MODULE */ -#define device_schedule_callback(dev, func) \ - device_schedule_callback_owner(dev, func, THIS_MODULE) /* device resource management */ typedef void (*dr_release_t)(struct device *dev, void *res); @@ -931,10 +925,7 @@ extern int device_online(struct device *dev); extern struct device *__root_device_register(const char *name, struct module *owner); -/* - * This is a macro to avoid include problems with THIS_MODULE, - * just as per what is done for device_schedule_callback() above. 
- */ +/* This is a macro to avoid include problems with THIS_MODULE */ #define root_device_register(name) \ __root_device_register(name, THIS_MODULE) diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index bd96c603ab6c..14df05415af9 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -178,9 +178,6 @@ struct sysfs_ops { #ifdef CONFIG_SYSFS -int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *), - void *data, struct module *owner); - int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns); void sysfs_remove_dir(struct kobject *kobj); int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name, @@ -249,12 +246,6 @@ int __must_check sysfs_init(void); #else /* CONFIG_SYSFS */ -static inline int sysfs_schedule_callback(struct kobject *kobj, - void (*func)(void *), void *data, struct module *owner) -{ - return -ENOSYS; -} - static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) { return 0; -- cgit v1.2.3 From 07c7530dd46728e25e938d0eb291f8085435c365 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:09:08 -0500 Subject: kernfs: invoke dir_ops while holding active ref of the target node kernfs_dir_ops are currently being invoked without any active reference, which makes it tricky for the invoked operations to determine whether the objects associated those nodes are safe to access and will remain that way for the duration of such operations. kernfs already has active_ref mechanism to deal with this which makes the removal of a given node the synchronization point for gating the file operations. There's no reason for dir_ops to be any different. Update the dir_ops handling so that active_ref is held while the dir_ops are executing. This guarantees that while a dir_ops is executing the target nodes stay alive. As kernfs_dir_ops doesn't have any in-kernel user at this point, this doesn't affect anybody. 
Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/dir.c | 33 ++++++++++++++++++++++++++++++--- include/linux/kernfs.h | 3 ++- 2 files changed, 32 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 8c63ae1bccb6..bfbfb48f4ad8 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -654,22 +654,36 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry, { struct kernfs_node *parent = dir->i_private; struct kernfs_dir_ops *kdops = kernfs_root(parent)->dir_ops; + int ret; if (!kdops || !kdops->mkdir) return -EPERM; - return kdops->mkdir(parent, dentry->d_name.name, mode); + if (!kernfs_get_active(parent)) + return -ENODEV; + + ret = kdops->mkdir(parent, dentry->d_name.name, mode); + + kernfs_put_active(parent); + return ret; } static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry) { struct kernfs_node *kn = dentry->d_fsdata; struct kernfs_dir_ops *kdops = kernfs_root(kn)->dir_ops; + int ret; if (!kdops || !kdops->rmdir) return -EPERM; - return kdops->rmdir(kn); + if (!kernfs_get_active(kn)) + return -ENODEV; + + ret = kdops->rmdir(kn); + + kernfs_put_active(kn); + return ret; } static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry, @@ -678,11 +692,24 @@ static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry, struct kernfs_node *kn = old_dentry->d_fsdata; struct kernfs_node *new_parent = new_dir->i_private; struct kernfs_dir_ops *kdops = kernfs_root(kn)->dir_ops; + int ret; if (!kdops || !kdops->rename) return -EPERM; - return kdops->rename(kn, new_parent, new_dentry->d_name.name); + if (!kernfs_get_active(kn)) + return -ENODEV; + + if (!kernfs_get_active(new_parent)) { + kernfs_put_active(kn); + return -ENODEV; + } + + ret = kdops->rename(kn, new_parent, new_dentry->d_name.name); + + kernfs_put_active(new_parent); + kernfs_put_active(kn); + return ret; } const struct inode_operations kernfs_dir_iops = { diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 02ac33435808..58a131ddc6a3 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -111,7 +111,8 @@ struct kernfs_node { * kernfs_dir_ops may be specified on kernfs_create_root() to support * directory manipulation syscalls. These optional callbacks are invoked * on the matching syscalls and can perform any kernfs operations which - * don't necessarily have to be the exact operation requested. + * don't necessarily have to be the exact operation requested. An active + * reference is held for each kernfs_node parameter. */ struct kernfs_dir_ops { int (*mkdir)(struct kernfs_node *parent, const char *name, -- cgit v1.2.3 From 90c07c895c87d38db100b6afcb686ab3ef0d6a64 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:09:09 -0500 Subject: kernfs: rename kernfs_dir_ops to kernfs_syscall_ops We're gonna need non-dir syscall callbacks, which will make dir_ops a misnomer. Let's rename kernfs_dir_ops to kernfs_syscall_ops. This is pure rename. 
Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/dir.c | 25 +++++++++++++------------ include/linux/kernfs.h | 18 +++++++++--------- 2 files changed, 22 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index bfbfb48f4ad8..f58d2f16eaf7 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -527,13 +527,14 @@ EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns); /** * kernfs_create_root - create a new kernfs hierarchy - * @kdops: optional directory syscall operations for the hierarchy + * @scops: optional syscall operations for the hierarchy * @priv: opaque data associated with the new directory * * Returns the root of the new hierarchy on success, ERR_PTR() value on * failure. */ -struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv) +struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, + void *priv) { struct kernfs_root *root; struct kernfs_node *kn; @@ -556,7 +557,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv) kn->priv = priv; kn->dir.root = root; - root->dir_ops = kdops; + root->syscall_ops = scops; root->kn = kn; init_waitqueue_head(&root->deactivate_waitq); @@ -653,16 +654,16 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct kernfs_node *parent = dir->i_private; - struct kernfs_dir_ops *kdops = kernfs_root(parent)->dir_ops; + struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops; int ret; - if (!kdops || !kdops->mkdir) + if (!scops || !scops->mkdir) return -EPERM; if (!kernfs_get_active(parent)) return -ENODEV; - ret = kdops->mkdir(parent, dentry->d_name.name, mode); + ret = scops->mkdir(parent, dentry->d_name.name, mode); kernfs_put_active(parent); return ret; @@ -671,16 +672,16 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry, static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry) { struct kernfs_node *kn = dentry->d_fsdata; - struct kernfs_dir_ops *kdops = kernfs_root(kn)->dir_ops; + struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; int ret; - if (!kdops || !kdops->rmdir) + if (!scops || !scops->rmdir) return -EPERM; if (!kernfs_get_active(kn)) return -ENODEV; - ret = kdops->rmdir(kn); + ret = scops->rmdir(kn); kernfs_put_active(kn); return ret; @@ -691,10 +692,10 @@ static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry, { struct kernfs_node *kn = old_dentry->d_fsdata; struct kernfs_node *new_parent = new_dir->i_private; - struct kernfs_dir_ops *kdops = kernfs_root(kn)->dir_ops; + struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; int ret; - if (!kdops || !kdops->rename) + if (!scops || !scops->rename) return -EPERM; if (!kernfs_get_active(kn)) @@ -705,7 +706,7 @@ static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry, return -ENODEV; } - ret = kdops->rename(kn, new_parent, new_dentry->d_name.name); + ret = scops->rename(kn, new_parent, new_dentry->d_name.name); kernfs_put_active(new_parent); kernfs_put_active(kn); diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 58a131ddc6a3..5ddc47450335 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -108,13 +108,13 @@ struct kernfs_node { }; /* - * kernfs_dir_ops may be specified on kernfs_create_root() to support - * directory manipulation syscalls. 
These optional callbacks are invoked - * on the matching syscalls and can perform any kernfs operations which - * don't necessarily have to be the exact operation requested. An active - * reference is held for each kernfs_node parameter. + * kernfs_syscall_ops may be specified on kernfs_create_root() to support + * syscalls. These optional callbacks are invoked on the matching syscalls + * and can perform any kernfs operations which don't necessarily have to be + * the exact operation requested. An active reference is held for each + * kernfs_node parameter. */ -struct kernfs_dir_ops { +struct kernfs_syscall_ops { int (*mkdir)(struct kernfs_node *parent, const char *name, umode_t mode); int (*rmdir)(struct kernfs_node *kn); @@ -128,7 +128,7 @@ struct kernfs_root { /* private fields, do not use outside kernfs proper */ struct ida ino_ida; - struct kernfs_dir_ops *dir_ops; + struct kernfs_syscall_ops *syscall_ops; wait_queue_head_t deactivate_waitq; }; @@ -219,7 +219,7 @@ struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent, void kernfs_get(struct kernfs_node *kn); void kernfs_put(struct kernfs_node *kn); -struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, +struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, void *priv); void kernfs_destroy_root(struct kernfs_root *root); @@ -273,7 +273,7 @@ static inline void kernfs_get(struct kernfs_node *kn) { } static inline void kernfs_put(struct kernfs_node *kn) { } static inline struct kernfs_root * -kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv) +kernfs_create_root(struct kernfs_syscall_ops *scops, void *priv) { return ERR_PTR(-ENOSYS); } static inline void kernfs_destroy_root(struct kernfs_root *root) { } -- cgit v1.2.3 From 6a7fed4eefddad48224f1c9d534b4e262f0897f6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:09:10 -0500 Subject: kernfs: implement kernfs_syscall_ops->remount_fs() and ->show_options() Add two super_block related syscall callbacks ->remount_fs() and ->show_options() to kernfs_syscall_ops. These simply forward the matching super_operations. 
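For illustration only (not part of this patch), a hypothetical hierarchy could forward a single ",noprefix" mount option through the new callbacks; the option name, struct my_fs_opts and my_parse_options() are invented:

  #include <linux/kernfs.h>
  #include <linux/seq_file.h>

  struct my_fs_opts {
          bool noprefix;
  };

  /* Invented helper: re-parses the option string into @opts. */
  extern int my_parse_options(struct my_fs_opts *opts, char *data);

  static int my_show_options(struct seq_file *sf, struct kernfs_root *root)
  {
          struct my_fs_opts *opts = root->kn->priv;

          if (opts->noprefix)
                  seq_puts(sf, ",noprefix");
          return 0;
  }

  static int my_remount_fs(struct kernfs_root *root, int *flags, char *data)
  {
          /* @flags and @data arrive verbatim from the VFS ->remount_fs(). */
          return my_parse_options(root->kn->priv, data);
  }

  static struct kernfs_syscall_ops my_syscall_ops = {
          .remount_fs   = my_remount_fs,
          .show_options = my_show_options,
  };
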
Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/mount.c | 23 +++++++++++++++++++++++ include/linux/kernfs.h | 3 +++ 2 files changed, 26 insertions(+) (limited to 'include/linux') diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index 0d6ce895a9ee..70cc6983d9b5 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -19,10 +19,33 @@ struct kmem_cache *kernfs_node_cache; +static int kernfs_sop_remount_fs(struct super_block *sb, int *flags, char *data) +{ + struct kernfs_root *root = kernfs_info(sb)->root; + struct kernfs_syscall_ops *scops = root->syscall_ops; + + if (scops && scops->remount_fs) + return scops->remount_fs(root, flags, data); + return 0; +} + +static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry) +{ + struct kernfs_root *root = kernfs_root(dentry->d_fsdata); + struct kernfs_syscall_ops *scops = root->syscall_ops; + + if (scops && scops->show_options) + return scops->show_options(sf, root); + return 0; +} + static const struct super_operations kernfs_sops = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, .evict_inode = kernfs_evict_inode, + + .remount_fs = kernfs_sop_remount_fs, + .show_options = kernfs_sop_show_options, }; static int kernfs_fill_super(struct super_block *sb) diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 5ddc47450335..5d5b7e947294 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -115,6 +115,9 @@ struct kernfs_node { * kernfs_node parameter. */ struct kernfs_syscall_ops { + int (*remount_fs)(struct kernfs_root *root, int *flags, char *data); + int (*show_options)(struct seq_file *sf, struct kernfs_root *root); + int (*mkdir)(struct kernfs_node *parent, const char *name, umode_t mode); int (*rmdir)(struct kernfs_node *kn); -- cgit v1.2.3 From d35258ef702cca0c4e66d799f8e38b78c02ce8a5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:09:12 -0500 Subject: kernfs: allow nodes to be created in the deactivated state Currently, kernfs_nodes are made visible to userland on creation, which makes it difficult for kernfs users to atomically succeed or fail creation of multiple nodes. In addition, if something fails after creating some nodes, the created nodes might already be in use and their active refs need to be drained for removal, which has the potential to introduce tricky reverse locking dependency on active_ref depending on how the error path is synchronized. This patch introduces per-root flag KERNFS_ROOT_CREATE_DEACTIVATED. If set, all nodes under the root are created in the deactivated state and stay invisible to userland until explicitly enabled by the new kernfs_activate() API. Also, nodes which have never been activated are guaranteed to bypass draining on removal thus allowing error paths to not worry about lockding dependency on active_ref draining. 
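For illustration only (not part of this patch), an atomic init sequence built on the new flag could look roughly like this; everything except the kernfs calls is invented:

  #include <linux/err.h>
  #include <linux/kernfs.h>

  static struct kernfs_root *my_root;

  static int __init my_init(void)
  {
          struct kernfs_node *dir;

          my_root = kernfs_create_root(NULL, KERNFS_ROOT_CREATE_DEACTIVATED,
                                       NULL);
          if (IS_ERR(my_root))
                  return PTR_ERR(my_root);

          dir = kernfs_create_dir(my_root->kn, "state", 0755, NULL);
          if (IS_ERR(dir)) {
                  /* Nothing was activated, so no active refs to drain. */
                  kernfs_destroy_root(my_root);
                  return PTR_ERR(dir);
          }

          /* Make the whole subtree visible to userland in one step. */
          kernfs_activate(my_root->kn);
          return 0;
  }
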
Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/dir.c | 71 +++++++++++++++++++++++++++++++++++++++++++++----- fs/sysfs/mount.c | 2 +- include/linux/kernfs.h | 15 +++++++++-- 3 files changed, 78 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 89f8462f337e..3cff0a233cd1 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -435,7 +435,7 @@ int kernfs_add_one(struct kernfs_node *kn) goto out_unlock; ret = -ENOENT; - if (!kernfs_active(parent)) + if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent)) goto out_unlock; kn->hash = kernfs_name_hash(kn->name, kn->ns); @@ -451,9 +451,19 @@ int kernfs_add_one(struct kernfs_node *kn) ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME; } - /* Mark the entry added into directory tree */ - atomic_sub(KN_DEACTIVATED_BIAS, &kn->active); - ret = 0; + mutex_unlock(&kernfs_mutex); + + /* + * Activate the new node unless CREATE_DEACTIVATED is requested. + * If not activated here, the kernfs user is responsible for + * activating the node with kernfs_activate(). A node which hasn't + * been activated is not visible to userland and its removal won't + * trigger deactivation. + */ + if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED)) + kernfs_activate(kn); + return 0; + out_unlock: mutex_unlock(&kernfs_mutex); return ret; @@ -528,13 +538,14 @@ EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns); /** * kernfs_create_root - create a new kernfs hierarchy * @scops: optional syscall operations for the hierarchy + * @flags: KERNFS_ROOT_* flags * @priv: opaque data associated with the new directory * * Returns the root of the new hierarchy on success, ERR_PTR() value on * failure. */ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, - void *priv) + unsigned int flags, void *priv) { struct kernfs_root *root; struct kernfs_node *kn; @@ -553,14 +564,17 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, return ERR_PTR(-ENOMEM); } - atomic_sub(KN_DEACTIVATED_BIAS, &kn->active); kn->priv = priv; kn->dir.root = root; root->syscall_ops = scops; + root->flags = flags; root->kn = kn; init_waitqueue_head(&root->deactivate_waitq); + if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED)) + kernfs_activate(kn); + return root; } @@ -783,6 +797,40 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos, return pos->parent; } +/** + * kernfs_activate - activate a node which started deactivated + * @kn: kernfs_node whose subtree is to be activated + * + * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node + * needs to be explicitly activated. A node which hasn't been activated + * isn't visible to userland and deactivation is skipped during its + * removal. This is useful to construct atomic init sequences where + * creation of multiple nodes should either succeed or fail atomically. + * + * The caller is responsible for ensuring that this function is not called + * after kernfs_remove*() is invoked on @kn. 
+ */ +void kernfs_activate(struct kernfs_node *kn) +{ + struct kernfs_node *pos; + + mutex_lock(&kernfs_mutex); + + pos = NULL; + while ((pos = kernfs_next_descendant_post(pos, kn))) { + if (!pos || (pos->flags & KERNFS_ACTIVATED)) + continue; + + WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb)); + WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS); + + atomic_sub(KN_DEACTIVATED_BIAS, &pos->active); + pos->flags |= KERNFS_ACTIVATED; + } + + mutex_unlock(&kernfs_mutex); +} + static void __kernfs_remove(struct kernfs_node *kn) { struct kernfs_node *pos; @@ -817,7 +865,16 @@ static void __kernfs_remove(struct kernfs_node *kn) */ kernfs_get(pos); - kernfs_drain(pos); + /* + * Drain iff @kn was activated. This avoids draining and + * its lockdep annotations for nodes which have never been + * activated and allows embedding kernfs_remove() in create + * error paths without worrying about draining. + */ + if (kn->flags & KERNFS_ACTIVATED) + kernfs_drain(pos); + else + WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS); /* * kernfs_unlink_sibling() succeeds once per node. Use it diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index 6211230814fd..5c7fdd9c6811 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -62,7 +62,7 @@ int __init sysfs_init(void) { int err; - sysfs_root = kernfs_create_root(NULL, NULL); + sysfs_root = kernfs_create_root(NULL, 0, NULL); if (IS_ERR(sysfs_root)) return PTR_ERR(sysfs_root); diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 5d5b7e947294..4520c86f5cb4 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -38,6 +38,7 @@ enum kernfs_node_type { #define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK enum kernfs_node_flag { + KERNFS_ACTIVATED = 0x0010, KERNFS_NS = 0x0020, KERNFS_HAS_SEQ_SHOW = 0x0040, KERNFS_HAS_MMAP = 0x0080, @@ -47,6 +48,11 @@ enum kernfs_node_flag { KERNFS_SUICIDED = 0x0800, }; +/* @flags for kernfs_create_root() */ +enum kernfs_root_flag { + KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001, +}; + /* type-specific structures for kernfs_node union members */ struct kernfs_elem_dir { unsigned long subdirs; @@ -128,6 +134,7 @@ struct kernfs_syscall_ops { struct kernfs_root { /* published fields */ struct kernfs_node *kn; + unsigned int flags; /* KERNFS_ROOT_* flags */ /* private fields, do not use outside kernfs proper */ struct ida ino_ida; @@ -223,7 +230,7 @@ void kernfs_get(struct kernfs_node *kn); void kernfs_put(struct kernfs_node *kn); struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, - void *priv); + unsigned int flags, void *priv); void kernfs_destroy_root(struct kernfs_root *root); struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, @@ -239,6 +246,7 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, const char *name, struct kernfs_node *target); +void kernfs_activate(struct kernfs_node *kn); void kernfs_remove(struct kernfs_node *kn); void kernfs_break_active_protection(struct kernfs_node *kn); void kernfs_unbreak_active_protection(struct kernfs_node *kn); @@ -276,7 +284,8 @@ static inline void kernfs_get(struct kernfs_node *kn) { } static inline void kernfs_put(struct kernfs_node *kn) { } static inline struct kernfs_root * -kernfs_create_root(struct kernfs_syscall_ops *scops, void *priv) +kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags, + void *priv) { return ERR_PTR(-ENOSYS); } static inline void kernfs_destroy_root(struct kernfs_root *root) { } @@ 
-298,6 +307,8 @@ kernfs_create_link(struct kernfs_node *parent, const char *name, struct kernfs_node *target) { return ERR_PTR(-ENOSYS); } +static inline void kernfs_activate(struct kernfs_node *kn) { } + static inline void kernfs_remove(struct kernfs_node *kn) { } static inline bool kernfs_remove_self(struct kernfs_node *kn) -- cgit v1.2.3 From 4d3773c4bb41ed5228f1ab7a4a52b79e17b10515 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:09:13 -0500 Subject: kernfs: implement kernfs_ops->atomic_write_len A write to a kernfs_node is buffered through a kernel buffer. Writes <= PAGE_SIZE are performed atomically, while larger ones are executed in PAGE_SIZE chunks. While this is enough for sysfs, cgroup which is scheduled to be converted to use kernfs needs a bit more control over it. This patch adds kernfs_ops->atomic_write_len. If not set (zero), the behavior stays the same. If set, writes upto the size are executed atomically and larger writes are rejected with -E2BIG. A different implementation strategy would be allowing configuring chunking size while making the original write size available to the write method; however, such strategy, while being more complicated, doesn't really buy anything. If the write implementation has to handle chunking, the specific chunk size shouldn't matter all that much. Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/file.c | 49 +++++++++++++++++++++++++++++++------------------ include/linux/kernfs.h | 8 ++++++-- 2 files changed, 37 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 10a8c91c49d6..ddcb471b9cc9 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -252,19 +252,9 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct kernfs_open_file *of = kernfs_of(file); - ssize_t len = min_t(size_t, count, PAGE_SIZE); const struct kernfs_ops *ops; - char *buf; - - buf = kmalloc(len + 1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, user_buf, len)) { - len = -EFAULT; - goto out_free; - } - buf[len] = '\0'; /* guarantee string termination */ + char *buf = NULL; + ssize_t len; /* * @of->mutex nests outside active ref and is just to ensure that @@ -273,22 +263,45 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf, mutex_lock(&of->mutex); if (!kernfs_get_active(of->kn)) { mutex_unlock(&of->mutex); - len = -ENODEV; - goto out_free; + return -ENODEV; } ops = kernfs_ops(of->kn); - if (ops->write) - len = ops->write(of, buf, len, *ppos); - else + if (!ops->write) { len = -EINVAL; + goto out_unlock; + } + + if (ops->atomic_write_len) { + len = count; + if (len > ops->atomic_write_len) { + len = -E2BIG; + goto out_unlock; + } + } else { + len = min_t(size_t, count, PAGE_SIZE); + } + + buf = kmalloc(len + 1, GFP_KERNEL); + if (!buf) { + len = -ENOMEM; + goto out_unlock; + } + if (copy_from_user(buf, user_buf, len)) { + len = -EFAULT; + goto out_unlock; + } + buf[len] = '\0'; /* guarantee string termination */ + + len = ops->write(of, buf, len, *ppos); +out_unlock: kernfs_put_active(of->kn); mutex_unlock(&of->mutex); if (len > 0) *ppos += len; -out_free: + kfree(buf); return len; } diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 4520c86f5cb4..47f5235a097a 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -178,9 +178,13 @@ struct kernfs_ops { loff_t off); /* - * write() is bounced through kernel buffer and a write 
larger than - * PAGE_SIZE results in partial operation of PAGE_SIZE. + * write() is bounced through kernel buffer. If atomic_write_len + * is not set, a write larger than PAGE_SIZE results in partial + * operations of PAGE_SIZE chunks. If atomic_write_len is set, + * writes upto the specified size are executed atomically but + * larger ones are rejected with -E2BIG. */ + size_t atomic_write_len; ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, loff_t off); -- cgit v1.2.3 From 2536390da0d300b2734c721235c082498879841d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:09:14 -0500 Subject: kernfs: add kernfs_open_file->priv Add a private data field to be used by kernfs file operations. This generally makes sense and will be used by cgroup. Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- include/linux/kernfs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 47f5235a097a..9ca0f09757a1 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -146,6 +146,7 @@ struct kernfs_open_file { /* published fields */ struct kernfs_node *kn; struct file *file; + void *priv; /* private fields, do not use outside kernfs proper */ struct mutex mutex; -- cgit v1.2.3 From 0c23b2259a4850494e2c53e864ea840597c6cdd3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:09:15 -0500 Subject: kernfs: implement kernfs_node_from_dentry(), kernfs_root_from_sb() and kernfs_rename() Implement helpers to determine node from dentry and root from super_block. Also add a kernfs_rename_ns() wrapper which assumes NULL namespace. These generally make sense and will be used by cgroup. v2: Some dummy implementations for !CONFIG_SYSFS was missing. Fixed. Reported by kbuild test robot. Signed-off-by: Tejun Heo Cc: kbuild test robot Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/dir.c | 18 ++++++++++++++++++ fs/kernfs/mount.c | 14 ++++++++++++++ include/linux/kernfs.h | 16 ++++++++++++++++ 3 files changed, 48 insertions(+) (limited to 'include/linux') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 3cff0a233cd1..42a250f83b98 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -350,6 +350,24 @@ const struct dentry_operations kernfs_dops = { .d_release = kernfs_dop_release, }; +/** + * kernfs_node_from_dentry - determine kernfs_node associated with a dentry + * @dentry: the dentry in question + * + * Return the kernfs_node associated with @dentry. If @dentry is not a + * kernfs one, %NULL is returned. + * + * While the returned kernfs_node will stay accessible as long as @dentry + * is accessible, the returned node can be in any state and the caller is + * fully responsible for determining what's accessible. + */ +struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry) +{ + if (dentry->d_op == &kernfs_dops) + return dentry->d_fsdata; + return NULL; +} + static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, const char *name, umode_t mode, unsigned flags) diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index 70cc6983d9b5..e5b28b0ebc37 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -48,6 +48,20 @@ static const struct super_operations kernfs_sops = { .show_options = kernfs_sop_show_options, }; +/** + * kernfs_root_from_sb - determine kernfs_root associated with a super_block + * @sb: the super_block in question + * + * Return the kernfs_root associated with @sb. If @sb is not a kernfs one, + * %NULL is returned. 
+ */ +struct kernfs_root *kernfs_root_from_sb(struct super_block *sb) +{ + if (sb->s_op == &kernfs_sops) + return kernfs_info(sb)->root; + return NULL; +} + static int kernfs_fill_super(struct super_block *sb) { struct kernfs_super_info *info = kernfs_info(sb); diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 9ca0f09757a1..9c899040c05e 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -234,6 +234,9 @@ struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent, void kernfs_get(struct kernfs_node *kn); void kernfs_put(struct kernfs_node *kn); +struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry); +struct kernfs_root *kernfs_root_from_sb(struct super_block *sb); + struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags, void *priv); void kernfs_destroy_root(struct kernfs_root *root); @@ -288,6 +291,12 @@ kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name, static inline void kernfs_get(struct kernfs_node *kn) { } static inline void kernfs_put(struct kernfs_node *kn) { } +static inline struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry) +{ return NULL; } + +static inline struct kernfs_root *kernfs_root_from_sb(struct super_block *sb) +{ return NULL; } + static inline struct kernfs_root * kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags, void *priv) @@ -388,6 +397,13 @@ static inline int kernfs_remove_by_name(struct kernfs_node *parent, return kernfs_remove_by_name_ns(parent, name, NULL); } +static inline int kernfs_rename(struct kernfs_node *kn, + struct kernfs_node *new_parent, + const char *new_name) +{ + return kernfs_rename_ns(kn, new_parent, new_name, NULL); +} + static inline struct dentry * kernfs_mount(struct file_system_type *fs_type, int flags, struct kernfs_root *root) -- cgit v1.2.3 From 3eef34ad7dc369b7183ec383908aff3da2f6e5ec Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 7 Feb 2014 13:32:07 -0500 Subject: kernfs: implement kernfs_get_parent(), kernfs_name/path() and friends kernfs_node->parent and ->name are currently marked as "published" indicating that kernfs users may access them directly; however, those fields may get updated by kernfs_rename[_ns]() and unrestricted access may lead to erroneous values or oops. Protect ->parent and ->name updates with a irq-safe spinlock kernfs_rename_lock and implement the following accessors for these fields. * kernfs_name() - format the node's name into the specified buffer * kernfs_path() - format the node's path into the specified buffer * pr_cont_kernfs_name() - pr_cont a node's name (doesn't need buffer) * pr_cont_kernfs_path() - pr_cont a node's path (doesn't need buffer) * kernfs_get_parent() - pin and return a node's parent All can be called under any context. The recursive sysfs_pathname() in fs/sysfs/dir.c is replaced with kernfs_path() and sysfs_rename_dir_ns() is updated to use kernfs_get_parent() instead of dereferencing parent directly. v2: Dummy definition of kernfs_path() for !CONFIG_KERNFS was missing static inline making it cause a lot of build warnings. Add it. 
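For illustration only (not part of this patch), a diagnostic helper could use the new accessors as sketched below; my_report_node() itself is invented:

  #include <linux/kernel.h>
  #include <linux/kernfs.h>
  #include <linux/limits.h>
  #include <linux/slab.h>

  static void my_report_node(struct kernfs_node *kn)
  {
          struct kernfs_node *parent;
          char *buf;

          buf = kzalloc(PATH_MAX, GFP_KERNEL);
          if (buf) {
                  /* Returns a pointer into @buf, or NULL if it didn't fit. */
                  char *path = kernfs_path(kn, buf, PATH_MAX);

                  pr_info("node at %s\n", path ? path : "(path too long)");
                  kfree(buf);
          }

          parent = kernfs_get_parent(kn); /* pinned; NULL for the root */
          if (parent) {
                  char name[64];

                  kernfs_name(parent, name, sizeof(name));
                  pr_info("parent: %s\n", name);
                  kernfs_put(parent);
          }
  }
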
Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/dir.c | 175 ++++++++++++++++++++++++++++++++++++++++++++++--- fs/sysfs/dir.c | 44 ++++--------- include/linux/kernfs.h | 26 +++++++- 3 files changed, 203 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 42a250f83b98..a347792c2e5a 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -19,6 +19,8 @@ #include "kernfs-internal.h" DEFINE_MUTEX(kernfs_mutex); +static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */ +static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by rename_lock */ #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb) @@ -37,6 +39,141 @@ static bool kernfs_lockdep(struct kernfs_node *kn) #endif } +static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen) +{ + return strlcpy(buf, kn->parent ? kn->name : "/", buflen); +} + +static char * __must_check kernfs_path_locked(struct kernfs_node *kn, char *buf, + size_t buflen) +{ + char *p = buf + buflen; + int len; + + *--p = '\0'; + + do { + len = strlen(kn->name); + if (p - buf < len + 1) { + buf[0] = '\0'; + p = NULL; + break; + } + p -= len; + memcpy(p, kn->name, len); + *--p = '/'; + kn = kn->parent; + } while (kn && kn->parent); + + return p; +} + +/** + * kernfs_name - obtain the name of a given node + * @kn: kernfs_node of interest + * @buf: buffer to copy @kn's name into + * @buflen: size of @buf + * + * Copies the name of @kn into @buf of @buflen bytes. The behavior is + * similar to strlcpy(). It returns the length of @kn's name and if @buf + * isn't long enough, it's filled upto @buflen-1 and nul terminated. + * + * This function can be called from any context. + */ +int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&kernfs_rename_lock, flags); + ret = kernfs_name_locked(kn, buf, buflen); + spin_unlock_irqrestore(&kernfs_rename_lock, flags); + return ret; +} + +/** + * kernfs_path - build full path of a given node + * @kn: kernfs_node of interest + * @buf: buffer to copy @kn's name into + * @buflen: size of @buf + * + * Builds and returns the full path of @kn in @buf of @buflen bytes. The + * path is built from the end of @buf so the returned pointer usually + * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated + * and %NULL is returned. + */ +char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen) +{ + unsigned long flags; + char *p; + + spin_lock_irqsave(&kernfs_rename_lock, flags); + p = kernfs_path_locked(kn, buf, buflen); + spin_unlock_irqrestore(&kernfs_rename_lock, flags); + return p; +} + +/** + * pr_cont_kernfs_name - pr_cont name of a kernfs_node + * @kn: kernfs_node of interest + * + * This function can be called from any context. + */ +void pr_cont_kernfs_name(struct kernfs_node *kn) +{ + unsigned long flags; + + spin_lock_irqsave(&kernfs_rename_lock, flags); + + kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf)); + pr_cont("%s", kernfs_pr_cont_buf); + + spin_unlock_irqrestore(&kernfs_rename_lock, flags); +} + +/** + * pr_cont_kernfs_path - pr_cont path of a kernfs_node + * @kn: kernfs_node of interest + * + * This function can be called from any context. 
+ */ +void pr_cont_kernfs_path(struct kernfs_node *kn) +{ + unsigned long flags; + char *p; + + spin_lock_irqsave(&kernfs_rename_lock, flags); + + p = kernfs_path_locked(kn, kernfs_pr_cont_buf, + sizeof(kernfs_pr_cont_buf)); + if (p) + pr_cont("%s", p); + else + pr_cont(""); + + spin_unlock_irqrestore(&kernfs_rename_lock, flags); +} + +/** + * kernfs_get_parent - determine the parent node and pin it + * @kn: kernfs_node of interest + * + * Determines @kn's parent, pins and returns it. This function can be + * called from any context. + */ +struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn) +{ + struct kernfs_node *parent; + unsigned long flags; + + spin_lock_irqsave(&kernfs_rename_lock, flags); + parent = kn->parent; + kernfs_get(parent); + spin_unlock_irqrestore(&kernfs_rename_lock, flags); + + return parent; +} + /** * kernfs_name_hash * @name: Null terminated string to hash @@ -1103,8 +1240,14 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, const char *new_name, const void *new_ns) { + struct kernfs_node *old_parent; + const char *old_name = NULL; int error; + /* can't move or rename root */ + if (!kn->parent) + return -EINVAL; + mutex_lock(&kernfs_mutex); error = -ENOENT; @@ -1126,13 +1269,8 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, new_name = kstrdup(new_name, GFP_KERNEL); if (!new_name) goto out; - - if (kn->flags & KERNFS_STATIC_NAME) - kn->flags &= ~KERNFS_STATIC_NAME; - else - kfree(kn->name); - - kn->name = new_name; + } else { + new_name = NULL; } /* @@ -1140,12 +1278,29 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, */ kernfs_unlink_sibling(kn); kernfs_get(new_parent); - kernfs_put(kn->parent); - kn->ns = new_ns; - kn->hash = kernfs_name_hash(kn->name, kn->ns); + + /* rename_lock protects ->parent and ->name accessors */ + spin_lock_irq(&kernfs_rename_lock); + + old_parent = kn->parent; kn->parent = new_parent; + + kn->ns = new_ns; + if (new_name) { + if (!(kn->flags & KERNFS_STATIC_NAME)) + old_name = kn->name; + kn->flags &= ~KERNFS_STATIC_NAME; + kn->name = new_name; + } + + spin_unlock_irq(&kernfs_rename_lock); + + kn->hash = kernfs_name_hash(new_name, new_ns); kernfs_link_sibling(kn); + kernfs_put(old_parent); + kfree(old_name); + error = 0; out: mutex_unlock(&kernfs_mutex); diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index ee0d761c3179..0b45ff42f374 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -19,39 +19,18 @@ DEFINE_SPINLOCK(sysfs_symlink_target_lock); -/** - * sysfs_pathname - return full path to sysfs dirent - * @kn: kernfs_node whose path we want - * @path: caller allocated buffer of size PATH_MAX - * - * Gives the name "/" to the sysfs_root entry; any path returned - * is relative to wherever sysfs is mounted. - */ -static char *sysfs_pathname(struct kernfs_node *kn, char *path) -{ - if (kn->parent) { - sysfs_pathname(kn->parent, path); - strlcat(path, "/", PATH_MAX); - } - strlcat(path, kn->name, PATH_MAX); - return path; -} - void sysfs_warn_dup(struct kernfs_node *parent, const char *name) { - char *path; + char *buf, *path = NULL; - path = kzalloc(PATH_MAX, GFP_KERNEL); - if (path) { - sysfs_pathname(parent, path); - strlcat(path, "/", PATH_MAX); - strlcat(path, name, PATH_MAX); - } + buf = kzalloc(PATH_MAX, GFP_KERNEL); + if (buf) + path = kernfs_path(parent, buf, PATH_MAX); - WARN(1, KERN_WARNING "sysfs: cannot create duplicate filename '%s'\n", - path ? 
path : name); + WARN(1, KERN_WARNING "sysfs: cannot create duplicate filename '%s/%s'\n", + path, name); - kfree(path); + kfree(buf); } /** @@ -122,9 +101,13 @@ void sysfs_remove_dir(struct kobject *kobj) int sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name, const void *new_ns) { - struct kernfs_node *parent = kobj->sd->parent; + struct kernfs_node *parent; + int ret; - return kernfs_rename_ns(kobj->sd, parent, new_name, new_ns); + parent = kernfs_get_parent(kobj->sd); + ret = kernfs_rename_ns(kobj->sd, parent, new_name, new_ns); + kernfs_put(parent); + return ret; } int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj, @@ -133,7 +116,6 @@ int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj, struct kernfs_node *kn = kobj->sd; struct kernfs_node *new_parent; - BUG_ON(!kn->parent); new_parent = new_parent_kobj && new_parent_kobj->sd ? new_parent_kobj->sd : sysfs_root_kn; diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 9c899040c05e..8736ee86a1d6 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -91,7 +91,12 @@ struct kernfs_node { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif - /* the following two fields are published */ + /* + * Use kernfs_get_parent() and kernfs_name/path() instead of + * accessing the following two fields directly. If the node is + * never moved to a different parent, it is safe to access the + * parent directly. + */ struct kernfs_node *parent; const char *name; @@ -229,6 +234,12 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn) return kn->flags & KERNFS_NS; } +int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen); +char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, + size_t buflen); +void pr_cont_kernfs_name(struct kernfs_node *kn); +void pr_cont_kernfs_path(struct kernfs_node *kn); +struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn); struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name, const void *ns); void kernfs_get(struct kernfs_node *kn); @@ -283,6 +294,19 @@ static inline void kernfs_enable_ns(struct kernfs_node *kn) { } static inline bool kernfs_ns_enabled(struct kernfs_node *kn) { return false; } +static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) +{ return -ENOSYS; } + +static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, + size_t buflen) +{ return NULL; } + +static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { } +static inline void pr_cont_kernfs_path(struct kernfs_node *kn) { } + +static inline struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn) +{ return NULL; } + static inline struct kernfs_node * kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name, const void *ns) -- cgit v1.2.3 From fa4cd451cceb77e97432b91fcf50a7e4a7361e29 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 7 Feb 2014 13:32:07 -0500 Subject: sysfs, kobject: add sysfs wrapper for kernfs_enable_ns() Currently, kobject is invoking kernfs_enable_ns() directly. This is fine now as sysfs and kernfs are enabled and disabled together. If sysfs is disabled, kernfs_enable_ns() is switched to dummy implementation too and everything is fine; however, kernfs will soon have its own config option CONFIG_KERNFS and !SYSFS && KERNFS will be possible, which can make kobject call into non-dummy kernfs_enable_ns() with NULL kernfs_node pointers leading to an oops. 
Introduce sysfs_enable_ns() which is a wrapper around kernfs_enable_ns() so that it can be made a noop depending only on CONFIG_SYSFS regardless of the planned CONFIG_KERNFS. Signed-off-by: Tejun Heo Reported-by: Fengguang Wu Signed-off-by: Greg Kroah-Hartman --- include/linux/sysfs.h | 9 +++++++++ lib/kobject.c | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 14df05415af9..fdaa0c6fc7a2 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -244,6 +244,11 @@ void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); int __must_check sysfs_init(void); +static inline void sysfs_enable_ns(struct kernfs_node *kn) +{ + return kernfs_enable_ns(kn); +} + #else /* CONFIG_SYSFS */ static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) @@ -416,6 +421,10 @@ static inline int __must_check sysfs_init(void) return 0; } +static inline void sysfs_enable_ns(struct kernfs_node *kn) +{ +} + #endif /* CONFIG_SYSFS */ static inline int __must_check sysfs_create_file(struct kobject *kobj, diff --git a/lib/kobject.c b/lib/kobject.c index cb14aeac4cca..58751bb80a7c 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -94,7 +94,7 @@ static int create_dir(struct kobject *kobj) BUG_ON(ops->type >= KOBJ_NS_TYPES); BUG_ON(!kobj_ns_type_registered(ops->type)); - kernfs_enable_ns(kobj->sd); + sysfs_enable_ns(kobj->sd); } return 0; -- cgit v1.2.3 From ba341d55a420ab4fdd1a53fd395fd59bd65de880 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Feb 2014 14:09:17 -0500 Subject: kernfs: add CONFIG_KERNFS As sysfs was kernfs's only user, kernfs has been piggybacking on CONFIG_SYSFS; however, kernfs is scheduled to grow a new user very soon. Introduce a separate config option CONFIG_KERNFS which is to be selected by kernfs users. 
Signed-off-by: Tejun Heo Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- fs/Kconfig | 1 + fs/Makefile | 3 ++- fs/kernfs/Kconfig | 7 +++++++ fs/sysfs/Kconfig | 1 + include/linux/kernfs.h | 6 +++--- 5 files changed, 14 insertions(+), 4 deletions(-) create mode 100644 fs/kernfs/Kconfig (limited to 'include/linux') diff --git a/fs/Kconfig b/fs/Kconfig index 7385e54be4b9..312393f32948 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -96,6 +96,7 @@ endif # BLOCK menu "Pseudo filesystems" source "fs/proc/Kconfig" +source "fs/kernfs/Kconfig" source "fs/sysfs/Kconfig" config TMPFS diff --git a/fs/Makefile b/fs/Makefile index 47ac07bb4acc..f9cb9876e466 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -52,7 +52,8 @@ obj-$(CONFIG_FHANDLE) += fhandle.o obj-y += quota/ obj-$(CONFIG_PROC_FS) += proc/ -obj-$(CONFIG_SYSFS) += sysfs/ kernfs/ +obj-$(CONFIG_KERNFS) += kernfs/ +obj-$(CONFIG_SYSFS) += sysfs/ obj-$(CONFIG_CONFIGFS_FS) += configfs/ obj-y += devpts/ diff --git a/fs/kernfs/Kconfig b/fs/kernfs/Kconfig new file mode 100644 index 000000000000..397b5f7a7a16 --- /dev/null +++ b/fs/kernfs/Kconfig @@ -0,0 +1,7 @@ +# +# KERNFS should be selected by its users +# + +config KERNFS + bool + default n diff --git a/fs/sysfs/Kconfig b/fs/sysfs/Kconfig index 8c41feacbac5..b2756014508c 100644 --- a/fs/sysfs/Kconfig +++ b/fs/sysfs/Kconfig @@ -1,6 +1,7 @@ config SYSFS bool "sysfs file system support" if EXPERT default y + select KERNFS help The sysfs filesystem is a virtual filesystem that the kernel uses to export internal kernel objects, their attributes, and their diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 8736ee86a1d6..649497a56a95 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -201,7 +201,7 @@ struct kernfs_ops { #endif }; -#ifdef CONFIG_SYSFS +#ifdef CONFIG_KERNFS static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn) { @@ -284,7 +284,7 @@ void kernfs_kill_sb(struct super_block *sb); void kernfs_init(void); -#else /* CONFIG_SYSFS */ +#else /* CONFIG_KERNFS */ static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn) { return 0; } /* whatever */ @@ -379,7 +379,7 @@ static inline void kernfs_kill_sb(struct super_block *sb) { } static inline void kernfs_init(void) { } -#endif /* CONFIG_SYSFS */ +#endif /* CONFIG_KERNFS */ static inline struct kernfs_node * kernfs_find_and_get(struct kernfs_node *kn, const char *name) -- cgit v1.2.3 From f315e3fa1cf5b3317fc948708645fff889ce1e63 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Mon, 2 Dec 2013 17:49:41 +0900 Subject: slab: restrict the number of objects in a slab To prepare to implement byte sized index for managing the freelist of a slab, we should restrict the number of objects in a slab to be less or equal to 256, since byte only represent 256 different values. Setting the size of object to value equal or more than newly introduced SLAB_OBJ_MIN_SIZE ensures that the number of objects in a slab is less or equal to 256 for a slab with 1 page. If page size is rather larger than 4096, above assumption would be wrong. In this case, we would fall back on 2 bytes sized index. If minimum size of kmalloc is less than 16, we use it as minimum object size and give up this optimization. 
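The bound can be written out as compile-time arithmetic; the following is a simplified sketch of the same reasoning (assuming the common 4096-byte page, not the patch's exact macros):

#include <limits.h>				/* CHAR_BIT */

#define PAGE_SIZE		4096UL			/* assumed: the usual 2^12 case */
#define SLAB_OBJ_MIN_SIZE	(PAGE_SIZE / 256)	/* 2^12 / 2^8 = 16 bytes */

/*
 * Objects of at least 16 bytes mean at most 4096 / 16 = 256 per slab,
 * so indices 0..255 fit in one byte.  With a much larger page size the
 * byte index cannot cover all objects and the patch falls back to an
 * unsigned short index; if the minimum kmalloc size is below 16, that
 * smaller size is used as the minimum and the optimisation is given up.
 */
typedef unsigned char freelist_idx_t;
#define SLAB_OBJ_MAX_NUM	(1 << (sizeof(freelist_idx_t) * CHAR_BIT))	/* 256 */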
Signed-off-by: Joonsoo Kim Signed-off-by: Pekka Enberg --- include/linux/slab.h | 11 +++++++++++ mm/slab.c | 21 +++++++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 9260abdd67df..d015dec02bf3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -201,6 +201,17 @@ struct kmem_cache { #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 5 #endif + +/* + * This restriction comes from byte sized index implementation. + * Page size is normally 2^12 bytes and, in this case, if we want to use + * byte sized index which can represent 2^8 entries, the size of the object + * should be equal or greater to 2^12 / 2^8 = 2^4 = 16. + * If minimum size of kmalloc is less than 16, we use it as minimum object + * size and give up to use byte sized index. + */ +#define SLAB_OBJ_MIN_SIZE (KMALLOC_SHIFT_LOW < 4 ? \ + (1 << KMALLOC_SHIFT_LOW) : 16) #endif #ifdef CONFIG_SLUB diff --git a/mm/slab.c b/mm/slab.c index 878354b26b72..9d4c7b50dfdc 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -157,6 +157,17 @@ #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN #endif +#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \ + <= SLAB_OBJ_MIN_SIZE) ? 1 : 0) + +#if FREELIST_BYTE_INDEX +typedef unsigned char freelist_idx_t; +#else +typedef unsigned short freelist_idx_t; +#endif + +#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) + /* * true if a page was allocated from pfmemalloc reserves for network-based * swap @@ -2016,6 +2027,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, if (!num) continue; + /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */ + if (num > SLAB_OBJ_MAX_NUM) + break; + if (flags & CFLGS_OFF_SLAB) { /* * Max number of objs-per-slab for caches which @@ -2258,6 +2273,12 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) flags |= CFLGS_OFF_SLAB; size = ALIGN(size, cachep->align); + /* + * We should restrict the number of objects in a slab to implement + * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition. + */ + if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE) + size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); left_over = calculate_slab_order(cachep, size, cachep->align, flags); -- cgit v1.2.3 From 3ed80a62bf959d34ebd4d553b026fbe7e6fbcc54 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 8 Feb 2014 10:36:58 -0500 Subject: cgroup: drop module support With module supported dropped from net_prio, no controller is using cgroup module support. None of actual resource controllers can be built as a module and we aren't gonna add new controllers which don't control resources. This patch drops module support from cgroup. * cgroup_[un]load_subsys() and cgroup_subsys->module removed. * As there's no point in distinguishing IS_BUILTIN() and IS_MODULE(), cgroup_subsys.h now uses IS_ENABLED() directly. * enum cgroup_subsys_id now exactly matches the list of enabled controllers as ordered in cgroup_subsys.h. * cgroup_subsys[] is now a contiguously occupied array. Size specification is no longer necessary and dropped. * for_each_builtin_subsys() is removed and for_each_subsys() is updated to not require any locking. * module ref handling is removed from rebind_subsystems(). * Module related comments dropped. v2: Rebased on top of fe1217c4f3f7 ("net: net_cls: move cgroupfs classid handling into core"). v3: Added {} around the if (need_forkexit_callback) block in cgroup_post_fork() for readability as suggested by Li. 
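With only built-in controllers left, the subsystem array is fully populated at build time and iterating it needs no locking; condensed from the hunks below, the resulting pattern is roughly:

/* kernel/cgroup.c: cgroup_subsys.h expands SUBSYS(x) once per enabled controller */
#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* every slot is non-NULL, so a plain counted loop is safe without cgroup_mutex */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)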
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- block/blk-cgroup.c | 1 - include/linux/cgroup.h | 29 +---- include/linux/cgroup_subsys.h | 24 ++-- kernel/cgroup.c | 284 +++--------------------------------------- net/core/netclassid_cgroup.c | 1 - net/core/netprio_cgroup.c | 1 - 6 files changed, 32 insertions(+), 308 deletions(-) (limited to 'include/linux') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 4e491d9b5292..660d419918a7 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -914,7 +914,6 @@ struct cgroup_subsys blkio_subsys = { .can_attach = blkcg_can_attach, .subsys_id = blkio_subsys_id, .base_cftypes = blkcg_files, - .module = THIS_MODULE, }; EXPORT_SYMBOL_GPL(blkio_subsys); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 5c097596104b..d842a737d448 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -37,28 +37,13 @@ extern void cgroup_post_fork(struct task_struct *p); extern void cgroup_exit(struct task_struct *p, int run_callbacks); extern int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); -extern int cgroup_load_subsys(struct cgroup_subsys *ss); -extern void cgroup_unload_subsys(struct cgroup_subsys *ss); extern int proc_cgroup_show(struct seq_file *, void *); -/* - * Define the enumeration of all cgroup subsystems. - * - * We define ids for builtin subsystems and then modular ones. - */ +/* define the enumeration of all cgroup subsystems */ #define SUBSYS(_x) _x ## _subsys_id, enum cgroup_subsys_id { -#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option) #include -#undef IS_SUBSYS_ENABLED - CGROUP_BUILTIN_SUBSYS_COUNT, - - __CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1, - -#define IS_SUBSYS_ENABLED(option) IS_MODULE(option) -#include -#undef IS_SUBSYS_ENABLED CGROUP_SUBSYS_COUNT, }; #undef SUBSYS @@ -370,10 +355,9 @@ struct css_set { struct list_head cgrp_links; /* - * Set of subsystem states, one for each subsystem. This array - * is immutable after creation apart from the init_css_set - * during subsystem registration (at boot time) and modular subsystem - * loading/unloading. + * Set of subsystem states, one for each subsystem. This array is + * immutable after creation apart from the init_css_set during + * subsystem registration (at boot time). */ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; @@ -620,15 +604,10 @@ struct cgroup_subsys { /* base cftypes, automatically [de]registered with subsys itself */ struct cftype *base_cftypes; struct cftype_set base_cftset; - - /* should be defined only by modular subsystems */ - struct module *module; }; #define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys; -#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option) #include -#undef IS_SUBSYS_ENABLED #undef SUBSYS /** diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 7b99d717411d..11c42f6a25a8 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -3,51 +3,51 @@ * * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. 
*/ -#if IS_SUBSYS_ENABLED(CONFIG_CPUSETS) +#if IS_ENABLED(CONFIG_CPUSETS) SUBSYS(cpuset) #endif -#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEBUG) +#if IS_ENABLED(CONFIG_CGROUP_DEBUG) SUBSYS(debug) #endif -#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_SCHED) +#if IS_ENABLED(CONFIG_CGROUP_SCHED) SUBSYS(cpu_cgroup) #endif -#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_CPUACCT) +#if IS_ENABLED(CONFIG_CGROUP_CPUACCT) SUBSYS(cpuacct) #endif -#if IS_SUBSYS_ENABLED(CONFIG_MEMCG) +#if IS_ENABLED(CONFIG_MEMCG) SUBSYS(mem_cgroup) #endif -#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEVICE) +#if IS_ENABLED(CONFIG_CGROUP_DEVICE) SUBSYS(devices) #endif -#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_FREEZER) +#if IS_ENABLED(CONFIG_CGROUP_FREEZER) SUBSYS(freezer) #endif -#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_NET_CLASSID) +#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID) SUBSYS(net_cls) #endif -#if IS_SUBSYS_ENABLED(CONFIG_BLK_CGROUP) +#if IS_ENABLED(CONFIG_BLK_CGROUP) SUBSYS(blkio) #endif -#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF) +#if IS_ENABLED(CONFIG_CGROUP_PERF) SUBSYS(perf) #endif -#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_NET_PRIO) +#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) SUBSYS(net_prio) #endif -#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_HUGETLB) +#if IS_ENABLED(CONFIG_CGROUP_HUGETLB) SUBSYS(hugetlb) #endif /* diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e2f46ba37f72..ccb16b47e293 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include @@ -120,15 +119,9 @@ static struct workqueue_struct *cgroup_destroy_wq; */ static struct workqueue_struct *cgroup_pidlist_destroy_wq; -/* - * Generate an array of cgroup subsystem pointers. At boot time, this is - * populated with the built in subsystems, and modular subsystems are - * registered after that. The mutable section of this array is protected by - * cgroup_mutex. - */ +/* generate an array of cgroup subsystem pointers */ #define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys, -#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option) -static struct cgroup_subsys *cgroup_subsys[CGROUP_SUBSYS_COUNT] = { +static struct cgroup_subsys *cgroup_subsys[] = { #include }; @@ -258,30 +251,13 @@ static int notify_on_release(const struct cgroup *cgrp) else /** - * for_each_subsys - iterate all loaded cgroup subsystems + * for_each_subsys - iterate all enabled cgroup subsystems * @ss: the iteration cursor * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end - * - * Iterates through all loaded subsystems. Should be called under - * cgroup_mutex or cgroup_root_mutex. */ #define for_each_subsys(ss, ssid) \ - for (({ cgroup_assert_mutex_or_root_locked(); (ssid) = 0; }); \ - (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ - if (!((ss) = cgroup_subsys[(ssid)])) { } \ - else - -/** - * for_each_builtin_subsys - iterate all built-in cgroup subsystems - * @ss: the iteration cursor - * @i: the index of @ss, CGROUP_BUILTIN_SUBSYS_COUNT after reaching the end - * - * Bulit-in subsystems are always present and iteration itself doesn't - * require any synchronization. - */ -#define for_each_builtin_subsys(ss, i) \ - for ((i) = 0; (i) < CGROUP_BUILTIN_SUBSYS_COUNT && \ - (((ss) = cgroup_subsys[i]) || true); (i)++) + for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \ + (((ss) = cgroup_subsys[ssid]) || true); (ssid)++) /* iterate across the active hierarchies */ #define for_each_active_root(root) \ @@ -975,50 +951,24 @@ static void cgroup_d_remove_dir(struct dentry *dentry) remove_dir(dentry); } -/* - * Call with cgroup_mutex held. 
Drops reference counts on modules, including - * any duplicate ones that parse_cgroupfs_options took. If this function - * returns an error, no reference counts are touched. - */ static int rebind_subsystems(struct cgroupfs_root *root, unsigned long added_mask, unsigned removed_mask) { struct cgroup *cgrp = &root->top_cgroup; struct cgroup_subsys *ss; - unsigned long pinned = 0; int i, ret; BUG_ON(!mutex_is_locked(&cgroup_mutex)); BUG_ON(!mutex_is_locked(&cgroup_root_mutex)); /* Check that any added subsystems are currently free */ - for_each_subsys(ss, i) { - if (!(added_mask & (1 << i))) - continue; - - /* is the subsystem mounted elsewhere? */ - if (ss->root != &cgroup_dummy_root) { - ret = -EBUSY; - goto out_put; - } - - /* pin the module */ - if (!try_module_get(ss->module)) { - ret = -ENOENT; - goto out_put; - } - pinned |= 1 << i; - } - - /* subsys could be missing if unloaded between parsing and here */ - if (added_mask != pinned) { - ret = -ENOENT; - goto out_put; - } + for_each_subsys(ss, i) + if ((added_mask & (1 << i)) && ss->root != &cgroup_dummy_root) + return -EBUSY; ret = cgroup_populate_dir(cgrp, added_mask); if (ret) - goto out_put; + return ret; /* * Nothing can fail from this point on. Remove files for the @@ -1057,9 +1007,6 @@ static int rebind_subsystems(struct cgroupfs_root *root, RCU_INIT_POINTER(cgrp->subsys[i], NULL); cgroup_subsys[i]->root = &cgroup_dummy_root; - - /* subsystem is now free - drop reference on module */ - module_put(ss->module); root->subsys_mask &= ~bit; } } @@ -1071,12 +1018,6 @@ static int rebind_subsystems(struct cgroupfs_root *root, root->flags |= CGRP_ROOT_SUBSYS_BOUND; return 0; - -out_put: - for_each_subsys(ss, i) - if (pinned & (1 << i)) - module_put(ss->module); - return ret; } static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry) @@ -4506,7 +4447,7 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) return ret; } -static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss) +static void __init cgroup_init_cftsets(struct cgroup_subsys *ss) { INIT_LIST_HEAD(&ss->cftsets); @@ -4559,185 +4500,8 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) BUG_ON(online_css(css)); mutex_unlock(&cgroup_mutex); - - /* this function shouldn't be used with modular subsystems, since they - * need to register a subsys_id, among other things */ - BUG_ON(ss->module); } -/** - * cgroup_load_subsys: load and register a modular subsystem at runtime - * @ss: the subsystem to load - * - * This function should be called in a modular subsystem's initcall. If the - * subsystem is built as a module, it will be assigned a new subsys_id and set - * up for use. If the subsystem is built-in anyway, work is delegated to the - * simpler cgroup_init_subsys. - */ -int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss) -{ - struct cgroup_subsys_state *css; - int i, ret; - struct hlist_node *tmp; - struct css_set *cset; - unsigned long key; - - /* check name and function validity */ - if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN || - ss->css_alloc == NULL || ss->css_free == NULL) - return -EINVAL; - - /* - * we don't support callbacks in modular subsystems. this check is - * before the ss->module check for consistency; a subsystem that could - * be a module should still have no callbacks even if the user isn't - * compiling it as one. 
- */ - if (ss->fork || ss->exit) - return -EINVAL; - - /* - * an optionally modular subsystem is built-in: we want to do nothing, - * since cgroup_init_subsys will have already taken care of it. - */ - if (ss->module == NULL) { - /* a sanity check */ - BUG_ON(cgroup_subsys[ss->subsys_id] != ss); - return 0; - } - - /* init base cftset */ - cgroup_init_cftsets(ss); - - mutex_lock(&cgroup_mutex); - mutex_lock(&cgroup_root_mutex); - cgroup_subsys[ss->subsys_id] = ss; - - /* - * no ss->css_alloc seems to need anything important in the ss - * struct, so this can happen first (i.e. before the dummy root - * attachment). - */ - css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss)); - if (IS_ERR(css)) { - /* failure case - need to deassign the cgroup_subsys[] slot. */ - cgroup_subsys[ss->subsys_id] = NULL; - mutex_unlock(&cgroup_root_mutex); - mutex_unlock(&cgroup_mutex); - return PTR_ERR(css); - } - - ss->root = &cgroup_dummy_root; - - /* our new subsystem will be attached to the dummy hierarchy. */ - init_css(css, ss, cgroup_dummy_top); - - /* - * Now we need to entangle the css into the existing css_sets. unlike - * in cgroup_init_subsys, there are now multiple css_sets, so each one - * will need a new pointer to it; done by iterating the css_set_table. - * furthermore, modifying the existing css_sets will corrupt the hash - * table state, so each changed css_set will need its hash recomputed. - * this is all done under the css_set_lock. - */ - write_lock(&css_set_lock); - hash_for_each_safe(css_set_table, i, tmp, cset, hlist) { - /* skip entries that we already rehashed */ - if (cset->subsys[ss->subsys_id]) - continue; - /* remove existing entry */ - hash_del(&cset->hlist); - /* set new value */ - cset->subsys[ss->subsys_id] = css; - /* recompute hash and restore entry */ - key = css_set_hash(cset->subsys); - hash_add(css_set_table, &cset->hlist, key); - } - write_unlock(&css_set_lock); - - ret = online_css(css); - if (ret) { - ss->css_free(css); - goto err_unload; - } - - /* success! */ - mutex_unlock(&cgroup_root_mutex); - mutex_unlock(&cgroup_mutex); - return 0; - -err_unload: - mutex_unlock(&cgroup_root_mutex); - mutex_unlock(&cgroup_mutex); - /* @ss can't be mounted here as try_module_get() would fail */ - cgroup_unload_subsys(ss); - return ret; -} -EXPORT_SYMBOL_GPL(cgroup_load_subsys); - -/** - * cgroup_unload_subsys: unload a modular subsystem - * @ss: the subsystem to unload - * - * This function should be called in a modular subsystem's exitcall. When this - * function is invoked, the refcount on the subsystem's module will be 0, so - * the subsystem will not be attached to any hierarchy. - */ -void cgroup_unload_subsys(struct cgroup_subsys *ss) -{ - struct cgrp_cset_link *link; - struct cgroup_subsys_state *css; - - BUG_ON(ss->module == NULL); - - /* - * we shouldn't be called if the subsystem is in use, and the use of - * try_module_get() in rebind_subsystems() should ensure that it - * doesn't start being used while we're killing it off. - */ - BUG_ON(ss->root != &cgroup_dummy_root); - - mutex_lock(&cgroup_mutex); - mutex_lock(&cgroup_root_mutex); - - css = cgroup_css(cgroup_dummy_top, ss); - if (css) - offline_css(css); - - /* deassign the subsys_id */ - cgroup_subsys[ss->subsys_id] = NULL; - - /* - * disentangle the css from all css_sets attached to the dummy - * top. as in loading, we need to pay our respects to the hashtable - * gods. 
- */ - write_lock(&css_set_lock); - list_for_each_entry(link, &cgroup_dummy_top->cset_links, cset_link) { - struct css_set *cset = link->cset; - unsigned long key; - - hash_del(&cset->hlist); - cset->subsys[ss->subsys_id] = NULL; - key = css_set_hash(cset->subsys); - hash_add(css_set_table, &cset->hlist, key); - } - write_unlock(&css_set_lock); - - /* - * remove subsystem's css from the cgroup_dummy_top and free it - - * need to free before marking as null because ss->css_free needs - * the cgrp->subsys pointer to find their state. - */ - if (css) - ss->css_free(css); - RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL); - - mutex_unlock(&cgroup_root_mutex); - mutex_unlock(&cgroup_mutex); -} -EXPORT_SYMBOL_GPL(cgroup_unload_subsys); - /** * cgroup_init_early - cgroup initialization at system boot * @@ -4763,8 +4527,7 @@ int __init cgroup_init_early(void) list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links); list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); - /* at bootup time, we don't worry about modular subsystems */ - for_each_builtin_subsys(ss, i) { + for_each_subsys(ss, i) { BUG_ON(!ss->name); BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN); BUG_ON(!ss->css_alloc); @@ -4797,7 +4560,7 @@ int __init cgroup_init(void) if (err) return err; - for_each_builtin_subsys(ss, i) { + for_each_subsys(ss, i) { if (!ss->early_init) cgroup_init_subsys(ss); } @@ -5032,15 +4795,7 @@ void cgroup_post_fork(struct task_struct *child) * and addition to css_set. */ if (need_forkexit_callback) { - /* - * fork/exit callbacks are supported only for builtin - * subsystems, and the builtin section of the subsys - * array is immutable, so we don't need to lock the - * subsys array here. On the other hand, modular section - * of the array can be freed at module unload, so we - * can't touch that. - */ - for_each_builtin_subsys(ss, i) + for_each_subsys(ss, i) if (ss->fork) ss->fork(child); } @@ -5105,11 +4860,8 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) RCU_INIT_POINTER(tsk->cgroups, &init_css_set); if (run_callbacks && need_forkexit_callback) { - /* - * fork/exit callbacks are supported only for builtin - * subsystems, see cgroup_post_fork() for details. - */ - for_each_builtin_subsys(ss, i) { + /* see cgroup_post_fork() for details */ + for_each_subsys(ss, i) { if (ss->exit) { struct cgroup_subsys_state *old_css = cset->subsys[i]; struct cgroup_subsys_state *css = task_css(tsk, i); @@ -5228,11 +4980,7 @@ static int __init cgroup_disable(char *str) if (!*token) continue; - /* - * cgroup_disable, being at boot time, can't know about - * module subsystems, so we don't worry about them. 
- */ - for_each_builtin_subsys(ss, i) { + for_each_subsys(ss, i) { if (!strcmp(token, ss->name)) { ss->disabled = 1; printk(KERN_INFO "Disabling %s control group" diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 9fc7f90d034c..9e5ad5d74e60 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -110,5 +110,4 @@ struct cgroup_subsys net_cls_subsys = { .attach = cgrp_attach, .subsys_id = net_cls_subsys_id, .base_cftypes = ss_files, - .module = THIS_MODULE, }; diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index cc3a31e7dc08..857e1603f9b7 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -252,7 +252,6 @@ struct cgroup_subsys net_prio_subsys = { .attach = net_prio_attach, .subsys_id = net_prio_subsys_id, .base_cftypes = ss_files, - .module = THIS_MODULE, }; static int netprio_device_event(struct notifier_block *unused, -- cgit v1.2.3 From 073219e995b4a3f8cf1ce8228b7ef440b6994ac0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 8 Feb 2014 10:36:58 -0500 Subject: cgroup: clean up cgroup_subsys names and initialization cgroup_subsys is a bit messier than it needs to be. * The name of a subsys can be different from its internal identifier defined in cgroup_subsys.h. Most subsystems use the matching name but three - cpu, memory and perf_event - use different ones. * cgroup_subsys_id enums are postfixed with _subsys_id and each cgroup_subsys is postfixed with _subsys. cgroup.h is widely included throughout various subsystems, it doesn't and shouldn't have claim on such generic names which don't have any qualifier indicating that they belong to cgroup. * cgroup_subsys->subsys_id should always equal the matching cgroup_subsys_id enum; however, we require each controller to initialize it and then BUG if they don't match, which is a bit silly. This patch cleans up cgroup_subsys names and initialization by doing the followings. * cgroup_subsys_id enums are now postfixed with _cgrp_id, and each cgroup_subsys with _cgrp_subsys. * With the above, renaming subsys identifiers to match the userland visible names doesn't cause any naming conflicts. All non-matching identifiers are renamed to match the official names. cpu_cgroup -> cpu mem_cgroup -> memory perf -> perf_event * controllers no longer need to initialize ->subsys_id and ->name. They're generated in cgroup core and set automatically during boot. * Redundant cgroup_subsys declarations removed. * While updating BUG_ON()s in cgroup_init_early(), convert them to WARN()s. BUGging that early during boot is stupid - the kernel can't print anything, even through serial console and the trap handler doesn't even link stack frame properly for back-tracing. This patch doesn't introduce any behavior changes. v2: Rebased on top of fe1217c4f3f7 ("net: net_cls: move cgroupfs classid handling into core"). Signed-off-by: Tejun Heo Acked-by: Neil Horman Acked-by: "David S. Miller" Acked-by: "Rafael J. Wysocki" Acked-by: Michal Hocko Acked-by: Peter Zijlstra Acked-by: Aristeu Rozanski Acked-by: Ingo Molnar Acked-by: Li Zefan Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Serge E. 
Hallyn Cc: Vivek Goyal Cc: Thomas Graf --- block/blk-cgroup.c | 8 +++----- block/blk-cgroup.h | 2 +- fs/bio.c | 2 +- include/linux/cgroup.h | 7 ++++--- include/linux/cgroup_subsys.h | 6 +++--- include/linux/hugetlb_cgroup.h | 2 +- include/linux/memcontrol.h | 2 +- include/net/cls_cgroup.h | 2 +- include/net/netprio_cgroup.h | 2 +- kernel/cgroup.c | 34 ++++++++++++++++++++-------------- kernel/cgroup_freezer.c | 8 ++------ kernel/cpuset.c | 10 ++++------ kernel/events/core.c | 8 +++----- kernel/sched/core.c | 6 ++---- kernel/sched/cpuacct.c | 6 ++---- mm/hugetlb_cgroup.c | 9 +++------ mm/memcontrol.c | 22 ++++++++++------------ net/core/netclassid_cgroup.c | 6 ++---- net/core/netprio_cgroup.c | 4 +--- net/ipv4/tcp_memcontrol.c | 2 +- security/device_cgroup.c | 8 ++------ 21 files changed, 68 insertions(+), 88 deletions(-) (limited to 'include/linux') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 660d419918a7..1cef07cf9c21 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -906,16 +906,14 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css, return ret; } -struct cgroup_subsys blkio_subsys = { - .name = "blkio", +struct cgroup_subsys blkio_cgrp_subsys = { .css_alloc = blkcg_css_alloc, .css_offline = blkcg_css_offline, .css_free = blkcg_css_free, .can_attach = blkcg_can_attach, - .subsys_id = blkio_subsys_id, .base_cftypes = blkcg_files, }; -EXPORT_SYMBOL_GPL(blkio_subsys); +EXPORT_SYMBOL_GPL(blkio_cgrp_subsys); /** * blkcg_activate_policy - activate a blkcg policy on a request_queue @@ -1105,7 +1103,7 @@ int blkcg_policy_register(struct blkcg_policy *pol) /* everything is in place, add intf files for the new policy */ if (pol->cftypes) - WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes)); + WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes)); ret = 0; out_unlock: mutex_unlock(&blkcg_pol_mutex); diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 86154eab9523..453b528c8e19 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -186,7 +186,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) static inline struct blkcg *task_blkcg(struct task_struct *tsk) { - return css_to_blkcg(task_css(tsk, blkio_subsys_id)); + return css_to_blkcg(task_css(tsk, blkio_cgrp_id)); } static inline struct blkcg *bio_blkcg(struct bio *bio) diff --git a/fs/bio.c b/fs/bio.c index 75c49a382239..4872102b839e 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -1965,7 +1965,7 @@ int bio_associate_current(struct bio *bio) /* associate blkcg if exists */ rcu_read_lock(); - css = task_css(current, blkio_subsys_id); + css = task_css(current, blkio_cgrp_id); if (css && css_tryget(css)) bio->bi_css = css; rcu_read_unlock(); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index d842a737d448..cd6611e622fd 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -41,7 +41,7 @@ extern int cgroupstats_build(struct cgroupstats *stats, extern int proc_cgroup_show(struct seq_file *, void *); /* define the enumeration of all cgroup subsystems */ -#define SUBSYS(_x) _x ## _subsys_id, +#define SUBSYS(_x) _x ## _cgrp_id, enum cgroup_subsys_id { #include CGROUP_SUBSYS_COUNT, @@ -573,7 +573,6 @@ struct cgroup_subsys { struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); - int subsys_id; int disabled; int early_init; @@ -592,6 +591,8 @@ struct cgroup_subsys { bool broken_hierarchy; bool warned_broken_hierarchy; + /* the following two fields are initialized automtically during boot */ + int subsys_id; #define 
MAX_CGROUP_TYPE_NAMELEN 32 const char *name; @@ -606,7 +607,7 @@ struct cgroup_subsys { struct cftype_set base_cftset; }; -#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys; +#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; #include #undef SUBSYS diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 11c42f6a25a8..768fe44e19f0 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -12,7 +12,7 @@ SUBSYS(debug) #endif #if IS_ENABLED(CONFIG_CGROUP_SCHED) -SUBSYS(cpu_cgroup) +SUBSYS(cpu) #endif #if IS_ENABLED(CONFIG_CGROUP_CPUACCT) @@ -20,7 +20,7 @@ SUBSYS(cpuacct) #endif #if IS_ENABLED(CONFIG_MEMCG) -SUBSYS(mem_cgroup) +SUBSYS(memory) #endif #if IS_ENABLED(CONFIG_CGROUP_DEVICE) @@ -40,7 +40,7 @@ SUBSYS(blkio) #endif #if IS_ENABLED(CONFIG_CGROUP_PERF) -SUBSYS(perf) +SUBSYS(perf_event) #endif #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 787bba3bf552..0129f89cf98d 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -49,7 +49,7 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) static inline bool hugetlb_cgroup_disabled(void) { - if (hugetlb_subsys.disabled) + if (hugetlb_cgrp_subsys.disabled) return true; return false; } diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index abd0113b6620..eccfb4a4b379 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -162,7 +162,7 @@ extern int do_swap_account; static inline bool mem_cgroup_disabled(void) { - if (mem_cgroup_subsys.disabled) + if (memory_cgrp_subsys.disabled) return true; return false; } diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index 9cf2d5ef38d9..c15d39456e14 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h @@ -34,7 +34,7 @@ static inline u32 task_cls_classid(struct task_struct *p) return 0; rcu_read_lock(); - classid = container_of(task_css(p, net_cls_subsys_id), + classid = container_of(task_css(p, net_cls_cgrp_id), struct cgroup_cls_state, css)->classid; rcu_read_unlock(); diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h index b7ff5bd3c3c3..f2a9597ff53c 100644 --- a/include/net/netprio_cgroup.h +++ b/include/net/netprio_cgroup.h @@ -33,7 +33,7 @@ static inline u32 task_netprioidx(struct task_struct *p) u32 idx; rcu_read_lock(); - css = task_css(p, net_prio_subsys_id); + css = task_css(p, net_prio_cgrp_id); idx = css->cgroup->id; rcu_read_unlock(); return idx; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index ccb16b47e293..fe3f7253aa90 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -120,10 +120,18 @@ static struct workqueue_struct *cgroup_destroy_wq; static struct workqueue_struct *cgroup_pidlist_destroy_wq; /* generate an array of cgroup subsystem pointers */ -#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys, +#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys, static struct cgroup_subsys *cgroup_subsys[] = { #include }; +#undef SUBSYS + +/* array of cgroup subsystem names */ +#define SUBSYS(_x) [_x ## _cgrp_id] = #_x, +static const char *cgroup_subsys_name[] = { +#include +}; +#undef SUBSYS /* * The dummy hierarchy, reserved for the subsystems that are otherwise @@ -1076,7 +1084,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) BUG_ON(!mutex_is_locked(&cgroup_mutex)); #ifdef CONFIG_CPUSETS - mask = ~(1UL << cpuset_subsys_id); + mask = ~(1UL << cpuset_cgrp_id); #endif 
memset(opts, 0, sizeof(*opts)); @@ -4528,15 +4536,15 @@ int __init cgroup_init_early(void) list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); for_each_subsys(ss, i) { - BUG_ON(!ss->name); - BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN); - BUG_ON(!ss->css_alloc); - BUG_ON(!ss->css_free); - if (ss->subsys_id != i) { - printk(KERN_ERR "cgroup: Subsys %s id == %d\n", - ss->name, ss->subsys_id); - BUG(); - } + WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->subsys_id, + "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n", + i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, + ss->subsys_id, ss->name); + WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN, + "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]); + + ss->subsys_id = i; + ss->name = cgroup_subsys_name[i]; if (ss->early_init) cgroup_init_subsys(ss); @@ -5167,11 +5175,9 @@ static struct cftype debug_files[] = { { } /* terminate */ }; -struct cgroup_subsys debug_subsys = { - .name = "debug", +struct cgroup_subsys debug_cgrp_subsys = { .css_alloc = debug_css_alloc, .css_free = debug_css_free, - .subsys_id = debug_subsys_id, .base_cftypes = debug_files, }; #endif /* CONFIG_CGROUP_DEBUG */ diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 6c3154e477f6..98ea26a99076 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -52,7 +52,7 @@ static inline struct freezer *css_freezer(struct cgroup_subsys_state *css) static inline struct freezer *task_freezer(struct task_struct *task) { - return css_freezer(task_css(task, freezer_subsys_id)); + return css_freezer(task_css(task, freezer_cgrp_id)); } static struct freezer *parent_freezer(struct freezer *freezer) @@ -84,8 +84,6 @@ static const char *freezer_state_strs(unsigned int state) return "THAWED"; }; -struct cgroup_subsys freezer_subsys; - static struct cgroup_subsys_state * freezer_css_alloc(struct cgroup_subsys_state *parent_css) { @@ -473,13 +471,11 @@ static struct cftype files[] = { { } /* terminate */ }; -struct cgroup_subsys freezer_subsys = { - .name = "freezer", +struct cgroup_subsys freezer_cgrp_subsys = { .css_alloc = freezer_css_alloc, .css_online = freezer_css_online, .css_offline = freezer_css_offline, .css_free = freezer_css_free, - .subsys_id = freezer_subsys_id, .attach = freezer_attach, .fork = freezer_fork, .base_cftypes = files, diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4410ac6a55f1..2d018c795fea 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -119,7 +119,7 @@ static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) /* Retrieve the cpuset for a task */ static inline struct cpuset *task_cs(struct task_struct *task) { - return css_cs(task_css(task, cpuset_subsys_id)); + return css_cs(task_css(task, cpuset_cgrp_id)); } static inline struct cpuset *parent_cs(struct cpuset *cs) @@ -1521,7 +1521,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css, struct task_struct *task; struct task_struct *leader = cgroup_taskset_first(tset); struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset, - cpuset_subsys_id); + cpuset_cgrp_id); struct cpuset *cs = css_cs(css); struct cpuset *oldcs = css_cs(oldcss); struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); @@ -2024,8 +2024,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) kfree(cs); } -struct cgroup_subsys cpuset_subsys = { - .name = "cpuset", +struct cgroup_subsys cpuset_cgrp_subsys = { .css_alloc = cpuset_css_alloc, .css_online = cpuset_css_online, .css_offline = 
cpuset_css_offline, @@ -2033,7 +2032,6 @@ struct cgroup_subsys cpuset_subsys = { .can_attach = cpuset_can_attach, .cancel_attach = cpuset_cancel_attach, .attach = cpuset_attach, - .subsys_id = cpuset_subsys_id, .base_cftypes = files, .early_init = 1, }; @@ -2699,7 +2697,7 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v) goto out_free; rcu_read_lock(); - css = task_css(tsk, cpuset_subsys_id); + css = task_css(tsk, cpuset_cgrp_id); retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); rcu_read_unlock(); if (retval < 0) diff --git a/kernel/events/core.c b/kernel/events/core.c index 56003c6edfd3..64903731d834 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -342,7 +342,7 @@ struct perf_cgroup { static inline struct perf_cgroup * perf_cgroup_from_task(struct task_struct *task) { - return container_of(task_css(task, perf_subsys_id), + return container_of(task_css(task, perf_event_cgrp_id), struct perf_cgroup, css); } @@ -595,7 +595,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, rcu_read_lock(); - css = css_from_dir(f.file->f_dentry, &perf_subsys); + css = css_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys); if (IS_ERR(css)) { ret = PTR_ERR(css); goto out; @@ -8055,9 +8055,7 @@ static void perf_cgroup_exit(struct cgroup_subsys_state *css, task_function_call(task, __perf_cgroup_move, task); } -struct cgroup_subsys perf_subsys = { - .name = "perf_event", - .subsys_id = perf_subsys_id, +struct cgroup_subsys perf_event_cgrp_subsys = { .css_alloc = perf_cgroup_css_alloc, .css_free = perf_cgroup_css_free, .exit = perf_cgroup_exit, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b46131ef6aab..d4cfc5561830 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7176,7 +7176,7 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) tsk->sched_class->put_prev_task(rq, tsk); - tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id, + tg = container_of(task_css_check(tsk, cpu_cgrp_id, lockdep_is_held(&tsk->sighand->siglock)), struct task_group, css); tg = autogroup_task_group(tsk, tg); @@ -7957,8 +7957,7 @@ static struct cftype cpu_files[] = { { } /* terminate */ }; -struct cgroup_subsys cpu_cgroup_subsys = { - .name = "cpu", +struct cgroup_subsys cpu_cgrp_subsys = { .css_alloc = cpu_cgroup_css_alloc, .css_free = cpu_cgroup_css_free, .css_online = cpu_cgroup_css_online, @@ -7966,7 +7965,6 @@ struct cgroup_subsys cpu_cgroup_subsys = { .can_attach = cpu_cgroup_can_attach, .attach = cpu_cgroup_attach, .exit = cpu_cgroup_exit, - .subsys_id = cpu_cgroup_subsys_id, .base_cftypes = cpu_files, .early_init = 1, }; diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 622e0818f905..c143ee380e3a 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -41,7 +41,7 @@ static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) /* return cpu accounting group to which this task belongs */ static inline struct cpuacct *task_ca(struct task_struct *tsk) { - return css_ca(task_css(tsk, cpuacct_subsys_id)); + return css_ca(task_css(tsk, cpuacct_cgrp_id)); } static inline struct cpuacct *parent_ca(struct cpuacct *ca) @@ -275,11 +275,9 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val) rcu_read_unlock(); } -struct cgroup_subsys cpuacct_subsys = { - .name = "cpuacct", +struct cgroup_subsys cpuacct_cgrp_subsys = { .css_alloc = cpuacct_css_alloc, .css_free = cpuacct_css_free, - .subsys_id = cpuacct_subsys_id, .base_cftypes = files, .early_init = 1, }; diff --git 
a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index cb00829bb466..b135853e68f3 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -30,7 +30,6 @@ struct hugetlb_cgroup { #define MEMFILE_IDX(val) (((val) >> 16) & 0xffff) #define MEMFILE_ATTR(val) ((val) & 0xffff) -struct cgroup_subsys hugetlb_subsys __read_mostly; static struct hugetlb_cgroup *root_h_cgroup __read_mostly; static inline @@ -42,7 +41,7 @@ struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s) static inline struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task) { - return hugetlb_cgroup_from_css(task_css(task, hugetlb_subsys_id)); + return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id)); } static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg) @@ -358,7 +357,7 @@ static void __init __hugetlb_cgroup_file_init(int idx) cft = &h->cgroup_files[4]; memset(cft, 0, sizeof(*cft)); - WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files)); + WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files)); return; } @@ -402,10 +401,8 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) return; } -struct cgroup_subsys hugetlb_subsys = { - .name = "hugetlb", +struct cgroup_subsys hugetlb_cgrp_subsys = { .css_alloc = hugetlb_cgroup_css_alloc, .css_offline = hugetlb_cgroup_css_offline, .css_free = hugetlb_cgroup_css_free, - .subsys_id = hugetlb_subsys_id, }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 53385cd4e6f0..04a97bce2270 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -66,8 +66,8 @@ #include -struct cgroup_subsys mem_cgroup_subsys __read_mostly; -EXPORT_SYMBOL(mem_cgroup_subsys); +struct cgroup_subsys memory_cgrp_subsys __read_mostly; +EXPORT_SYMBOL(memory_cgrp_subsys); #define MEM_CGROUP_RECLAIM_RETRIES 5 static struct mem_cgroup *root_mem_cgroup __read_mostly; @@ -538,7 +538,7 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) { struct cgroup_subsys_state *css; - css = css_from_id(id - 1, &mem_cgroup_subsys); + css = css_from_id(id - 1, &memory_cgrp_subsys); return mem_cgroup_from_css(css); } @@ -1072,7 +1072,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) if (unlikely(!p)) return NULL; - return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id)); + return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); } struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) @@ -1702,7 +1702,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) rcu_read_lock(); mem_cgrp = memcg->css.cgroup; - task_cgrp = task_cgroup(p, mem_cgroup_subsys_id); + task_cgrp = task_cgroup(p, memory_cgrp_id); ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX); if (ret < 0) { @@ -6187,7 +6187,7 @@ static int memcg_write_event_control(struct cgroup_subsys_state *css, ret = -EINVAL; cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, - &mem_cgroup_subsys); + &memory_cgrp_subsys); if (cfile_css == css && css_tryget(css)) ret = 0; @@ -6566,11 +6566,11 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) * unfortunate state in our controller. 
*/ if (parent != root_mem_cgroup) - mem_cgroup_subsys.broken_hierarchy = true; + memory_cgrp_subsys.broken_hierarchy = true; } mutex_unlock(&memcg_create_mutex); - return memcg_init_kmem(memcg, &mem_cgroup_subsys); + return memcg_init_kmem(memcg, &memory_cgrp_subsys); } /* @@ -7264,9 +7264,7 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) mem_cgroup_from_css(root_css)->use_hierarchy = true; } -struct cgroup_subsys mem_cgroup_subsys = { - .name = "memory", - .subsys_id = mem_cgroup_subsys_id, +struct cgroup_subsys memory_cgrp_subsys = { .css_alloc = mem_cgroup_css_alloc, .css_online = mem_cgroup_css_online, .css_offline = mem_cgroup_css_offline, @@ -7292,7 +7290,7 @@ __setup("swapaccount=", enable_swap_account); static void __init memsw_file_init(void) { - WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files)); + WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files)); } static void __init enable_swap_cgroup(void) diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 9e5ad5d74e60..b865662fba71 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -23,7 +23,7 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state struct cgroup_cls_state *task_cls_state(struct task_struct *p) { - return css_cls_state(task_css(p, net_cls_subsys_id)); + return css_cls_state(task_css(p, net_cls_cgrp_id)); } EXPORT_SYMBOL_GPL(task_cls_state); @@ -102,12 +102,10 @@ static struct cftype ss_files[] = { { } /* terminate */ }; -struct cgroup_subsys net_cls_subsys = { - .name = "net_cls", +struct cgroup_subsys net_cls_cgrp_subsys = { .css_alloc = cgrp_css_alloc, .css_online = cgrp_css_online, .css_free = cgrp_css_free, .attach = cgrp_attach, - .subsys_id = net_cls_subsys_id, .base_cftypes = ss_files, }; diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 857e1603f9b7..d7d23e28fafd 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -244,13 +244,11 @@ static struct cftype ss_files[] = { { } /* terminate */ }; -struct cgroup_subsys net_prio_subsys = { - .name = "net_prio", +struct cgroup_subsys net_prio_cgrp_subsys = { .css_alloc = cgrp_css_alloc, .css_online = cgrp_css_online, .css_free = cgrp_css_free, .attach = net_prio_attach, - .subsys_id = net_prio_subsys_id, .base_cftypes = ss_files, }; diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index f7e522c558ba..20a0aca9131e 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c @@ -219,7 +219,7 @@ static struct cftype tcp_files[] = { static int __init tcp_memcontrol_init(void) { - WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, tcp_files)); + WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, tcp_files)); return 0; } __initcall(tcp_memcontrol_init); diff --git a/security/device_cgroup.c b/security/device_cgroup.c index d3b6d2cd3a06..7f88bcde7c61 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -58,11 +58,9 @@ static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s) static inline struct dev_cgroup *task_devcgroup(struct task_struct *task) { - return css_to_devcgroup(task_css(task, devices_subsys_id)); + return css_to_devcgroup(task_css(task, devices_cgrp_id)); } -struct cgroup_subsys devices_subsys; - /* * called under devcgroup_mutex */ @@ -684,13 +682,11 @@ static struct cftype dev_cgroup_files[] = { { } /* terminate */ }; -struct cgroup_subsys devices_subsys = { - .name = "devices", +struct cgroup_subsys devices_cgrp_subsys = { 
.css_alloc = devcgroup_css_alloc, .css_free = devcgroup_css_free, .css_online = devcgroup_online, .css_offline = devcgroup_offline, - .subsys_id = devices_subsys_id, .base_cftypes = dev_cgroup_files, }; -- cgit v1.2.3 From aec25020f5d4b69aea5317551d1cb7043f6b04fb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 8 Feb 2014 10:36:58 -0500 Subject: cgroup: rename cgroup_subsys->subsys_id to ->id It's no longer referenced outside cgroup core, so renaming is easy. Let's rename it for consistency & brevity. This patch is pure rename. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- include/linux/cgroup.h | 4 ++-- kernel/cgroup.c | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index cd6611e622fd..198c7fcd727e 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -548,7 +548,7 @@ int cgroup_taskset_size(struct cgroup_taskset *tset); (task) = cgroup_taskset_next((tset))) \ if (!(skip_css) || \ cgroup_taskset_cur_css((tset), \ - (skip_css)->ss->subsys_id) != (skip_css)) + (skip_css)->ss->id) != (skip_css)) /* * Control Group subsystem type. @@ -592,7 +592,7 @@ struct cgroup_subsys { bool warned_broken_hierarchy; /* the following two fields are initialized automtically during boot */ - int subsys_id; + int id; #define MAX_CGROUP_TYPE_NAMELEN 32 const char *name; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index fe3f7253aa90..5a77ca0784a6 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -198,7 +198,7 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, struct cgroup_subsys *ss) { if (ss) - return rcu_dereference_check(cgrp->subsys[ss->subsys_id], + return rcu_dereference_check(cgrp->subsys[ss->id], lockdep_is_held(&cgroup_mutex)); else return &cgrp->dummy_css; @@ -3982,7 +3982,7 @@ static void css_release(struct percpu_ref *ref) struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); - rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL); + rcu_assign_pointer(css->cgroup->subsys[css->ss->id], NULL); call_rcu(&css->rcu_head, css_free_rcu_fn); } @@ -4014,7 +4014,7 @@ static int online_css(struct cgroup_subsys_state *css) if (!ret) { css->flags |= CSS_ONLINE; css->cgroup->nr_css++; - rcu_assign_pointer(css->cgroup->subsys[ss->subsys_id], css); + rcu_assign_pointer(css->cgroup->subsys[ss->id], css); } return ret; } @@ -4034,7 +4034,7 @@ static void offline_css(struct cgroup_subsys_state *css) css->flags &= ~CSS_ONLINE; css->cgroup->nr_css--; - RCU_INIT_POINTER(css->cgroup->subsys[ss->subsys_id], css); + RCU_INIT_POINTER(css->cgroup->subsys[ss->id], css); } /** @@ -4065,7 +4065,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) init_css(css, ss, cgrp); - err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id); + err = cgroup_populate_dir(cgrp, 1 << ss->id); if (err) goto err_free; @@ -4292,7 +4292,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref) */ static void kill_css(struct cgroup_subsys_state *css) { - cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id); + cgroup_clear_dir(css->cgroup, 1 << css->ss->id); /* * Killing would put the base ref, but we need to keep it alive @@ -4496,7 +4496,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) * pointer to this state - since the subsystem is * newly registered, all tasks and hence the * init_css_set is in the subsystem's top cgroup. 
*/ - init_css_set.subsys[ss->subsys_id] = css; + init_css_set.subsys[ss->id] = css; need_forkexit_callback |= ss->fork || ss->exit; @@ -4536,14 +4536,14 @@ int __init cgroup_init_early(void) list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); for_each_subsys(ss, i) { - WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->subsys_id, + WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id, "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n", i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, - ss->subsys_id, ss->name); + ss->id, ss->name); WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN, "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]); - ss->subsys_id = i; + ss->id = i; ss->name = cgroup_subsys_name[i]; if (ss->early_init) -- cgit v1.2.3 From c11baa02c5d6ea06362fa61da070af34b7706c83 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Fri, 24 Jan 2014 16:18:02 -0600 Subject: crypto: ccp - Move HMAC calculation down to ccp ops file Move the support to perform an HMAC calculation into the CCP operations file. This eliminates the need to perform a synchronous SHA operation used to calculate the HMAC. Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-sha.c | 130 +++++++----------------------------- drivers/crypto/ccp/ccp-crypto.h | 8 +-- drivers/crypto/ccp/ccp-ops.c | 104 ++++++++++++++++++++++++++++- include/linux/ccp.h | 7 ++ 4 files changed, 139 insertions(+), 110 deletions(-) (limited to 'include/linux') diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 3867290b3531..873f23425245 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -24,75 +24,10 @@ #include "ccp-crypto.h" -struct ccp_sha_result { - struct completion completion; - int err; -}; - -static void ccp_sync_hash_complete(struct crypto_async_request *req, int err) -{ - struct ccp_sha_result *result = req->data; - - if (err == -EINPROGRESS) - return; - - result->err = err; - complete(&result->completion); -} - -static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf, - struct scatterlist *sg, unsigned int len) -{ - struct ccp_sha_result result; - struct ahash_request *req; - int ret; - - init_completion(&result.completion); - - req = ahash_request_alloc(tfm, GFP_KERNEL); - if (!req) - return -ENOMEM; - - ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - ccp_sync_hash_complete, &result); - ahash_request_set_crypt(req, sg, buf, len); - - ret = crypto_ahash_digest(req); - if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { - ret = wait_for_completion_interruptible(&result.completion); - if (!ret) - ret = result.err; - } - - ahash_request_free(req); - - return ret; -} - -static int ccp_sha_finish_hmac(struct crypto_async_request *async_req) -{ - struct ahash_request *req = ahash_request_cast(async_req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); - struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); - struct scatterlist sg[2]; - unsigned int block_size = - crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); - unsigned int digest_size = crypto_ahash_digestsize(tfm); - - sg_init_table(sg, ARRAY_SIZE(sg)); - sg_set_buf(&sg[0], ctx->u.sha.opad, block_size); - sg_set_buf(&sg[1], rctx->ctx, digest_size); - - return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg, - block_size + digest_size); -} - static int ccp_sha_complete(struct crypto_async_request *async_req, int ret) { struct 
ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); unsigned int digest_size = crypto_ahash_digestsize(tfm); @@ -112,10 +47,6 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret) if (req->result) memcpy(req->result, rctx->ctx, digest_size); - /* If we're doing an HMAC, we need to perform that on the final op */ - if (rctx->final && ctx->u.sha.key_len) - ret = ccp_sha_finish_hmac(async_req); - e_free: sg_free_table(&rctx->data_sg); @@ -126,6 +57,7 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, unsigned int final) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); struct scatterlist *sg; unsigned int block_size = @@ -196,6 +128,11 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); rctx->cmd.u.sha.src = sg; rctx->cmd.u.sha.src_len = rctx->hash_cnt; + rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? + &ctx->u.sha.opad_sg : NULL; + rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ? + ctx->u.sha.opad_count : 0; + rctx->cmd.u.sha.first = rctx->first; rctx->cmd.u.sha.final = rctx->final; rctx->cmd.u.sha.msg_bits = rctx->msg_bits; @@ -218,7 +155,6 @@ static int ccp_sha_init(struct ahash_request *req) memset(rctx, 0, sizeof(*rctx)); - memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx)); rctx->type = alg->type; rctx->first = 1; @@ -261,10 +197,13 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int key_len) { struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); - struct scatterlist sg; - unsigned int block_size = - crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); - unsigned int digest_size = crypto_ahash_digestsize(tfm); + struct crypto_shash *shash = ctx->u.sha.hmac_tfm; + struct { + struct shash_desc sdesc; + char ctx[crypto_shash_descsize(shash)]; + } desc; + unsigned int block_size = crypto_shash_blocksize(shash); + unsigned int digest_size = crypto_shash_digestsize(shash); int i, ret; /* Set to zero until complete */ @@ -277,8 +216,12 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, if (key_len > block_size) { /* Must hash the input key */ - sg_init_one(&sg, key, key_len); - ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len); + desc.sdesc.tfm = shash; + desc.sdesc.flags = crypto_ahash_get_flags(tfm) & + CRYPTO_TFM_REQ_MAY_SLEEP; + + ret = crypto_shash_digest(&desc.sdesc, key, key_len, + ctx->u.sha.key); if (ret) { crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; @@ -293,6 +236,9 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c; } + sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size); + ctx->u.sha.opad_count = block_size; + ctx->u.sha.key_len = key_len; return 0; @@ -319,10 +265,9 @@ static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm) { struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); - struct crypto_ahash *hmac_tfm; + struct crypto_shash *hmac_tfm; - hmac_tfm = crypto_alloc_ahash(alg->child_alg, - CRYPTO_ALG_TYPE_AHASH, 0); + hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); if (IS_ERR(hmac_tfm)) { pr_warn("could not load driver %s need for HMAC support\n", alg->child_alg); @@ -339,35 +284,14 @@ 
static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm) struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->u.sha.hmac_tfm) - crypto_free_ahash(ctx->u.sha.hmac_tfm); + crypto_free_shash(ctx->u.sha.hmac_tfm); ccp_sha_cra_exit(tfm); } -static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { - cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), - cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), - cpu_to_be32(SHA1_H4), 0, 0, 0, -}; - -static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { - cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), - cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), - cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), - cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), -}; - -static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { - cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), - cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), - cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), - cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), -}; - struct ccp_sha_def { const char *name; const char *drv_name; - const __be32 *init; enum ccp_sha_type type; u32 digest_size; u32 block_size; @@ -377,7 +301,6 @@ static struct ccp_sha_def sha_algs[] = { { .name = "sha1", .drv_name = "sha1-ccp", - .init = sha1_init, .type = CCP_SHA_TYPE_1, .digest_size = SHA1_DIGEST_SIZE, .block_size = SHA1_BLOCK_SIZE, @@ -385,7 +308,6 @@ static struct ccp_sha_def sha_algs[] = { { .name = "sha224", .drv_name = "sha224-ccp", - .init = sha224_init, .type = CCP_SHA_TYPE_224, .digest_size = SHA224_DIGEST_SIZE, .block_size = SHA224_BLOCK_SIZE, @@ -393,7 +315,6 @@ static struct ccp_sha_def sha_algs[] = { { .name = "sha256", .drv_name = "sha256-ccp", - .init = sha256_init, .type = CCP_SHA_TYPE_256, .digest_size = SHA256_DIGEST_SIZE, .block_size = SHA256_BLOCK_SIZE, @@ -460,7 +381,6 @@ static int ccp_register_sha_alg(struct list_head *head, INIT_LIST_HEAD(&ccp_alg->entry); - ccp_alg->init = def->init; ccp_alg->type = def->type; alg = &ccp_alg->alg; diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index b222231b6169..9aa4ae184f7f 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -137,11 +137,14 @@ struct ccp_aes_cmac_req_ctx { #define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE struct ccp_sha_ctx { + struct scatterlist opad_sg; + unsigned int opad_count; + unsigned int key_len; u8 key[MAX_SHA_BLOCK_SIZE]; u8 ipad[MAX_SHA_BLOCK_SIZE]; u8 opad[MAX_SHA_BLOCK_SIZE]; - struct crypto_ahash *hmac_tfm; + struct crypto_shash *hmac_tfm; }; struct ccp_sha_req_ctx { @@ -167,9 +170,6 @@ struct ccp_sha_req_ctx { unsigned int buf_count; u8 buf[MAX_SHA_BLOCK_SIZE]; - /* HMAC support field */ - struct scatterlist pad_sg; - /* CCP driver command */ struct ccp_cmd cmd; }; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c266a7b154bb..9ae006d69df4 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "ccp-dev.h" @@ -132,6 +133,27 @@ struct ccp_op { } u; }; +/* SHA initial context values */ +static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { + cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), + cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), + cpu_to_be32(SHA1_H4), 0, 0, 0, +}; + +static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { + cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), + cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), + cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), + cpu_to_be32(SHA224_H6), 
cpu_to_be32(SHA224_H7), +}; + +static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { + cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), + cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), + cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), + cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), +}; + /* The CCP cannot perform zero-length sha operations so the caller * is required to buffer data for the final operation. However, a * sha operation for a message with a total length of zero is valid @@ -1411,7 +1433,27 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (ret) return ret; - ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); + if (sha->first) { + const __be32 *init; + + switch (sha->type) { + case CCP_SHA_TYPE_1: + init = ccp_sha1_init; + break; + case CCP_SHA_TYPE_224: + init = ccp_sha224_init; + break; + case CCP_SHA_TYPE_256: + init = ccp_sha256_init; + break; + default: + ret = -EINVAL; + goto e_ctx; + } + memcpy(ctx.address, init, CCP_SHA_CTXSIZE); + } else + ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); + ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { @@ -1451,6 +1493,66 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); + if (sha->final && sha->opad) { + /* HMAC operation, recursively perform final SHA */ + struct ccp_cmd hmac_cmd; + struct scatterlist sg; + u64 block_size, digest_size; + u8 *hmac_buf; + + switch (sha->type) { + case CCP_SHA_TYPE_1: + block_size = SHA1_BLOCK_SIZE; + digest_size = SHA1_DIGEST_SIZE; + break; + case CCP_SHA_TYPE_224: + block_size = SHA224_BLOCK_SIZE; + digest_size = SHA224_DIGEST_SIZE; + break; + case CCP_SHA_TYPE_256: + block_size = SHA256_BLOCK_SIZE; + digest_size = SHA256_DIGEST_SIZE; + break; + default: + ret = -EINVAL; + goto e_data; + } + + if (sha->opad_len != block_size) { + ret = -EINVAL; + goto e_data; + } + + hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); + if (!hmac_buf) { + ret = -ENOMEM; + goto e_data; + } + sg_init_one(&sg, hmac_buf, block_size + digest_size); + + scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); + memcpy(hmac_buf + block_size, ctx.address, digest_size); + + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); + hmac_cmd.engine = CCP_ENGINE_SHA; + hmac_cmd.u.sha.type = sha->type; + hmac_cmd.u.sha.ctx = sha->ctx; + hmac_cmd.u.sha.ctx_len = sha->ctx_len; + hmac_cmd.u.sha.src = &sg; + hmac_cmd.u.sha.src_len = block_size + digest_size; + hmac_cmd.u.sha.opad = NULL; + hmac_cmd.u.sha.opad_len = 0; + hmac_cmd.u.sha.first = 1; + hmac_cmd.u.sha.final = 1; + hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3; + + ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); + if (ret) + cmd->engine_error = hmac_cmd.engine_error; + + kfree(hmac_buf); + } + e_data: ccp_free_data(&src, cmd_q); diff --git a/include/linux/ccp.h b/include/linux/ccp.h index b941ab9f762b..ebcc9d146219 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -232,6 +232,9 @@ enum ccp_sha_type { * @ctx_len: length in bytes of hash value * @src: data to be used for this operation * @src_len: length in bytes of data used for this operation + * @opad: data to be used for final HMAC operation + * @opad_len: length in bytes of data used for final HMAC operation + * @first: indicates first SHA operation * @final: indicates final SHA operation * @msg_bits: total length of the message in bits used in final SHA operation * @@ -251,6 +254,10 @@ struct ccp_sha_engine { 
struct scatterlist *src; u64 src_len; /* In bytes */ + struct scatterlist *opad; + u32 opad_len; /* In bytes */ + + u32 first; /* Indicates first sha cmd */ u32 final; /* Indicates final sha cmd */ u64 msg_bits; /* Message length in bits required for * final sha cmd */ -- cgit v1.2.3
From 6039257378e4c84da06e68230b14fef955508ce6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 10 Feb 2014 10:27:11 +1100 Subject: direct-io: add flag to allow aio writes beyond i_size Some filesystems can handle direct I/O writes beyond i_size safely, so allow them to opt into receiving them. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner --- fs/direct-io.c | 18 ++++++++++++------ include/linux/fs.h | 3 +++ 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'include/linux')
diff --git a/fs/direct-io.c b/fs/direct-io.c index 160a5489a939..a701752dd750 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -1194,13 +1194,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, } /* - * For file extending writes updating i_size before data - * writeouts complete can expose uninitialized blocks. So - * even for AIO, we need to wait for i/o to complete before - * returning in this case. + * For file extending writes updating i_size before data writeouts + * complete can expose uninitialized blocks in dumb filesystems. + * In that case we need to wait for I/O completion even if asked + * for an asynchronous write. */ - dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) && - (end > i_size_read(inode))); + if (is_sync_kiocb(iocb)) + dio->is_async = false; + else if (!(dio->flags & DIO_ASYNC_EXTEND) && + (rw & WRITE) && end > i_size_read(inode)) + dio->is_async = false; + else + dio->is_async = true; + dio->inode = inode; dio->rw = rw;
diff --git a/include/linux/fs.h b/include/linux/fs.h index 09f553c59813..f7faefcf4843 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2527,6 +2527,9 @@ enum { /* filesystem does not support filling holes */ DIO_SKIP_HOLES = 0x02, + + /* filesystem can handle aio writes beyond i_size */ + DIO_ASYNC_EXTEND = 0x04, }; void dio_end_io(struct bio *bio, int error); -- cgit v1.2.3
From 557fe99d9d490fe01c7aa87494313078c4ff939c Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Fri, 24 Jan 2014 08:54:16 +0100 Subject: pwm: Remove obsolete HAVE_PWM Kconfig symbol Before we had the PWM framework we used to have a bare-bones PWM API. The HAVE_PWM Kconfig symbol used to be selected by the PWM drivers to indicate that the PWM API is present in the kernel. Since the last legacy driver is gone, the HAVE_PWM symbol can go as well.
Signed-off-by: Sascha Hauer Cc: Dmitry Torokhov Cc: Eric Miao Cc: Haojian Zhuang Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Cc: Thierry Reding Cc: linux-pwm@vger.kernel.orig Cc: Russell King Cc: Ralf Baechle Signed-off-by: Thierry Reding --- arch/arm/Kconfig | 4 ---- arch/arm/mach-pxa/Kconfig | 15 --------------- arch/mips/Kconfig | 1 - drivers/input/misc/Kconfig | 4 ++-- include/linux/pwm.h | 2 +- 5 files changed, 3 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e25419817791..cc6ce44064a2 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -113,9 +113,6 @@ config ARM_DMA_IOMMU_ALIGNMENT endif -config HAVE_PWM - bool - config MIGHT_HAVE_PCI bool @@ -632,7 +629,6 @@ config ARCH_LPC32XX select CPU_ARM926T select GENERIC_CLOCKEVENTS select HAVE_IDE - select HAVE_PWM select USB_ARCH_HAS_OHCI select USE_OF help diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 96100dbf5a2e..b96244c73ea4 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -7,7 +7,6 @@ comment "Intel/Marvell Dev Platforms (sorted by hardware release time)" config MACH_PXA3XX_DT bool "Support PXA3xx platforms from device tree" select CPU_PXA300 - select HAVE_PWM select POWER_SUPPLY select PXA3xx select USE_OF @@ -23,12 +22,10 @@ config ARCH_LUBBOCK config MACH_MAINSTONE bool "Intel HCDDBBVA0 Development Platform (aka Mainstone)" - select HAVE_PWM select PXA27x config MACH_ZYLONITE bool - select HAVE_PWM select PXA3xx config MACH_ZYLONITE300 @@ -69,7 +66,6 @@ config ARCH_PXA_IDP config ARCH_VIPER bool "Arcom/Eurotech VIPER SBC" select ARCOM_PCMCIA - select HAVE_PWM select I2C_GPIO select ISA select PXA25x @@ -120,7 +116,6 @@ config MACH_CM_X300 bool "CompuLab CM-X300 modules" select CPU_PXA300 select CPU_PXA310 - select HAVE_PWM select PXA3xx config MACH_CAPC7117 @@ -211,7 +206,6 @@ config TRIZEPS_PCMCIA config MACH_LOGICPD_PXA270 bool "LogicPD PXA270 Card Engine Development Platform" - select HAVE_PWM select PXA27x config MACH_PCM027 @@ -222,7 +216,6 @@ config MACH_PCM027 config MACH_PCM990_BASEBOARD bool "PHYTEC PCM-990 development board" depends on MACH_PCM027 - select HAVE_PWM choice prompt "display on pcm990" @@ -246,7 +239,6 @@ config MACH_COLIBRI config MACH_COLIBRI_PXA270_INCOME bool "Income s.r.o. 
PXA270 SBC" depends on MACH_COLIBRI - select HAVE_PWM select PXA27x config MACH_COLIBRI300 @@ -275,7 +267,6 @@ comment "End-user Products (sorted by vendor name)" config MACH_H4700 bool "HP iPAQ hx4700" - select HAVE_PWM select IWMMXT select PXA27x @@ -289,14 +280,12 @@ config MACH_HIMALAYA config MACH_MAGICIAN bool "Enable HTC Magician Support" - select HAVE_PWM select IWMMXT select PXA27x config MACH_MIOA701 bool "Mitac Mio A701 Support" select GPIO_SYSFS - select HAVE_PWM select IWMMXT select PXA27x help @@ -306,7 +295,6 @@ config MACH_MIOA701 config PXA_EZX bool "Motorola EZX Platform" - select HAVE_PWM select IWMMXT select PXA27x @@ -346,7 +334,6 @@ config MACH_MP900C config ARCH_PXA_PALM bool "PXA based Palm PDAs" - select HAVE_PWM config MACH_PALM27X bool @@ -444,7 +431,6 @@ config MACH_TREO680 config MACH_RAUMFELD_RC bool "Raumfeld Controller" select CPU_PXA300 - select HAVE_PWM select POWER_SUPPLY select PXA3xx @@ -608,7 +594,6 @@ config MACH_E800 config MACH_ZIPIT2 bool "Zipit Z2 Handheld" - select HAVE_PWM select PXA27x endmenu diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index dcae3a7035db..d1326032c8c8 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -235,7 +235,6 @@ config MACH_JZ4740 select IRQ_CPU select ARCH_REQUIRE_GPIOLIB select SYS_HAS_EARLY_PRINTK - select HAVE_PWM select HAVE_CLK select GENERIC_IRQ_CHIP diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 7904ab05527a..762e6d2de3c9 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -156,7 +156,7 @@ config INPUT_MAX8925_ONKEY config INPUT_MAX8997_HAPTIC tristate "MAXIM MAX8997 haptic controller support" - depends on PWM && HAVE_PWM && MFD_MAX8997 + depends on PWM && MFD_MAX8997 select INPUT_FF_MEMLESS help This option enables device driver support for the haptic controller @@ -470,7 +470,7 @@ config INPUT_PCF8574 config INPUT_PWM_BEEPER tristate "PWM beeper support" - depends on PWM && HAVE_PWM + depends on PWM help Say Y here to get support for PWM based beeper devices. diff --git a/include/linux/pwm.h b/include/linux/pwm.h index f0feafd184a0..4717f54051cb 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -7,7 +7,7 @@ struct pwm_device; struct seq_file; -#if IS_ENABLED(CONFIG_PWM) || IS_ENABLED(CONFIG_HAVE_PWM) +#if IS_ENABLED(CONFIG_PWM) /* * pwm_request - request a PWM device */ -- cgit v1.2.3 From b0504e39c27b00101c9c1fa2c58fd896ae0f64f5 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 18 Nov 2013 16:48:19 +0100 Subject: ARM: zynq: Map I/O memory on clkc init The clkc has its registers in the range of the slcr. Instead of passing around the slcr base address pointer, let the clkc get the address from the DT. This prepares the slcr to be a real driver with multiple memory ranges (slcr, clocks, pinctrl,...) Signed-off-by: Steffen Trumtrar Signed-off-by: Michal Simek --- .../devicetree/bindings/clock/zynq-7000.txt | 4 +- arch/arm/boot/dts/zynq-7000.dtsi | 42 +++++----- arch/arm/mach-zynq/common.c | 2 +- drivers/clk/zynq/clkc.c | 89 +++++++++++++++------- include/linux/clk/zynq.h | 2 +- 5 files changed, 88 insertions(+), 51 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/clock/zynq-7000.txt b/Documentation/devicetree/bindings/clock/zynq-7000.txt index 17b4a94916d6..d93746cf2975 100644 --- a/Documentation/devicetree/bindings/clock/zynq-7000.txt +++ b/Documentation/devicetree/bindings/clock/zynq-7000.txt @@ -14,6 +14,7 @@ for all clock consumers of PS clocks. 
Required properties: - #clock-cells : Must be 1 - compatible : "xlnx,ps7-clkc" + - reg : SLCR offset and size taken via syscon < 0x100 0x100 > - ps-clk-frequency : Frequency of the oscillator providing ps_clk in HZ (usually 33 MHz oscillators are used for Zynq platforms) - clock-output-names : List of strings used to name the clock outputs. Shall be @@ -87,10 +88,11 @@ Clock outputs: 47: dbg_apb Example: - clkc: clkc { + clkc: clkc@100 { #clock-cells = <1>; compatible = "xlnx,ps7-clkc"; ps-clk-frequency = <33333333>; + reg = <0x100 0x100>; clock-output-names = "armpll", "ddrpll", "iopll", "cpu_6or4x", "cpu_3or2x", "cpu_2x", "cpu_1x", "ddr2x", "ddr3x", "dci", "lqspi", "smc", "pcap", "gem0", "gem1", diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi index 81e5677f25a2..602e12eedb01 100644 --- a/arch/arm/boot/dts/zynq-7000.dtsi +++ b/arch/arm/boot/dts/zynq-7000.dtsi @@ -123,30 +123,28 @@ } ; slcr: slcr@f8000000 { + #address-cells = <1>; + #size-cells = <1>; compatible = "xlnx,zynq-slcr", "syscon"; reg = <0xF8000000 0x1000>; - - clocks { - #address-cells = <1>; - #size-cells = <0>; - - clkc: clkc { - #clock-cells = <1>; - compatible = "xlnx,ps7-clkc"; - ps-clk-frequency = <33333333>; - fclk-enable = <0>; - clock-output-names = "armpll", "ddrpll", "iopll", "cpu_6or4x", - "cpu_3or2x", "cpu_2x", "cpu_1x", "ddr2x", "ddr3x", - "dci", "lqspi", "smc", "pcap", "gem0", "gem1", - "fclk0", "fclk1", "fclk2", "fclk3", "can0", "can1", - "sdio0", "sdio1", "uart0", "uart1", "spi0", "spi1", - "dma", "usb0_aper", "usb1_aper", "gem0_aper", - "gem1_aper", "sdio0_aper", "sdio1_aper", - "spi0_aper", "spi1_aper", "can0_aper", "can1_aper", - "i2c0_aper", "i2c1_aper", "uart0_aper", "uart1_aper", - "gpio_aper", "lqspi_aper", "smc_aper", "swdt", - "dbg_trc", "dbg_apb"; - }; + ranges; + clkc: clkc@100 { + #clock-cells = <1>; + compatible = "xlnx,ps7-clkc"; + ps-clk-frequency = <33333333>; + fclk-enable = <0>; + clock-output-names = "armpll", "ddrpll", "iopll", "cpu_6or4x", + "cpu_3or2x", "cpu_2x", "cpu_1x", "ddr2x", "ddr3x", + "dci", "lqspi", "smc", "pcap", "gem0", "gem1", + "fclk0", "fclk1", "fclk2", "fclk3", "can0", "can1", + "sdio0", "sdio1", "uart0", "uart1", "spi0", "spi1", + "dma", "usb0_aper", "usb1_aper", "gem0_aper", + "gem1_aper", "sdio0_aper", "sdio1_aper", + "spi0_aper", "spi1_aper", "can0_aper", "can1_aper", + "i2c0_aper", "i2c1_aper", "uart0_aper", "uart1_aper", + "gpio_aper", "lqspi_aper", "smc_aper", "swdt", + "dbg_trc", "dbg_apb"; + reg = <0x100 0x100>; }; }; diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 38401cf78383..93ea19b13e6e 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c @@ -67,7 +67,7 @@ static void __init zynq_timer_init(void) { zynq_early_slcr_init(); - zynq_clock_init(zynq_slcr_base); + zynq_clock_init(); clocksource_of_init(); } diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 09dd0173ea0a..03052d67b197 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -21,34 +21,35 @@ #include #include #include +#include #include #include #include -static void __iomem *zynq_slcr_base_priv; - -#define SLCR_ARMPLL_CTRL (zynq_slcr_base_priv + 0x100) -#define SLCR_DDRPLL_CTRL (zynq_slcr_base_priv + 0x104) -#define SLCR_IOPLL_CTRL (zynq_slcr_base_priv + 0x108) -#define SLCR_PLL_STATUS (zynq_slcr_base_priv + 0x10c) -#define SLCR_ARM_CLK_CTRL (zynq_slcr_base_priv + 0x120) -#define SLCR_DDR_CLK_CTRL (zynq_slcr_base_priv + 0x124) -#define SLCR_DCI_CLK_CTRL (zynq_slcr_base_priv + 0x128) 
-#define SLCR_APER_CLK_CTRL (zynq_slcr_base_priv + 0x12c) -#define SLCR_GEM0_CLK_CTRL (zynq_slcr_base_priv + 0x140) -#define SLCR_GEM1_CLK_CTRL (zynq_slcr_base_priv + 0x144) -#define SLCR_SMC_CLK_CTRL (zynq_slcr_base_priv + 0x148) -#define SLCR_LQSPI_CLK_CTRL (zynq_slcr_base_priv + 0x14c) -#define SLCR_SDIO_CLK_CTRL (zynq_slcr_base_priv + 0x150) -#define SLCR_UART_CLK_CTRL (zynq_slcr_base_priv + 0x154) -#define SLCR_SPI_CLK_CTRL (zynq_slcr_base_priv + 0x158) -#define SLCR_CAN_CLK_CTRL (zynq_slcr_base_priv + 0x15c) -#define SLCR_CAN_MIOCLK_CTRL (zynq_slcr_base_priv + 0x160) -#define SLCR_DBG_CLK_CTRL (zynq_slcr_base_priv + 0x164) -#define SLCR_PCAP_CLK_CTRL (zynq_slcr_base_priv + 0x168) -#define SLCR_FPGA0_CLK_CTRL (zynq_slcr_base_priv + 0x170) -#define SLCR_621_TRUE (zynq_slcr_base_priv + 0x1c4) -#define SLCR_SWDT_CLK_SEL (zynq_slcr_base_priv + 0x304) +static void __iomem *zynq_clkc_base; + +#define SLCR_ARMPLL_CTRL (zynq_clkc_base + 0x00) +#define SLCR_DDRPLL_CTRL (zynq_clkc_base + 0x04) +#define SLCR_IOPLL_CTRL (zynq_clkc_base + 0x08) +#define SLCR_PLL_STATUS (zynq_clkc_base + 0x0c) +#define SLCR_ARM_CLK_CTRL (zynq_clkc_base + 0x20) +#define SLCR_DDR_CLK_CTRL (zynq_clkc_base + 0x24) +#define SLCR_DCI_CLK_CTRL (zynq_clkc_base + 0x28) +#define SLCR_APER_CLK_CTRL (zynq_clkc_base + 0x2c) +#define SLCR_GEM0_CLK_CTRL (zynq_clkc_base + 0x40) +#define SLCR_GEM1_CLK_CTRL (zynq_clkc_base + 0x44) +#define SLCR_SMC_CLK_CTRL (zynq_clkc_base + 0x48) +#define SLCR_LQSPI_CLK_CTRL (zynq_clkc_base + 0x4c) +#define SLCR_SDIO_CLK_CTRL (zynq_clkc_base + 0x50) +#define SLCR_UART_CLK_CTRL (zynq_clkc_base + 0x54) +#define SLCR_SPI_CLK_CTRL (zynq_clkc_base + 0x58) +#define SLCR_CAN_CLK_CTRL (zynq_clkc_base + 0x5c) +#define SLCR_CAN_MIOCLK_CTRL (zynq_clkc_base + 0x60) +#define SLCR_DBG_CLK_CTRL (zynq_clkc_base + 0x64) +#define SLCR_PCAP_CLK_CTRL (zynq_clkc_base + 0x68) +#define SLCR_FPGA0_CLK_CTRL (zynq_clkc_base + 0x70) +#define SLCR_621_TRUE (zynq_clkc_base + 0xc4) +#define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204) #define NUM_MIO_PINS 54 @@ -569,8 +570,44 @@ static void __init zynq_clk_setup(struct device_node *np) CLK_OF_DECLARE(zynq_clkc, "xlnx,ps7-clkc", zynq_clk_setup); -void __init zynq_clock_init(void __iomem *slcr_base) +void __init zynq_clock_init(void) { - zynq_slcr_base_priv = slcr_base; + struct device_node *np; + struct device_node *slcr; + struct resource res; + + np = of_find_compatible_node(NULL, NULL, "xlnx,ps7-clkc"); + if (!np) { + pr_err("%s: clkc node not found\n", __func__); + goto np_err; + } + + if (of_address_to_resource(np, 0, &res)) { + pr_err("%s: failed to get resource\n", np->name); + goto np_err; + } + + slcr = of_get_parent(np); + + if (slcr->data) { + zynq_clkc_base = (__force void __iomem *)slcr->data + res.start; + } else { + pr_err("%s: Unable to get I/O memory\n", np->name); + of_node_put(slcr); + goto np_err; + } + + pr_info("%s: clkc starts at %p\n", __func__, zynq_clkc_base); + + of_node_put(slcr); + of_node_put(np); + of_clk_init(NULL); + + return; + +np_err: + of_node_put(np); + BUG(); + return; } diff --git a/include/linux/clk/zynq.h b/include/linux/clk/zynq.h index e062d317ccce..7a5633b71533 100644 --- a/include/linux/clk/zynq.h +++ b/include/linux/clk/zynq.h @@ -22,7 +22,7 @@ #include -void zynq_clock_init(void __iomem *slcr); +void zynq_clock_init(void); struct clk *clk_register_zynq_pll(const char *name, const char *parent, void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index, -- cgit v1.2.3 From b02f6695f7601c4f8442b9cf4636802e7fa8d550 Mon Sep 17 
00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Feb 2014 00:35:23 +0100 Subject: PM / QoS: Rename device resume latency QoS items Rename symbols, variables, functions and structure fields related to the resume latency device PM QoS type so that it is clear where they belong (in particular, to avoid confusion with the latency tolerance device PM QoS type introduced by a subsequent changeset). Update the PM QoS documentation to better reflect its current state. Signed-off-by: Rafael J. Wysocki --- Documentation/power/pm_qos_interface.txt | 37 ++++++++++----------- Documentation/trace/events-power.txt | 2 +- drivers/base/power/power.h | 4 +-- drivers/base/power/qos.c | 55 ++++++++++++++++---------------- drivers/base/power/sysfs.c | 32 ++++++++++--------- drivers/mtd/nand/sh_flctl.c | 2 +- include/linux/pm_qos.h | 14 ++++---- include/trace/events/power.h | 4 +-- 8 files changed, 73 insertions(+), 77 deletions(-) (limited to 'include/linux')
diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt index 483632087788..22cb8f51182a 100644 --- a/Documentation/power/pm_qos_interface.txt +++ b/Documentation/power/pm_qos_interface.txt @@ -89,16 +89,16 @@ node. 2. PM QoS per-device latency and flags framework For each device, there are two lists of PM QoS requests. One is maintained -along with the aggregated target of latency value and the other is for PM QoS -flags. Values are updated in response to changes of the request list. +along with the aggregated target of resume latency value and the other is for +PM QoS flags. Values are updated in response to changes of the request list. -Target latency value is simply the minimum of the request values held in the -parameter list elements. The PM QoS flags aggregate value is a gather (bitwise -OR) of all list elements' values. Two device PM QoS flags are defined currently: -PM_QOS_FLAG_NO_POWER_OFF and PM_QOS_FLAG_REMOTE_WAKEUP. +Target resume latency value is simply the minimum of the request values held in +the parameter list elements. The PM QoS flags aggregate value is a gather +(bitwise OR) of all list elements' values. Two device PM QoS flags are defined +currently: PM_QOS_FLAG_NO_POWER_OFF and PM_QOS_FLAG_REMOTE_WAKEUP. -Note: the aggregated target value is implemented as an atomic variable so that -reading the aggregated value does not require any locking mechanism. +Note: the aggregated target value is implemented in such a way that reading the +aggregated value does not require any locking mechanism. From kernel mode the use of this interface is the following: @@ -137,14 +137,14 @@ Add a PM QoS request for the first direct ancestor of the given device whose power.ignore_children flag is unset. int dev_pm_qos_expose_latency_limit(device, value) -Add a request to the device's PM QoS list of latency constraints and create -a sysfs attribute pm_qos_resume_latency_us under the device's power directory -allowing user space to manipulate that request. +Add a request to the device's PM QoS list of resume latency constraints and +create a sysfs attribute pm_qos_resume_latency_us under the device's power +directory allowing user space to manipulate that request. void dev_pm_qos_hide_latency_limit(device) Drop the request added by dev_pm_qos_expose_latency_limit() from the device's -PM QoS list of latency constraints and remove sysfs attribute pm_qos_resume_latency_us -from the device's power directory.
+PM QoS list of resume latency constraints and remove sysfs attribute +pm_qos_resume_latency_us from the device's power directory. int dev_pm_qos_expose_flags(device, value) Add a request to the device's PM QoS list of flags and create sysfs attributes @@ -163,7 +163,7 @@ a per-device notification tree and a global notification tree. int dev_pm_qos_add_notifier(device, notifier): Adds a notification callback function for the device. The callback is called when the aggregated value of the device constraints list -is changed. +is changed (for resume latency device PM QoS only). int dev_pm_qos_remove_notifier(device, notifier): Removes the notification callback function for the device. @@ -171,14 +171,9 @@ Removes the notification callback function for the device. int dev_pm_qos_add_global_notifier(notifier): Adds a notification callback function in the global notification tree of the framework. -The callback is called when the aggregated value for any device is changed. +The callback is called when the aggregated value for any device is changed +(for resume latency device PM QoS only). int dev_pm_qos_remove_global_notifier(notifier): Removes the notification callback function from the global notification tree of the framework. - - -From user mode: -No API for user space access to the per-device latency constraints is provided -yet - still under discussion. - diff --git a/Documentation/trace/events-power.txt b/Documentation/trace/events-power.txt index 3bd33b8dc7c4..21d514ced212 100644 --- a/Documentation/trace/events-power.txt +++ b/Documentation/trace/events-power.txt @@ -92,5 +92,5 @@ dev_pm_qos_remove_request "device=%s type=%s new_value=%d" The first parameter gives the device name which tries to add/update/remove QoS requests. -The second parameter gives the request type (e.g. "DEV_PM_QOS_LATENCY"). +The second parameter gives the request type (e.g. "DEV_PM_QOS_RESUME_LATENCY"). The third parameter is value to be added/updated/removed. diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index cfc3226ec492..a21223d95926 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev); extern void rpm_sysfs_remove(struct device *dev); extern int wakeup_sysfs_add(struct device *dev); extern void wakeup_sysfs_remove(struct device *dev); -extern int pm_qos_sysfs_add_latency(struct device *dev); -extern void pm_qos_sysfs_remove_latency(struct device *dev); +extern int pm_qos_sysfs_add_resume_latency(struct device *dev); +extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); extern int pm_qos_sysfs_add_flags(struct device *dev); extern void pm_qos_sysfs_remove_flags(struct device *dev); diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5c1361a9e5dd..67c0f4219b02 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags); s32 __dev_pm_qos_read_value(struct device *dev) { return IS_ERR_OR_NULL(dev->power.qos) ? 
- 0 : pm_qos_read_value(&dev->power.qos->latency); + 0 : pm_qos_read_value(&dev->power.qos->resume_latency); } /** @@ -141,11 +141,11 @@ static int apply_constraint(struct dev_pm_qos_request *req, int ret; switch(req->type) { - case DEV_PM_QOS_LATENCY: - ret = pm_qos_update_target(&qos->latency, &req->data.pnode, - action, value); + case DEV_PM_QOS_RESUME_LATENCY: + ret = pm_qos_update_target(&qos->resume_latency, + &req->data.pnode, action, value); if (ret) { - value = pm_qos_read_value(&qos->latency); + value = pm_qos_read_value(&qos->resume_latency); blocking_notifier_call_chain(&dev_pm_notifiers, (unsigned long)value, req); @@ -186,10 +186,10 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) } BLOCKING_INIT_NOTIFIER_HEAD(n); - c = &qos->latency; + c = &qos->resume_latency; plist_head_init(&c->list); - c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; - c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; + c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->type = PM_QOS_MIN; c->notifiers = n; @@ -224,7 +224,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) * If the device's PM QoS resume latency limit or PM QoS flags have been * exposed to user space, they have to be hidden at this point. */ - pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_resume_latency(dev); pm_qos_sysfs_remove_flags(dev); mutex_lock(&dev_pm_qos_mtx); @@ -237,7 +237,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) goto out; /* Flush the constraints lists for the device. */ - c = &qos->latency; + c = &qos->resume_latency; plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { /* * Update constraints list and call the notification @@ -341,7 +341,7 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, return -ENODEV; switch(req->type) { - case DEV_PM_QOS_LATENCY: + case DEV_PM_QOS_RESUME_LATENCY: curr_value = req->data.pnode.prio; break; case DEV_PM_QOS_FLAGS: @@ -460,8 +460,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) ret = dev_pm_qos_constraints_allocate(dev); if (!ret) - ret = blocking_notifier_chain_register( - dev->power.qos->latency.notifiers, notifier); + ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, + notifier); mutex_unlock(&dev_pm_qos_mtx); return ret; @@ -487,9 +487,8 @@ int dev_pm_qos_remove_notifier(struct device *dev, /* Silently return if the constraints object is not present. 
*/ if (!IS_ERR_OR_NULL(dev->power.qos)) - retval = blocking_notifier_chain_unregister( - dev->power.qos->latency.notifiers, - notifier); + retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, + notifier); mutex_unlock(&dev_pm_qos_mtx); return retval; @@ -543,7 +542,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, if (ancestor) ret = dev_pm_qos_add_request(ancestor, req, - DEV_PM_QOS_LATENCY, value); + DEV_PM_QOS_RESUME_LATENCY, value); if (ret < 0) req->dev = NULL; @@ -559,9 +558,9 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, struct dev_pm_qos_request *req = NULL; switch(type) { - case DEV_PM_QOS_LATENCY: - req = dev->power.qos->latency_req; - dev->power.qos->latency_req = NULL; + case DEV_PM_QOS_RESUME_LATENCY: + req = dev->power.qos->resume_latency_req; + dev->power.qos->resume_latency_req = NULL; break; case DEV_PM_QOS_FLAGS: req = dev->power.qos->flags_req; @@ -597,7 +596,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (!req) return -ENOMEM; - ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); + ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value); if (ret < 0) { kfree(req); return ret; @@ -609,7 +608,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (IS_ERR_OR_NULL(dev->power.qos)) ret = -ENODEV; - else if (dev->power.qos->latency_req) + else if (dev->power.qos->resume_latency_req) ret = -EEXIST; if (ret < 0) { @@ -618,13 +617,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) mutex_unlock(&dev_pm_qos_mtx); goto out; } - dev->power.qos->latency_req = req; + dev->power.qos->resume_latency_req = req; mutex_unlock(&dev_pm_qos_mtx); - ret = pm_qos_sysfs_add_latency(dev); + ret = pm_qos_sysfs_add_resume_latency(dev); if (ret) - dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); out: mutex_unlock(&dev_pm_qos_sysfs_mtx); @@ -634,8 +633,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); static void __dev_pm_qos_hide_latency_limit(struct device *dev) { - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req) + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); } /** @@ -646,7 +645,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev) { mutex_lock(&dev_pm_qos_sysfs_mtx); - pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_resume_latency(dev); mutex_lock(&dev_pm_qos_mtx); __dev_pm_qos_hide_latency_limit(dev); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 03e089ade5ce..4e24955aac8a 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, autosuspend_delay_ms_store); -static ssize_t pm_qos_latency_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t pm_qos_resume_latency_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev)); + return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev)); } -static ssize_t pm_qos_latency_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t n) +static ssize_t pm_qos_resume_latency_store(struct 
device *dev, + struct device_attribute *attr, + const char *buf, size_t n) { s32 value; int ret; @@ -237,12 +238,13 @@ static ssize_t pm_qos_latency_store(struct device *dev, if (value < 0) return -EINVAL; - ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value); + ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, + value); return ret < 0 ? ret : n; } static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, - pm_qos_latency_show, pm_qos_latency_store); + pm_qos_resume_latency_show, pm_qos_resume_latency_store); static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, @@ -618,15 +620,15 @@ static struct attribute_group pm_runtime_attr_group = { .attrs = runtime_attrs, }; -static struct attribute *pm_qos_latency_attrs[] = { +static struct attribute *pm_qos_resume_latency_attrs[] = { #ifdef CONFIG_PM_RUNTIME &dev_attr_pm_qos_resume_latency_us.attr, #endif /* CONFIG_PM_RUNTIME */ NULL, }; -static struct attribute_group pm_qos_latency_attr_group = { +static struct attribute_group pm_qos_resume_latency_attr_group = { .name = power_group_name, - .attrs = pm_qos_latency_attrs, + .attrs = pm_qos_resume_latency_attrs, }; static struct attribute *pm_qos_flags_attrs[] = { @@ -681,14 +683,14 @@ void wakeup_sysfs_remove(struct device *dev) sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); } -int pm_qos_sysfs_add_latency(struct device *dev) +int pm_qos_sysfs_add_resume_latency(struct device *dev) { - return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); + return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } -void pm_qos_sysfs_remove_latency(struct device *dev) +void pm_qos_sysfs_remove_resume_latency(struct device *dev) { - sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group); + sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } int pm_qos_sysfs_add_flags(struct device *dev) diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index d72783dd7b96..c0670237e7a2 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -897,7 +897,7 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr) if (!flctl->qos_request) { ret = dev_pm_qos_add_request(&flctl->pdev->dev, &flctl->pm_qos, - DEV_PM_QOS_LATENCY, + DEV_PM_QOS_RESUME_LATENCY, 100); if (ret < 0) dev_err(&flctl->pdev->dev, diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 5a95013905c8..88a3680ae74c 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -32,7 +32,7 @@ enum pm_qos_flags_status { #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 -#define PM_QOS_DEV_LAT_DEFAULT_VALUE 0 +#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0 #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1) @@ -49,7 +49,7 @@ struct pm_qos_flags_request { }; enum dev_pm_qos_req_type { - DEV_PM_QOS_LATENCY = 1, + DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_FLAGS, }; @@ -87,9 +87,9 @@ struct pm_qos_flags { }; struct dev_pm_qos { - struct pm_qos_constraints latency; + struct pm_qos_constraints resume_latency; struct pm_qos_flags flags; - struct dev_pm_qos_request *latency_req; + struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *flags_req; }; @@ -196,9 +196,9 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 value); void dev_pm_qos_hide_flags(struct device *dev); int dev_pm_qos_update_flags(struct device 
*dev, s32 mask, bool set); -static inline s32 dev_pm_qos_requested_latency(struct device *dev) +static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { - return dev->power.qos->latency_req->data.pnode.prio; + return dev->power.qos->resume_latency_req->data.pnode.prio; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) @@ -215,7 +215,7 @@ static inline void dev_pm_qos_hide_flags(struct device *dev) {} static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) { return 0; } -static inline s32 dev_pm_qos_requested_latency(struct device *dev) { return 0; } +static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } #endif
diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 9e9475c85de5..f6df9868ea0c 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -412,8 +412,8 @@ DECLARE_EVENT_CLASS(dev_pm_qos_request, TP_printk("device=%s type=%s new_value=%d", __get_str(name), __print_symbolic(__entry->type, - { DEV_PM_QOS_LATENCY, "DEV_PM_QOS_LATENCY" }, - { DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }), + { DEV_PM_QOS_RESUME_LATENCY, "DEV_PM_QOS_RESUME_LATENCY" }, + { DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }), __entry->new_value) ); -- cgit v1.2.3
From 327adaedf2218b0e318eb393aa79cf2be64c199f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Feb 2014 00:35:29 +0100 Subject: PM / QoS: Add no_constraint_value field to struct pm_qos_constraints Add a new field, no_constraint_value, to struct pm_qos_constraints representing a list of PM QoS constraint requests to be returned by pm_qos_get_value() when that list of requests is empty. That field will be equal to default_value for all of the existing global PM QoS classes and for the resume latency device PM QoS type, but it will be different from default_value for the new latency tolerance device PM QoS type introduced by the next changeset. Signed-off-by: Rafael J.
Wysocki --- drivers/base/power/qos.c | 1 + include/linux/pm_qos.h | 1 + kernel/power/qos.c | 5 ++++- 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include/linux')
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 67c0f4219b02..c754e55f9dcb 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -190,6 +190,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) plist_head_init(&c->list); c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->type = PM_QOS_MIN; c->notifiers = n;
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 88a3680ae74c..2d8ce50877d8 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -77,6 +77,7 @@ struct pm_qos_constraints { struct plist_head list; s32 target_value; /* Do not change to 64 bit */ s32 default_value; + s32 no_constraint_value; enum pm_qos_type type; struct blocking_notifier_head *notifiers; };
diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 8dff9b48075a..e23ae38e647f 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -66,6 +66,7 @@ static struct pm_qos_constraints cpu_dma_constraints = { .list = PLIST_HEAD_INIT(cpu_dma_constraints.list), .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, + .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, .type = PM_QOS_MIN, .notifiers = &cpu_dma_lat_notifier, }; @@ -79,6 +80,7 @@ static struct pm_qos_constraints network_lat_constraints = { .list = PLIST_HEAD_INIT(network_lat_constraints.list), .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, + .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, .type = PM_QOS_MIN, .notifiers = &network_lat_notifier, }; @@ -93,6 +95,7 @@ static struct pm_qos_constraints network_tput_constraints = { .list = PLIST_HEAD_INIT(network_tput_constraints.list), .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, + .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, .type = PM_QOS_MAX, .notifiers = &network_throughput_notifier, }; @@ -128,7 +131,7 @@ static const struct file_operations pm_qos_power_fops = { static inline int pm_qos_get_value(struct pm_qos_constraints *c) { if (plist_head_empty(&c->list)) - return c->default_value; + return c->no_constraint_value; switch (c->type) { case PM_QOS_MIN: -- cgit v1.2.3
From 2d984ad132a87ca2112f81f21039493176a8bca0 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Feb 2014 00:35:38 +0100 Subject: PM / QoS: Introduce latency tolerance device PM QoS type Add a new latency tolerance device PM QoS type to be used for specifying active state (RPM_ACTIVE) memory access (DMA) latency tolerance requirements for devices. It may be used to prevent hardware from choosing overly aggressive energy-saving operation modes (causing too much latency to appear) for the whole platform. This feature requires hardware support, so it will only be available for devices that have the new .set_latency_tolerance() callback in struct dev_pm_info populated, in which case the routine pointed to by it should implement whatever is necessary to transfer the effective requirement value to the hardware.
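For illustration only (this sketch is not part of the patch): a driver that owns such a hardware latency tolerance register might wire up the callback roughly as follows. The foo_* names, register bits and helpers are hypothetical; only dev->power.set_latency_tolerance, the s32 value convention and PM_QOS_LATENCY_ANY come from this changeset.

#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm_qos.h>

/* Hypothetical register interface; a real driver would use its hardware's own. */
#define FOO_LTR_AUTO		(1U << 31)	/* hardware manages tolerance itself */
#define FOO_LTR_MANUAL		(1U << 30)	/* software-programmed tolerance */
#define FOO_LTR_MAX_US		0x3fffffffU	/* largest programmable value */
#define FOO_LTR_US(us)		((u32)(us) & 0x3fffffffU)

struct foo_dev {
	void __iomem *ltr_reg;		/* latency tolerance control register */
};

static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (val < 0) {
		/* Request list is empty: let the hardware manage LTR autonomously. */
		writel(FOO_LTR_AUTO, foo->ltr_reg);
	} else if (val == PM_QOS_LATENCY_ANY) {
		/* "No requirement", but keep autonomous updates disabled. */
		writel(FOO_LTR_MANUAL | FOO_LTR_MAX_US, foo->ltr_reg);
	} else {
		/* Program the effective tolerance (in microseconds) directly. */
		writel(FOO_LTR_MANUAL | FOO_LTR_US(val), foo->ltr_reg);
	}
}

static void foo_enable_latency_tolerance(struct foo_dev *foo, struct device *dev)
{
	dev_set_drvdata(dev, foo);
	/* Opting in here is what makes the new PM QoS type usable for the device. */
	dev->power.set_latency_tolerance = foo_set_latency_tolerance;
}

With the callback in place, kernel code can add DEV_PM_QOS_LATENCY_TOLERANCE requests for the device, and user space can use the pm_qos_latency_tolerance_us attribute described below.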
Whenever the effective latency tolerance changes for the device, its .set_latency_tolerance() callback will be executed and the effective value will be passed to it. If that value is negative, which means that the list of latency tolerance requirements for the device is empty, the callback is expected to switch the underlying hardware latency tolerance control mechanism to an autonomous mode if available. If that value is PM_QOS_LATENCY_ANY, in turn, and the hardware supports a special "no requirement" setting, the callback is expected to use it. That allows software to prevent the hardware from automatically updating the device's latency tolerance in response to its power state changes (e.g. during transitions from D3cold to D0), which generally may be done in the autonomous latency tolerance control mode. If .set_latency_tolerance() is present for the device, a new pm_qos_latency_tolerance_us attribute will be present in the device's power directory in sysfs. Then, user space can use that attribute to specify its latency tolerance requirement for the device, if any. Writing "any" to it means "no requirement, but do not let the hardware control latency tolerance" and writing "auto" to it allows the hardware to be switched to the autonomous mode if there are no other requirements from the kernel side in the device's list. This changeset includes a fix from Mika Westerberg. Signed-off-by: Rafael J. Wysocki --- Documentation/ABI/testing/sysfs-devices-power | 27 ++++- Documentation/power/pm_qos_interface.txt | 59 +++++++++-- drivers/base/power/qos.c | 144 ++++++++++++++++++++++---- drivers/base/power/sysfs.c | 65 ++++++++++-- include/linux/pm.h | 1 + include/linux/pm_qos.h | 12 +++ kernel/power/qos.c | 13 ++- 7 files changed, 277 insertions(+), 44 deletions(-) (limited to 'include/linux')
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power index efe449bdf811..7dbf96b724ed 100644 --- a/Documentation/ABI/testing/sysfs-devices-power +++ b/Documentation/ABI/testing/sysfs-devices-power @@ -187,7 +187,7 @@ Description: Not all drivers support this attribute. If it isn't supported, attempts to read or write it will yield I/O errors. -What: /sys/devices/.../power/pm_qos_latency_us +What: /sys/devices/.../power/pm_qos_resume_latency_us Date: March 2012 Contact: Rafael J. Wysocki Description: @@ -205,6 +205,31 @@ Description: This attribute has no effect on system-wide suspend/resume and hibernation. +What: /sys/devices/.../power/pm_qos_latency_tolerance_us +Date: January 2014 +Contact: Rafael J. Wysocki +Description: + The /sys/devices/.../power/pm_qos_latency_tolerance_us attribute + contains the PM QoS active state latency tolerance limit for the + given device in microseconds. That is the maximum memory access + latency the device can suffer without any visible adverse + effects on user space functionality. If that value is the + string "any", the latency does not matter to user space at all, + but hardware should not be allowed to set the latency tolerance + for the device automatically. + + Reading "auto" from this file means that the maximum memory + access latency for the device may be determined automatically + by the hardware as needed. Writing "auto" to it allows the + hardware to be switched to this mode if there are no other + latency tolerance requirements from the kernel side. + + This attribute is only present if the feature controlled by it + is supported by the hardware.
+ + This attribute has no effect on runtime suspend and resume of + devices and on system-wide suspend/resume and hibernation. + What: /sys/devices/.../power/pm_qos_no_power_off Date: September 2012 Contact: Rafael J. Wysocki diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt index 22cb8f51182a..ed743bbad87c 100644 --- a/Documentation/power/pm_qos_interface.txt +++ b/Documentation/power/pm_qos_interface.txt @@ -88,17 +88,19 @@ node. 2. PM QoS per-device latency and flags framework -For each device, there are two lists of PM QoS requests. One is maintained -along with the aggregated target of resume latency value and the other is for -PM QoS flags. Values are updated in response to changes of the request list. +For each device, there are three lists of PM QoS requests. Two of them are +maintained along with the aggregated targets of resume latency and active +state latency tolerance (in microseconds) and the third one is for PM QoS flags. +Values are updated in response to changes of the request list. -Target resume latency value is simply the minimum of the request values held in -the parameter list elements. The PM QoS flags aggregate value is a gather -(bitwise OR) of all list elements' values. Two device PM QoS flags are defined -currently: PM_QOS_FLAG_NO_POWER_OFF and PM_QOS_FLAG_REMOTE_WAKEUP. +The target values of resume latency and active state latency tolerance are +simply the minimum of the request values held in the parameter list elements. +The PM QoS flags aggregate value is a gather (bitwise OR) of all list elements' +values. Two device PM QoS flags are defined currently: PM_QOS_FLAG_NO_POWER_OFF +and PM_QOS_FLAG_REMOTE_WAKEUP. -Note: the aggregated target value is implemented in such a way that reading the -aggregated value does not require any locking mechanism. +Note: The aggregated target values are implemented in such a way that reading +the aggregated value does not require any locking mechanism. From kernel mode the use of this interface is the following: @@ -177,3 +179,42 @@ The callback is called when the aggregated value for any device is changed int dev_pm_qos_remove_global_notifier(notifier): Removes the notification callback function from the global notification tree of the framework. + + +Active state latency tolerance + +This device PM QoS type is used to support systems in which hardware may switch +to energy-saving operation modes on the fly. In those systems, if the operation +mode chosen by the hardware attempts to save energy in an overly aggressive way, +it may cause excess latencies to be visible to software, causing it to miss +certain protocol requirements or target frame or sample rates etc. + +If there is a latency tolerance control mechanism for a given device available +to software, the .set_latency_tolerance callback in that device's dev_pm_info +structure should be populated. The routine pointed to by it is should implement +whatever is necessary to transfer the effective requirement value to the +hardware. + +Whenever the effective latency tolerance changes for the device, its +.set_latency_tolerance() callback will be executed and the effective value will +be passed to it. If that value is negative, which means that the list of +latency tolerance requirements for the device is empty, the callback is expected +to switch the underlying hardware latency tolerance control mechanism to an +autonomous mode if available. 
If that value is PM_QOS_LATENCY_ANY, in turn, and +the hardware supports a special "no requirement" setting, the callback is +expected to use it. That allows software to prevent the hardware from +automatically updating the device's latency tolerance in response to its power +state changes (e.g. during transitions from D3cold to D0), which generally may +be done in the autonomous latency tolerance control mode. + +If .set_latency_tolerance() is present for the device, sysfs attribute +pm_qos_latency_tolerance_us will be present in the devivce's power directory. +Then, user space can use that attribute to specify its latency tolerance +requirement for the device, if any. Writing "any" to it means "no requirement, +but do not let the hardware control latency tolerance" and writing "auto" to it +allows the hardware to be switched to the autonomous mode if there are no other +requirements from the kernel side in the device's list. + +Kernel code can use the functions described above along with the +DEV_PM_QOS_LATENCY_TOLERANCE device PM QoS type to add, remove and update +latency tolerance requirements for devices. diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index c754e55f9dcb..84756f7f09d9 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -151,6 +151,14 @@ static int apply_constraint(struct dev_pm_qos_request *req, req); } break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + ret = pm_qos_update_target(&qos->latency_tolerance, + &req->data.pnode, action, value); + if (ret) { + value = pm_qos_read_value(&qos->latency_tolerance); + req->dev->power.set_latency_tolerance(req->dev, value); + } + break; case DEV_PM_QOS_FLAGS: ret = pm_qos_update_flags(&qos->flags, &req->data.flr, action, value); @@ -194,6 +202,13 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) c->type = PM_QOS_MIN; c->notifiers = n; + c = &qos->latency_tolerance; + plist_head_init(&c->list); + c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + c->type = PM_QOS_MIN; + INIT_LIST_HEAD(&qos->flags.list); spin_lock_irq(&dev->power.lock); @@ -247,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); memset(req, 0, sizeof(*req)); } + c = &qos->latency_tolerance; + plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } f = &qos->flags; list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); @@ -266,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev) mutex_unlock(&dev_pm_qos_sysfs_mtx); } +static bool dev_pm_qos_invalid_request(struct device *dev, + struct dev_pm_qos_request *req) +{ + return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE + && !dev->power.set_latency_tolerance); +} + +static int __dev_pm_qos_add_request(struct device *dev, + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) +{ + int ret = 0; + + if (!dev || dev_pm_qos_invalid_request(dev, req)) + return -EINVAL; + + if (WARN(dev_pm_qos_request_active(req), + "%s() called for already added request\n", __func__)) + return -EINVAL; + + if (IS_ERR(dev->power.qos)) + ret = -ENODEV; + else if (!dev->power.qos) + ret = dev_pm_qos_constraints_allocate(dev); + + 
trace_dev_pm_qos_add_request(dev_name(dev), type, value); + if (!ret) { + req->dev = dev; + req->type = type; + ret = apply_constraint(req, PM_QOS_ADD_REQ, value); + } + return ret; +} + /** * dev_pm_qos_add_request - inserts new qos request into the list * @dev: target device for the constraint @@ -291,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) { - int ret = 0; - - if (!dev || !req) /*guard against callers passing in null */ - return -EINVAL; - - if (WARN(dev_pm_qos_request_active(req), - "%s() called for already added request\n", __func__)) - return -EINVAL; + int ret; mutex_lock(&dev_pm_qos_mtx); - - if (IS_ERR(dev->power.qos)) - ret = -ENODEV; - else if (!dev->power.qos) - ret = dev_pm_qos_constraints_allocate(dev); - - trace_dev_pm_qos_add_request(dev_name(dev), type, value); - if (!ret) { - req->dev = dev; - req->type = type; - ret = apply_constraint(req, PM_QOS_ADD_REQ, value); - } - + ret = __dev_pm_qos_add_request(dev, req, type, value); mutex_unlock(&dev_pm_qos_mtx); - return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); @@ -343,6 +377,7 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, switch(req->type) { case DEV_PM_QOS_RESUME_LATENCY: + case DEV_PM_QOS_LATENCY_TOLERANCE: curr_value = req->data.pnode.prio; break; case DEV_PM_QOS_FLAGS: @@ -563,6 +598,10 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, req = dev->power.qos->resume_latency_req; dev->power.qos->resume_latency_req = NULL; break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + req = dev->power.qos->latency_tolerance_req; + dev->power.qos->latency_tolerance_req = NULL; + break; case DEV_PM_QOS_FLAGS: req = dev->power.qos->flags_req; dev->power.qos->flags_req = NULL; @@ -768,6 +807,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) pm_runtime_put(dev); return ret; } + +/** + * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance. + * @dev: Device to obtain the user space latency tolerance for. + */ +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) +{ + s32 ret; + + mutex_lock(&dev_pm_qos_mtx); + ret = IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req ? + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT : + dev->power.qos->latency_tolerance_req->data.pnode.prio; + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} + +/** + * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance. + * @dev: Device to update the user space latency tolerance for. + * @val: New user space latency tolerance for @dev (negative values disable). 
+ */ +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) +{ + int ret; + + mutex_lock(&dev_pm_qos_mtx); + + if (IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req) { + struct dev_pm_qos_request *req; + + if (val < 0) { + ret = -EINVAL; + goto out; + } + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto out; + } + ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val); + if (ret < 0) { + kfree(req); + goto out; + } + dev->power.qos->latency_tolerance_req = req; + } else { + if (val < 0) { + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE); + ret = 0; + } else { + ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val); + } + } + + out: + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} #else /* !CONFIG_PM_RUNTIME */ static void __dev_pm_qos_hide_latency_limit(struct device *dev) {} static void __dev_pm_qos_hide_flags(struct device *dev) {} diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 4e24955aac8a..95b181d1ca6d 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -246,6 +246,40 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev, static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, pm_qos_resume_latency_show, pm_qos_resume_latency_store); +static ssize_t pm_qos_latency_tolerance_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s32 value = dev_pm_qos_get_user_latency_tolerance(dev); + + if (value < 0) + return sprintf(buf, "auto\n"); + else if (value == PM_QOS_LATENCY_ANY) + return sprintf(buf, "any\n"); + + return sprintf(buf, "%d\n", value); +} + +static ssize_t pm_qos_latency_tolerance_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) +{ + s32 value; + int ret; + + if (kstrtos32(buf, 0, &value)) { + if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n")) + value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) + value = PM_QOS_LATENCY_ANY; + } + ret = dev_pm_qos_update_user_latency_tolerance(dev, value); + return ret < 0 ? 
ret : n; +} + +static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644, + pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store); + static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -631,6 +665,17 @@ static struct attribute_group pm_qos_resume_latency_attr_group = { .attrs = pm_qos_resume_latency_attrs, }; +static struct attribute *pm_qos_latency_tolerance_attrs[] = { +#ifdef CONFIG_PM_RUNTIME + &dev_attr_pm_qos_latency_tolerance_us.attr, +#endif /* CONFIG_PM_RUNTIME */ + NULL, +}; +static struct attribute_group pm_qos_latency_tolerance_attr_group = { + .name = power_group_name, + .attrs = pm_qos_latency_tolerance_attrs, +}; + static struct attribute *pm_qos_flags_attrs[] = { #ifdef CONFIG_PM_RUNTIME &dev_attr_pm_qos_no_power_off.attr, @@ -656,18 +701,23 @@ int dpm_sysfs_add(struct device *dev) if (rc) goto err_out; } - if (device_can_wakeup(dev)) { rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); - if (rc) { - if (pm_runtime_callbacks_present(dev)) - sysfs_unmerge_group(&dev->kobj, - &pm_runtime_attr_group); - goto err_out; - } + if (rc) + goto err_runtime; + } + if (dev->power.set_latency_tolerance) { + rc = sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); + if (rc) + goto err_wakeup; } return 0; + err_wakeup: + sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + err_runtime: + sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); err_out: sysfs_remove_group(&dev->kobj, &pm_attr_group); return rc; @@ -710,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev) void dpm_sysfs_remove(struct device *dev) { + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); dev_pm_qos_constraints_destroy(dev); rpm_sysfs_remove(dev); sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); diff --git a/include/linux/pm.h b/include/linux/pm.h index 8c6583a53a06..db2be5f3e030 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -582,6 +582,7 @@ struct dev_pm_info { unsigned long accounting_timestamp; #endif struct pm_subsys_data *subsys_data; /* Owned by the subsystem. 
*/ + void (*set_latency_tolerance)(struct device *, s32); struct dev_pm_qos *qos; }; diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 2d8ce50877d8..0b476019be55 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -33,6 +33,9 @@ enum pm_qos_flags_status { #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0 +#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 +#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) +#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1)) #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1) @@ -50,6 +53,7 @@ struct pm_qos_flags_request { enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, + DEV_PM_QOS_LATENCY_TOLERANCE, DEV_PM_QOS_FLAGS, }; @@ -89,8 +93,10 @@ struct pm_qos_flags { struct dev_pm_qos { struct pm_qos_constraints resume_latency; + struct pm_qos_constraints latency_tolerance; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; + struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; }; @@ -196,6 +202,8 @@ void dev_pm_qos_hide_latency_limit(struct device *dev); int dev_pm_qos_expose_flags(struct device *dev, s32 value); void dev_pm_qos_hide_flags(struct device *dev); int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { @@ -215,6 +223,10 @@ static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value) static inline void dev_pm_qos_hide_flags(struct device *dev) {} static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) { return 0; } +static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) + { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } +static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) + { return 0; } static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } diff --git a/kernel/power/qos.c b/kernel/power/qos.c index e23ae38e647f..884b77058864 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -173,6 +173,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, { unsigned long flags; int prev_value, curr_value, new_value; + int ret; spin_lock_irqsave(&pm_qos_lock, flags); prev_value = pm_qos_get_value(c); @@ -208,13 +209,15 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, trace_pm_qos_update_target(action, prev_value, curr_value); if (prev_value != curr_value) { - blocking_notifier_call_chain(c->notifiers, - (unsigned long)curr_value, - NULL); - return 1; + ret = 1; + if (c->notifiers) + blocking_notifier_call_chain(c->notifiers, + (unsigned long)curr_value, + NULL); } else { - return 0; + ret = 0; } + return ret; } /** -- cgit v1.2.3 From 71d821fdaec08afcbfb3cf258c0d64ea0e336ff3 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Tue, 11 Feb 2014 00:36:00 +0100 Subject: PM / QoS: Add type to dev_pm_qos_add_ancestor_request() arguments Rework dev_pm_qos_add_ancestor_request() so that device PM QoS type is passed to it as the third argument and make it support the DEV_PM_QOS_LATENCY_TOLERANCE device PM QoS type (in addition to DEV_PM_QOS_RESUME_LATENCY). That will allow the drivers of devices without latency tolerance hardware support to use their ancestors having it as proxies for their latency tolerance requirements. Signed-off-by: Rafael J. Wysocki --- Documentation/power/pm_qos_interface.txt | 6 ++++-- drivers/base/power/qos.c | 22 +++++++++++++++++----- drivers/input/touchscreen/st1232.c | 3 ++- include/linux/pm_qos.h | 7 +++++-- 4 files changed, 28 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt index ed743bbad87c..a5da5c7e7128 100644 --- a/Documentation/power/pm_qos_interface.txt +++ b/Documentation/power/pm_qos_interface.txt @@ -134,9 +134,11 @@ The meaning of the return values is as follows: PM_QOS_FLAGS_UNDEFINED: The device's PM QoS structure has not been initialized or the list of requests is empty. -int dev_pm_qos_add_ancestor_request(dev, handle, value) +int dev_pm_qos_add_ancestor_request(dev, handle, type, value) Add a PM QoS request for the first direct ancestor of the given device whose -power.ignore_children flag is unset. +power.ignore_children flag is unset (for DEV_PM_QOS_RESUME_LATENCY requests) +or whose power.set_latency_tolerance callback pointer is not NULL (for +DEV_PM_QOS_LATENCY_TOLERANCE requests). int dev_pm_qos_expose_latency_limit(device, value) Add a request to the device's PM QoS list of resume latency constraints and diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 84756f7f09d9..36b9eb4862cb 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -565,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. * @dev: Device whose ancestor to add the request for. * @req: Pointer to the preallocated handle. + * @type: Type of the request. * @value: Constraint latency value. */ int dev_pm_qos_add_ancestor_request(struct device *dev, - struct dev_pm_qos_request *req, s32 value) + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) { struct device *ancestor = dev->parent; int ret = -ENODEV; - while (ancestor && !ancestor->power.ignore_children) - ancestor = ancestor->parent; + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + while (ancestor && !ancestor->power.ignore_children) + ancestor = ancestor->parent; + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + while (ancestor && !ancestor->power.set_latency_tolerance) + ancestor = ancestor->parent; + + break; + default: + ancestor = NULL; + } if (ancestor) - ret = dev_pm_qos_add_request(ancestor, req, - DEV_PM_QOS_RESUME_LATENCY, value); + ret = dev_pm_qos_add_request(ancestor, req, type, value); if (ret < 0) req->dev = NULL; diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c index 5c342b3139e8..3c0f57efe7b1 100644 --- a/drivers/input/touchscreen/st1232.c +++ b/drivers/input/touchscreen/st1232.c @@ -134,7 +134,8 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id) } else if (!ts->low_latency_req.dev) { /* First contact, request 100 us latency. 
*/ dev_pm_qos_add_ancestor_request(&ts->client->dev, - &ts->low_latency_req, 100); + &ts->low_latency_req, + DEV_PM_QOS_RESUME_LATENCY, 100); } /* SYN_REPORT */ diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 0b476019be55..9ab4bf7c4646 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -149,7 +149,8 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, - struct dev_pm_qos_request *req, s32 value); + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value); #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) @@ -192,7 +193,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev) dev->power.power_state = PMSG_INVALID; } static inline int dev_pm_qos_add_ancestor_request(struct device *dev, - struct dev_pm_qos_request *req, s32 value) + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, + s32 value) { return 0; } #endif -- cgit v1.2.3 From 9fe21fdc5f3d3aa7d6e78ad25668e234330b6974 Mon Sep 17 00:00:00 2001 From: Alexander Shiyan Date: Sat, 21 Dec 2013 15:08:00 +0400 Subject: video: imxfb: Use regulator API with LCD class for powering This patch replaces custom lcd_power() callback with regulator API over LCD class. Signed-off-by: Alexander Shiyan Acked-by: Shawn Guo Signed-off-by: Tomi Valkeinen --- .../devicetree/bindings/video/fsl,imx-fb.txt | 1 + arch/arm/mach-imx/mach-mx27ads.c | 55 +++++++++++++++-- drivers/video/imxfb.c | 71 +++++++++++++++++++--- include/linux/platform_data/video-imxfb.h | 1 - 4 files changed, 114 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/video/fsl,imx-fb.txt b/Documentation/devicetree/bindings/video/fsl,imx-fb.txt index 46da08db186a..e6b1ee9b8e2e 100644 --- a/Documentation/devicetree/bindings/video/fsl,imx-fb.txt +++ b/Documentation/devicetree/bindings/video/fsl,imx-fb.txt @@ -15,6 +15,7 @@ Required nodes: - fsl,pcr: LCDC PCR value Optional properties: +- lcd-supply: Regulator for LCD supply voltage. - fsl,dmacr: DMA Control Register value. This is optional. By default, the register is not modified as recommended by the datasheet. - fsl,lscr1: LCDC Sharp Configuration Register value. 
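The commit above replaces imxfb's board-specific lcd_power() hook with a named "lcd" supply, so the driver can power the panel through the regulator framework and expose it via the LCD class. As a rough sketch of the consumer side of that scheme (all names such as my_fb_info and my_fb_lcd_set_power are hypothetical and are not the actual imxfb code; the real implementation in the diffs below may differ in detail), a framebuffer driver would look up the supply and toggle it from an LCD-class set_power() callback:

#include <linux/err.h>
#include <linux/fb.h>
#include <linux/lcd.h>
#include <linux/regulator/consumer.h>

struct my_fb_info {
	struct regulator *lcd_pwr;	/* matches "lcd-supply" in DT or REGULATOR_SUPPLY("lcd", ...) */
	bool lcd_pwr_enabled;		/* regulator enables are refcounted, so track our own state */
};

static int my_fb_lcd_set_power(struct lcd_device *lcddev, int power)
{
	struct my_fb_info *fbi = lcd_get_data(lcddev);
	bool on = (power == FB_BLANK_UNBLANK);
	int ret = 0;

	if (on && !fbi->lcd_pwr_enabled)
		ret = regulator_enable(fbi->lcd_pwr);	/* roughly the old lcd_power(1) */
	else if (!on && fbi->lcd_pwr_enabled)
		regulator_disable(fbi->lcd_pwr);	/* roughly the old lcd_power(0) */

	if (!ret)
		fbi->lcd_pwr_enabled = on;
	return ret;
}

static struct lcd_ops my_fb_lcd_ops = {
	.set_power = my_fb_lcd_set_power,
};

/* Called from the framebuffer driver's probe(); unregister on remove(). */
static int my_fb_register_lcd(struct device *dev, struct my_fb_info *fbi)
{
	struct lcd_device *lcddev;

	fbi->lcd_pwr = devm_regulator_get(dev, "lcd");
	if (IS_ERR(fbi->lcd_pwr))
		return PTR_ERR(fbi->lcd_pwr);

	lcddev = lcd_device_register("my-fb-lcd", dev, fbi, &my_fb_lcd_ops);
	return PTR_ERR_OR_ZERO(lcddev);
}

On the board side, the mx27ads changes below provide that "lcd" supply by wrapping the PBC LCDON register bit in a one-pin gpio_chip and handing it to a reg-fixed-voltage device, so no platform-specific power callback is needed any more.
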
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c index 9821b824dcaf..a7a4a9c67615 100644 --- a/arch/arm/mach-imx/mach-mx27ads.c +++ b/arch/arm/mach-imx/mach-mx27ads.c @@ -21,6 +21,10 @@ #include #include #include + +#include +#include + #include #include #include @@ -195,14 +199,58 @@ static const struct imxi2c_platform_data mx27ads_i2c1_data __initconst = { static struct i2c_board_info mx27ads_i2c_devices[] = { }; -void lcd_power(int on) +static void vgpio_set(struct gpio_chip *chip, unsigned offset, int value) { - if (on) + if (value) __raw_writew(PBC_BCTRL1_LCDON, PBC_BCTRL1_SET_REG); else __raw_writew(PBC_BCTRL1_LCDON, PBC_BCTRL1_CLEAR_REG); } +static int vgpio_dir_out(struct gpio_chip *chip, unsigned offset, int value) +{ + return 0; +} + +#define MX27ADS_LCD_GPIO (6 * 32) + +static struct regulator_consumer_supply mx27ads_lcd_regulator_consumer = + REGULATOR_SUPPLY("lcd", "imx-fb.0"); + +static struct regulator_init_data mx27ads_lcd_regulator_init_data = { + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, +}, + .consumer_supplies = &mx27ads_lcd_regulator_consumer, + .num_consumer_supplies = 1, +}; + +static struct fixed_voltage_config mx27ads_lcd_regulator_pdata = { + .supply_name = "LCD", + .microvolts = 3300000, + .gpio = MX27ADS_LCD_GPIO, + .init_data = &mx27ads_lcd_regulator_init_data, +}; + +static void __init mx27ads_regulator_init(void) +{ + struct gpio_chip *vchip; + + vchip = kzalloc(sizeof(*vchip), GFP_KERNEL); + vchip->owner = THIS_MODULE; + vchip->label = "LCD"; + vchip->base = MX27ADS_LCD_GPIO; + vchip->ngpio = 1; + vchip->direction_output = vgpio_dir_out; + vchip->set = vgpio_set; + gpiochip_add(vchip); + + platform_device_register_data(&platform_bus, "reg-fixed-voltage", + PLATFORM_DEVID_AUTO, + &mx27ads_lcd_regulator_pdata, + sizeof(mx27ads_lcd_regulator_pdata)); +} + static struct imx_fb_videomode mx27ads_modes[] = { { .mode = { @@ -239,8 +287,6 @@ static const struct imx_fb_platform_data mx27ads_fb_data __initconst = { .pwmr = 0x00A903FF, .lscr1 = 0x00120300, .dmacr = 0x00020010, - - .lcd_power = lcd_power, }; static int mx27ads_sdhc1_init(struct device *dev, irq_handler_t detect_irq, @@ -304,6 +350,7 @@ static void __init mx27ads_board_init(void) i2c_register_board_info(1, mx27ads_i2c_devices, ARRAY_SIZE(mx27ads_i2c_devices)); imx27_add_imx_i2c(1, &mx27ads_i2c1_data); + mx27ads_regulator_init(); imx27_add_imx_fb(&mx27ads_fb_data); imx27_add_mxc_mmc(0, &sdhc1_pdata); imx27_add_mxc_mmc(1, &sdhc2_pdata); diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c index 44ee678481d5..e50b67fada51 100644 --- a/drivers/video/imxfb.c +++ b/drivers/video/imxfb.c @@ -30,10 +30,13 @@ #include #include #include +#include #include #include #include +#include + #include