From fa8848f27895bd19e16aed77868f464be24034e6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 14 Oct 2015 14:17:11 +0100 Subject: drm/i915: Report context GTT size Since the beginning we have conflated the size of the global GTT with that of the per-process context sizes. In recent times (gen8+), those are no longer the same where the global GTT is limited to 2/4GiB but the per-process GTT may be anything up to 256TiB. Userspace knows nothing of this discrepancy and outside of one or two hacks, uses the getaperture ioctl to determine the maximum size it can use. Let's leave that as reporting the global GTT and use the context reporting method to describe the per-process value (which naturally fallsback to reporting the aliasing or global on older platforms, so userspace can always use this method where available). Testcase: igt/gem_userptr_blits/minor-normal-sync Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=90065 Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Signed-off-by: Daniel Vetter --- include/uapi/drm/i915_drm.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 484a9fb20479..67cebe6d978f 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1125,8 +1125,9 @@ struct drm_i915_gem_context_param { __u32 ctx_id; __u32 size; __u64 param; -#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 -#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 +#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 +#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 +#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 __u64 value; }; -- cgit v1.2.3 From be15aad6e8ec09d9be1a3a563b7b17ba592df942 Mon Sep 17 00:00:00 2001 From: David Henningsson Date: Fri, 16 Oct 2015 11:24:24 +0200 Subject: drm/i915: Improve kernel-doc for i915_audio_component struct Signed-off-by: David Henningsson Link: http://patchwork.freedesktop.org/patch/msgid/1444987464-8657-1-git-send-email-david.henningsson@canonical.com Signed-off-by: Daniel Vetter --- include/drm/i915_component.h | 69 ++++++++++++++++++++++++++++++++------------ 1 file changed, 51 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index 30d89e0da2c6..fab13851f95a 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -31,47 +31,80 @@ #define MAX_PORTS 5 /** - * struct i915_audio_component_ops - callbacks defined in gfx driver - * @owner: the module owner - * @get_power: get the POWER_DOMAIN_AUDIO power well - * @put_power: put the POWER_DOMAIN_AUDIO power well - * @codec_wake_override: Enable/Disable generating the codec wake signal - * @get_cdclk_freq: get the Core Display Clock in KHz - * @sync_audio_rate: set n/cts based on the sample rate + * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver */ struct i915_audio_component_ops { + /** + * @owner: i915 module + */ struct module *owner; + /** + * @get_power: get the POWER_DOMAIN_AUDIO power well + * + * Request the power well to be turned on. + */ void (*get_power)(struct device *); + /** + * @put_power: put the POWER_DOMAIN_AUDIO power well + * + * Allow the power well to be turned off. 
+ */ void (*put_power)(struct device *); + /** + * @codec_wake_override: Enable/disable codec wake signal + */ void (*codec_wake_override)(struct device *, bool enable); + /** + * @get_cdclk_freq: Get the Core Display Clock in kHz + */ int (*get_cdclk_freq)(struct device *); + /** + * @sync_audio_rate: set n/cts based on the sample rate + * + * Called from audio driver. After audio driver sets the + * sample rate, it will call this function to set n/cts + */ int (*sync_audio_rate)(struct device *, int port, int rate); }; +/** + * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver + */ struct i915_audio_component_audio_ops { + /** + * @audio_ptr: Pointer to be used in call to pin_eld_notify + */ void *audio_ptr; /** - * Call from i915 driver, notifying the HDA driver that - * pin sense and/or ELD information has changed. - * @audio_ptr: HDA driver object - * @port: Which port has changed (PORTA / PORTB / PORTC etc) + * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed + * + * Called when the i915 driver has set up audio pipeline or has just + * begun to tear it down. This allows the HDA driver to update its + * status accordingly (even when the HDA controller is in power save + * mode). */ void (*pin_eld_notify)(void *audio_ptr, int port); }; /** - * struct i915_audio_component - used for audio video interaction - * @dev: the device from gfx driver - * @aud_sample_rate: the array of audio sample rate per port - * @ops: callback for audio driver calling - * @audio_ops: Call from i915 driver + * struct i915_audio_component - Used for direct communication between i915 and hda drivers */ struct i915_audio_component { + /** + * @dev: i915 device, used as parameter for ops + */ struct device *dev; + /** + * @aud_sample_rate: the array of audio sample rate per port + */ int aud_sample_rate[MAX_PORTS]; - + /** + * @ops: Ops implemented by i915 driver, called by hda driver + */ const struct i915_audio_component_ops *ops; - + /** + * @audio_ops: Ops implemented by hda driver, called by i915 driver + */ const struct i915_audio_component_audio_ops *audio_ops; }; -- cgit v1.2.3 From e08e19c331fb249e6dc86365ee80d16045c4aeb1 Mon Sep 17 00:00:00 2001 From: "H. Nikolaus Schaller" Date: Fri, 16 Oct 2015 14:53:38 +0200 Subject: iio:adc: add iio driver for Palmas (twl6035/7) gpadc This driver code was found as: https://android.googlesource.com/kernel/tegra/+/aaabb2e045f31e5a970109ffdaae900dd403d17e/drivers/staging/iio/adc Fixed various compilation issues and test this driver on omap5 evm. Signed-off-by: Pradeep Goudagunta Signed-off-by: H. Nikolaus Schaller Signed-off-by: Marek Belisko Acked-by: Laxman Dewangan Reviewed-by: Jonathan Cameron Acked-by: Lee Jones Signed-off-by: Jonathan Cameron --- drivers/iio/adc/Kconfig | 8 + drivers/iio/adc/Makefile | 1 + drivers/iio/adc/palmas_gpadc.c | 817 +++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/palmas.h | 75 ++-- 4 files changed, 877 insertions(+), 24 deletions(-) create mode 100644 drivers/iio/adc/palmas_gpadc.c (limited to 'include') diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 7868c744fd4b..daad72e1266d 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -275,6 +275,14 @@ config NAU7802 To compile this driver as a module, choose M here: the module will be called nau7802. 
+config PALMAS_GPADC + tristate "TI Palmas General Purpose ADC" + depends on MFD_PALMAS + help + Palmas series pmic chip by Texas Instruments (twl6035/6037) + is used in smartphones and tablets and supports a 16 channel + general purpose ADC. + config QCOM_SPMI_IADC tristate "Qualcomm SPMI PMIC current ADC" depends on SPMI diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index 99b37a963a1e..11cfdfd76798 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_MCP320X) += mcp320x.o obj-$(CONFIG_MCP3422) += mcp3422.o obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o obj-$(CONFIG_NAU7802) += nau7802.o +obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c new file mode 100644 index 000000000000..71763c5da2ab --- /dev/null +++ b/drivers/iio/adc/palmas_gpadc.c @@ -0,0 +1,817 @@ +/* + * palmas-adc.c -- TI PALMAS GPADC. + * + * Copyright (c) 2013, NVIDIA Corporation. All rights reserved. + * + * Author: Pradeep Goudagunta + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MOD_NAME "palmas-gpadc" +#define PALMAS_ADC_CONVERSION_TIMEOUT (msecs_to_jiffies(5000)) +#define PALMAS_TO_BE_CALCULATED 0 +#define PALMAS_GPADC_TRIMINVALID -1 + +struct palmas_gpadc_info { +/* calibration codes and regs */ + int x1; /* lower ideal code */ + int x2; /* higher ideal code */ + int v1; /* expected lower volt reading */ + int v2; /* expected higher volt reading */ + u8 trim1_reg; /* register number for lower trim */ + u8 trim2_reg; /* register number for upper trim */ + int gain; /* calculated from above (after reading trim regs) */ + int offset; /* calculated from above (after reading trim regs) */ + int gain_error; /* calculated from above (after reading trim regs) */ + bool is_uncalibrated; /* if channel has calibration data */ +}; + +#define PALMAS_ADC_INFO(_chan, _x1, _x2, _v1, _v2, _t1, _t2, _is_uncalibrated) \ + [PALMAS_ADC_CH_##_chan] = { \ + .x1 = _x1, \ + .x2 = _x2, \ + .v1 = _v1, \ + .v2 = _v2, \ + .gain = PALMAS_TO_BE_CALCULATED, \ + .offset = PALMAS_TO_BE_CALCULATED, \ + .gain_error = PALMAS_TO_BE_CALCULATED, \ + .trim1_reg = PALMAS_GPADC_TRIM##_t1, \ + .trim2_reg = PALMAS_GPADC_TRIM##_t2, \ + .is_uncalibrated = _is_uncalibrated \ + } + +static struct palmas_gpadc_info palmas_gpadc_info[] = { + PALMAS_ADC_INFO(IN0, 2064, 3112, 630, 950, 1, 2, false), + PALMAS_ADC_INFO(IN1, 2064, 3112, 630, 950, 1, 2, false), + PALMAS_ADC_INFO(IN2, 2064, 3112, 1260, 1900, 3, 4, false), + PALMAS_ADC_INFO(IN3, 2064, 3112, 630, 950, 1, 2, false), + PALMAS_ADC_INFO(IN4, 2064, 3112, 630, 950, 1, 2, false), + PALMAS_ADC_INFO(IN5, 2064, 3112, 630, 950, 1, 2, false), + PALMAS_ADC_INFO(IN6, 2064, 3112, 2520, 3800, 5, 6, false), + PALMAS_ADC_INFO(IN7, 2064, 3112, 2520, 3800, 7, 8, false), + PALMAS_ADC_INFO(IN8, 2064, 3112, 3150, 4750, 9, 10, false), + PALMAS_ADC_INFO(IN9, 2064, 3112, 5670, 8550, 11, 12, false), + PALMAS_ADC_INFO(IN10, 2064, 3112, 3465, 5225, 13, 14, false), + PALMAS_ADC_INFO(IN11, 0, 0, 0, 0, INVALID, INVALID, true), + PALMAS_ADC_INFO(IN12, 0, 0, 0, 0, 
INVALID, INVALID, true), + PALMAS_ADC_INFO(IN13, 0, 0, 0, 0, INVALID, INVALID, true), + PALMAS_ADC_INFO(IN14, 2064, 3112, 3645, 5225, 15, 16, false), + PALMAS_ADC_INFO(IN15, 0, 0, 0, 0, INVALID, INVALID, true), +}; + +/** + * struct palmas_gpadc - the palmas_gpadc structure + * @ch0_current: channel 0 current source setting + * 0: 0 uA + * 1: 5 uA + * 2: 15 uA + * 3: 20 uA + * @ch3_current: channel 0 current source setting + * 0: 0 uA + * 1: 10 uA + * 2: 400 uA + * 3: 800 uA + * @extended_delay: enable the gpadc extended delay mode + * @auto_conversion_period: define the auto_conversion_period + * + * This is the palmas_gpadc structure to store run-time information + * and pointers for this driver instance. + */ + +struct palmas_gpadc { + struct device *dev; + struct palmas *palmas; + u8 ch0_current; + u8 ch3_current; + bool extended_delay; + int irq; + int irq_auto_0; + int irq_auto_1; + struct palmas_gpadc_info *adc_info; + struct completion conv_completion; + struct palmas_adc_wakeup_property wakeup1_data; + struct palmas_adc_wakeup_property wakeup2_data; + bool wakeup1_enable; + bool wakeup2_enable; + int auto_conversion_period; +}; + +/* + * GPADC lock issue in AUTO mode. + * Impact: In AUTO mode, GPADC conversion can be locked after disabling AUTO + * mode feature. + * Details: + * When the AUTO mode is the only conversion mode enabled, if the AUTO + * mode feature is disabled with bit GPADC_AUTO_CTRL. AUTO_CONV1_EN = 0 + * or bit GPADC_AUTO_CTRL. AUTO_CONV0_EN = 0 during a conversion, the + * conversion mechanism can be seen as locked meaning that all following + * conversion will give 0 as a result. Bit GPADC_STATUS.GPADC_AVAILABLE + * will stay at 0 meaning that GPADC is busy. An RT conversion can unlock + * the GPADC. + * + * Workaround(s): + * To avoid the lock mechanism, the workaround to follow before any stop + * conversion request is: + * Force the GPADC state machine to be ON by using the GPADC_CTRL1. + * GPADC_FORCE bit = 1 + * Shutdown the GPADC AUTO conversion using + * GPADC_AUTO_CTRL.SHUTDOWN_CONV[01] = 0. + * After 100us, force the GPADC state machine to be OFF by using the + * GPADC_CTRL1. 
GPADC_FORCE bit = 0 + */ + +static int palmas_disable_auto_conversion(struct palmas_gpadc *adc) +{ + int ret; + + ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_CTRL1, + PALMAS_GPADC_CTRL1_GPADC_FORCE, + PALMAS_GPADC_CTRL1_GPADC_FORCE); + if (ret < 0) { + dev_err(adc->dev, "GPADC_CTRL1 update failed: %d\n", ret); + return ret; + } + + ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_AUTO_CTRL, + PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1 | + PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0, + 0); + if (ret < 0) { + dev_err(adc->dev, "AUTO_CTRL update failed: %d\n", ret); + return ret; + } + + udelay(100); + + ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_CTRL1, + PALMAS_GPADC_CTRL1_GPADC_FORCE, 0); + if (ret < 0) + dev_err(adc->dev, "GPADC_CTRL1 update failed: %d\n", ret); + + return ret; +} + +static irqreturn_t palmas_gpadc_irq(int irq, void *data) +{ + struct palmas_gpadc *adc = data; + + complete(&adc->conv_completion); + + return IRQ_HANDLED; +} + +static irqreturn_t palmas_gpadc_irq_auto(int irq, void *data) +{ + struct palmas_gpadc *adc = data; + + dev_dbg(adc->dev, "Threshold interrupt %d occurs\n", irq); + palmas_disable_auto_conversion(adc); + + return IRQ_HANDLED; +} + +static int palmas_gpadc_start_mask_interrupt(struct palmas_gpadc *adc, + bool mask) +{ + int ret; + + if (!mask) + ret = palmas_update_bits(adc->palmas, PALMAS_INTERRUPT_BASE, + PALMAS_INT3_MASK, + PALMAS_INT3_MASK_GPADC_EOC_SW, 0); + else + ret = palmas_update_bits(adc->palmas, PALMAS_INTERRUPT_BASE, + PALMAS_INT3_MASK, + PALMAS_INT3_MASK_GPADC_EOC_SW, + PALMAS_INT3_MASK_GPADC_EOC_SW); + if (ret < 0) + dev_err(adc->dev, "GPADC INT MASK update failed: %d\n", ret); + + return ret; +} + +static int palmas_gpadc_enable(struct palmas_gpadc *adc, int adc_chan, + int enable) +{ + unsigned int mask, val; + int ret; + + if (enable) { + val = (adc->extended_delay + << PALMAS_GPADC_RT_CTRL_EXTEND_DELAY_SHIFT); + ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_RT_CTRL, + PALMAS_GPADC_RT_CTRL_EXTEND_DELAY, val); + if (ret < 0) { + dev_err(adc->dev, "RT_CTRL update failed: %d\n", ret); + return ret; + } + + mask = (PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_MASK | + PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_MASK | + PALMAS_GPADC_CTRL1_GPADC_FORCE); + val = (adc->ch0_current + << PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_SHIFT); + val |= (adc->ch3_current + << PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_SHIFT); + val |= PALMAS_GPADC_CTRL1_GPADC_FORCE; + ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_CTRL1, mask, val); + if (ret < 0) { + dev_err(adc->dev, + "Failed to update current setting: %d\n", ret); + return ret; + } + + mask = (PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_MASK | + PALMAS_GPADC_SW_SELECT_SW_CONV_EN); + val = (adc_chan | PALMAS_GPADC_SW_SELECT_SW_CONV_EN); + ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_SW_SELECT, mask, val); + if (ret < 0) { + dev_err(adc->dev, "SW_SELECT update failed: %d\n", ret); + return ret; + } + } else { + ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_SW_SELECT, 0); + if (ret < 0) + dev_err(adc->dev, "SW_SELECT write failed: %d\n", ret); + + ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_CTRL1, + PALMAS_GPADC_CTRL1_GPADC_FORCE, 0); + if (ret < 0) { + dev_err(adc->dev, "CTRL1 update failed: %d\n", ret); + return ret; + } + } + + return ret; +} + +static int palmas_gpadc_read_prepare(struct palmas_gpadc *adc, int adc_chan) +{ + int ret; + + ret = 
palmas_gpadc_enable(adc, adc_chan, true); + if (ret < 0) + return ret; + + return palmas_gpadc_start_mask_interrupt(adc, 0); +} + +static void palmas_gpadc_read_done(struct palmas_gpadc *adc, int adc_chan) +{ + palmas_gpadc_start_mask_interrupt(adc, 1); + palmas_gpadc_enable(adc, adc_chan, false); +} + +static int palmas_gpadc_calibrate(struct palmas_gpadc *adc, int adc_chan) +{ + int k; + int d1; + int d2; + int ret; + int gain; + int x1 = adc->adc_info[adc_chan].x1; + int x2 = adc->adc_info[adc_chan].x2; + int v1 = adc->adc_info[adc_chan].v1; + int v2 = adc->adc_info[adc_chan].v2; + + ret = palmas_read(adc->palmas, PALMAS_TRIM_GPADC_BASE, + adc->adc_info[adc_chan].trim1_reg, &d1); + if (ret < 0) { + dev_err(adc->dev, "TRIM read failed: %d\n", ret); + goto scrub; + } + + ret = palmas_read(adc->palmas, PALMAS_TRIM_GPADC_BASE, + adc->adc_info[adc_chan].trim2_reg, &d2); + if (ret < 0) { + dev_err(adc->dev, "TRIM read failed: %d\n", ret); + goto scrub; + } + + /* gain error calculation */ + k = (1000 + (1000 * (d2 - d1)) / (x2 - x1)); + + /* gain calculation */ + gain = ((v2 - v1) * 1000) / (x2 - x1); + + adc->adc_info[adc_chan].gain_error = k; + adc->adc_info[adc_chan].gain = gain; + /* offset Calculation */ + adc->adc_info[adc_chan].offset = (d1 * 1000) - ((k - 1000) * x1); + +scrub: + return ret; +} + +static int palmas_gpadc_start_conversion(struct palmas_gpadc *adc, int adc_chan) +{ + unsigned int val; + int ret; + + init_completion(&adc->conv_completion); + ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_SW_SELECT, + PALMAS_GPADC_SW_SELECT_SW_START_CONV0, + PALMAS_GPADC_SW_SELECT_SW_START_CONV0); + if (ret < 0) { + dev_err(adc->dev, "SELECT_SW_START write failed: %d\n", ret); + return ret; + } + + ret = wait_for_completion_timeout(&adc->conv_completion, + PALMAS_ADC_CONVERSION_TIMEOUT); + if (ret == 0) { + dev_err(adc->dev, "conversion not completed\n"); + return -ETIMEDOUT; + } + + ret = palmas_bulk_read(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_SW_CONV0_LSB, &val, 2); + if (ret < 0) { + dev_err(adc->dev, "SW_CONV0_LSB read failed: %d\n", ret); + return ret; + } + + ret = val & 0xFFF; + + return ret; +} + +static int palmas_gpadc_get_calibrated_code(struct palmas_gpadc *adc, + int adc_chan, int val) +{ + if (!adc->adc_info[adc_chan].is_uncalibrated) + val = (val*1000 - adc->adc_info[adc_chan].offset) / + adc->adc_info[adc_chan].gain_error; + + if (val < 0) { + dev_err(adc->dev, "Mismatch with calibration\n"); + return 0; + } + + val = (val * adc->adc_info[adc_chan].gain) / 1000; + + return val; +} + +static int palmas_gpadc_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, int *val2, long mask) +{ + struct palmas_gpadc *adc = iio_priv(indio_dev); + int adc_chan = chan->channel; + int ret = 0; + + if (adc_chan > PALMAS_ADC_CH_MAX) + return -EINVAL; + + mutex_lock(&indio_dev->mlock); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + case IIO_CHAN_INFO_PROCESSED: + ret = palmas_gpadc_read_prepare(adc, adc_chan); + if (ret < 0) + goto out; + + ret = palmas_gpadc_start_conversion(adc, adc_chan); + if (ret < 0) { + dev_err(adc->dev, + "ADC start conversion failed\n"); + goto out; + } + + if (mask == IIO_CHAN_INFO_PROCESSED) + ret = palmas_gpadc_get_calibrated_code( + adc, adc_chan, ret); + + *val = ret; + + ret = IIO_VAL_INT; + goto out; + } + + mutex_unlock(&indio_dev->mlock); + return ret; + +out: + palmas_gpadc_read_done(adc, adc_chan); + mutex_unlock(&indio_dev->mlock); + + return ret; +} + +static const struct iio_info 
palmas_gpadc_iio_info = { + .read_raw = palmas_gpadc_read_raw, + .driver_module = THIS_MODULE, +}; + +#define PALMAS_ADC_CHAN_IIO(chan, _type, chan_info) \ +{ \ + .datasheet_name = PALMAS_DATASHEET_NAME(chan), \ + .type = _type, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(chan_info), \ + .indexed = 1, \ + .channel = PALMAS_ADC_CH_##chan, \ +} + +static const struct iio_chan_spec palmas_gpadc_iio_channel[] = { + PALMAS_ADC_CHAN_IIO(IN0, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN1, IIO_TEMP, IIO_CHAN_INFO_RAW), + PALMAS_ADC_CHAN_IIO(IN2, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN3, IIO_TEMP, IIO_CHAN_INFO_RAW), + PALMAS_ADC_CHAN_IIO(IN4, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN5, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN6, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN7, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN8, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN9, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN10, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN11, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN12, IIO_TEMP, IIO_CHAN_INFO_RAW), + PALMAS_ADC_CHAN_IIO(IN13, IIO_TEMP, IIO_CHAN_INFO_RAW), + PALMAS_ADC_CHAN_IIO(IN14, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), + PALMAS_ADC_CHAN_IIO(IN15, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED), +}; + +static int palmas_gpadc_probe(struct platform_device *pdev) +{ + struct palmas_gpadc *adc; + struct palmas_platform_data *pdata; + struct palmas_gpadc_platform_data *gpadc_pdata = NULL; + struct iio_dev *indio_dev; + int ret, i; + + pdata = dev_get_platdata(pdev->dev.parent); + if (!pdata || !pdata->gpadc_pdata) { + dev_err(&pdev->dev, "No platform data\n"); + return -ENODEV; + } + + gpadc_pdata = pdata->gpadc_pdata; + + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc)); + if (!indio_dev) { + dev_err(&pdev->dev, "iio_device_alloc failed\n"); + return -ENOMEM; + } + + adc = iio_priv(indio_dev); + adc->dev = &pdev->dev; + adc->palmas = dev_get_drvdata(pdev->dev.parent); + adc->adc_info = palmas_gpadc_info; + init_completion(&adc->conv_completion); + dev_set_drvdata(&pdev->dev, indio_dev); + + adc->auto_conversion_period = gpadc_pdata->auto_conversion_period_ms; + adc->irq = palmas_irq_get_virq(adc->palmas, PALMAS_GPADC_EOC_SW_IRQ); + if (adc->irq < 0) { + dev_err(adc->dev, + "get virq failed: %d\n", adc->irq); + ret = adc->irq; + goto out; + } + ret = request_threaded_irq(adc->irq, NULL, + palmas_gpadc_irq, + IRQF_ONESHOT | IRQF_EARLY_RESUME, dev_name(adc->dev), + adc); + if (ret < 0) { + dev_err(adc->dev, + "request irq %d failed: %d\n", adc->irq, ret); + goto out; + } + + if (gpadc_pdata->adc_wakeup1_data) { + memcpy(&adc->wakeup1_data, gpadc_pdata->adc_wakeup1_data, + sizeof(adc->wakeup1_data)); + adc->wakeup1_enable = true; + adc->irq_auto_0 = platform_get_irq(pdev, 1); + ret = request_threaded_irq(adc->irq_auto_0, NULL, + palmas_gpadc_irq_auto, + IRQF_ONESHOT | IRQF_EARLY_RESUME, + "palmas-adc-auto-0", adc); + if (ret < 0) { + dev_err(adc->dev, "request auto0 irq %d failed: %d\n", + adc->irq_auto_0, ret); + goto out_irq_free; + } + } + + if (gpadc_pdata->adc_wakeup2_data) { + memcpy(&adc->wakeup2_data, gpadc_pdata->adc_wakeup2_data, + sizeof(adc->wakeup2_data)); + adc->wakeup2_enable = true; + adc->irq_auto_1 = platform_get_irq(pdev, 2); + ret = request_threaded_irq(adc->irq_auto_1, NULL, + palmas_gpadc_irq_auto, + IRQF_ONESHOT | 
IRQF_EARLY_RESUME, + "palmas-adc-auto-1", adc); + if (ret < 0) { + dev_err(adc->dev, "request auto1 irq %d failed: %d\n", + adc->irq_auto_1, ret); + goto out_irq_auto0_free; + } + } + + /* set the current source 0 (value 0/5/15/20 uA => 0..3) */ + if (gpadc_pdata->ch0_current <= 1) + adc->ch0_current = PALMAS_ADC_CH0_CURRENT_SRC_0; + else if (gpadc_pdata->ch0_current <= 5) + adc->ch0_current = PALMAS_ADC_CH0_CURRENT_SRC_5; + else if (gpadc_pdata->ch0_current <= 15) + adc->ch0_current = PALMAS_ADC_CH0_CURRENT_SRC_15; + else + adc->ch0_current = PALMAS_ADC_CH0_CURRENT_SRC_20; + + /* set the current source 3 (value 0/10/400/800 uA => 0..3) */ + if (gpadc_pdata->ch3_current <= 1) + adc->ch3_current = PALMAS_ADC_CH3_CURRENT_SRC_0; + else if (gpadc_pdata->ch3_current <= 10) + adc->ch3_current = PALMAS_ADC_CH3_CURRENT_SRC_10; + else if (gpadc_pdata->ch3_current <= 400) + adc->ch3_current = PALMAS_ADC_CH3_CURRENT_SRC_400; + else + adc->ch3_current = PALMAS_ADC_CH3_CURRENT_SRC_800; + + adc->extended_delay = gpadc_pdata->extended_delay; + + indio_dev->name = MOD_NAME; + indio_dev->dev.parent = &pdev->dev; + indio_dev->info = &palmas_gpadc_iio_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = palmas_gpadc_iio_channel; + indio_dev->num_channels = ARRAY_SIZE(palmas_gpadc_iio_channel); + + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(adc->dev, "iio_device_register() failed: %d\n", ret); + goto out_irq_auto1_free; + } + + device_set_wakeup_capable(&pdev->dev, 1); + for (i = 0; i < PALMAS_ADC_CH_MAX; i++) { + if (!(adc->adc_info[i].is_uncalibrated)) + palmas_gpadc_calibrate(adc, i); + } + + if (adc->wakeup1_enable || adc->wakeup2_enable) + device_wakeup_enable(&pdev->dev); + + return 0; + +out_irq_auto1_free: + if (gpadc_pdata->adc_wakeup2_data) + free_irq(adc->irq_auto_1, adc); +out_irq_auto0_free: + if (gpadc_pdata->adc_wakeup1_data) + free_irq(adc->irq_auto_0, adc); +out_irq_free: + free_irq(adc->irq, adc); +out: + return ret; +} + +static int palmas_gpadc_remove(struct platform_device *pdev) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev); + struct palmas_gpadc *adc = iio_priv(indio_dev); + + if (adc->wakeup1_enable || adc->wakeup2_enable) + device_wakeup_disable(&pdev->dev); + iio_device_unregister(indio_dev); + free_irq(adc->irq, adc); + if (adc->wakeup1_enable) + free_irq(adc->irq_auto_0, adc); + if (adc->wakeup2_enable) + free_irq(adc->irq_auto_1, adc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc) +{ + int adc_period, conv; + int i; + int ch0 = 0, ch1 = 0; + int thres; + int ret; + + adc_period = adc->auto_conversion_period; + for (i = 0; i < 16; ++i) { + if (((1000 * (1 << i)) / 32) < adc_period) + continue; + } + if (i > 0) + i--; + adc_period = i; + ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_AUTO_CTRL, + PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_MASK, + adc_period); + if (ret < 0) { + dev_err(adc->dev, "AUTO_CTRL write failed: %d\n", ret); + return ret; + } + + conv = 0; + if (adc->wakeup1_enable) { + int polarity; + + ch0 = adc->wakeup1_data.adc_channel_number; + conv |= PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN; + if (adc->wakeup1_data.adc_high_threshold > 0) { + thres = adc->wakeup1_data.adc_high_threshold; + polarity = 0; + } else { + thres = adc->wakeup1_data.adc_low_threshold; + polarity = PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_POL; + } + + ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_THRES_CONV0_LSB, thres & 0xFF); + if (ret < 0) { + 
dev_err(adc->dev, + "THRES_CONV0_LSB write failed: %d\n", ret); + return ret; + } + + ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_THRES_CONV0_MSB, + ((thres >> 8) & 0xF) | polarity); + if (ret < 0) { + dev_err(adc->dev, + "THRES_CONV0_MSB write failed: %d\n", ret); + return ret; + } + } + + if (adc->wakeup2_enable) { + int polarity; + + ch1 = adc->wakeup2_data.adc_channel_number; + conv |= PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN; + if (adc->wakeup2_data.adc_high_threshold > 0) { + thres = adc->wakeup2_data.adc_high_threshold; + polarity = 0; + } else { + thres = adc->wakeup2_data.adc_low_threshold; + polarity = PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_POL; + } + + ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_THRES_CONV1_LSB, thres & 0xFF); + if (ret < 0) { + dev_err(adc->dev, + "THRES_CONV1_LSB write failed: %d\n", ret); + return ret; + } + + ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_THRES_CONV1_MSB, + ((thres >> 8) & 0xF) | polarity); + if (ret < 0) { + dev_err(adc->dev, + "THRES_CONV1_MSB write failed: %d\n", ret); + return ret; + } + } + + ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_AUTO_SELECT, (ch1 << 4) | ch0); + if (ret < 0) { + dev_err(adc->dev, "AUTO_SELECT write failed: %d\n", ret); + return ret; + } + + ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_AUTO_CTRL, + PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN | + PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN, conv); + if (ret < 0) + dev_err(adc->dev, "AUTO_CTRL write failed: %d\n", ret); + + return ret; +} + +static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc) +{ + int ret; + + ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE, + PALMAS_GPADC_AUTO_SELECT, 0); + if (ret < 0) { + dev_err(adc->dev, "AUTO_SELECT write failed: %d\n", ret); + return ret; + } + + ret = palmas_disable_auto_conversion(adc); + if (ret < 0) + dev_err(adc->dev, "Disable auto conversion failed: %d\n", ret); + + return ret; +} + +static int palmas_gpadc_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct palmas_gpadc *adc = iio_priv(indio_dev); + int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; + int ret; + + if (!device_may_wakeup(dev) || !wakeup) + return 0; + + ret = palmas_adc_wakeup_configure(adc); + if (ret < 0) + return ret; + + if (adc->wakeup1_enable) + enable_irq_wake(adc->irq_auto_0); + + if (adc->wakeup2_enable) + enable_irq_wake(adc->irq_auto_1); + + return 0; +} + +static int palmas_gpadc_resume(struct device *dev) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct palmas_gpadc *adc = iio_priv(indio_dev); + int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; + int ret; + + if (!device_may_wakeup(dev) || !wakeup) + return 0; + + ret = palmas_adc_wakeup_reset(adc); + if (ret < 0) + return ret; + + if (adc->wakeup1_enable) + disable_irq_wake(adc->irq_auto_0); + + if (adc->wakeup2_enable) + disable_irq_wake(adc->irq_auto_1); + + return 0; +}; +#endif + +static const struct dev_pm_ops palmas_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(palmas_gpadc_suspend, + palmas_gpadc_resume) +}; + +static struct platform_driver palmas_gpadc_driver = { + .probe = palmas_gpadc_probe, + .remove = palmas_gpadc_remove, + .driver = { + .name = MOD_NAME, + .pm = &palmas_pm_ops, + }, +}; + +static int __init palmas_gpadc_init(void) +{ + return platform_driver_register(&palmas_gpadc_driver); +} +module_init(palmas_gpadc_init); + +static void __exit palmas_gpadc_exit(void) +{ + 
platform_driver_unregister(&palmas_gpadc_driver); +} +module_exit(palmas_gpadc_exit); + +MODULE_DESCRIPTION("palmas GPADC driver"); +MODULE_AUTHOR("Pradeep Goudagunta"); +MODULE_ALIAS("platform:palmas-gpadc"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 13e1d96935ed..c800dbc42079 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -134,21 +134,32 @@ struct palmas_pmic_driver_data { struct regulator_config config); }; +struct palmas_adc_wakeup_property { + int adc_channel_number; + int adc_high_threshold; + int adc_low_threshold; +}; + struct palmas_gpadc_platform_data { /* Channel 3 current source is only enabled during conversion */ - int ch3_current; + int ch3_current; /* 0: off; 1: 10uA; 2: 400uA; 3: 800 uA */ /* Channel 0 current source can be used for battery detection. * If used for battery detection this will cause a permanent current * consumption depending on current level set here. */ - int ch0_current; + int ch0_current; /* 0: off; 1: 5uA; 2: 15uA; 3: 20 uA */ + bool extended_delay; /* use extended delay for conversion */ /* default BAT_REMOVAL_DAT setting on device probe */ int bat_removal; /* Sets the START_POLARITY bit in the RT_CTRL register */ int start_polarity; + + int auto_conversion_period_ms; + struct palmas_adc_wakeup_property *adc_wakeup1_data; + struct palmas_adc_wakeup_property *adc_wakeup2_data; }; struct palmas_reg_init { @@ -405,28 +416,7 @@ struct palmas_gpadc_calibration { s32 offset_error; }; -struct palmas_gpadc { - struct device *dev; - struct palmas *palmas; - - int ch3_current; - int ch0_current; - - int gpadc_force; - - int bat_removal; - - struct mutex reading_lock; - struct completion irq_complete; - - int eoc_sw_irq; - - struct palmas_gpadc_calibration *palmas_cal_tbl; - - int conv0_channel; - int conv1_channel; - int rt_channel; -}; +#define PALMAS_DATASHEET_NAME(_name) "palmas-gpadc-chan-"#_name struct palmas_gpadc_result { s32 raw_code; @@ -520,6 +510,43 @@ enum palmas_irqs { PALMAS_NUM_IRQ, }; +/* Palmas GPADC Channels */ +enum { + PALMAS_ADC_CH_IN0, + PALMAS_ADC_CH_IN1, + PALMAS_ADC_CH_IN2, + PALMAS_ADC_CH_IN3, + PALMAS_ADC_CH_IN4, + PALMAS_ADC_CH_IN5, + PALMAS_ADC_CH_IN6, + PALMAS_ADC_CH_IN7, + PALMAS_ADC_CH_IN8, + PALMAS_ADC_CH_IN9, + PALMAS_ADC_CH_IN10, + PALMAS_ADC_CH_IN11, + PALMAS_ADC_CH_IN12, + PALMAS_ADC_CH_IN13, + PALMAS_ADC_CH_IN14, + PALMAS_ADC_CH_IN15, + PALMAS_ADC_CH_MAX, +}; + +/* Palmas GPADC Channel0 Current Source */ +enum { + PALMAS_ADC_CH0_CURRENT_SRC_0, + PALMAS_ADC_CH0_CURRENT_SRC_5, + PALMAS_ADC_CH0_CURRENT_SRC_15, + PALMAS_ADC_CH0_CURRENT_SRC_20, +}; + +/* Palmas GPADC Channel3 Current Source */ +enum { + PALMAS_ADC_CH3_CURRENT_SRC_0, + PALMAS_ADC_CH3_CURRENT_SRC_10, + PALMAS_ADC_CH3_CURRENT_SRC_400, + PALMAS_ADC_CH3_CURRENT_SRC_800, +}; + struct palmas_pmic { struct palmas *palmas; struct device *dev; -- cgit v1.2.3 From b440655b896b2d5a2fb5f918801fb0e281a537cd Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 13 Oct 2015 18:10:26 +0200 Subject: iio: Add support for indicating fixed watermarks For buffers which have a fixed wake-up watermark the watermark attribute should be read-only. Add a new FIXED_WATERMARK flag to the struct iio_buffer_access_funcs, which can be set by a buffer implementation. 
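As a minimal illustration of the intended use (not code from this patch; every my_hwfifo_* name below is a placeholder), a driver-provided buffer whose wake-up threshold is dictated by a hardware FIFO might advertise the new flag roughly like this, so that the core registers the watermark attribute read-only:

#include <linux/iio/buffer.h>

/* Placeholder callbacks, standing in for a real driver's implementation. */
static int my_hwfifo_read_first_n(struct iio_buffer *buffer, size_t n,
				  char __user *buf)
{
	return 0;
}

static size_t my_hwfifo_data_available(struct iio_buffer *buffer)
{
	return 0;
}

static const struct iio_buffer_access_funcs my_hwfifo_access_funcs = {
	.read_first_n	= my_hwfifo_read_first_n,
	.data_available	= my_hwfifo_data_available,
	/*
	 * The wake-up threshold is fixed by the hardware FIFO depth, so ask
	 * the core to expose the "watermark" attribute as read-only.
	 */
	.flags		= INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

With the flag set, userspace can still read the watermark value but any attempt to write it through sysfs is rejected by the core.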
Signed-off-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-buffer.c | 5 +++++ include/linux/iio/buffer.h | 8 ++++++++ 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 5f2c8c8c436e..98a6447a61d3 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -998,6 +998,8 @@ static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, iio_buffer_show_enable, iio_buffer_store_enable); static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR, iio_buffer_show_watermark, iio_buffer_store_watermark); +static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark, + S_IRUGO, iio_buffer_show_watermark, NULL); static struct attribute *iio_buffer_attrs[] = { &dev_attr_length.attr, @@ -1040,6 +1042,9 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev) if (!buffer->access->set_length) attr[0] = &dev_attr_length_ro.attr; + if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK) + attr[2] = &dev_attr_watermark_ro.attr; + if (buffer->attrs) memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs, sizeof(struct attribute *) * attrcount); diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 1600c55828e0..4d99a53d1fe7 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -17,6 +17,12 @@ struct iio_buffer; +/** + * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be + * configured. It has a fixed value which will be buffer specific. + */ +#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0) + /** * struct iio_buffer_access_funcs - access functions for buffers. * @store_to: actually store stuff to the buffer @@ -30,6 +36,7 @@ struct iio_buffer; * @release: called when the last reference to the buffer is dropped, * should free all resources allocated by the buffer. * @modes: Supported operating modes by this buffer type + * @flags: A bitmask combination of INDIO_BUFFER_FLAG_* * * The purpose of this structure is to make the buffer element * modular as event for a given driver, different usecases may require @@ -54,6 +61,7 @@ struct iio_buffer_access_funcs { void (*release)(struct iio_buffer *buffer); unsigned int modes; + unsigned int flags; }; /** -- cgit v1.2.3 From e18a2ad45caeb11226e49c25068d0f2efe2adf6c Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 13 Oct 2015 18:10:27 +0200 Subject: iio: Add buffer enable/disable callbacks This patch adds a enable and disable callback that is called when the buffer is enabled/disabled. This can be used by buffer implementations that need to do some setup or teardown work. E.g. a DMA based buffer can use this to start/stop the DMA transfer. 
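A rough sketch of how a buffer implementation could hook the new callbacks (the my_dma_* helpers are placeholders, not code from this series); the callback signatures match the additions to struct iio_buffer_access_funcs below:

#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>

/* Placeholder start/stop helpers, standing in for real DMA management code. */
static int my_dma_start(struct iio_buffer *buffer)
{
	return 0;
}

static int my_dma_stop(struct iio_buffer *buffer)
{
	return 0;
}

static int my_dma_buffer_enable(struct iio_buffer *buffer,
				struct iio_dev *indio_dev)
{
	/* Called by the core just before the device starts sampling. */
	return my_dma_start(buffer);
}

static int my_dma_buffer_disable(struct iio_buffer *buffer,
				 struct iio_dev *indio_dev)
{
	/* Balanced with enable: called when the device stops sampling. */
	return my_dma_stop(buffer);
}

static const struct iio_buffer_access_funcs my_dma_access_funcs = {
	.enable		= my_dma_buffer_enable,
	.disable	= my_dma_buffer_disable,
	/* remaining callbacks omitted for brevity */
};

Calls to enable and disable are balanced by the core, so a buffer can rely on disable being invoked exactly once for every successful enable.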
Signed-off-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-buffer.c | 36 +++++++++++++++++++++++++++++++++++- include/linux/iio/buffer.h | 8 ++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 98a6447a61d3..a4b164a478c4 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -568,6 +568,22 @@ static void iio_buffer_deactivate_all(struct iio_dev *indio_dev) iio_buffer_deactivate(buffer); } +static int iio_buffer_enable(struct iio_buffer *buffer, + struct iio_dev *indio_dev) +{ + if (!buffer->access->enable) + return 0; + return buffer->access->enable(buffer, indio_dev); +} + +static int iio_buffer_disable(struct iio_buffer *buffer, + struct iio_dev *indio_dev) +{ + if (!buffer->access->disable) + return 0; + return buffer->access->disable(buffer, indio_dev); +} + static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev, struct iio_buffer *buffer) { @@ -719,6 +735,7 @@ static int iio_verify_update(struct iio_dev *indio_dev, static int iio_enable_buffers(struct iio_dev *indio_dev, struct iio_device_config *config) { + struct iio_buffer *buffer; int ret; indio_dev->active_scan_mask = config->scan_mask; @@ -753,6 +770,12 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, indio_dev->info->hwfifo_set_watermark(indio_dev, config->watermark); + list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { + ret = iio_buffer_enable(buffer, indio_dev); + if (ret) + goto err_disable_buffers; + } + indio_dev->currentmode = config->mode; if (indio_dev->setup_ops->postenable) { @@ -760,12 +783,16 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, if (ret) { dev_dbg(&indio_dev->dev, "Buffer not started: postenable failed (%d)\n", ret); - goto err_run_postdisable; + goto err_disable_buffers; } } return 0; +err_disable_buffers: + list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list, + buffer_list) + iio_buffer_disable(buffer, indio_dev); err_run_postdisable: indio_dev->currentmode = INDIO_DIRECT_MODE; if (indio_dev->setup_ops->postdisable) @@ -778,6 +805,7 @@ err_undo_config: static int iio_disable_buffers(struct iio_dev *indio_dev) { + struct iio_buffer *buffer; int ret = 0; int ret2; @@ -798,6 +826,12 @@ static int iio_disable_buffers(struct iio_dev *indio_dev) ret = ret2; } + list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { + ret2 = iio_buffer_disable(buffer, indio_dev); + if (ret2 && !ret) + ret = ret2; + } + indio_dev->currentmode = INDIO_DIRECT_MODE; if (indio_dev->setup_ops->postdisable) { diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 4d99a53d1fe7..2ec3ad58e8a0 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -33,6 +33,11 @@ struct iio_buffer; * storage. * @set_bytes_per_datum:set number of bytes per datum * @set_length: set number of datums in buffer + * @enable: called if the buffer is attached to a device and the + * device starts sampling. Calls are balanced with + * @disable. + * @disable: called if the buffer is attached to a device and the + * device stops sampling. Calles are balanced with @enable. * @release: called when the last reference to the buffer is dropped, * should free all resources allocated by the buffer. 
* @modes: Supported operating modes by this buffer type @@ -58,6 +63,9 @@ struct iio_buffer_access_funcs { int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); int (*set_length)(struct iio_buffer *buffer, int length); + int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + void (*release)(struct iio_buffer *buffer); unsigned int modes; -- cgit v1.2.3 From 670b19ae9bfdbcb4ce2c2ffb2ec1659a7f4a2074 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 13 Oct 2015 18:10:28 +0200 Subject: iio: Add generic DMA buffer infrastructure The traditional approach used in IIO to implement buffered capture requires the generation of at least one interrupt per sample. In the interrupt handler the driver reads the sample from the device and copies it to a software buffer. This approach has a rather large per sample overhead associated with it. And while it works fine for samplerates in the range of up to 1000 samples per second it starts to consume a rather large share of the available CPU processing time once we go beyond that, this is especially true on an embedded system with limited processing power. The regular interrupt also causes increased power consumption by not allowing the hardware into deeper sleep states, which is something that becomes more and more important on mobile battery powered devices. And while the recently added watermark support mitigates some of the issues by allowing the device to generate interrupts at a rate lower than the data output rate, this still requires a storage buffer inside the device and even if it exists it is only a few 100 samples deep at most. DMA support on the other hand allows to capture multiple millions or even more samples without any CPU interaction. This allows the CPU to either go to sleep for longer periods or focus on other tasks which increases overall system performance and power consumption. In addition to that some devices might not even offer a way to read the data other than using DMA, which makes DMA mandatory to use for them. The tasks involved in implementing a DMA buffer can be divided into two categories. The first category is memory buffer management (allocation, mapping, etc.) and hooking this up the IIO buffer callbacks like read(), enable(), disable(), etc. The second category of tasks is to setup the DMA hardware and manage the DMA transfers. Tasks from the first category will be very similar for all IIO drivers supporting DMA buffers, while the tasks from the second category will be hardware specific. This patch implements a generic infrastructure that take care of the former tasks. It provides a set of functions that implement the standard IIO buffer iio_buffer_access_funcs callbacks. These can either be used as is or be overloaded and augmented with driver specific code where necessary. For the DMA buffer support infrastructure that is introduced in this series sample data is grouped by so called blocks. A block is the basic unit at which data is exchanged between the application and the hardware. The application is responsible for allocating the memory associated with the block and then passes the block to the hardware. When the hardware has captured the amount of samples equal to size of a block it will notify the application, which can then read the data from the block and process it. The block size can freely chosen (within the constraints of the hardware). 
This allows to make a trade-off between latency and management overhead. The larger the block size the lower the per sample overhead but the latency between when the data was captured and when the application will be able to access it increases, in a similar way smaller block sizes have a larger per sample management overhead but a lower latency. The ideal block size thus depends on system and application requirements. For the time being the infrastructure only implements a simple double buffered scheme which allocates two blocks each with half the size of the configured buffer size. This provides basic support for capturing continuous uninterrupted data over the existing file-IO ABI. Future extensions to the DMA buffer infrastructure will give applications a more fine grained control over how many blocks are allocated and the size of each block. But this requires userspace ABI additions which are intentionally not part of this patch and will be added separately. Tasks of the second category need to be implemented by a device specific driver. They can be hooked up into the generic infrastructure using two simple callbacks, submit() and abort(). The submit() callback is used to schedule DMA transfers for blocks. Once a DMA transfer has been completed it is expected that the buffer driver calls iio_dma_buffer_block_done() to notify. The abort() callback is used for stopping all pending and active DMA transfers when the buffer is disabled. Signed-off-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron --- drivers/iio/buffer/Kconfig | 9 + drivers/iio/buffer/Makefile | 1 + drivers/iio/buffer/industrialio-buffer-dma.c | 683 +++++++++++++++++++++++++++ include/linux/iio/buffer-dma.h | 152 ++++++ 4 files changed, 845 insertions(+) create mode 100644 drivers/iio/buffer/industrialio-buffer-dma.c create mode 100644 include/linux/iio/buffer-dma.h (limited to 'include') diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig index 0a7b2fd3699b..b2fda1afc03e 100644 --- a/drivers/iio/buffer/Kconfig +++ b/drivers/iio/buffer/Kconfig @@ -9,6 +9,15 @@ config IIO_BUFFER_CB Should be selected by any drivers that do in-kernel push usage. That is, those where the data is pushed to the consumer. +config IIO_BUFFER_DMA + tristate + help + Provides the generic IIO DMA buffer infrastructure that can be used by + drivers for devices with DMA support to implement the IIO buffer. + + Should be selected by drivers that want to use the generic DMA buffer + infrastructure. + config IIO_KFIFO_BUF tristate "Industrial I/O buffering based on kfifo" help diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile index 4d193b9a9123..bda3f1143e72 100644 --- a/drivers/iio/buffer/Makefile +++ b/drivers/iio/buffer/Makefile @@ -4,5 +4,6 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o +obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c new file mode 100644 index 000000000000..212cbedc7abb --- /dev/null +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -0,0 +1,683 @@ +/* + * Copyright 2013-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * For DMA buffers the storage is sub-divided into so called blocks. Each block + * has its own memory buffer. The size of the block is the granularity at which + * memory is exchanged between the hardware and the application. Increasing the + * basic unit of data exchange from one sample to one block decreases the + * management overhead that is associated with each sample. E.g. if we say the + * management overhead for one exchange is x and the unit of exchange is one + * sample the overhead will be x for each sample. Whereas when using a block + * which contains n samples the overhead per sample is reduced to x/n. This + * allows to achieve much higher samplerates than what can be sustained with + * the one sample approach. + * + * Blocks are exchanged between the DMA controller and the application via the + * means of two queues. The incoming queue and the outgoing queue. Blocks on the + * incoming queue are waiting for the DMA controller to pick them up and fill + * them with data. Block on the outgoing queue have been filled with data and + * are waiting for the application to dequeue them and read the data. + * + * A block can be in one of the following states: + * * Owned by the application. In this state the application can read data from + * the block. + * * On the incoming list: Blocks on the incoming list are queued up to be + * processed by the DMA controller. + * * Owned by the DMA controller: The DMA controller is processing the block + * and filling it with data. + * * On the outgoing list: Blocks on the outgoing list have been successfully + * processed by the DMA controller and contain data. They can be dequeued by + * the application. + * * Dead: A block that is dead has been marked as to be freed. It might still + * be owned by either the application or the DMA controller at the moment. + * But once they are done processing it instead of going to either the + * incoming or outgoing queue the block will be freed. + * + * In addition to this blocks are reference counted and the memory associated + * with both the block structure as well as the storage memory for the block + * will be freed when the last reference to the block is dropped. This means a + * block must not be accessed without holding a reference. + * + * The iio_dma_buffer implementation provides a generic infrastructure for + * managing the blocks. + * + * A driver for a specific piece of hardware that has DMA capabilities need to + * implement the submit() callback from the iio_dma_buffer_ops structure. This + * callback is supposed to initiate the DMA transfer copying data from the + * converter to the memory region of the block. Once the DMA transfer has been + * completed the driver must call iio_dma_buffer_block_done() for the completed + * block. + * + * Prior to this it must set the bytes_used field of the block contains + * the actual number of bytes in the buffer. Typically this will be equal to the + * size of the block, but if the DMA hardware has certain alignment requirements + * for the transfer length it might choose to use less than the full size. In + * either case it is expected that bytes_used is a multiple of the bytes per + * datum, i.e. the block must not contain partial samples. 
+ * + * The driver must call iio_dma_buffer_block_done() for each block it has + * received through its submit_block() callback, even if it does not actually + * perform a DMA transfer for the block, e.g. because the buffer was disabled + * before the block transfer was started. In this case it should set bytes_used + * to 0. + * + * In addition it is recommended that a driver implements the abort() callback. + * It will be called when the buffer is disabled and can be used to cancel + * pending and stop active transfers. + * + * The specific driver implementation should use the default callback + * implementations provided by this module for the iio_buffer_access_funcs + * struct. It may overload some callbacks with custom variants if the hardware + * has special requirements that are not handled by the generic functions. If a + * driver chooses to overload a callback it has to ensure that the generic + * callback is called from within the custom callback. + */ + +static void iio_buffer_block_release(struct kref *kref) +{ + struct iio_dma_buffer_block *block = container_of(kref, + struct iio_dma_buffer_block, kref); + + WARN_ON(block->state != IIO_BLOCK_STATE_DEAD); + + dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size), + block->vaddr, block->phys_addr); + + iio_buffer_put(&block->queue->buffer); + kfree(block); +} + +static void iio_buffer_block_get(struct iio_dma_buffer_block *block) +{ + kref_get(&block->kref); +} + +static void iio_buffer_block_put(struct iio_dma_buffer_block *block) +{ + kref_put(&block->kref, iio_buffer_block_release); +} + +/* + * dma_free_coherent can sleep, hence we need to take some special care to be + * able to drop a reference from an atomic context. + */ +static LIST_HEAD(iio_dma_buffer_dead_blocks); +static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock); + +static void iio_dma_buffer_cleanup_worker(struct work_struct *work) +{ + struct iio_dma_buffer_block *block, *_block; + LIST_HEAD(block_list); + + spin_lock_irq(&iio_dma_buffer_dead_blocks_lock); + list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list); + spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock); + + list_for_each_entry_safe(block, _block, &block_list, head) + iio_buffer_block_release(&block->kref); +} +static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker); + +static void iio_buffer_block_release_atomic(struct kref *kref) +{ + struct iio_dma_buffer_block *block; + unsigned long flags; + + block = container_of(kref, struct iio_dma_buffer_block, kref); + + spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags); + list_add_tail(&block->head, &iio_dma_buffer_dead_blocks); + spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags); + + schedule_work(&iio_dma_buffer_cleanup_work); +} + +/* + * Version of iio_buffer_block_put() that can be called from atomic context + */ +static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block) +{ + kref_put(&block->kref, iio_buffer_block_release_atomic); +} + +static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf) +{ + return container_of(buf, struct iio_dma_buffer_queue, buffer); +} + +static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block( + struct iio_dma_buffer_queue *queue, size_t size) +{ + struct iio_dma_buffer_block *block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return NULL; + + block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), + &block->phys_addr, GFP_KERNEL); + if (!block->vaddr) { + kfree(block); + return 
NULL; + } + + block->size = size; + block->state = IIO_BLOCK_STATE_DEQUEUED; + block->queue = queue; + INIT_LIST_HEAD(&block->head); + kref_init(&block->kref); + + iio_buffer_get(&queue->buffer); + + return block; +} + +static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) +{ + struct iio_dma_buffer_queue *queue = block->queue; + + /* + * The buffer has already been freed by the application, just drop the + * reference. + */ + if (block->state != IIO_BLOCK_STATE_DEAD) { + block->state = IIO_BLOCK_STATE_DONE; + list_add_tail(&block->head, &queue->outgoing); + } +} + +/** + * iio_dma_buffer_block_done() - Indicate that a block has been completed + * @block: The completed block + * + * Should be called when the DMA controller has finished handling the block to + * pass back ownership of the block to the queue. + */ +void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) +{ + struct iio_dma_buffer_queue *queue = block->queue; + unsigned long flags; + + spin_lock_irqsave(&queue->list_lock, flags); + _iio_dma_buffer_block_done(block); + spin_unlock_irqrestore(&queue->list_lock, flags); + + iio_buffer_block_put_atomic(block); + wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM); +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done); + +/** + * iio_dma_buffer_block_list_abort() - Indicate that a list block has been + * aborted + * @queue: Queue for which to complete blocks. + * @list: List of aborted blocks. All blocks in this list must be from @queue. + * + * Typically called from the abort() callback after the DMA controller has been + * stopped. This will set bytes_used to 0 for each block in the list and then + * hand the blocks back to the queue. + */ +void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, + struct list_head *list) +{ + struct iio_dma_buffer_block *block, *_block; + unsigned long flags; + + spin_lock_irqsave(&queue->list_lock, flags); + list_for_each_entry_safe(block, _block, list, head) { + list_del(&block->head); + block->bytes_used = 0; + _iio_dma_buffer_block_done(block); + iio_buffer_block_put_atomic(block); + } + spin_unlock_irqrestore(&queue->list_lock, flags); + + wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM); +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort); + +static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block) +{ + /* + * If the core owns the block it can be re-used. This should be the + * default case when enabling the buffer, unless the DMA controller does + * not support abort and has not given back the block yet. + */ + switch (block->state) { + case IIO_BLOCK_STATE_DEQUEUED: + case IIO_BLOCK_STATE_QUEUED: + case IIO_BLOCK_STATE_DONE: + return true; + default: + return false; + } +} + +/** + * iio_dma_buffer_request_update() - DMA buffer request_update callback + * @buffer: The buffer which to request an update + * + * Should be used as the iio_dma_buffer_request_update() callback for + * iio_buffer_access_ops struct for DMA buffers. + */ +int iio_dma_buffer_request_update(struct iio_buffer *buffer) +{ + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); + struct iio_dma_buffer_block *block; + bool try_reuse = false; + size_t size; + int ret = 0; + int i; + + /* + * Split the buffer into two even parts. This is used as a double + * buffering scheme with usually one block at a time being used by the + * DMA and the other one by the application. 
+ */ + size = DIV_ROUND_UP(queue->buffer.bytes_per_datum * + queue->buffer.length, 2); + + mutex_lock(&queue->lock); + + /* Allocations are page aligned */ + if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size)) + try_reuse = true; + + queue->fileio.block_size = size; + queue->fileio.active_block = NULL; + + spin_lock_irq(&queue->list_lock); + for (i = 0; i < 2; i++) { + block = queue->fileio.blocks[i]; + + /* If we can't re-use it free it */ + if (block && (!iio_dma_block_reusable(block) || !try_reuse)) + block->state = IIO_BLOCK_STATE_DEAD; + } + + /* + * At this point all blocks are either owned by the core or marked as + * dead. This means we can reset the lists without having to fear + * corrution. + */ + INIT_LIST_HEAD(&queue->outgoing); + spin_unlock_irq(&queue->list_lock); + + INIT_LIST_HEAD(&queue->incoming); + + for (i = 0; i < 2; i++) { + if (queue->fileio.blocks[i]) { + block = queue->fileio.blocks[i]; + if (block->state == IIO_BLOCK_STATE_DEAD) { + /* Could not reuse it */ + iio_buffer_block_put(block); + block = NULL; + } else { + block->size = size; + } + } else { + block = NULL; + } + + if (!block) { + block = iio_dma_buffer_alloc_block(queue, size); + if (!block) { + ret = -ENOMEM; + goto out_unlock; + } + queue->fileio.blocks[i] = block; + } + + block->state = IIO_BLOCK_STATE_QUEUED; + list_add_tail(&block->head, &queue->incoming); + } + +out_unlock: + mutex_unlock(&queue->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update); + +static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block) +{ + int ret; + + /* + * If the hardware has already been removed we put the block into + * limbo. It will neither be on the incoming nor outgoing list, nor will + * it ever complete. It will just wait to be freed eventually. + */ + if (!queue->ops) + return; + + block->state = IIO_BLOCK_STATE_ACTIVE; + iio_buffer_block_get(block); + ret = queue->ops->submit(queue, block); + if (ret) { + /* + * This is a bit of a problem and there is not much we can do + * other then wait for the buffer to be disabled and re-enabled + * and try again. But it should not really happen unless we run + * out of memory or something similar. + * + * TODO: Implement support in the IIO core to allow buffers to + * notify consumers that something went wrong and the buffer + * should be disabled. + */ + iio_buffer_block_put(block); + } +} + +/** + * iio_dma_buffer_enable() - Enable DMA buffer + * @buffer: IIO buffer to enable + * @indio_dev: IIO device the buffer is attached to + * + * Needs to be called when the device that the buffer is attached to starts + * sampling. Typically should be the iio_buffer_access_ops enable callback. + * + * This will allocate the DMA buffers and start the DMA transfers. + */ +int iio_dma_buffer_enable(struct iio_buffer *buffer, + struct iio_dev *indio_dev) +{ + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); + struct iio_dma_buffer_block *block, *_block; + + mutex_lock(&queue->lock); + queue->active = true; + list_for_each_entry_safe(block, _block, &queue->incoming, head) { + list_del(&block->head); + iio_dma_buffer_submit_block(queue, block); + } + mutex_unlock(&queue->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_enable); + +/** + * iio_dma_buffer_disable() - Disable DMA buffer + * @buffer: IIO DMA buffer to disable + * @indio_dev: IIO device the buffer is attached to + * + * Needs to be called when the device that the buffer is attached to stops + * sampling. 
Typically should be the iio_buffer_access_ops disable callback. + */ +int iio_dma_buffer_disable(struct iio_buffer *buffer, + struct iio_dev *indio_dev) +{ + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); + + mutex_lock(&queue->lock); + queue->active = false; + + if (queue->ops && queue->ops->abort) + queue->ops->abort(queue); + mutex_unlock(&queue->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_disable); + +static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block) +{ + if (block->state == IIO_BLOCK_STATE_DEAD) { + iio_buffer_block_put(block); + } else if (queue->active) { + iio_dma_buffer_submit_block(queue, block); + } else { + block->state = IIO_BLOCK_STATE_QUEUED; + list_add_tail(&block->head, &queue->incoming); + } +} + +static struct iio_dma_buffer_block *iio_dma_buffer_dequeue( + struct iio_dma_buffer_queue *queue) +{ + struct iio_dma_buffer_block *block; + + spin_lock_irq(&queue->list_lock); + block = list_first_entry_or_null(&queue->outgoing, struct + iio_dma_buffer_block, head); + if (block != NULL) { + list_del(&block->head); + block->state = IIO_BLOCK_STATE_DEQUEUED; + } + spin_unlock_irq(&queue->list_lock); + + return block; +} + +/** + * iio_dma_buffer_read() - DMA buffer read callback + * @buffer: Buffer to read form + * @n: Number of bytes to read + * @user_buffer: Userspace buffer to copy the data to + * + * Should be used as the read_first_n callback for iio_buffer_access_ops + * struct for DMA buffers. + */ +int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, + char __user *user_buffer) +{ + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); + struct iio_dma_buffer_block *block; + int ret; + + if (n < buffer->bytes_per_datum) + return -EINVAL; + + mutex_lock(&queue->lock); + + if (!queue->fileio.active_block) { + block = iio_dma_buffer_dequeue(queue); + if (block == NULL) { + ret = 0; + goto out_unlock; + } + queue->fileio.pos = 0; + queue->fileio.active_block = block; + } else { + block = queue->fileio.active_block; + } + + n = rounddown(n, buffer->bytes_per_datum); + if (n > block->bytes_used - queue->fileio.pos) + n = block->bytes_used - queue->fileio.pos; + + if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { + ret = -EFAULT; + goto out_unlock; + } + + queue->fileio.pos += n; + + if (queue->fileio.pos == block->bytes_used) { + queue->fileio.active_block = NULL; + iio_dma_buffer_enqueue(queue, block); + } + + ret = n; + +out_unlock: + mutex_unlock(&queue->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_read); + +/** + * iio_dma_buffer_data_available() - DMA buffer data_available callback + * @buf: Buffer to check for data availability + * + * Should be used as the data_available callback for iio_buffer_access_ops + * struct for DMA buffers. + */ +size_t iio_dma_buffer_data_available(struct iio_buffer *buf) +{ + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); + struct iio_dma_buffer_block *block; + size_t data_available = 0; + + /* + * For counting the available bytes we'll use the size of the block not + * the number of actual bytes available in the block. Otherwise it is + * possible that we end up with a value that is lower than the watermark + * but won't increase since all blocks are in use. 
+ */ + + mutex_lock(&queue->lock); + if (queue->fileio.active_block) + data_available += queue->fileio.active_block->size; + + spin_lock_irq(&queue->list_lock); + list_for_each_entry(block, &queue->outgoing, head) + data_available += block->size; + spin_unlock_irq(&queue->list_lock); + mutex_unlock(&queue->lock); + + return data_available; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available); + +/** + * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback + * @buffer: Buffer to set the bytes-per-datum for + * @bpd: The new bytes-per-datum value + * + * Should be used as the set_bytes_per_datum callback for iio_buffer_access_ops + * struct for DMA buffers. + */ +int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd) +{ + buffer->bytes_per_datum = bpd; + + return 0; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum); + +/** + * iio_dma_buffer_set_length - DMA buffer set_length callback + * @buffer: Buffer to set the length for + * @length: The new buffer length + * + * Should be used as the set_length callback for iio_buffer_access_ops + * struct for DMA buffers. + */ +int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length) +{ + /* Avoid an invalid state */ + if (length < 2) + length = 2; + buffer->length = length; + buffer->watermark = length / 2; + + return 0; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length); + +/** + * iio_dma_buffer_init() - Initialize DMA buffer queue + * @queue: Buffer to initialize + * @dev: DMA device + * @ops: DMA buffer queue callback operations + * + * The DMA device will be used by the queue to do DMA memory allocations. So it + * should refer to the device that will perform the DMA to ensure that + * allocations are done from a memory region that can be accessed by the device. + */ +int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, + struct device *dev, const struct iio_dma_buffer_ops *ops) +{ + iio_buffer_init(&queue->buffer); + queue->buffer.length = PAGE_SIZE; + queue->buffer.watermark = queue->buffer.length / 2; + queue->dev = dev; + queue->ops = ops; + + INIT_LIST_HEAD(&queue->incoming); + INIT_LIST_HEAD(&queue->outgoing); + + mutex_init(&queue->lock); + spin_lock_init(&queue->list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_init); + +/** + * iio_dma_buffer_exit() - Cleanup DMA buffer queue + * @queue: Buffer to cleanup + * + * After this function has completed it is safe to free any resources that are + * associated with the buffer and are accessed inside the callback operations. + */ +void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue) +{ + unsigned int i; + + mutex_lock(&queue->lock); + + spin_lock_irq(&queue->list_lock); + for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { + if (!queue->fileio.blocks[i]) + continue; + queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD; + } + INIT_LIST_HEAD(&queue->outgoing); + spin_unlock_irq(&queue->list_lock); + + INIT_LIST_HEAD(&queue->incoming); + + for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { + if (!queue->fileio.blocks[i]) + continue; + iio_buffer_block_put(queue->fileio.blocks[i]); + queue->fileio.blocks[i] = NULL; + } + queue->fileio.active_block = NULL; + queue->ops = NULL; + + mutex_unlock(&queue->lock); +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_exit); + +/** + * iio_dma_buffer_release() - Release final buffer resources + * @queue: Buffer to release + * + * Frees resources that can't yet be freed in iio_dma_buffer_exit(). 
Should be + * called in the buffers release callback implementation right before freeing + * the memory associated with the buffer. + */ +void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue) +{ + mutex_destroy(&queue->lock); +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_release); + +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_DESCRIPTION("DMA buffer for the IIO framework"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h new file mode 100644 index 000000000000..767467d886de --- /dev/null +++ b/include/linux/iio/buffer-dma.h @@ -0,0 +1,152 @@ +/* + * Copyright 2013-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. + */ + +#ifndef __INDUSTRIALIO_DMA_BUFFER_H__ +#define __INDUSTRIALIO_DMA_BUFFER_H__ + +#include +#include +#include +#include +#include + +struct iio_dma_buffer_queue; +struct iio_dma_buffer_ops; +struct device; + +struct iio_buffer_block { + u32 size; + u32 bytes_used; +}; + +/** + * enum iio_block_state - State of a struct iio_dma_buffer_block + * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued + * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue + * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA + * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue + * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed + */ +enum iio_block_state { + IIO_BLOCK_STATE_DEQUEUED, + IIO_BLOCK_STATE_QUEUED, + IIO_BLOCK_STATE_ACTIVE, + IIO_BLOCK_STATE_DONE, + IIO_BLOCK_STATE_DEAD, +}; + +/** + * struct iio_dma_buffer_block - IIO buffer block + * @head: List head + * @size: Total size of the block in bytes + * @bytes_used: Number of bytes that contain valid data + * @vaddr: Virutal address of the blocks memory + * @phys_addr: Physical address of the blocks memory + * @queue: Parent DMA buffer queue + * @kref: kref used to manage the lifetime of block + * @state: Current state of the block + */ +struct iio_dma_buffer_block { + /* May only be accessed by the owner of the block */ + struct list_head head; + size_t bytes_used; + + /* + * Set during allocation, constant thereafter. May be accessed read-only + * by anybody holding a reference to the block. + */ + void *vaddr; + dma_addr_t phys_addr; + size_t size; + struct iio_dma_buffer_queue *queue; + + /* Must not be accessed outside the core. */ + struct kref kref; + /* + * Must not be accessed outside the core. Access needs to hold + * queue->list_lock if the block is not owned by the core. + */ + enum iio_block_state state; +}; + +/** + * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer + * @blocks: Buffer blocks used for fileio + * @active_block: Block being used in read() + * @pos: Read offset in the active block + * @block_size: Size of each block + */ +struct iio_dma_buffer_queue_fileio { + struct iio_dma_buffer_block *blocks[2]; + struct iio_dma_buffer_block *active_block; + size_t pos; + size_t block_size; +}; + +/** + * struct iio_dma_buffer_queue - DMA buffer base structure + * @buffer: IIO buffer base structure + * @dev: Parent device + * @ops: DMA buffer callbacks + * @lock: Protects the incoming list, active and the fields in the fileio + * substruct + * @list_lock: Protects lists that contain blocks which can be modified in + * atomic context as well as blocks on those lists. 
This is the outgoing queue + * list and typically also a list of active blocks in the part that handles + * the DMA controller + * @incoming: List of buffers on the incoming queue + * @outgoing: List of buffers on the outgoing queue + * @active: Whether the buffer is currently active + * @fileio: FileIO state + */ +struct iio_dma_buffer_queue { + struct iio_buffer buffer; + struct device *dev; + const struct iio_dma_buffer_ops *ops; + + struct mutex lock; + spinlock_t list_lock; + struct list_head incoming; + struct list_head outgoing; + + bool active; + + struct iio_dma_buffer_queue_fileio fileio; +}; + +/** + * struct iio_dma_buffer_ops - DMA buffer callback operations + * @submit: Called when a block is submitted to the DMA controller + * @abort: Should abort all pending transfers + */ +struct iio_dma_buffer_ops { + int (*submit)(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block); + void (*abort)(struct iio_dma_buffer_queue *queue); +}; + +void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block); +void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, + struct list_head *list); + +int iio_dma_buffer_enable(struct iio_buffer *buffer, + struct iio_dev *indio_dev); +int iio_dma_buffer_disable(struct iio_buffer *buffer, + struct iio_dev *indio_dev); +int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, + char __user *user_buffer); +size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); +int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); +int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length); +int iio_dma_buffer_request_update(struct iio_buffer *buffer); + +int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, + struct device *dma_dev, const struct iio_dma_buffer_ops *ops); +void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue); +void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue); + +#endif -- cgit v1.2.3 From 2d6ca60f328450ff5c7802d0857d12e3711348ce Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 13 Oct 2015 18:10:29 +0200 Subject: iio: Add a DMAengine framework based buffer Add a generic fully device independent DMA buffer implementation that uses the DMAegnine framework to perform the DMA transfers. This can be used by converter drivers that whish to provide a DMA buffer for converters that are connected to a DMA core that implements the DMAengine API. Apart from allocating the buffer using iio_dmaengine_buffer_alloc() and freeing it using iio_dmaengine_buffer_free() no additional converter driver specific code is required when using this DMA buffer implementation. Signed-off-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron --- drivers/iio/buffer/Kconfig | 11 ++ drivers/iio/buffer/Makefile | 1 + drivers/iio/buffer/industrialio-buffer-dmaengine.c | 213 +++++++++++++++++++++ include/linux/iio/buffer-dmaengine.h | 18 ++ 4 files changed, 243 insertions(+) create mode 100644 drivers/iio/buffer/industrialio-buffer-dmaengine.c create mode 100644 include/linux/iio/buffer-dmaengine.h (limited to 'include') diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig index b2fda1afc03e..4ffd3db7817f 100644 --- a/drivers/iio/buffer/Kconfig +++ b/drivers/iio/buffer/Kconfig @@ -18,6 +18,17 @@ config IIO_BUFFER_DMA Should be selected by drivers that want to use the generic DMA buffer infrastructure. 
+config IIO_BUFFER_DMAENGINE + tristate + select IIO_BUFFER_DMA + help + Provides a bonding of the generic IIO DMA buffer infrastructure with the + DMAengine framework. This can be used by converter drivers with a DMA port + connected to an external DMA controller which is supported by the + DMAengine framework. + + Should be selected by drivers that want to use this functionality. + config IIO_KFIFO_BUF tristate "Industrial I/O buffering based on kfifo" help diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile index bda3f1143e72..85beaae831ae 100644 --- a/drivers/iio/buffer/Makefile +++ b/drivers/iio/buffer/Makefile @@ -5,5 +5,6 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o +obj-$(CONFIG_IIO_BUFFER_DMAENGINE) += industrialio-buffer-dmaengine.o obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c new file mode 100644 index 000000000000..ebdb838d3a1c --- /dev/null +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -0,0 +1,213 @@ +/* + * Copyright 2014-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure + * with the DMAengine framework. The generic IIO DMA buffer infrastructure is + * used to manage the buffer memory and implement the IIO buffer operations + * while the DMAengine framework is used to perform the DMA transfers. Combined + * this results in a device independent fully functional DMA buffer + * implementation that can be used by device drivers for peripherals which are + * connected to a DMA controller which has a DMAengine driver implementation. 
+ */ + +struct dmaengine_buffer { + struct iio_dma_buffer_queue queue; + + struct dma_chan *chan; + struct list_head active; + + size_t align; + size_t max_size; +}; + +static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer( + struct iio_buffer *buffer) +{ + return container_of(buffer, struct dmaengine_buffer, queue.buffer); +} + +static void iio_dmaengine_buffer_block_done(void *data) +{ + struct iio_dma_buffer_block *block = data; + unsigned long flags; + + spin_lock_irqsave(&block->queue->list_lock, flags); + list_del(&block->head); + spin_unlock_irqrestore(&block->queue->list_lock, flags); + iio_dma_buffer_block_done(block); +} + +static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block) +{ + struct dmaengine_buffer *dmaengine_buffer = + iio_buffer_to_dmaengine_buffer(&queue->buffer); + struct dma_async_tx_descriptor *desc; + dma_cookie_t cookie; + + block->bytes_used = min(block->size, dmaengine_buffer->max_size); + block->bytes_used = rounddown(block->bytes_used, + dmaengine_buffer->align); + + desc = dmaengine_prep_slave_single(dmaengine_buffer->chan, + block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); + if (!desc) + return -ENOMEM; + + desc->callback = iio_dmaengine_buffer_block_done; + desc->callback_param = block; + + cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) + return dma_submit_error(cookie); + + spin_lock_irq(&dmaengine_buffer->queue.list_lock); + list_add_tail(&block->head, &dmaengine_buffer->active); + spin_unlock_irq(&dmaengine_buffer->queue.list_lock); + + dma_async_issue_pending(dmaengine_buffer->chan); + + return 0; +} + +static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue) +{ + struct dmaengine_buffer *dmaengine_buffer = + iio_buffer_to_dmaengine_buffer(&queue->buffer); + + dmaengine_terminate_all(dmaengine_buffer->chan); + /* FIXME: There is a slight chance of a race condition here. + * dmaengine_terminate_all() does not guarantee that all transfer + * callbacks have finished running. Need to introduce a + * dmaengine_terminate_all_sync(). + */ + iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active); +} + +static void iio_dmaengine_buffer_release(struct iio_buffer *buf) +{ + struct dmaengine_buffer *dmaengine_buffer = + iio_buffer_to_dmaengine_buffer(buf); + + iio_dma_buffer_release(&dmaengine_buffer->queue); + kfree(dmaengine_buffer); +} + +static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = { + .read_first_n = iio_dma_buffer_read, + .set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum, + .set_length = iio_dma_buffer_set_length, + .request_update = iio_dma_buffer_request_update, + .enable = iio_dma_buffer_enable, + .disable = iio_dma_buffer_disable, + .data_available = iio_dma_buffer_data_available, + .release = iio_dmaengine_buffer_release, + + .modes = INDIO_BUFFER_HARDWARE, + .flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK, +}; + +static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = { + .submit = iio_dmaengine_buffer_submit_block, + .abort = iio_dmaengine_buffer_abort, +}; + +/** + * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine + * @dev: Parent device for the buffer + * @channel: DMA channel name, typically "rx". + * + * This allocates a new IIO buffer which internally uses the DMAengine framework + * to perform its transfers. The parent device will be used to request the DMA + * channel. 
+ * + * Once done using the buffer iio_dmaengine_buffer_free() should be used to + * release it. + */ +struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, + const char *channel) +{ + struct dmaengine_buffer *dmaengine_buffer; + unsigned int width, src_width, dest_width; + struct dma_slave_caps caps; + struct dma_chan *chan; + int ret; + + dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL); + if (!dmaengine_buffer) + return ERR_PTR(-ENOMEM); + + chan = dma_request_slave_channel_reason(dev, channel); + if (IS_ERR(chan)) { + ret = PTR_ERR(chan); + goto err_free; + } + + ret = dma_get_slave_caps(chan, &caps); + if (ret < 0) + goto err_free; + + /* Needs to be aligned to the maximum of the minimums */ + if (caps.src_addr_widths) + src_width = __ffs(caps.src_addr_widths); + else + src_width = 1; + if (caps.dst_addr_widths) + dest_width = __ffs(caps.dst_addr_widths); + else + dest_width = 1; + width = max(src_width, dest_width); + + INIT_LIST_HEAD(&dmaengine_buffer->active); + dmaengine_buffer->chan = chan; + dmaengine_buffer->align = width; + dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev); + + iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev, + &iio_dmaengine_default_ops); + + dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops; + + return &dmaengine_buffer->queue.buffer; + +err_free: + kfree(dmaengine_buffer); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(iio_dmaengine_buffer_alloc); + +/** + * iio_dmaengine_buffer_free() - Free dmaengine buffer + * @buffer: Buffer to free + * + * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc(). + */ +void iio_dmaengine_buffer_free(struct iio_buffer *buffer) +{ + struct dmaengine_buffer *dmaengine_buffer = + iio_buffer_to_dmaengine_buffer(buffer); + + iio_dma_buffer_exit(&dmaengine_buffer->queue); + dma_release_channel(dmaengine_buffer->chan); + + iio_buffer_put(buffer); +} +EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free); diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h new file mode 100644 index 000000000000..5dcddf427bb0 --- /dev/null +++ b/include/linux/iio/buffer-dmaengine.h @@ -0,0 +1,18 @@ +/* + * Copyright 2014-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __IIO_DMAENGINE_H__ +#define __IIO_DMAENGINE_H__ + +struct iio_buffer; +struct device; + +struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, + const char *channel); +void iio_dmaengine_buffer_free(struct iio_buffer *buffer); + +#endif -- cgit v1.2.3 From d97044b661d0d56b2a2ae9b2b95ab0b359b417dc Mon Sep 17 00:00:00 2001 From: Deepak S Date: Wed, 28 Oct 2015 12:19:51 -0700 Subject: drm/i915/kbl: Add Kabylake PCI ID v2: separate out device info into different GT (Damien) v3: Add is_kabylake to the KBL gt3 structuer (Damien) Sort the platforms in older -> newer order (Damien) v4: Split platform definition since is_skylake=1 on kabylake structure was Nacked. 
(Rodrigo) v5: (Rodrigo) Rebase after commit 3cb27f38f ("drm/i915: remove an extra level of indirection in PCI ID list") Cc: Jani Nikula Reviewed-by: Damien Lespiau Signed-off-by: Deepak S Signed-off-by: Damien Lespiau Signed-off-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1446059991-17033-1-git-send-email-rodrigo.vivi@intel.com Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.c | 3 +++ include/drm/i915_pciids.h | 29 +++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 7b29aeeae29e..f020daadc16d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -466,6 +466,9 @@ static const struct pci_device_id pciidlist[] = { INTEL_SKL_GT2_IDS(&intel_skylake_info), INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), INTEL_BXT_IDS(&intel_broxton_info), + INTEL_KBL_GT1_IDS(&intel_kabylake_info), + INTEL_KBL_GT2_IDS(&intel_kabylake_info), + INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), {0, 0, 0} }; diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 17c445612e01..2e7a159ccf93 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -291,4 +291,33 @@ INTEL_VGA_DEVICE(0x1A84, info), \ INTEL_VGA_DEVICE(0x5A84, info) +#define INTEL_KBL_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ + INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ + +#define INTEL_KBL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \ + INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \ + INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ + +#define INTEL_KBL_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \ + INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */ + +#define INTEL_KBL_IDS(info) \ + INTEL_KBL_GT1_IDS(info), \ + INTEL_KBL_GT2_IDS(info), \ + INTEL_KBL_GT3_IDS(info) + #endif /* _I915_PCIIDS_H */ -- cgit v1.2.3 From 8b10c0cf21ec84618d4bf02c73c0543500ece68d Mon Sep 17 00:00:00 2001 From: Deepak S Date: Wed, 28 Oct 2015 12:21:12 -0700 Subject: drm/i915/kbl: Add Kabylake GT4 PCI ID v2: (Rodrigo) Rebase after commit 3cb27f38f ("drm/i915: remove an extra level of indirection in PCI ID list") Cc: Jani Nikula Reviewed-by: Damien Lespiau Signed-off-by: Deepak S Signed-off-by: Damien Lespiau Signed-off-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1446060072-19489-1-git-send-email-rodrigo.vivi@intel.com Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.c | 1 + include/drm/i915_pciids.h | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f020daadc16d..9f552094b41b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -469,6 +469,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_KBL_GT1_IDS(&intel_kabylake_info), INTEL_KBL_GT2_IDS(&intel_kabylake_info), 
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), + INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), {0, 0, 0} }; diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 2e7a159ccf93..f1a113e35f98 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -315,9 +315,16 @@ INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \ INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */ +#define INTEL_KBL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \ + INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \ + INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \ + INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */ + #define INTEL_KBL_IDS(info) \ INTEL_KBL_GT1_IDS(info), \ INTEL_KBL_GT2_IDS(info), \ - INTEL_KBL_GT3_IDS(info) + INTEL_KBL_GT3_IDS(info), \ + INTEL_KBL_GT4_IDS(info) #endif /* _I915_PCIIDS_H */ -- cgit v1.2.3 From 0b5da8db145bfd44266ac964a2636a0cf8d7c286 Mon Sep 17 00:00:00 2001 From: Ravishankar N Date: Tue, 30 Jun 2015 23:40:22 +0530 Subject: fuse: add support for SEEK_HOLE and SEEK_DATA in lseek A useful performance improvement for accessing virtual machine images via FUSE mount. See https://bugzilla.redhat.com/show_bug.cgi?id=1220173 for a use-case for glusterFS. Signed-off-by: Ravishankar N Signed-off-by: Miklos Szeredi --- fs/fuse/file.c | 73 +++++++++++++++++++++++++++++++++++++++++------ fs/fuse/fuse_i.h | 3 ++ include/uapi/linux/fuse.h | 17 ++++++++++- 3 files changed, 84 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 195476a24148..47f181191060 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2231,20 +2231,77 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block) return err ? 0 : outarg.block; } +static loff_t fuse_lseek(struct file *file, loff_t offset, int whence) +{ + struct inode *inode = file->f_mapping->host; + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_file *ff = file->private_data; + FUSE_ARGS(args); + struct fuse_lseek_in inarg = { + .fh = ff->fh, + .offset = offset, + .whence = whence + }; + struct fuse_lseek_out outarg; + int err; + + if (fc->no_lseek) + goto fallback; + + args.in.h.opcode = FUSE_LSEEK; + args.in.h.nodeid = ff->nodeid; + args.in.numargs = 1; + args.in.args[0].size = sizeof(inarg); + args.in.args[0].value = &inarg; + args.out.numargs = 1; + args.out.args[0].size = sizeof(outarg); + args.out.args[0].value = &outarg; + err = fuse_simple_request(fc, &args); + if (err) { + if (err == -ENOSYS) { + fc->no_lseek = 1; + goto fallback; + } + return err; + } + + return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes); + +fallback: + err = fuse_update_attributes(inode, NULL, file, NULL); + if (!err) + return generic_file_llseek(file, offset, whence); + else + return err; +} + static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence) { loff_t retval; struct inode *inode = file_inode(file); - /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */ - if (whence == SEEK_CUR || whence == SEEK_SET) - return generic_file_llseek(file, offset, whence); - - mutex_lock(&inode->i_mutex); - retval = fuse_update_attributes(inode, NULL, file, NULL); - if (!retval) + switch (whence) { + case SEEK_SET: + case SEEK_CUR: + /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */ retval = generic_file_llseek(file, offset, whence); - mutex_unlock(&inode->i_mutex); + break; + case SEEK_END: + mutex_lock(&inode->i_mutex); + retval = fuse_update_attributes(inode, NULL, file, NULL); + if (!retval) + retval = generic_file_llseek(file, 
offset, whence); + mutex_unlock(&inode->i_mutex); + break; + case SEEK_HOLE: + case SEEK_DATA: + mutex_lock(&inode->i_mutex); + retval = fuse_lseek(file, offset, whence); + mutex_unlock(&inode->i_mutex); + break; + default: + retval = -EINVAL; + } return retval; } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 405113101db8..ce394b5fe6b4 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -605,6 +605,9 @@ struct fuse_conn { /** Does the filesystem support asynchronous direct-IO submission? */ unsigned async_dio:1; + /** Is lseek not implemented by fs? */ + unsigned no_lseek:1; + /** The number of requests waiting for completion */ atomic_t num_waiting; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index c9aca042e61d..5974fae54e12 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -102,6 +102,9 @@ * - add ctime and ctimensec to fuse_setattr_in * - add FUSE_RENAME2 request * - add FUSE_NO_OPEN_SUPPORT flag + * + * 7.24 + * - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support */ #ifndef _LINUX_FUSE_H @@ -137,7 +140,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 23 +#define FUSE_KERNEL_MINOR_VERSION 24 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -358,6 +361,7 @@ enum fuse_opcode { FUSE_FALLOCATE = 43, FUSE_READDIRPLUS = 44, FUSE_RENAME2 = 45, + FUSE_LSEEK = 46, /* CUSE specific operations */ CUSE_INIT = 4096, @@ -758,4 +762,15 @@ struct fuse_notify_retrieve_in { /* Device ioctls: */ #define FUSE_DEV_IOC_CLONE _IOR(229, 0, uint32_t) +struct fuse_lseek_in { + uint64_t fh; + uint64_t offset; + uint32_t whence; + uint32_t padding; +}; + +struct fuse_lseek_out { + uint64_t offset; +}; + #endif /* _LINUX_FUSE_H */ -- cgit v1.2.3 From 257f871993474e2bde6c497b54022c362cf398e1 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Wed, 4 Nov 2015 10:59:52 -0800 Subject: ovl: move super block magic number to magic.h The overlayfs file system is not recognized by programs like tail because the magic number is not in standard header location. Move it so that the value will propagate on for the GNU library and utilities. Needs to go in the fstatfs manual page as well. 
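As a rough illustration of what this enables (a sketch only, not part of the patch; the helper name is invented, and it assumes userspace headers that already carry the new constant, otherwise the value 0x794c7630 would have to be hardcoded), a utility can recognize overlayfs from the f_type returned by statfs()/fstatfs():

#include <sys/vfs.h>
#include <linux/magic.h>

/* Returns 1 if path lives on overlayfs, 0 if not, -1 on statfs() error. */
static int is_overlayfs(const char *path)
{
	struct statfs sfs;

	if (statfs(path, &sfs) != 0)
		return -1;

	return sfs.f_type == OVERLAYFS_SUPER_MAGIC;
}

This mirrors the f_type checks that tools such as tail and stat already perform for other filesystems once a magic value is visible in the uapi headers.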
Signed-off-by: Stephen Hemminger Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 2 -- include/uapi/linux/magic.h | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 97cacb525974..32f31243d36a 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -24,8 +24,6 @@ MODULE_AUTHOR("Miklos Szeredi "); MODULE_DESCRIPTION("Overlay filesystem"); MODULE_LICENSE("GPL"); -#define OVERLAYFS_SUPER_MAGIC 0x794c7630 - struct ovl_config { char *lowerdir; char *upperdir; diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 7b1425a6b370..eec438952aa7 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -31,6 +31,7 @@ #define PSTOREFS_MAGIC 0x6165676C #define EFIVARFS_MAGIC 0xde5e81e4 #define HOSTFS_SUPER_MAGIC 0x00c0ffee +#define OVERLAYFS_SUPER_MAGIC 0x794c7630 #define MINIX_SUPER_MAGIC 0x137F /* minix v1 fs, 14 char names */ #define MINIX_SUPER_MAGIC2 0x138F /* minix v1 fs, 30 char names */ -- cgit v1.2.3 From 28b8b26b308e656edfa9467867d5f79212da2ec3 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 30 Oct 2015 20:33:20 -0700 Subject: mtd: add get/set of_node/flash_node helpers We are going to begin using the mtd->dev.of_node field for MTD device nodes, so let's add helpers for it. Also, we'll be making some conversions on spi_nor (and nand_chip eventually) too, so get that ready with their own helpers. Signed-off-by: Brian Norris Reviewed-by: Boris Brezillon --- include/linux/mtd/mtd.h | 11 +++++++++++ include/linux/mtd/nand.h | 11 +++++++++++ include/linux/mtd/spi-nor.h | 11 +++++++++++ 3 files changed, 33 insertions(+) (limited to 'include') diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index f17fa75809aa..cc84923011c0 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -254,6 +254,17 @@ struct mtd_info { int usecount; }; +static inline void mtd_set_of_node(struct mtd_info *mtd, + struct device_node *np) +{ + mtd->dev.of_node = np; +} + +static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) +{ + return mtd->dev.of_node; +} + int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 5a9d1d4c2487..4f7c9b97982f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -719,6 +719,17 @@ struct nand_chip { void *priv; }; +static inline void nand_set_flash_node(struct nand_chip *chip, + struct device_node *np) +{ + chip->flash_node = np; +} + +static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) +{ + return chip->flash_node; +} + /* * NAND Flash Manufacturer ID Codes */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index c8723b62c4cd..6d991df8f986 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -185,6 +185,17 @@ struct spi_nor { void *priv; }; +static inline void spi_nor_set_flash_node(struct spi_nor *nor, + struct device_node *np) +{ + nor->flash_node = np; +} + +static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) +{ + return nor->flash_node; +} + /** * spi_nor_scan() - scan the SPI NOR * @nor: the spi_nor structure -- cgit v1.2.3 From 3b6521eab0386a4854d47b1a01947d7dc46ec98d Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 30 Oct 2015 20:33:21 -0700 Subject: mtd: ofpart: grab device tree node 
directly from master device node It seems more logical to use a device node directly associated with the MTD master device (i.e., mtd->dev.of_node field) rather than requiring auxiliary partition parser information to be passed in by the driver in a separate struct. This patch supports the mtd->dev.of_node field and deprecates the parser data 'of_node' field Driver conversions may now follow. Additional side benefit to assigning mtd->dev.of_node rather than using parser data: the driver core will automatically create a device -> node symlink for us. Signed-off-by: Brian Norris Reviewed-by: Boris Brezillon --- drivers/mtd/ofpart.c | 18 ++++++++++-------- include/linux/mtd/partitions.h | 4 +++- 2 files changed, 13 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 669c3452f278..7bf996a4cf5e 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -37,10 +37,11 @@ static int parse_ofpart_partitions(struct mtd_info *master, bool dedicated = true; - if (!data) - return 0; - - mtd_node = data->of_node; + /* + * of_node can be provided through auxiliary parser data or (preferred) + * by assigning the master device node + */ + mtd_node = data && data->of_node ? data->of_node : mtd_get_of_node(master); if (!mtd_node) return 0; @@ -149,10 +150,11 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, } *part; const char *names; - if (!data) - return 0; - - dp = data->of_node; + /* + * of_node can be provided through auxiliary parser data or (preferred) + * by assigning the master device node + */ + dp = data && data->of_node ? data->of_node : mtd_get_of_node(master); if (!dp) return 0; diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index 6a35e6de5da1..e742f34b67eb 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -56,7 +56,9 @@ struct device_node; /** * struct mtd_part_parser_data - used to pass data to MTD partition parsers. * @origin: for RedBoot, start address of MTD device - * @of_node: for OF parsers, device node containing partitioning information + * @of_node: for OF parsers, device node containing partitioning information. + * This field is deprecated, as the device node should simply be + * assigned to the master struct device. */ struct mtd_part_parser_data { unsigned long origin; -- cgit v1.2.3 From 30069af7348b56eb8c5e1dda7788a531c5f24ca2 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 30 Oct 2015 20:33:27 -0700 Subject: mtd: spi-nor: drop flash_node field We can just alias to the MTD of_node. 
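For callers nothing changes: a controller driver keeps using the accessor, and the node simply ends up in nor->mtd.dev.of_node. A minimal probe-path sketch (driver and struct names here are hypothetical, not taken from any in-tree driver):

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mtd/spi-nor.h>

struct example_nor_ctrl {
	struct spi_nor	nor;
	/* controller-specific fields would follow */
};

static int example_nor_probe(struct platform_device *pdev)
{
	struct example_nor_ctrl *ctrl;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->nor.dev = &pdev->dev;
	/* Unchanged call site; the node now lands in ctrl->nor.mtd.dev.of_node */
	spi_nor_set_flash_node(&ctrl->nor, pdev->dev.of_node);

	/* spi_nor_scan() and mtd_device_register() would follow here */
	return 0;
}

Because the node lands on the mtd itself, partition parsers such as ofpart can pick it up through mtd_get_of_node() without any extra parser data.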
Signed-off-by: Brian Norris Reviewed-by: Boris Brezillon --- drivers/mtd/spi-nor/spi-nor.c | 1 - include/linux/mtd/spi-nor.h | 6 ++---- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 924d455dadb5..12041e181630 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1258,7 +1258,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) mtd->flags |= MTD_NO_ERASE; mtd->dev.parent = dev; - mtd_set_of_node(mtd, np); nor->page_size = info->page_size; mtd->writebufsize = nor->page_size; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 6d991df8f986..955f268d159a 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -124,7 +124,6 @@ struct mtd_info; * @mtd: point to a mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: point to a spi device, or a spi nor controller device. - * @flash_node: point to a device node describing this flash instance. * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -155,7 +154,6 @@ struct spi_nor { struct mtd_info mtd; struct mutex lock; struct device *dev; - struct device_node *flash_node; u32 page_size; u8 addr_width; u8 erase_opcode; @@ -188,12 +186,12 @@ struct spi_nor { static inline void spi_nor_set_flash_node(struct spi_nor *nor, struct device_node *np) { - nor->flash_node = np; + mtd_set_of_node(&nor->mtd, np); } static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) { - return nor->flash_node; + return mtd_get_of_node(&nor->mtd); } /** -- cgit v1.2.3 From e270bca531b40cd0a143176eb093d173b9c6f418 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 30 Oct 2015 20:33:29 -0700 Subject: mtd: ofpart: drop 'of_node' partition parser data This field is no longer used anywhere, as it is superseded by mtd->dev.of_node. Signed-off-by: Brian Norris Reviewed-by: Boris Brezillon --- drivers/mtd/ofpart.c | 14 ++++---------- include/linux/mtd/partitions.h | 4 ---- 2 files changed, 4 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 7bf996a4cf5e..f78d2aea5545 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -37,11 +37,8 @@ static int parse_ofpart_partitions(struct mtd_info *master, bool dedicated = true; - /* - * of_node can be provided through auxiliary parser data or (preferred) - * by assigning the master device node - */ - mtd_node = data && data->of_node ? data->of_node : mtd_get_of_node(master); + /* Pull of_node from the master device node */ + mtd_node = mtd_get_of_node(master); if (!mtd_node) return 0; @@ -150,11 +147,8 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, } *part; const char *names; - /* - * of_node can be provided through auxiliary parser data or (preferred) - * by assigning the master device node - */ - dp = data && data->of_node ? data->of_node : mtd_get_of_node(master); + /* Pull of_node from the master device node */ + dp = mtd_get_of_node(master); if (!dp) return 0; diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index e742f34b67eb..773975a3c9e6 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -56,13 +56,9 @@ struct device_node; /** * struct mtd_part_parser_data - used to pass data to MTD partition parsers. 
* @origin: for RedBoot, start address of MTD device - * @of_node: for OF parsers, device node containing partitioning information. - * This field is deprecated, as the device node should simply be - * assigned to the master struct device. */ struct mtd_part_parser_data { unsigned long origin; - struct device_node *of_node; }; -- cgit v1.2.3 From 26add94cd535d1e000e7871fe69c7bb89e942d67 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Wed, 11 Nov 2015 17:05:56 -0800 Subject: mtd: partitions: kill unused ecclayout struct This field is not used. Reported here: http://lists.infradead.org/pipermail/linux-mtd/2015-October/062417.html Reported-by: Brian Foster Cc: Brian Foster Signed-off-by: Brian Norris Reviewed-by: Boris Brezillon --- include/linux/mtd/partitions.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index 773975a3c9e6..8421520c10eb 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -41,7 +41,6 @@ struct mtd_partition { uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ - struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */ }; #define MTDPART_OFS_RETAIN (-3) -- cgit v1.2.3 From b36f09c3c441a6e59eab9315032e7d546571de3f Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 20 Oct 2015 11:46:28 +0200 Subject: dmaengine: Add transfer termination synchronization support The DMAengine API has a long standing race condition that is inherent to the API itself. Calling dmaengine_terminate_all() is supposed to stop and abort any pending or active transfers that have previously been submitted. Unfortunately it is possible that this operation races against a currently running (or with some drivers also scheduled) completion callback. Since the API allows dmaengine_terminate_all() to be called from atomic context as well as from within a completion callback it is not possible to synchronize to the execution of the completion callback from within dmaengine_terminate_all() itself. This means that a user of the DMAengine API does not know when it is safe to free resources used in the completion callback, which can result in a use-after-free race condition. This patch addresses the issue by introducing an explicit synchronization primitive to the DMAengine API called dmaengine_synchronize(). The existing dmaengine_terminate_all() is deprecated in favor of dmaengine_terminate_sync() and dmaengine_terminate_async(). The former aborts all pending and active transfers and synchronizes to the current context, meaning it will wait until all running completion callbacks have finished. This means it is only possible to call this function from non-atomic context. The later function does not synchronize, but can still be used in atomic context or from within a complete callback. It has to be followed up by dmaengine_synchronize() before a client can free the resources used in a completion callback. In addition to this the semantics of the device_terminate_all() callback are slightly relaxed by this patch. It is now OK for a driver to only schedule the termination of the active transfer, but does not necessarily have to wait until the DMA controller has completely stopped. The driver must ensure though that the controller has stopped and no longer accesses any memory when the device_synchronize() callback returns. 
This was in part done since most drivers do not pay attention to this anyway at the moment and to emphasize that this needs to be done when the device_synchronize() callback is implemented. But it also helps with implementing support for devices where stopping the controller can require operations that may sleep. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- Documentation/dmaengine/client.txt | 38 ++++++++++++++- Documentation/dmaengine/provider.txt | 20 +++++++- drivers/dma/dmaengine.c | 5 +- include/linux/dmaengine.h | 90 ++++++++++++++++++++++++++++++++++++ 4 files changed, 148 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt index 11fb87ff6cd0..d9f9f461102a 100644 --- a/Documentation/dmaengine/client.txt +++ b/Documentation/dmaengine/client.txt @@ -128,7 +128,7 @@ The slave DMA usage consists of following steps: transaction. For cyclic DMA, a callback function may wish to terminate the - DMA via dmaengine_terminate_all(). + DMA via dmaengine_terminate_async(). Therefore, it is important that DMA engine drivers drop any locks before calling the callback function which may cause a @@ -166,12 +166,29 @@ The slave DMA usage consists of following steps: Further APIs: -1. int dmaengine_terminate_all(struct dma_chan *chan) +1. int dmaengine_terminate_sync(struct dma_chan *chan) + int dmaengine_terminate_async(struct dma_chan *chan) + int dmaengine_terminate_all(struct dma_chan *chan) /* DEPRECATED */ This causes all activity for the DMA channel to be stopped, and may discard data in the DMA FIFO which hasn't been fully transferred. No callback functions will be called for any incomplete transfers. + Two variants of this function are available. + + dmaengine_terminate_async() might not wait until the DMA has been fully + stopped or until any running complete callbacks have finished. But it is + possible to call dmaengine_terminate_async() from atomic context or from + within a complete callback. dmaengine_synchronize() must be called before it + is safe to free the memory accessed by the DMA transfer or free resources + accessed from within the complete callback. + + dmaengine_terminate_sync() will wait for the transfer and any running + complete callbacks to finish before it returns. But the function must not be + called from atomic context or from within a complete callback. + + dmaengine_terminate_all() is deprecated and should not be used in new code. + 2. int dmaengine_pause(struct dma_chan *chan) This pauses activity on the DMA channel without data loss. @@ -197,3 +214,20 @@ Further APIs: a running DMA channel. It is recommended that DMA engine users pause or stop (via dmaengine_terminate_all()) the channel before using this API. + +5. void dmaengine_synchronize(struct dma_chan *chan) + + Synchronize the termination of the DMA channel to the current context. + + This function should be used after dmaengine_terminate_async() to synchronize + the termination of the DMA channel to the current context. The function will + wait for the transfer and any running complete callbacks to finish before it + returns. + + If dmaengine_terminate_async() is used to stop the DMA channel this function + must be called before it is safe to free memory accessed by previously + submitted descriptors or to free any resources accessed within the complete + callback of previously submitted descriptors. 
+ + The behavior of this function is undefined if dma_async_issue_pending() has + been called between dmaengine_terminate_async() and this function. diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt index 67d4ce4df109..122b7f4876bb 100644 --- a/Documentation/dmaengine/provider.txt +++ b/Documentation/dmaengine/provider.txt @@ -327,8 +327,24 @@ supported. * device_terminate_all - Aborts all the pending and ongoing transfers on the channel - - This command should operate synchronously on the channel, - terminating right away all the channels + - For aborted transfers the complete callback should not be called + - Can be called from atomic context or from within a complete + callback of a descriptor. Must not sleep. Drivers must be able + to handle this correctly. + - Termination may be asynchronous. The driver does not have to + wait until the currently active transfer has completely stopped. + See device_synchronize. + + * device_synchronize + - Must synchronize the termination of a channel to the current + context. + - Must make sure that memory for previously submitted + descriptors is no longer accessed by the DMA controller. + - Must make sure that all complete callbacks for previously + submitted descriptors have finished running and none are + scheduled to run. + - May sleep. + Misc notes (stuff that should be documented, but don't really know where to put them) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ecec1445adf..d6fc82e3986f 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -265,8 +265,11 @@ static void dma_chan_put(struct dma_chan *chan) module_put(dma_chan_to_owner(chan)); /* This channel is not in use anymore, free it */ - if (!chan->client_count && chan->device->device_free_chan_resources) + if (!chan->client_count && chan->device->device_free_chan_resources) { + /* Make sure all operations have completed */ + dmaengine_synchronize(chan); chan->device->device_free_chan_resources(chan); + } /* If the channel is used via a DMA request router, free the mapping */ if (chan->router && chan->router->route_free) { diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c47c68e535e8..4662d9aa6d5a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -654,6 +654,8 @@ enum dmaengine_alignment { * paused. Returns 0 or an error code * @device_terminate_all: Aborts all transfers on a channel. Returns 0 * or an error code + * @device_synchronize: Synchronizes the termination of a transfers to the + * current context. * @device_tx_status: poll for transaction completion, the optional * txstate parameter can be supplied with a pointer to get a * struct with auxiliary transfer status information, otherwise the call @@ -737,6 +739,7 @@ struct dma_device { int (*device_pause)(struct dma_chan *chan); int (*device_resume)(struct dma_chan *chan); int (*device_terminate_all)(struct dma_chan *chan); + void (*device_synchronize)(struct dma_chan *chan); enum dma_status (*device_tx_status)(struct dma_chan *chan, dma_cookie_t cookie, @@ -828,6 +831,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( src_sg, src_nents, flags); } +/** + * dmaengine_terminate_all() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * This function is DEPRECATED use either dmaengine_terminate_sync() or + * dmaengine_terminate_async() instead. 
+ */ static inline int dmaengine_terminate_all(struct dma_chan *chan) { if (chan->device->device_terminate_all) @@ -836,6 +846,86 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) return -ENOSYS; } +/** + * dmaengine_terminate_async() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * Calling this function will terminate all active and pending descriptors + * that have previously been submitted to the channel. It is not guaranteed + * though that the transfer for the active descriptor has stopped when the + * function returns. Furthermore it is possible the complete callback of a + * submitted transfer is still running when this function returns. + * + * dmaengine_synchronize() needs to be called before it is safe to free + * any memory that is accessed by previously submitted descriptors or before + * freeing any resources accessed from within the completion callback of any + * perviously submitted descriptors. + * + * This function can be called from atomic context as well as from within a + * complete callback of a descriptor submitted on the same channel. + * + * If none of the two conditions above apply consider using + * dmaengine_terminate_sync() instead. + */ +static inline int dmaengine_terminate_async(struct dma_chan *chan) +{ + if (chan->device->device_terminate_all) + return chan->device->device_terminate_all(chan); + + return -EINVAL; +} + +/** + * dmaengine_synchronize() - Synchronize DMA channel termination + * @chan: The channel to synchronize + * + * Synchronizes to the DMA channel termination to the current context. When this + * function returns it is guaranteed that all transfers for previously issued + * descriptors have stopped and and it is safe to free the memory assoicated + * with them. Furthermore it is guaranteed that all complete callback functions + * for a previously submitted descriptor have finished running and it is safe to + * free resources accessed from within the complete callbacks. + * + * The behavior of this function is undefined if dma_async_issue_pending() has + * been called between dmaengine_terminate_async() and this function. + * + * This function must only be called from non-atomic context and must not be + * called from within a complete callback of a descriptor submitted on the same + * channel. + */ +static inline void dmaengine_synchronize(struct dma_chan *chan) +{ + if (chan->device->device_synchronize) + chan->device->device_synchronize(chan); +} + +/** + * dmaengine_terminate_sync() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * Calling this function will terminate all active and pending transfers + * that have previously been submitted to the channel. It is similar to + * dmaengine_terminate_async() but guarantees that the DMA transfer has actually + * stopped and that all complete callbacks have finished running when the + * function returns. + * + * This function must only be called from non-atomic context and must not be + * called from within a complete callback of a descriptor submitted on the same + * channel. 
+ */ +static inline int dmaengine_terminate_sync(struct dma_chan *chan) +{ + int ret; + + ret = dmaengine_terminate_async(chan); + if (ret) + return ret; + + dmaengine_synchronize(chan); + + return 0; +} + static inline int dmaengine_pause(struct dma_chan *chan) { if (chan->device->device_pause) -- cgit v1.2.3 From 9eeacd3a2f17438d9d286ff2f78c4709a4148be7 Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Tue, 13 Oct 2015 21:54:29 +0200 Subject: dmaengine: enable DMA_CTRL_REUSE In the current state, the capability of transfer reuse can neither be set by a slave dmaengine driver, nor used by a client driver, because the capability is not available to dma_get_slave_caps(). Fix this by adding a way to declare the capability. Fixes: 272420214d26 ("dmaengine: Add DMA_CTRL_REUSE") Signed-off-by: Robert Jarzmik Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 1 + include/linux/dmaengine.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'include') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ecec1445adf..4aced6689734 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -493,6 +493,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) caps->dst_addr_widths = device->dst_addr_widths; caps->directions = device->directions; caps->residue_granularity = device->residue_granularity; + caps->descriptor_reuse = device->descriptor_reuse; /* * Some devices implement only pause (e.g. to get residuum) but no diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c47c68e535e8..6f94b5cbd97c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -659,6 +659,7 @@ enum dmaengine_alignment { * struct with auxiliary transfer status information, otherwise the call * will just return a simple status code * @device_issue_pending: push pending transactions to hardware + * @descriptor_reuse: a submitted transfer can be resubmitted after completion */ struct dma_device { @@ -681,6 +682,7 @@ struct dma_device { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + bool descriptor_reuse; enum dma_residue_granularity residue_granularity; int (*device_alloc_chan_resources)(struct dma_chan *chan); -- cgit v1.2.3 From 2bb129ebb23d2dfec3cd9c22dc7defd681cfcd58 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 13 Nov 2015 12:46:00 +0100 Subject: dmaengine: ioatdma: constify dca_ops structures The dca_ops structure is never modified, so declare it as const. Done with the help of Coccinelle. 
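The pattern is small enough to show in a few lines (a hedged sketch with an invented provider name, not taken from the ioatdma driver): the ops table is only ever read through the provider, so once alloc_dca_provider() accepts a const pointer the table can be declared const and live in read-only data.

#include <linux/dca.h>

static int example_dca_add_requester(struct dca_provider *dca,
				     struct device *dev)
{
	return 0;	/* nothing to track in this sketch */
}

static const struct dca_ops example_dca_ops = {
	.add_requester	= example_dca_add_requester,
};

static struct dca_provider *example_register(struct device *dev)
{
	/* Compiles cleanly now that the parameter is const qualified */
	return alloc_dca_provider(&example_dca_ops, 0);
}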
Signed-off-by: Julia Lawall Acked-by: Dan Williams Signed-off-by: Vinod Koul --- drivers/dca/dca-core.c | 3 ++- drivers/dma/ioat/dca.c | 2 +- include/linux/dca.h | 5 +++-- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index 819dfda88236..7afbb28d6a0f 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c @@ -321,7 +321,8 @@ EXPORT_SYMBOL_GPL(dca_get_tag); * @ops - pointer to struct of dca operation function pointers * @priv_size - size of extra mem to be added for provider's needs */ -struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size) +struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, + int priv_size) { struct dca_provider *dca; int alloc_size; diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index 2cb7c308d5c7..0b9b6b07db9e 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -224,7 +224,7 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, return tag; } -static struct dca_ops ioat_dca_ops = { +static const struct dca_ops ioat_dca_ops = { .add_requester = ioat_dca_add_requester, .remove_requester = ioat_dca_remove_requester, .get_tag = ioat_dca_get_tag, diff --git a/include/linux/dca.h b/include/linux/dca.h index d27a7a05718d..ad956c2e07a8 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -34,7 +34,7 @@ void dca_unregister_notify(struct notifier_block *nb); struct dca_provider { struct list_head node; - struct dca_ops *ops; + const struct dca_ops *ops; struct device *cd; int id; }; @@ -53,7 +53,8 @@ struct dca_ops { int (*dev_managed) (struct dca_provider *, struct device *); }; -struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); +struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, + int priv_size); void free_dca_provider(struct dca_provider *dca); int register_dca_provider(struct dca_provider *dca, struct device *dev); void unregister_dca_provider(struct dca_provider *dca, struct device *dev); -- cgit v1.2.3 From 3bd4dce1366fefe6575b841816e595f54e8e9752 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 30 Oct 2015 18:58:41 -0400 Subject: staging/rdma/hfi1: Clean up macro indentation In preparation for implementing Expected TID caching we do some simple clean up of header file macros. Signed-off-by: Mitko Haralanov Signed-off-by: Ira Weiny Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rdma/hfi1/common.h | 15 ++++++++------- include/uapi/rdma/hfi/hfi1_user.h | 26 +++++++++++++------------- 2 files changed, 21 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/staging/rdma/hfi1/common.h index 5e203239c5b0..5dd92720faae 100644 --- a/drivers/staging/rdma/hfi1/common.h +++ b/drivers/staging/rdma/hfi1/common.h @@ -132,13 +132,14 @@ * HFI1_CAP_RESERVED_MASK bits. */ #define HFI1_CAP_WRITABLE_MASK (HFI1_CAP_SDMA_AHG | \ - HFI1_CAP_HDRSUPP | \ - HFI1_CAP_MULTI_PKT_EGR | \ - HFI1_CAP_NODROP_RHQ_FULL | \ - HFI1_CAP_NODROP_EGR_FULL | \ - HFI1_CAP_ALLOW_PERM_JKEY | \ - HFI1_CAP_STATIC_RATE_CTRL | \ - HFI1_CAP_PRINT_UNIMPL) + HFI1_CAP_HDRSUPP | \ + HFI1_CAP_MULTI_PKT_EGR | \ + HFI1_CAP_NODROP_RHQ_FULL | \ + HFI1_CAP_NODROP_EGR_FULL | \ + HFI1_CAP_ALLOW_PERM_JKEY | \ + HFI1_CAP_STATIC_RATE_CTRL | \ + HFI1_CAP_PRINT_UNIMPL | \ + HFI1_CAP_TID_UNMAP) /* * A set of capability bits that are "global" and are not allowed to be * set in the user bitmask. 
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index 599562fe5d57..a2fc6cbfe414 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -127,13 +127,13 @@ #define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */ #define HFI1_CMD_TID_FREE 5 /* free expected TID entries */ #define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */ -#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */ +#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */ #define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */ #define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */ #define HFI1_CMD_ACK_EVENT 10 /* ack & clear user status bits */ -#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */ -#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */ +#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */ +#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */ /* separate EPROM commands from normal PSM commands */ #define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */ #define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */ @@ -144,18 +144,18 @@ #define HFI1_CMD_EP_WRITE_P0 70 /* write EPROM partition 0 */ #define HFI1_CMD_EP_WRITE_P1 71 /* write EPROM partition 1 */ -#define _HFI1_EVENT_FROZEN_BIT 0 -#define _HFI1_EVENT_LINKDOWN_BIT 1 -#define _HFI1_EVENT_LID_CHANGE_BIT 2 -#define _HFI1_EVENT_LMC_CHANGE_BIT 3 -#define _HFI1_EVENT_SL2VL_CHANGE_BIT 4 +#define _HFI1_EVENT_FROZEN_BIT 0 +#define _HFI1_EVENT_LINKDOWN_BIT 1 +#define _HFI1_EVENT_LID_CHANGE_BIT 2 +#define _HFI1_EVENT_LMC_CHANGE_BIT 3 +#define _HFI1_EVENT_SL2VL_CHANGE_BIT 4 #define _HFI1_MAX_EVENT_BIT _HFI1_EVENT_SL2VL_CHANGE_BIT -#define HFI1_EVENT_FROZEN (1UL << _HFI1_EVENT_FROZEN_BIT) -#define HFI1_EVENT_LINKDOWN_BIT (1UL << _HFI1_EVENT_LINKDOWN_BIT) -#define HFI1_EVENT_LID_CHANGE_BIT (1UL << _HFI1_EVENT_LID_CHANGE_BIT) -#define HFI1_EVENT_LMC_CHANGE_BIT (1UL << _HFI1_EVENT_LMC_CHANGE_BIT) -#define HFI1_EVENT_SL2VL_CHANGE_BIT (1UL << _HFI1_EVENT_SL2VL_CHANGE_BIT) +#define HFI1_EVENT_FROZEN (1UL << _HFI1_EVENT_FROZEN_BIT) +#define HFI1_EVENT_LINKDOWN (1UL << _HFI1_EVENT_LINKDOWN_BIT) +#define HFI1_EVENT_LID_CHANGE (1UL << _HFI1_EVENT_LID_CHANGE_BIT) +#define HFI1_EVENT_LMC_CHANGE (1UL << _HFI1_EVENT_LMC_CHANGE_BIT) +#define HFI1_EVENT_SL2VL_CHANGE (1UL << _HFI1_EVENT_SL2VL_CHANGE_BIT) /* * These are the status bits readable (in ASCII form, 64bit value) -- cgit v1.2.3 From 1d15cb9ce9a220192f672cd32369f8d3c7d3a89b Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Mon, 16 Nov 2015 06:51:36 +0100 Subject: clk: tegra: Add Tegra210 device tree binding Add a header file that defines the clock numbers for Tegra210. It is meant to be included by device trees so that they can refer to the clocks by symbolic name instead of numeric value. Also add the device tree binding documentation which is largely the same as for earlier generations of Tegra. Extracted from a larger patch by Rhyland Klein . 
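For orientation, an editorial sketch (not part of the patch): the numeric clock IDs defined below are only referenced from device trees, as in the binding example further down; a consumer driver never uses them directly and simply goes through the common clock API. The device pointer and function name here are assumptions.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Grabs the first clock listed in the consumer's "clocks" property
 * (e.g. <&tegra_car TEGRA210_CLK_USB2>) and enables it. */
static int example_enable_first_clock(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}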
Signed-off-by: Thierry Reding --- .../bindings/clock/nvidia,tegra210-car.txt | 56 +++ include/dt-bindings/clock/tegra210-car.h | 401 +++++++++++++++++++++ 2 files changed, 457 insertions(+) create mode 100644 Documentation/devicetree/bindings/clock/nvidia,tegra210-car.txt create mode 100644 include/dt-bindings/clock/tegra210-car.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra210-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra210-car.txt new file mode 100644 index 000000000000..26f237f641b7 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/nvidia,tegra210-car.txt @@ -0,0 +1,56 @@ +NVIDIA Tegra210 Clock And Reset Controller + +This binding uses the common clock binding: +Documentation/devicetree/bindings/clock/clock-bindings.txt + +The CAR (Clock And Reset) Controller on Tegra is the HW module responsible +for muxing and gating Tegra's clocks, and setting their rates. + +Required properties : +- compatible : Should be "nvidia,tegra210-car" +- reg : Should contain CAR registers location and length +- clocks : Should contain phandle and clock specifiers for two clocks: + the 32 KHz "32k_in". +- #clock-cells : Should be 1. + In clock consumers, this cell represents the clock ID exposed by the + CAR. The assignments may be found in header file + . +- #reset-cells : Should be 1. + In clock consumers, this cell represents the bit number in the CAR's + array of CLK_RST_CONTROLLER_RST_DEVICES_* registers. + +Example SoC include file: + +/ { + tegra_car: clock { + compatible = "nvidia,tegra210-car"; + reg = <0x60006000 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + usb@c5004000 { + clocks = <&tegra_car TEGRA210_CLK_USB2>; + }; +}; + +Example board file: + +/ { + clocks { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <0>; + + clk_32k: clock@1 { + compatible = "fixed-clock"; + reg = <1>; + #clock-cells = <0>; + clock-frequency = <32768>; + }; + }; + + &tegra_car { + clocks = <&clk_32k>; + }; +}; diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h new file mode 100644 index 000000000000..6f45aea49e4f --- /dev/null +++ b/include/dt-bindings/clock/tegra210-car.h @@ -0,0 +1,401 @@ +/* + * This header provides constants for binding nvidia,tegra210-car. + * + * The first 224 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 224 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 224 and + * above. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA210_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA210_CAR_H + +/* 0 */ +/* 1 */ +/* 2 */ +#define TEGRA210_CLK_ISPB 3 +#define TEGRA210_CLK_RTC 4 +#define TEGRA210_CLK_TIMER 5 +#define TEGRA210_CLK_UARTA 6 +/* 7 (register bit affects uartb and vfir) */ +#define TEGRA210_CLK_GPIO 8 +#define TEGRA210_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA210_CLK_I2S1 11 +#define TEGRA210_CLK_I2C1 12 +/* 13 */ +#define TEGRA210_CLK_SDMMC1 14 +#define TEGRA210_CLK_SDMMC4 15 +/* 16 */ +#define TEGRA210_CLK_PWM 17 +#define TEGRA210_CLK_I2S2 18 +/* 19 */ +/* 20 (register bit affects vi and vi_sensor) */ +/* 21 */ +#define TEGRA210_CLK_USBD 22 +#define TEGRA210_CLK_ISP 23 +/* 24 */ +/* 25 */ +#define TEGRA210_CLK_DISP2 26 +#define TEGRA210_CLK_DISP1 27 +#define TEGRA210_CLK_HOST1X 28 +/* 29 */ +#define TEGRA210_CLK_I2S0 30 +/* 31 */ + +#define TEGRA210_CLK_MC 32 +#define TEGRA210_CLK_AHBDMA 33 +#define TEGRA210_CLK_APBDMA 34 +/* 35 */ +/* 36 */ +/* 37 */ +#define TEGRA210_CLK_PMC 38 +/* 39 (register bit affects fuse and fuse_burn) */ +#define TEGRA210_CLK_KFUSE 40 +#define TEGRA210_CLK_SBC1 41 +/* 42 */ +/* 43 */ +#define TEGRA210_CLK_SBC2 44 +/* 45 */ +#define TEGRA210_CLK_SBC3 46 +#define TEGRA210_CLK_I2C5 47 +#define TEGRA210_CLK_DSIA 48 +/* 49 */ +/* 50 */ +/* 51 */ +#define TEGRA210_CLK_CSI 52 +/* 53 */ +#define TEGRA210_CLK_I2C2 54 +#define TEGRA210_CLK_UARTC 55 +#define TEGRA210_CLK_MIPI_CAL 56 +#define TEGRA210_CLK_EMC 57 +#define TEGRA210_CLK_USB2 58 +/* 59 */ +/* 60 */ +/* 61 */ +/* 62 */ +#define TEGRA210_CLK_BSEV 63 + +/* 64 */ +#define TEGRA210_CLK_UARTD 65 +/* 66 */ +#define TEGRA210_CLK_I2C3 67 +#define TEGRA210_CLK_SBC4 68 +#define TEGRA210_CLK_SDMMC3 69 +#define TEGRA210_CLK_PCIE 70 +#define TEGRA210_CLK_OWR 71 +#define TEGRA210_CLK_AFI 72 +#define TEGRA210_CLK_CSITE 73 +/* 74 */ +/* 75 */ +/* 76 */ +/* 77 */ +#define TEGRA210_CLK_SOC_THERM 78 +#define TEGRA210_CLK_DTV 79 +/* 80 */ +#define TEGRA210_CLK_I2CSLOW 81 +#define TEGRA210_CLK_DSIB 82 +#define TEGRA210_CLK_TSEC 83 +/* 84 */ +/* 85 */ +/* 86 */ +/* 87 */ +/* 88 */ +#define TEGRA210_CLK_XUSB_HOST 89 +/* 90 */ +/* 91 */ +#define TEGRA210_CLK_CSUS 92 +/* 93 */ +/* 94 */ +/* 95 (bit affects xusb_dev and xusb_dev_src) */ + +/* 96 */ +/* 97 */ +/* 98 */ +#define TEGRA210_CLK_MSELECT 99 +#define TEGRA210_CLK_TSENSOR 100 +#define TEGRA210_CLK_I2S3 101 +#define TEGRA210_CLK_I2S4 102 +#define TEGRA210_CLK_I2C4 103 +/* 104 */ +/* 105 */ +#define TEGRA210_CLK_D_AUDIO 106 +/* 107 ( affects abp -> ape) */ +/* 108 */ +/* 109 */ +/* 110 */ +#define TEGRA210_CLK_HDA2CODEC_2X 111 +/* 112 */ +/* 113 */ +/* 114 */ +/* 115 */ +/* 116 */ +/* 117 */ +#define TEGRA210_CLK_SPDIF_2X 118 +#define TEGRA210_CLK_ACTMON 119 +#define TEGRA210_CLK_EXTERN1 120 +#define TEGRA210_CLK_EXTERN2 121 +#define TEGRA210_CLK_EXTERN3 122 +#define TEGRA210_CLK_SATA_OOB 123 +#define TEGRA210_CLK_SATA 124 +#define TEGRA210_CLK_HDA 125 +/* 126 */ +/* 127 */ + +#define TEGRA210_CLK_HDA2HDMI 128 +/* 129 */ +/* 130 */ +/* 131 */ +/* 132 */ +/* 133 */ +/* 134 */ +/* 135 */ +/* 136 */ +/* 137 */ +/* 138 */ +/* 139 */ +/* 140 */ +/* 141 */ +/* 142 */ +/* (bit affects xusb_falcon_src, xusb_fs_src, xusb_host_src and xusb_ss_src) */ +#define TEGRA210_CLK_XUSB_GATE 143 +#define TEGRA210_CLK_CILAB 144 +#define TEGRA210_CLK_CILCD 145 +#define TEGRA210_CLK_CILE 146 +#define TEGRA210_CLK_DSIALP 147 +#define TEGRA210_CLK_DSIBLP 148 +#define TEGRA210_CLK_ENTROPY 149 +/* 150 */ +/* 151 */ +/* 152 */ +/* 153 */ +/* 154 */ +/* 
155 (bit affects dfll_ref and dfll_soc) */ +#define TEGRA210_CLK_XUSB_SS 156 +/* 157 */ +/* 158 */ +/* 159 */ + +/* 160 */ +#define TEGRA210_CLK_DMIC1 161 +#define TEGRA210_CLK_DMIC2 162 +/* 163 */ +/* 164 */ +/* 165 */ +#define TEGRA210_CLK_I2C6 166 +/* 167 */ +/* 168 */ +/* 169 */ +/* 170 */ +#define TEGRA210_CLK_VIM2_CLK 171 +/* 172 */ +#define TEGRA210_CLK_MIPIBIF 173 +/* 174 */ +/* 175 */ +/* 176 */ +#define TEGRA210_CLK_CLK72MHZ 177 +#define TEGRA210_CLK_VIC03 178 +/* 179 */ +/* 180 */ +#define TEGRA210_CLK_DPAUX 181 +#define TEGRA210_CLK_SOR0 182 +#define TEGRA210_CLK_SOR1 183 +#define TEGRA210_CLK_GPU 184 +#define TEGRA210_CLK_DBGAPB 185 +/* 186 */ +#define TEGRA210_CLK_PLL_P_OUT_ADSP 187 +/* 188 */ +#define TEGRA210_CLK_PLL_G_REF 189 +/* 190 */ +/* 191 */ + +/* 192 */ +#define TEGRA210_CLK_SDMMC_LEGACY 193 +#define TEGRA210_CLK_NVDEC 194 +#define TEGRA210_CLK_NVJPG 195 +/* 196 */ +#define TEGRA210_CLK_DMIC3 197 +#define TEGRA210_CLK_APE 198 +/* 199 */ +/* 200 */ +/* 201 */ +#define TEGRA210_CLK_MAUD 202 +/* 203 */ +/* 204 */ +/* 205 */ +#define TEGRA210_CLK_TSECB 206 +#define TEGRA210_CLK_DPAUX1 207 +#define TEGRA210_CLK_VI_I2C 208 +#define TEGRA210_CLK_HSIC_TRK 209 +#define TEGRA210_CLK_USB2_TRK 210 +#define TEGRA210_CLK_QSPI 211 +#define TEGRA210_CLK_UARTAPE 212 +/* 213 */ +/* 214 */ +/* 215 */ +/* 216 */ +/* 217 */ +/* 218 */ +#define TEGRA210_CLK_NVENC 219 +/* 220 */ +/* 221 */ +#define TEGRA210_CLK_SOR_SAFE 222 +#define TEGRA210_CLK_PLL_P_OUT_CPU 223 + + +#define TEGRA210_CLK_UARTB 224 +#define TEGRA210_CLK_VFIR 225 +#define TEGRA210_CLK_SPDIF_IN 226 +#define TEGRA210_CLK_SPDIF_OUT 227 +#define TEGRA210_CLK_VI 228 +#define TEGRA210_CLK_VI_SENSOR 229 +#define TEGRA210_CLK_FUSE 230 +#define TEGRA210_CLK_FUSE_BURN 231 +#define TEGRA210_CLK_CLK_32K 232 +#define TEGRA210_CLK_CLK_M 233 +#define TEGRA210_CLK_CLK_M_DIV2 234 +#define TEGRA210_CLK_CLK_M_DIV4 235 +#define TEGRA210_CLK_PLL_REF 236 +#define TEGRA210_CLK_PLL_C 237 +#define TEGRA210_CLK_PLL_C_OUT1 238 +#define TEGRA210_CLK_PLL_C2 239 +#define TEGRA210_CLK_PLL_C3 240 +#define TEGRA210_CLK_PLL_M 241 +#define TEGRA210_CLK_PLL_M_OUT1 242 +#define TEGRA210_CLK_PLL_P 243 +#define TEGRA210_CLK_PLL_P_OUT1 244 +#define TEGRA210_CLK_PLL_P_OUT2 245 +#define TEGRA210_CLK_PLL_P_OUT3 246 +#define TEGRA210_CLK_PLL_P_OUT4 247 +#define TEGRA210_CLK_PLL_A 248 +#define TEGRA210_CLK_PLL_A_OUT0 249 +#define TEGRA210_CLK_PLL_D 250 +#define TEGRA210_CLK_PLL_D_OUT0 251 +#define TEGRA210_CLK_PLL_D2 252 +#define TEGRA210_CLK_PLL_D2_OUT0 253 +#define TEGRA210_CLK_PLL_U 254 +#define TEGRA210_CLK_PLL_U_480M 255 + +#define TEGRA210_CLK_PLL_U_60M 256 +#define TEGRA210_CLK_PLL_U_48M 257 +/* 258 */ +#define TEGRA210_CLK_PLL_X 259 +#define TEGRA210_CLK_PLL_X_OUT0 260 +#define TEGRA210_CLK_PLL_RE_VCO 261 +#define TEGRA210_CLK_PLL_RE_OUT 262 +#define TEGRA210_CLK_PLL_E 263 +#define TEGRA210_CLK_SPDIF_IN_SYNC 264 +#define TEGRA210_CLK_I2S0_SYNC 265 +#define TEGRA210_CLK_I2S1_SYNC 266 +#define TEGRA210_CLK_I2S2_SYNC 267 +#define TEGRA210_CLK_I2S3_SYNC 268 +#define TEGRA210_CLK_I2S4_SYNC 269 +#define TEGRA210_CLK_VIMCLK_SYNC 270 +#define TEGRA210_CLK_AUDIO0 271 +#define TEGRA210_CLK_AUDIO1 272 +#define TEGRA210_CLK_AUDIO2 273 +#define TEGRA210_CLK_AUDIO3 274 +#define TEGRA210_CLK_AUDIO4 275 +#define TEGRA210_CLK_SPDIF 276 +#define TEGRA210_CLK_CLK_OUT_1 277 +#define TEGRA210_CLK_CLK_OUT_2 278 +#define TEGRA210_CLK_CLK_OUT_3 279 +#define TEGRA210_CLK_BLINK 280 +/* 281 */ +/* 282 */ +/* 283 */ +#define TEGRA210_CLK_XUSB_HOST_SRC 284 +#define 
TEGRA210_CLK_XUSB_FALCON_SRC 285 +#define TEGRA210_CLK_XUSB_FS_SRC 286 +#define TEGRA210_CLK_XUSB_SS_SRC 287 + +#define TEGRA210_CLK_XUSB_DEV_SRC 288 +#define TEGRA210_CLK_XUSB_DEV 289 +#define TEGRA210_CLK_XUSB_HS_SRC 290 +#define TEGRA210_CLK_SCLK 291 +#define TEGRA210_CLK_HCLK 292 +#define TEGRA210_CLK_PCLK 293 +#define TEGRA210_CLK_CCLK_G 294 +#define TEGRA210_CLK_CCLK_LP 295 +#define TEGRA210_CLK_DFLL_REF 296 +#define TEGRA210_CLK_DFLL_SOC 297 +#define TEGRA210_CLK_VI_SENSOR2 298 +#define TEGRA210_CLK_PLL_P_OUT5 299 +#define TEGRA210_CLK_CML0 300 +#define TEGRA210_CLK_CML1 301 +#define TEGRA210_CLK_PLL_C4 302 +#define TEGRA210_CLK_PLL_DP 303 +#define TEGRA210_CLK_PLL_E_MUX 304 +#define TEGRA210_CLK_PLL_MB 305 +#define TEGRA210_CLK_PLL_A1 306 +#define TEGRA210_CLK_PLL_D_DSI_OUT 307 +#define TEGRA210_CLK_PLL_C4_OUT0 308 +#define TEGRA210_CLK_PLL_C4_OUT1 309 +#define TEGRA210_CLK_PLL_C4_OUT2 310 +#define TEGRA210_CLK_PLL_C4_OUT3 311 +#define TEGRA210_CLK_PLL_U_OUT 312 +#define TEGRA210_CLK_PLL_U_OUT1 313 +#define TEGRA210_CLK_PLL_U_OUT2 314 +#define TEGRA210_CLK_USB2_HSIC_TRK 315 +#define TEGRA210_CLK_PLL_P_OUT_HSIO 316 +#define TEGRA210_CLK_PLL_P_OUT_XUSB 317 +#define TEGRA210_CLK_XUSB_SSP_SRC 318 +/* 319 */ +/* 320 */ +/* 321 */ +/* 322 */ +/* 323 */ +/* 324 */ +/* 325 */ +/* 326 */ +/* 327 */ +/* 328 */ +/* 329 */ +/* 330 */ +/* 331 */ +/* 332 */ +/* 333 */ +/* 334 */ +/* 335 */ +/* 336 */ +/* 337 */ +/* 338 */ +/* 339 */ +/* 340 */ +/* 341 */ +/* 342 */ +/* 343 */ +/* 344 */ +/* 345 */ +/* 346 */ +/* 347 */ +/* 348 */ +/* 349 */ + +#define TEGRA210_CLK_AUDIO0_MUX 350 +#define TEGRA210_CLK_AUDIO1_MUX 351 +#define TEGRA210_CLK_AUDIO2_MUX 352 +#define TEGRA210_CLK_AUDIO3_MUX 353 +#define TEGRA210_CLK_AUDIO4_MUX 354 +#define TEGRA210_CLK_SPDIF_MUX 355 +#define TEGRA210_CLK_CLK_OUT_1_MUX 356 +#define TEGRA210_CLK_CLK_OUT_2_MUX 357 +#define TEGRA210_CLK_CLK_OUT_3_MUX 358 +#define TEGRA210_CLK_DSIA_MUX 359 +#define TEGRA210_CLK_DSIB_MUX 360 +#define TEGRA210_CLK_SOR0_LVDS 361 +#define TEGRA210_CLK_XUSB_SS_DIV2 362 + +#define TEGRA210_CLK_PLL_M_UD 363 +#define TEGRA210_CLK_PLL_C_UD 364 +#define TEGRA210_CLK_SCLK_MUX 365 + +#define TEGRA210_CLK_CLK_MAX 366 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA210_CAR_H */ -- cgit v1.2.3 From c0a13aa6da5da19f9eedb562b226ec585aabdca9 Mon Sep 17 00:00:00 2001 From: Vince Hsu Date: Mon, 13 Jul 2015 13:39:39 +0100 Subject: reset: add of_reset_control_get_by_index() Add of_reset_control_get_by_index() to allow the drivers to get reset device without knowing its name. Signed-off-by: Vince Hsu [jonathanh@nvidia.com: Updated stub function to return -ENOTSUPP instead of -ENOSYS which should only be used for system calls.] Signed-off-by: Jon Hunter Signed-off-by: Philipp Zabel --- drivers/reset/core.c | 40 +++++++++++++++++++++++++++++----------- include/linux/reset.h | 9 +++++++++ 2 files changed, 38 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 7955e00d04d4..81ae17d15480 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -141,27 +141,24 @@ int reset_control_status(struct reset_control *rstc) EXPORT_SYMBOL_GPL(reset_control_status); /** - * of_reset_control_get - Lookup and obtain a reference to a reset controller. + * of_reset_control_get_by_index - Lookup and obtain a reference to a reset + * controller by index. * @node: device to be reset by the controller - * @id: reset line name - * - * Returns a struct reset_control or IS_ERR() condition containing errno. 
+ * @index: index of the reset controller * - * Use of id names is optional. + * This is to be used to perform a list of resets for a device or power domain + * in whatever order. Returns a struct reset_control or IS_ERR() condition + * containing errno. */ -struct reset_control *of_reset_control_get(struct device_node *node, - const char *id) +struct reset_control *of_reset_control_get_by_index(struct device_node *node, + int index) { struct reset_control *rstc = ERR_PTR(-EPROBE_DEFER); struct reset_controller_dev *r, *rcdev; struct of_phandle_args args; - int index = 0; int rstc_id; int ret; - if (id) - index = of_property_match_string(node, - "reset-names", id); ret = of_parse_phandle_with_args(node, "resets", "#reset-cells", index, &args); if (ret) @@ -202,6 +199,27 @@ struct reset_control *of_reset_control_get(struct device_node *node, return rstc; } +EXPORT_SYMBOL_GPL(of_reset_control_get_by_index); + +/** + * of_reset_control_get - Lookup and obtain a reference to a reset controller. + * @node: device to be reset by the controller + * @id: reset line name + * + * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * Use of id names is optional. + */ +struct reset_control *of_reset_control_get(struct device_node *node, + const char *id) +{ + int index = 0; + + if (id) + index = of_property_match_string(node, + "reset-names", id); + return of_reset_control_get_by_index(node, index); +} EXPORT_SYMBOL_GPL(of_reset_control_get); /** diff --git a/include/linux/reset.h b/include/linux/reset.h index 7f65f9cff951..6db74ad3dec7 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -38,6 +38,9 @@ static inline struct reset_control *devm_reset_control_get_optional( struct reset_control *of_reset_control_get(struct device_node *node, const char *id); +struct reset_control *of_reset_control_get_by_index( + struct device_node *node, int index); + #else static inline int reset_control_reset(struct reset_control *rstc) @@ -106,6 +109,12 @@ static inline struct reset_control *of_reset_control_get( return ERR_PTR(-ENOSYS); } +static inline struct reset_control *of_reset_control_get_by_index( + struct device_node *node, int index) +{ + return ERR_PTR(-ENOTSUPP); +} + #endif /* CONFIG_RESET_CONTROLLER */ #endif -- cgit v1.2.3 From c9bfec0032fb7f17ff8707581122d6d9d15051bc Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 26 Oct 2015 10:56:07 +0000 Subject: ARM: STi: Add DT defines for co-processor reset lines Signed-off-by: Lee Jones Signed-off-by: Philipp Zabel --- include/dt-bindings/reset/stih407-resets.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/dt-bindings/reset/stih407-resets.h b/include/dt-bindings/reset/stih407-resets.h index 02d4328fe479..4ab3a1c94958 100644 --- a/include/dt-bindings/reset/stih407-resets.h +++ b/include/dt-bindings/reset/stih407-resets.h @@ -52,6 +52,10 @@ #define STIH407_KEYSCAN_SOFTRESET 26 #define STIH407_USB2_PORT0_SOFTRESET 27 #define STIH407_USB2_PORT1_SOFTRESET 28 +#define STIH407_ST231_AUD_SOFTRESET 29 +#define STIH407_ST231_DMU_SOFTRESET 30 +#define STIH407_ST231_GP0_SOFTRESET 31 +#define STIH407_ST231_GP1_SOFTRESET 32 /* Picophy reset defines */ #define STIH407_PICOPHY0_RESET 0 -- cgit v1.2.3 From 39b4da71ca334354f30941067f214ea2f2b92f3e Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 29 Oct 2015 09:55:00 +0100 Subject: reset: use ENOTSUPP instead of ENOSYS ENOSYS is reserved to report invalid syscalls to userspace. 
Consistently return ENOTSUPP to indicate that the driver doesn't support the functionality or the reset framework is not enabled at all. Signed-off-by: Philipp Zabel --- drivers/reset/core.c | 8 ++++---- include/linux/reset.h | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 77cfc49218c6..9ab929049b9d 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -95,7 +95,7 @@ int reset_control_reset(struct reset_control *rstc) if (rstc->rcdev->ops->reset) return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id); - return -ENOSYS; + return -ENOTSUPP; } EXPORT_SYMBOL_GPL(reset_control_reset); @@ -108,7 +108,7 @@ int reset_control_assert(struct reset_control *rstc) if (rstc->rcdev->ops->assert) return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id); - return -ENOSYS; + return -ENOTSUPP; } EXPORT_SYMBOL_GPL(reset_control_assert); @@ -121,7 +121,7 @@ int reset_control_deassert(struct reset_control *rstc) if (rstc->rcdev->ops->deassert) return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id); - return -ENOSYS; + return -ENOTSUPP; } EXPORT_SYMBOL_GPL(reset_control_deassert); @@ -136,7 +136,7 @@ int reset_control_status(struct reset_control *rstc) if (rstc->rcdev->ops->status) return rstc->rcdev->ops->status(rstc->rcdev, rstc->id); - return -ENOSYS; + return -ENOTSUPP; } EXPORT_SYMBOL_GPL(reset_control_status); diff --git a/include/linux/reset.h b/include/linux/reset.h index 6db74ad3dec7..c4c097de0ba9 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -74,7 +74,7 @@ static inline void reset_control_put(struct reset_control *rstc) static inline int device_reset_optional(struct device *dev) { - return -ENOSYS; + return -ENOTSUPP; } static inline struct reset_control *__must_check reset_control_get( @@ -94,19 +94,19 @@ static inline struct reset_control *__must_check devm_reset_control_get( static inline struct reset_control *reset_control_get_optional( struct device *dev, const char *id) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-ENOTSUPP); } static inline struct reset_control *devm_reset_control_get_optional( struct device *dev, const char *id) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-ENOTSUPP); } static inline struct reset_control *of_reset_control_get( struct device_node *node, const char *id) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-ENOTSUPP); } static inline struct reset_control *of_reset_control_get_by_index( -- cgit v1.2.3 From 4b6c56c2f5d2b24629780a76718c3a836e7bf044 Mon Sep 17 00:00:00 2001 From: Mengdong Lin Date: Fri, 30 Oct 2015 15:13:16 +0800 Subject: ASoC: topology: ABI - Rename dai_elems to pcm_elems in manifest This field is the number of PCM objects (a pair of FE DAI and DAI link). 
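Returning briefly to the reset patches above (an editorial sketch only, not part of any patch here; the function name and the count argument are assumptions), the new by-index lookup lets a driver walk every "resets" entry of its node without knowing the line names:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/reset.h>

/* count would typically come from
 * of_count_phandle_with_args(np, "resets", "#reset-cells"). */
static int example_deassert_all_resets(struct device_node *np, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct reset_control *rst = of_reset_control_get_by_index(np, i);
		int err;

		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* Take the line out of reset, then release the handle. */
		err = reset_control_deassert(rst);
		reset_control_put(rst);
		if (err)
			return err;
	}

	return 0;
}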
Signed-off-by: Lin Mengdong Signed-off-by: Mark Brown --- include/uapi/sound/asoc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 26539a7e4880..c4cc1e40b35c 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -243,7 +243,7 @@ struct snd_soc_tplg_manifest { __le32 control_elems; /* number of control elements */ __le32 widget_elems; /* number of widget elements */ __le32 graph_elems; /* number of graph elements */ - __le32 dai_elems; /* number of DAI elements */ + __le32 pcm_elems; /* number of PCM elements */ __le32 dai_link_elems; /* number of DAI link elements */ struct snd_soc_tplg_private priv; } __attribute__((packed)); -- cgit v1.2.3 From 9e8925b67a809bb27ce4b7d352d67f25cf1d7fc5 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 16 Nov 2015 09:49:34 -0500 Subject: locks: Allow disabling mandatory locking at compile time Mandatory locking appears to be almost unused and buggy and there appears no real interest in doing anything with it. Since effectively no one uses the code and since the code is buggy let's allow it to be disabled at compile time. I would just suggest removing the code but undoubtedly that will break some piece of userspace code somewhere. For the distributions that don't care about this piece of code this gives a nice starting point to make mandatory locking go away. Cc: Benjamin Coddington Cc: Dmitry Vyukov Cc: Jeff Layton Cc: J. Bruce Fields Signed-off-by: "Eric W. Biederman" Signed-off-by: Jeff Layton --- fs/Kconfig | 10 ++++++++ fs/locks.c | 2 ++ fs/namespace.c | 10 ++++++++ include/linux/fs.h | 74 +++++++++++++++++++++++++++++------------------------- 4 files changed, 62 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/fs/Kconfig b/fs/Kconfig index da3f32f1a4e4..59322e6e76f4 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -67,6 +67,16 @@ config FILE_LOCKING for filesystems like NFS and for the flock() system call. Disabling this option saves about 11k. +config MANDATORY_FILE_LOCKING + bool "Enable Mandatory file locking" + depends on FILE_LOCKING + default y + help + This option enables appropriately marked files on appropriately + mounted filesystems to support mandatory locking. + + To the best of my knowledge this is dead code that no one cares about. + source "fs/notify/Kconfig" source "fs/quota/Kconfig" diff --git a/fs/locks.c b/fs/locks.c index 0d2b3267e2a3..86c94674ab22 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1191,6 +1191,7 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl) return error; } +#ifdef CONFIG_MANDATORY_FILE_LOCKING /** * locks_mandatory_locked - Check for an active lock * @file: the file to check @@ -1289,6 +1290,7 @@ int locks_mandatory_area(int read_write, struct inode *inode, } EXPORT_SYMBOL(locks_mandatory_area); +#endif /* CONFIG_MANDATORY_FILE_LOCKING */ static void lease_clear_pending(struct file_lock *fl, int arg) { diff --git a/fs/namespace.c b/fs/namespace.c index 0570729c87fd..4219885e9681 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1584,6 +1584,14 @@ static inline bool may_mount(void) return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); } +static inline bool may_mandlock(void) +{ +#ifndef CONFIG_MANDATORY_FILE_LOCKING + return false; +#endif + return true; +} + /* * Now umount can handle mount points as well as block devices. * This is important for filesystems which use unnamed block devices.
@@ -2677,6 +2685,8 @@ long do_mount(const char *dev_name, const char __user *dir_name, type_page, flags, data_page); if (!retval && !may_mount()) retval = -EPERM; + if (!retval && (flags & MS_MANDLOCK) && !may_mandlock()) + retval = -EPERM; if (retval) goto dput_out; diff --git a/include/linux/fs.h b/include/linux/fs.h index 3aa514254161..cbf08d5c246e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2030,7 +2030,7 @@ extern struct kobject *fs_kobj; #define FLOCK_VERIFY_READ 1 #define FLOCK_VERIFY_WRITE 2 -#ifdef CONFIG_FILE_LOCKING +#ifdef CONFIG_MANDATORY_FILE_LOCKING extern int locks_mandatory_locked(struct file *); extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); @@ -2075,6 +2075,45 @@ static inline int locks_verify_truncate(struct inode *inode, return 0; } +#else /* !CONFIG_MANDATORY_FILE_LOCKING */ + +static inline int locks_mandatory_locked(struct file *file) +{ + return 0; +} + +static inline int locks_mandatory_area(int rw, struct inode *inode, + struct file *filp, loff_t offset, + size_t count) +{ + return 0; +} + +static inline int __mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_locked(struct file *file) +{ + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, struct file *filp, + size_t size) +{ + return 0; +} + +#endif /* CONFIG_MANDATORY_FILE_LOCKING */ + + +#ifdef CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { /* @@ -2136,39 +2175,6 @@ static inline int break_layout(struct inode *inode, bool wait) } #else /* !CONFIG_FILE_LOCKING */ -static inline int locks_mandatory_locked(struct file *file) -{ - return 0; -} - -static inline int locks_mandatory_area(int rw, struct inode *inode, - struct file *filp, loff_t offset, - size_t count) -{ - return 0; -} - -static inline int __mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int locks_verify_locked(struct file *file) -{ - return 0; -} - -static inline int locks_verify_truncate(struct inode *inode, struct file *filp, - size_t size) -{ - return 0; -} - static inline int break_lease(struct inode *inode, unsigned int mode) { return 0; -- cgit v1.2.3 From 4d4142696e18cf30af319031d47bba46853a4605 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Wed, 23 Sep 2015 12:34:30 +0200 Subject: percpu: Remove unneeded return from void function Signed-off-by: Guillaume Gomez Acked-by: Christoph Lameter Signed-off-by: Tejun Heo --- include/linux/percpu-refcount.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 12c9b485beb7..84f542df7ff5 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -116,7 +116,7 @@ void percpu_ref_reinit(struct percpu_ref *ref); */ static inline void percpu_ref_kill(struct percpu_ref *ref) { - return percpu_ref_kill_and_confirm(ref, NULL); + percpu_ref_kill_and_confirm(ref, NULL); } /* -- cgit v1.2.3 From b916b785af99088916a122cb37de1bda3fa7f70e Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 28 Oct 2015 12:32:17 +0000 Subject: drivers/perf: kill armpmu_register Nothing outside of drivers/perf/arm_pmu.c should call armpmu_register any more, so it no longer needs to be in include/linux/perf/arm_pmu.h. 
Additionally, by folding it in to arm_pmu_device_probe we can allow drivers to override struct pmu fields without getting blatted by the armpmu code. This patch folds armpmu_register into arm_pmu_device_probe. The logging to the console is moved to after the PMU is successfully registered with the core perf code. Signed-off-by: Mark Rutland Suggested-by: Will Deacon Cc: Drew Richardson Cc: Pawel Moll Signed-off-by: Will Deacon --- drivers/perf/arm_pmu.c | 15 ++++++--------- include/linux/perf/arm_pmu.h | 2 -- 2 files changed, 6 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index be3755c973e9..166637f2917c 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -551,14 +551,6 @@ static void armpmu_init(struct arm_pmu *armpmu) }; } -int armpmu_register(struct arm_pmu *armpmu, int type) -{ - armpmu_init(armpmu); - pr_info("enabled with %s PMU driver, %d counters available\n", - armpmu->name, armpmu->num_events); - return perf_pmu_register(&armpmu->pmu, armpmu->name, type); -} - /* Set at runtime when we know what CPU type we are. */ static struct arm_pmu *__oprofile_cpu_pmu; @@ -887,6 +879,8 @@ int arm_pmu_device_probe(struct platform_device *pdev, return -ENOMEM; } + armpmu_init(pmu); + if (!__oprofile_cpu_pmu) __oprofile_cpu_pmu = pmu; @@ -912,10 +906,13 @@ int arm_pmu_device_probe(struct platform_device *pdev, if (ret) goto out_free; - ret = armpmu_register(pmu, -1); + ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); if (ret) goto out_destroy; + pr_info("enabled with %s PMU driver, %d counters available\n", + pmu->name, pmu->num_events); + return 0; out_destroy: diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index bfa673bb822d..83b5e34c6580 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -111,8 +111,6 @@ struct arm_pmu { #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) -int armpmu_register(struct arm_pmu *armpmu, int type); - u64 armpmu_event_update(struct perf_event *event); int armpmu_event_set_period(struct perf_event *event); -- cgit v1.2.3 From 18fc93fd64129c96432812cb44f59c963871889b Mon Sep 17 00:00:00 2001 From: Jungseok Lee Date: Wed, 4 Nov 2015 13:26:07 +0000 Subject: percpu: remove PERCPU_ENOUGH_ROOM which is stale definition As pure cleanup, this patch removes PERCPU_ENOUGH_ROOM which is not used any more. That is, no code refers to the definition. Acked-by: Christoph Lameter Signed-off-by: Jungseok Lee Signed-off-by: Tejun Heo --- arch/ia64/include/asm/percpu.h | 2 -- include/linux/percpu.h | 6 ------ 2 files changed, 8 deletions(-) (limited to 'include') diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h index 0ec484d2dcbc..b9295793a5e2 100644 --- a/arch/ia64/include/asm/percpu.h +++ b/arch/ia64/include/asm/percpu.h @@ -6,8 +6,6 @@ * David Mosberger-Tang */ -#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE - #ifdef __ASSEMBLY__ # define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... 
*/ #else /* !__ASSEMBLY__ */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index caebf2a758dc..4bc6dafb703e 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -18,12 +18,6 @@ #define PERCPU_MODULE_RESERVE 0 #endif -#ifndef PERCPU_ENOUGH_ROOM -#define PERCPU_ENOUGH_ROOM \ - (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ - PERCPU_MODULE_RESERVE) -#endif - /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) -- cgit v1.2.3 From 67e9c74b8a873408c27ac9a8e4c1d1c8d72c93ff Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 16 Nov 2015 11:13:34 -0500 Subject: cgroup: replace __DEVEL__sane_behavior with cgroup2 fs type With major controllers - cpu, memory and io - shaping up for the unified hierarchy, cgroup2 is about ready to be, gradually, released into the wild. Replace __DEVEL__sane_behavior flag which was used to select the unified hierarchy with a separate filesystem type "cgroup2" so that unified hierarchy can be mounted as follows. mount -t cgroup2 none $MOUNT_POINT The cgroup2 fs has its own magic number - 0x63677270 ("cgrp"). v2: Assign a different magic number to cgroup2 fs. Signed-off-by: Tejun Heo Acked-by: Li Zefan Cc: Johannes Weiner --- Documentation/cgroups/unified-hierarchy.txt | 6 ++-- include/linux/cgroup-defs.h | 1 - include/uapi/linux/magic.h | 1 + kernel/cgroup.c | 47 ++++++++++++++--------------- 4 files changed, 26 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt index 781b1d475bcf..c1f0e8780960 100644 --- a/Documentation/cgroups/unified-hierarchy.txt +++ b/Documentation/cgroups/unified-hierarchy.txt @@ -94,11 +94,9 @@ the process. 2-1. Mounting -Currently, unified hierarchy can be mounted with the following mount -command. Note that this is still under development and scheduled to -change soon. +Unified hierarchy can be mounted with the following mount command. - mount -t cgroup -o __DEVEL__sane_behavior cgroup $MOUNT_POINT + mount -t cgroup2 none $MOUNT_POINT All controllers which support the unified hierarchy and are not bound to other hierarchies are automatically bound to unified hierarchy and diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 869fd4a3d28e..80e2ae655208 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -66,7 +66,6 @@ enum { /* cgroup_root->flags */ enum { - CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */ CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ }; diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index accb036bbc9c..b283d56c1db9 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -54,6 +54,7 @@ #define SMB_SUPER_MAGIC 0x517B #define CGROUP_SUPER_MAGIC 0x27e0eb +#define CGROUP2_SUPER_MAGIC 0x63677270 #define STACK_END_MAGIC 0x57AC6E9D diff --git a/kernel/cgroup.c b/kernel/cgroup.c index b316debadeb3..af0886262f58 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -211,6 +211,7 @@ static unsigned long have_free_callback __read_mostly; /* Ditto for the can_fork callback. 
*/ static unsigned long have_canfork_callback __read_mostly; +static struct file_system_type cgroup2_fs_type; static struct cftype cgroup_dfl_base_files[]; static struct cftype cgroup_legacy_base_files[]; @@ -1641,10 +1642,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) all_ss = true; continue; } - if (!strcmp(token, "__DEVEL__sane_behavior")) { - opts->flags |= CGRP_ROOT_SANE_BEHAVIOR; - continue; - } if (!strcmp(token, "noprefix")) { opts->flags |= CGRP_ROOT_NOPREFIX; continue; @@ -1711,15 +1708,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) return -ENOENT; } - if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) { - pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n"); - if (nr_opts != 1) { - pr_err("sane_behavior: no other mount options allowed\n"); - return -EINVAL; - } - return 0; - } - /* * If the 'all' option was specified select all the subsystems, * otherwise if 'none', 'name=' and a subsystem name options were @@ -1998,6 +1986,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, int flags, const char *unused_dev_name, void *data) { + bool is_v2 = fs_type == &cgroup2_fs_type; struct super_block *pinned_sb = NULL; struct cgroup_subsys *ss; struct cgroup_root *root; @@ -2014,6 +2003,17 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (!use_task_css_set_links) cgroup_enable_task_cg_lists(); + if (is_v2) { + if (data) { + pr_err("cgroup2: unknown option \"%s\"\n", (char *)data); + return ERR_PTR(-EINVAL); + } + cgrp_dfl_root_visible = true; + root = &cgrp_dfl_root; + cgroup_get(&root->cgrp); + goto out_mount; + } + mutex_lock(&cgroup_mutex); /* First find the desired set of subsystems */ @@ -2021,15 +2021,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (ret) goto out_unlock; - /* look for a matching existing root */ - if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) { - cgrp_dfl_root_visible = true; - root = &cgrp_dfl_root; - cgroup_get(&root->cgrp); - ret = 0; - goto out_unlock; - } - /* * Destruction of cgroup root is asynchronous, so subsystems may * still be dying after the previous unmount. Let's drain the @@ -2140,9 +2131,10 @@ out_free: if (ret) return ERR_PTR(ret); - +out_mount: dentry = kernfs_mount(fs_type, flags, root->kf_root, - CGROUP_SUPER_MAGIC, &new_sb); + is_v2 ? CGROUP2_SUPER_MAGIC : CGROUP_SUPER_MAGIC, + &new_sb); if (IS_ERR(dentry) || !new_sb) cgroup_put(&root->cgrp); @@ -2185,6 +2177,12 @@ static struct file_system_type cgroup_fs_type = { .kill_sb = cgroup_kill_sb, }; +static struct file_system_type cgroup2_fs_type = { + .name = "cgroup2", + .mount = cgroup_mount, + .kill_sb = cgroup_kill_sb, +}; + /** * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy * @task: target task @@ -5315,6 +5313,7 @@ int __init cgroup_init(void) WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup")); WARN_ON(register_filesystem(&cgroup_fs_type)); + WARN_ON(register_filesystem(&cgroup2_fs_type)); WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations)); return 0; -- cgit v1.2.3 From 911918aa7ef6f868c135505b0015e42072c54682 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 2 Nov 2015 14:55:05 -0500 Subject: div64.h: optimize do_div() for power-of-two constant divisors Let's perform the obvious mask and shift operation in this case. On 32-bit targets, gcc is able to do the same thing with a constant divisor that happens to be a power of two i.e. 
it turns the division into an inline shift, but it doesn't hurt to be explicit. Signed-off-by: Nicolas Pitre --- include/asm-generic/div64.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index 8f4e3193342e..5d974683193a 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -32,6 +32,8 @@ #elif BITS_PER_LONG == 32 +#include + extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); /* The unnecessary pointer compare is there @@ -41,7 +43,11 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); uint32_t __base = (base); \ uint32_t __rem; \ (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \ - if (likely(((n) >> 32) == 0)) { \ + if (__builtin_constant_p(__base) && \ + is_power_of_2(__base)) { \ + __rem = (n) & (__base - 1); \ + (n) >>= ilog2(__base); \ + } else if (likely(((n) >> 32) == 0)) { \ __rem = (uint32_t)(n) % __base; \ (n) = (uint32_t)(n) / __base; \ } else \ -- cgit v1.2.3 From 461a5e51060c93f5844113f4be9dba513cc92830 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Fri, 30 Oct 2015 15:36:39 -0400 Subject: do_div(): generic optimization for constant divisor on 32-bit machines 64-by-32-bit divisions are prominent in the kernel, even on 32-bit machines. Luckily, many of them use a constant divisor that allows for a much faster multiplication by the divisor's reciprocal. The compiler already performs this optimization when compiling a 32-by-32 division with a constant divisor. Unfortunately, on 32-bit machines, gcc does not optimize 64-by-32 divisions in that case, except for constant divisors that happen to be a power of 2. Let's avoid the slow path whenever the divisor is constant by manually computing the reciprocal ourselves and performing the multiplication inline. In most cases, this improves performance of 64-by-32 divisions by about two orders of magnitude compared to the __div64_32() fallback, especially on architectures lacking a native div instruction. The algorithm used here comes from the existing ARM code. The __div64_const32_is_OK macro can be predefined by architectures to disable this optimization in some cases. For example, some ancient gcc version on ARM would crash with an ICE when fed this code. Signed-off-by: Nicolas Pitre Acked-by: Alexey Brodkin --- include/asm-generic/div64.h | 147 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) (limited to 'include') diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index 5d974683193a..5a1bf1aff502 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -4,6 +4,9 @@ * Copyright (C) 2003 Bernardo Innocenti * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h * + * Optimization for constant divisors on 32-bit machines: + * Copyright (C) 2006-2015 Nicolas Pitre + * * The semantics of do_div() are: * * uint32_t do_div(uint64_t *n, uint32_t base) @@ -34,6 +37,142 @@ #include +/* + * If the divisor happens to be constant, we determine the appropriate + * inverse at compile time to turn the division into a few inline + * multiplications which ought to be much faster. And yet only if compiling + * with a sufficiently recent gcc version to perform proper 64-bit constant + * propagation. + * + * (It is unfortunate that gcc doesn't perform all this internally.) 
+ */ + +#ifndef __div64_const32_is_OK +#define __div64_const32_is_OK (__GNUC__ >= 4) +#endif + +#define __div64_const32(n, ___b) \ +({ \ + /* \ + * Multiplication by reciprocal of b: n / b = n * (p / b) / p \ + * \ + * We rely on the fact that most of this code gets optimized \ + * away at compile time due to constant propagation and only \ + * a few multiplication instructions should remain. \ + * Hence this monstrous macro (static inline doesn't always \ + * do the trick here). \ + */ \ + uint64_t ___res, ___x, ___t, ___m, ___n = (n); \ + uint32_t ___p, ___bias, ___m_lo, ___m_hi, ___n_lo, ___n_hi; \ + \ + /* determine MSB of b */ \ + ___p = 1 << ilog2(___b); \ + \ + /* compute m = ((p << 64) + b - 1) / b */ \ + ___m = (~0ULL / ___b) * ___p; \ + ___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b; \ + \ + /* one less than the dividend with highest result */ \ + ___x = ~0ULL / ___b * ___b - 1; \ + \ + /* test our ___m with res = m * x / (p << 64) */ \ + ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \ + ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \ + ___res += (___x & 0xffffffff) * (___m >> 32); \ + ___t = (___res < ___t) ? (1ULL << 32) : 0; \ + ___res = (___res >> 32) + ___t; \ + ___res += (___m >> 32) * (___x >> 32); \ + ___res /= ___p; \ + \ + /* Now sanitize and optimize what we've got. */ \ + if (~0ULL % (___b / (___b & -___b)) == 0) { \ + /* special case, can be simplified to ... */ \ + ___n /= (___b & -___b); \ + ___m = ~0ULL / (___b / (___b & -___b)); \ + ___p = 1; \ + ___bias = 1; \ + } else if (___res != ___x / ___b) { \ + /* \ + * We can't get away without a bias to compensate \ + * for bit truncation errors. To avoid it we'd need an \ + * additional bit to represent m which would overflow \ + * a 64-bit variable. \ + * \ + * Instead we do m = p / b and n / b = (n * m + m) / p. \ + */ \ + ___bias = 1; \ + /* Compute m = (p << 64) / b */ \ + ___m = (~0ULL / ___b) * ___p; \ + ___m += ((~0ULL % ___b + 1) * ___p) / ___b; \ + } else { \ + /* \ + * Reduce m / p, and try to clear bit 31 of m when \ + * possible, otherwise that'll need extra overflow \ + * handling later. \ + */ \ + uint32_t ___bits = -(___m & -___m); \ + ___bits |= ___m >> 32; \ + ___bits = (~___bits) << 1; \ + /* \ + * If ___bits == 0 then setting bit 31 is unavoidable. \ + * Simply apply the maximum possible reduction in that \ + * case. Otherwise the MSB of ___bits indicates the \ + * best reduction we should apply. \ + */ \ + if (!___bits) { \ + ___p /= (___m & -___m); \ + ___m /= (___m & -___m); \ + } else { \ + ___p >>= ilog2(___bits); \ + ___m >>= ilog2(___bits); \ + } \ + /* No bias needed. */ \ + ___bias = 0; \ + } \ + \ + /* \ + * Now we have a combination of 2 conditions: \ + * \ + * 1) whether or not we need to apply a bias, and \ + * \ + * 2) whether or not there might be an overflow in the cross \ + * product determined by (___m & ((1 << 63) | (1 << 31))). \ + * \ + * Select the best way to do (m_bias + m * n) / (p << 64). \ + * From now on there will be actual runtime code generated. \ + */ \ + \ + ___m_lo = ___m; \ + ___m_hi = ___m >> 32; \ + ___n_lo = ___n; \ + ___n_hi = ___n >> 32; \ + \ + if (!___bias) { \ + ___res = ((uint64_t)___m_lo * ___n_lo) >> 32; \ + } else if (!(___m & ((1ULL << 63) | (1ULL << 31)))) { \ + ___res = (___m + (uint64_t)___m_lo * ___n_lo) >> 32; \ + } else { \ + ___res = ___m + (uint64_t)___m_lo * ___n_lo; \ + ___t = (___res < ___m) ? 
(1ULL << 32) : 0; \ + ___res = (___res >> 32) + ___t; \ + } \ + \ + if (!(___m & ((1ULL << 63) | (1ULL << 31)))) { \ + ___res += (uint64_t)___m_lo * ___n_hi; \ + ___res += (uint64_t)___m_hi * ___n_lo; \ + ___res >>= 32; \ + } else { \ + ___t = ___res += (uint64_t)___m_lo * ___n_hi; \ + ___res += (uint64_t)___m_hi * ___n_lo; \ + ___t = (___res < ___t) ? (1ULL << 32) : 0; \ + ___res = (___res >> 32) + ___t; \ + } \ + \ + ___res += (uint64_t)___m_hi * ___n_hi; \ + \ + ___res /= ___p; \ +}) + extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); /* The unnecessary pointer compare is there @@ -47,6 +186,14 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); is_power_of_2(__base)) { \ __rem = (n) & (__base - 1); \ (n) >>= ilog2(__base); \ + } else if (__div64_const32_is_OK && \ + __builtin_constant_p(__base) && \ + __base != 0) { \ + uint32_t __res_lo, __n_lo = (n); \ + (n) = __div64_const32(n, __base); \ + /* the remainder can be computed with 32-bit regs */ \ + __res_lo = (n); \ + __rem = __n_lo - __res_lo * __base; \ } else if (likely(((n) >> 32) == 0)) { \ __rem = (uint32_t)(n) % __base; \ (n) = (uint32_t)(n) / __base; \ -- cgit v1.2.3 From f682b27c57aec2f0ca8927f9bb7c267c6165ad5a Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Fri, 30 Oct 2015 17:54:56 -0400 Subject: __div64_const32(): abstract out the actual 128-bit cross product code The default C implementation for the 128-bit cross product is abstracted into the __arch_xprod_64() macro that can be overridden to let architectures provide their own assembly optimized implementation. There are many advantages to an assembly version for this operation. Carry bit handling becomes trivial, and 32-bit shifts may be achieved simply by inverting register pairs on some architectures. This has the potential to be quite faster and use much fewer instructions. Signed-off-by: Nicolas Pitre --- include/asm-generic/div64.h | 81 ++++++++++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index 5a1bf1aff502..408856a9aba1 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -63,7 +63,7 @@ * do the trick here). \ */ \ uint64_t ___res, ___x, ___t, ___m, ___n = (n); \ - uint32_t ___p, ___bias, ___m_lo, ___m_hi, ___n_lo, ___n_hi; \ + uint32_t ___p, ___bias; \ \ /* determine MSB of b */ \ ___p = 1 << ilog2(___b); \ @@ -138,41 +138,62 @@ * 2) whether or not there might be an overflow in the cross \ * product determined by (___m & ((1 << 63) | (1 << 31))). \ * \ - * Select the best way to do (m_bias + m * n) / (p << 64). \ + * Select the best way to do (m_bias + m * n) / (1 << 64). \ * From now on there will be actual runtime code generated. \ */ \ - \ - ___m_lo = ___m; \ - ___m_hi = ___m >> 32; \ - ___n_lo = ___n; \ - ___n_hi = ___n >> 32; \ - \ - if (!___bias) { \ - ___res = ((uint64_t)___m_lo * ___n_lo) >> 32; \ - } else if (!(___m & ((1ULL << 63) | (1ULL << 31)))) { \ - ___res = (___m + (uint64_t)___m_lo * ___n_lo) >> 32; \ - } else { \ - ___res = ___m + (uint64_t)___m_lo * ___n_lo; \ - ___t = (___res < ___m) ? (1ULL << 32) : 0; \ - ___res = (___res >> 32) + ___t; \ - } \ - \ - if (!(___m & ((1ULL << 63) | (1ULL << 31)))) { \ - ___res += (uint64_t)___m_lo * ___n_hi; \ - ___res += (uint64_t)___m_hi * ___n_lo; \ - ___res >>= 32; \ - } else { \ - ___t = ___res += (uint64_t)___m_lo * ___n_hi; \ - ___res += (uint64_t)___m_hi * ___n_lo; \ - ___t = (___res < ___t) ? 
(1ULL << 32) : 0; \ - ___res = (___res >> 32) + ___t; \ - } \ - \ - ___res += (uint64_t)___m_hi * ___n_hi; \ + ___res = __arch_xprod_64(___m, ___n, ___bias); \ \ ___res /= ___p; \ }) +#ifndef __arch_xprod_64 +/* + * Default C implementation for __arch_xprod_64() + * + * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) + * Semantic: retval = ((bias ? m : 0) + m * n) >> 64 + * + * The product is a 128-bit value, scaled down to 64 bits. + * Assuming constant propagation to optimize away unused conditional code. + * Architectures may provide their own optimized assembly implementation. + */ +static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) +{ + uint32_t m_lo = m; + uint32_t m_hi = m >> 32; + uint32_t n_lo = n; + uint32_t n_hi = n >> 32; + uint64_t res, tmp; + + if (!bias) { + res = ((uint64_t)m_lo * n_lo) >> 32; + } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) { + /* there can't be any overflow here */ + res = (m + (uint64_t)m_lo * n_lo) >> 32; + } else { + res = m + (uint64_t)m_lo * n_lo; + tmp = (res < m) ? (1ULL << 32) : 0; + res = (res >> 32) + tmp; + } + + if (!(m & ((1ULL << 63) | (1ULL << 31)))) { + /* there can't be any overflow here */ + res += (uint64_t)m_lo * n_hi; + res += (uint64_t)m_hi * n_lo; + res >>= 32; + } else { + tmp = res += (uint64_t)m_lo * n_hi; + res += (uint64_t)m_hi * n_lo; + tmp = (res < tmp) ? (1ULL << 32) : 0; + res = (res >> 32) + tmp; + } + + res += (uint64_t)m_hi * n_hi; + + return res; +} +#endif + extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); /* The unnecessary pointer compare is there -- cgit v1.2.3 From dce1eb93b19b2a1a441708f51c97c4a554054d00 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 2 Nov 2015 13:02:46 -0500 Subject: __div64_32(): make it overridable at compile time Some architectures may want to override the default implementation at compile time to do things inline. For example, ARM uses a non-standard calling convention for better efficiency in this case. Signed-off-by: Nicolas Pitre --- include/asm-generic/div64.h | 2 ++ lib/div64.c | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index 408856a9aba1..163f77999ea4 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -194,7 +194,9 @@ static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) } #endif +#ifndef __div64_32 extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); +#endif /* The unnecessary pointer compare is there * to check for type safety (n must be 64bit) diff --git a/lib/div64.c b/lib/div64.c index 62a698a432bc..7f345259c32f 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -13,7 +13,8 @@ * * Code generated for this function might be very inefficient * for some CPUs. __div64_32() can be overridden by linking arch-specific - * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. + * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S + * or by defining a preprocessor macro in arch/include/asm/div64.h. 
*/ #include @@ -23,6 +24,7 @@ /* Not needed on 64bit architectures */ #if BITS_PER_LONG == 32 +#ifndef __div64_32 uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) { uint64_t rem = *n; @@ -55,8 +57,8 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) *n = res; return rem; } - EXPORT_SYMBOL(__div64_32); +#endif #ifndef div_s64_rem s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) -- cgit v1.2.3 From b7be755733dc44c72956c91876e5d86c56052a54 Mon Sep 17 00:00:00 2001 From: Alec Leamas Date: Thu, 12 Nov 2015 16:03:00 -0200 Subject: [media] bz#75751: Move internal header file lirc.h to uapi/ The file include/media/lirc.h describes a public interface and should thus be a public header. See kernel bug https://bugzilla.kernel.org/show_bug.cgi?id=75751 which has a manpage describing the interface + an acknowledgment that this info belongs to uapi. Signed-off-by: Mauro Carvalho Chehab --- include/media/lirc.h | 169 +--------------------------------------------- include/uapi/linux/lirc.h | 168 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 169 insertions(+), 168 deletions(-) create mode 100644 include/uapi/linux/lirc.h (limited to 'include') diff --git a/include/media/lirc.h b/include/media/lirc.h index 4b3ab2966b5a..554988c860c1 100644 --- a/include/media/lirc.h +++ b/include/media/lirc.h @@ -1,168 +1 @@ -/* - * lirc.h - linux infrared remote control header file - * last modified 2010/07/13 by Jarod Wilson - */ - -#ifndef _LINUX_LIRC_H -#define _LINUX_LIRC_H - -#include -#include - -#define PULSE_BIT 0x01000000 -#define PULSE_MASK 0x00FFFFFF - -#define LIRC_MODE2_SPACE 0x00000000 -#define LIRC_MODE2_PULSE 0x01000000 -#define LIRC_MODE2_FREQUENCY 0x02000000 -#define LIRC_MODE2_TIMEOUT 0x03000000 - -#define LIRC_VALUE_MASK 0x00FFFFFF -#define LIRC_MODE2_MASK 0xFF000000 - -#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) -#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) -#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) -#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) - -#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK) -#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK) - -#define LIRC_IS_SPACE(val) (LIRC_MODE2(val) == LIRC_MODE2_SPACE) -#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE) -#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY) -#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT) - -/* used heavily by lirc userspace */ -#define lirc_t int - -/*** lirc compatible hardware features ***/ - -#define LIRC_MODE2SEND(x) (x) -#define LIRC_SEND2MODE(x) (x) -#define LIRC_MODE2REC(x) ((x) << 16) -#define LIRC_REC2MODE(x) ((x) >> 16) - -#define LIRC_MODE_RAW 0x00000001 -#define LIRC_MODE_PULSE 0x00000002 -#define LIRC_MODE_MODE2 0x00000004 -#define LIRC_MODE_LIRCCODE 0x00000010 - - -#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) -#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) -#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) -#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) - -#define LIRC_CAN_SEND_MASK 0x0000003f - -#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 -#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 -#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 - -#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) -#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) -#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) -#define 
LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) - -#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) - -#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) -#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) - -#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 -#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 -#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 -#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000 -#define LIRC_CAN_SET_REC_FILTER 0x08000000 - -#define LIRC_CAN_MEASURE_CARRIER 0x02000000 -#define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000 - -#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) -#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) - -#define LIRC_CAN_NOTIFY_DECODE 0x01000000 - -/*** IOCTL commands for lirc driver ***/ - -#define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) - -#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, __u32) -#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, __u32) -#define LIRC_GET_SEND_CARRIER _IOR('i', 0x00000003, __u32) -#define LIRC_GET_REC_CARRIER _IOR('i', 0x00000004, __u32) -#define LIRC_GET_SEND_DUTY_CYCLE _IOR('i', 0x00000005, __u32) -#define LIRC_GET_REC_DUTY_CYCLE _IOR('i', 0x00000006, __u32) -#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, __u32) - -#define LIRC_GET_MIN_TIMEOUT _IOR('i', 0x00000008, __u32) -#define LIRC_GET_MAX_TIMEOUT _IOR('i', 0x00000009, __u32) - -#define LIRC_GET_MIN_FILTER_PULSE _IOR('i', 0x0000000a, __u32) -#define LIRC_GET_MAX_FILTER_PULSE _IOR('i', 0x0000000b, __u32) -#define LIRC_GET_MIN_FILTER_SPACE _IOR('i', 0x0000000c, __u32) -#define LIRC_GET_MAX_FILTER_SPACE _IOR('i', 0x0000000d, __u32) - -/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ -#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32) - -#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, __u32) -#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, __u32) -/* Note: these can reset the according pulse_width */ -#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, __u32) -#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, __u32) -#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, __u32) -#define LIRC_SET_REC_DUTY_CYCLE _IOW('i', 0x00000016, __u32) -#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, __u32) - -/* - * when a timeout != 0 is set the driver will send a - * LIRC_MODE2_TIMEOUT data packet, otherwise LIRC_MODE2_TIMEOUT is - * never sent, timeout is disabled by default - */ -#define LIRC_SET_REC_TIMEOUT _IOW('i', 0x00000018, __u32) - -/* 1 enables, 0 disables timeout reports in MODE2 */ -#define LIRC_SET_REC_TIMEOUT_REPORTS _IOW('i', 0x00000019, __u32) - -/* - * pulses shorter than this are filtered out by hardware (software - * emulation in lirc_dev?) - */ -#define LIRC_SET_REC_FILTER_PULSE _IOW('i', 0x0000001a, __u32) -/* - * spaces shorter than this are filtered out by hardware (software - * emulation in lirc_dev?) 
- */ -#define LIRC_SET_REC_FILTER_SPACE _IOW('i', 0x0000001b, __u32) -/* - * if filter cannot be set independently for pulse/space, this should - * be used - */ -#define LIRC_SET_REC_FILTER _IOW('i', 0x0000001c, __u32) - -/* - * if enabled from the next key press on the driver will send - * LIRC_MODE2_FREQUENCY packets - */ -#define LIRC_SET_MEASURE_CARRIER_MODE _IOW('i', 0x0000001d, __u32) - -/* - * to set a range use - * LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE with the - * lower bound first and later - * LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER with the upper bound - */ - -#define LIRC_SET_REC_DUTY_CYCLE_RANGE _IOW('i', 0x0000001e, __u32) -#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, __u32) - -#define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) - -#define LIRC_SETUP_START _IO('i', 0x00000021) -#define LIRC_SETUP_END _IO('i', 0x00000022) - -#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32) - -#endif +#include diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h new file mode 100644 index 000000000000..4b3ab2966b5a --- /dev/null +++ b/include/uapi/linux/lirc.h @@ -0,0 +1,168 @@ +/* + * lirc.h - linux infrared remote control header file + * last modified 2010/07/13 by Jarod Wilson + */ + +#ifndef _LINUX_LIRC_H +#define _LINUX_LIRC_H + +#include +#include + +#define PULSE_BIT 0x01000000 +#define PULSE_MASK 0x00FFFFFF + +#define LIRC_MODE2_SPACE 0x00000000 +#define LIRC_MODE2_PULSE 0x01000000 +#define LIRC_MODE2_FREQUENCY 0x02000000 +#define LIRC_MODE2_TIMEOUT 0x03000000 + +#define LIRC_VALUE_MASK 0x00FFFFFF +#define LIRC_MODE2_MASK 0xFF000000 + +#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) +#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) +#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) +#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) + +#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK) +#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK) + +#define LIRC_IS_SPACE(val) (LIRC_MODE2(val) == LIRC_MODE2_SPACE) +#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE) +#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY) +#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT) + +/* used heavily by lirc userspace */ +#define lirc_t int + +/*** lirc compatible hardware features ***/ + +#define LIRC_MODE2SEND(x) (x) +#define LIRC_SEND2MODE(x) (x) +#define LIRC_MODE2REC(x) ((x) << 16) +#define LIRC_REC2MODE(x) ((x) >> 16) + +#define LIRC_MODE_RAW 0x00000001 +#define LIRC_MODE_PULSE 0x00000002 +#define LIRC_MODE_MODE2 0x00000004 +#define LIRC_MODE_LIRCCODE 0x00000010 + + +#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) +#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) +#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) +#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) + +#define LIRC_CAN_SEND_MASK 0x0000003f + +#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 +#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 +#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 + +#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) +#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) +#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) +#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) + +#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) + +#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) +#define LIRC_CAN_SET_REC_DUTY_CYCLE 
(LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) + +#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 +#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 +#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 +#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000 +#define LIRC_CAN_SET_REC_FILTER 0x08000000 + +#define LIRC_CAN_MEASURE_CARRIER 0x02000000 +#define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000 + +#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) +#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) + +#define LIRC_CAN_NOTIFY_DECODE 0x01000000 + +/*** IOCTL commands for lirc driver ***/ + +#define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) + +#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, __u32) +#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, __u32) +#define LIRC_GET_SEND_CARRIER _IOR('i', 0x00000003, __u32) +#define LIRC_GET_REC_CARRIER _IOR('i', 0x00000004, __u32) +#define LIRC_GET_SEND_DUTY_CYCLE _IOR('i', 0x00000005, __u32) +#define LIRC_GET_REC_DUTY_CYCLE _IOR('i', 0x00000006, __u32) +#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, __u32) + +#define LIRC_GET_MIN_TIMEOUT _IOR('i', 0x00000008, __u32) +#define LIRC_GET_MAX_TIMEOUT _IOR('i', 0x00000009, __u32) + +#define LIRC_GET_MIN_FILTER_PULSE _IOR('i', 0x0000000a, __u32) +#define LIRC_GET_MAX_FILTER_PULSE _IOR('i', 0x0000000b, __u32) +#define LIRC_GET_MIN_FILTER_SPACE _IOR('i', 0x0000000c, __u32) +#define LIRC_GET_MAX_FILTER_SPACE _IOR('i', 0x0000000d, __u32) + +/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ +#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32) + +#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, __u32) +#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, __u32) +/* Note: these can reset the according pulse_width */ +#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, __u32) +#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, __u32) +#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, __u32) +#define LIRC_SET_REC_DUTY_CYCLE _IOW('i', 0x00000016, __u32) +#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, __u32) + +/* + * when a timeout != 0 is set the driver will send a + * LIRC_MODE2_TIMEOUT data packet, otherwise LIRC_MODE2_TIMEOUT is + * never sent, timeout is disabled by default + */ +#define LIRC_SET_REC_TIMEOUT _IOW('i', 0x00000018, __u32) + +/* 1 enables, 0 disables timeout reports in MODE2 */ +#define LIRC_SET_REC_TIMEOUT_REPORTS _IOW('i', 0x00000019, __u32) + +/* + * pulses shorter than this are filtered out by hardware (software + * emulation in lirc_dev?) + */ +#define LIRC_SET_REC_FILTER_PULSE _IOW('i', 0x0000001a, __u32) +/* + * spaces shorter than this are filtered out by hardware (software + * emulation in lirc_dev?) 
+ */ +#define LIRC_SET_REC_FILTER_SPACE _IOW('i', 0x0000001b, __u32) +/* + * if filter cannot be set independently for pulse/space, this should + * be used + */ +#define LIRC_SET_REC_FILTER _IOW('i', 0x0000001c, __u32) + +/* + * if enabled from the next key press on the driver will send + * LIRC_MODE2_FREQUENCY packets + */ +#define LIRC_SET_MEASURE_CARRIER_MODE _IOW('i', 0x0000001d, __u32) + +/* + * to set a range use + * LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE with the + * lower bound first and later + * LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER with the upper bound + */ + +#define LIRC_SET_REC_DUTY_CYCLE_RANGE _IOW('i', 0x0000001e, __u32) +#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, __u32) + +#define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) + +#define LIRC_SETUP_START _IO('i', 0x00000021) +#define LIRC_SETUP_END _IO('i', 0x00000022) + +#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32) + +#endif -- cgit v1.2.3 From b5dcee225ce972fecb054e104be22b2a6f65303d Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 10 Nov 2015 12:01:44 -0200 Subject: [media] include/media: split I2C headers from V4L2 core Currently, include/media is messy, as it contains both the V4L2 core headers and some driver-specific headers on the same place. That makes harder to identify what core headers should be documented and what headers belong to I2C drivers that are included only by bridge/main drivers that would require the functions provided by them. Let's move those i2c specific files to its own subdirectory. The files to move were produced via the following script: mkdir include/media/i2c (cd include/media; for i in *.h; do n=`echo $i|sed s/.h$/.c/`; if [ -e ../../drivers/media/i2c/$n ]; then echo $i; git mv $i i2c/; fi; done) (cd include/media; for i in *.h; do n=`echo $i|sed s/.h$/.c/`; if [ -e ../../drivers/media/*/i2c/$n ]; then echo $i; git mv $i i2c/; fi; done) for i in include/media/*.h; do n=`basename $i`; (for j in $(git grep -l $n); do dirname $j; done)|sort|uniq|grep -ve '^.$' > list; num=$(wc -l list|cut -d' ' -f1); if [ $num == 1 ]; then if [ "`grep i2c list`" != "" ]; then git mv $i include/media/i2c; fi; fi; done And the references corrected via this script: MAIN_DIR="media/" PREV_DIR="media/" DIRS="i2c/" echo "Checking affected files" >&2 for i in $DIRS; do for j in $(find include/$MAIN_DIR/$i -type f -name '*.h'); do n=`basename $j` git grep -l $n done done|sort|uniq >files && ( echo "Handling files..." >&2; echo "for i in \$(cat files|grep -v Documentation); do cat \$i | \\"; ( cd include/$MAIN_DIR; for j in $DIRS; do for i in $(ls $j); do echo "perl -ne 's,(include [\\\"\\<])$PREV_DIR($i)([\\\"\\>]),\1$MAIN_DIR$j\2\3,; print \$_' |\\"; done; done; echo "cat > a && mv a \$i; done"; ); echo "Handling documentation..." >&2; echo "for i in MAINTAINERS \$(cat files); do cat \$i | \\"; ( cd include/$MAIN_DIR; for j in $DIRS; do for i in $(ls $j); do echo " perl -ne 's,include/$PREV_DIR($i)\b,include/$MAIN_DIR$j\1,; print \$_' |\\"; done; done; echo "cat > a && mv a \$i; done" ); ) >script && . ./script Merged Sakari Ailus patch that moves smiapp.h to include/media/i2c. 
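For drivers and board code the change is purely mechanical: only the include path of the sensor/encoder header moves, while the V4L2 core headers keep their old location. As an illustration (the ov7670 path is taken from the file list below; v4l2-subdev.h is one of the core headers that does not move):

  /* before this patch */
  #include <media/ov7670.h>
  #include <media/v4l2-subdev.h>

  /* after this patch */
  #include <media/i2c/ov7670.h>
  #include <media/v4l2-subdev.h>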
Signed-off-by: Mauro Carvalho Chehab Acked-by: Arnd Bergmann --- MAINTAINERS | 20 +- arch/arm/mach-davinci/board-da850-evm.c | 4 +- arch/arm/mach-davinci/board-dm355-evm.c | 2 +- arch/arm/mach-davinci/board-dm365-evm.c | 4 +- arch/arm/mach-davinci/board-dm644x-evm.c | 2 +- arch/arm/mach-davinci/board-dm646x-evm.c | 4 +- arch/arm/mach-pxa/pcm990-baseboard.c | 2 +- arch/blackfin/mach-bf561/boards/ezkit.c | 2 +- arch/blackfin/mach-bf609/boards/ezkit.c | 6 +- arch/sh/boards/mach-ap325rxa/setup.c | 2 +- arch/sh/boards/mach-ecovec24/setup.c | 6 +- arch/sh/boards/mach-kfr2r09/setup.c | 2 +- arch/sh/boards/mach-migor/setup.c | 4 +- arch/sh/boards/mach-se/7724/setup.c | 2 +- drivers/media/i2c/ad9389b.c | 2 +- drivers/media/i2c/adp1653.c | 2 +- drivers/media/i2c/adv7183.c | 2 +- drivers/media/i2c/adv7343.c | 2 +- drivers/media/i2c/adv7393.c | 2 +- drivers/media/i2c/adv7511.c | 2 +- drivers/media/i2c/adv7604.c | 2 +- drivers/media/i2c/adv7842.c | 2 +- drivers/media/i2c/ak881x.c | 2 +- drivers/media/i2c/as3645a.c | 2 +- drivers/media/i2c/bt819.c | 2 +- drivers/media/i2c/ir-kbd-i2c.c | 2 +- drivers/media/i2c/lm3560.c | 2 +- drivers/media/i2c/lm3646.c | 2 +- drivers/media/i2c/m52790.c | 2 +- drivers/media/i2c/m5mols/m5mols_capture.c | 2 +- drivers/media/i2c/m5mols/m5mols_core.c | 2 +- drivers/media/i2c/msp3400-driver.c | 2 +- drivers/media/i2c/mt9m032.c | 2 +- drivers/media/i2c/mt9p031.c | 2 +- drivers/media/i2c/mt9t001.c | 2 +- drivers/media/i2c/mt9v011.c | 2 +- drivers/media/i2c/mt9v032.c | 2 +- drivers/media/i2c/noon010pc30.c | 2 +- drivers/media/i2c/ov2659.c | 2 +- drivers/media/i2c/ov7670.c | 2 +- drivers/media/i2c/ov9650.c | 2 +- drivers/media/i2c/s5c73m3/s5c73m3-core.c | 2 +- drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c | 2 +- drivers/media/i2c/s5c73m3/s5c73m3.h | 2 +- drivers/media/i2c/s5k4ecgx.c | 2 +- drivers/media/i2c/s5k6aa.c | 2 +- drivers/media/i2c/saa6588.c | 2 +- drivers/media/i2c/saa7115.c | 2 +- drivers/media/i2c/saa7127.c | 2 +- drivers/media/i2c/smiapp/smiapp.h | 2 +- drivers/media/i2c/soc_camera/mt9t112.c | 2 +- drivers/media/i2c/soc_camera/mt9v022.c | 2 +- drivers/media/i2c/soc_camera/ov772x.c | 2 +- drivers/media/i2c/soc_camera/rj54n1cb0c.c | 2 +- drivers/media/i2c/soc_camera/tw9910.c | 2 +- drivers/media/i2c/sr030pc30.c | 2 +- drivers/media/i2c/tc358743.c | 2 +- drivers/media/i2c/ths7303.c | 2 +- drivers/media/i2c/tvaudio.c | 2 +- drivers/media/i2c/tvp514x.c | 2 +- drivers/media/i2c/tvp5150.c | 2 +- drivers/media/i2c/tvp7002.c | 2 +- drivers/media/i2c/uda1342.c | 2 +- drivers/media/i2c/upd64031a.c | 2 +- drivers/media/i2c/upd64083.c | 2 +- drivers/media/i2c/wm8775.c | 2 +- drivers/media/pci/bt8xx/bttv-cards.c | 2 +- drivers/media/pci/bt8xx/bttv-driver.c | 4 +- drivers/media/pci/bt8xx/bttvp.h | 2 +- drivers/media/pci/cobalt/cobalt-driver.c | 6 +- drivers/media/pci/cobalt/cobalt-irq.c | 2 +- drivers/media/pci/cobalt/cobalt-v4l2.c | 4 +- drivers/media/pci/cx18/cx18-cards.c | 2 +- drivers/media/pci/cx18/cx18-driver.h | 2 +- drivers/media/pci/cx88/cx88-alsa.c | 2 +- drivers/media/pci/cx88/cx88-video.c | 2 +- drivers/media/pci/cx88/cx88.h | 4 +- drivers/media/pci/ivtv/ivtv-cards.c | 8 +- drivers/media/pci/ivtv/ivtv-driver.c | 2 +- drivers/media/pci/ivtv/ivtv-driver.h | 2 +- drivers/media/pci/ivtv/ivtv-fileops.c | 2 +- drivers/media/pci/ivtv/ivtv-firmware.c | 2 +- drivers/media/pci/ivtv/ivtv-ioctl.c | 2 +- drivers/media/pci/ivtv/ivtv-routing.c | 6 +- drivers/media/pci/saa7134/saa7134-video.c | 2 +- drivers/media/pci/saa7134/saa7134.h | 2 +- drivers/media/pci/saa7146/mxb.c | 2 +- 
drivers/media/pci/zoran/zoran_card.c | 2 +- drivers/media/platform/marvell-ccic/mcam-core.c | 2 +- drivers/media/platform/via-camera.c | 2 +- drivers/media/usb/cx231xx/cx231xx.h | 2 +- drivers/media/usb/em28xx/em28xx-camera.c | 2 +- drivers/media/usb/em28xx/em28xx-cards.c | 6 +- drivers/media/usb/em28xx/em28xx.h | 2 +- drivers/media/usb/go7007/go7007-usb.c | 4 +- drivers/media/usb/go7007/go7007-v4l2.c | 2 +- drivers/media/usb/hdpvr/hdpvr.h | 2 +- drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h | 2 +- drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c | 2 +- drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c | 2 +- drivers/media/usb/stk1160/stk1160-core.c | 2 +- drivers/media/usb/stk1160/stk1160-v4l.c | 2 +- drivers/media/usb/tm6000/tm6000-cards.c | 2 +- drivers/media/usb/usbvision/usbvision-core.c | 2 +- drivers/media/usb/usbvision/usbvision-video.c | 2 +- include/media/ad9389b.h | 49 ----- include/media/adp1653.h | 128 ------------ include/media/adv7183.h | 47 ----- include/media/adv7343.h | 63 ------ include/media/adv7393.h | 28 --- include/media/adv7511.h | 49 ----- include/media/adv7604.h | 172 ---------------- include/media/adv7842.h | 242 ----------------------- include/media/ak881x.h | 25 --- include/media/as3645a.h | 71 ------- include/media/bt819.h | 36 ---- include/media/cs5345.h | 39 ---- include/media/cs53l32a.h | 34 ---- include/media/i2c/ad9389b.h | 49 +++++ include/media/i2c/adp1653.h | 128 ++++++++++++ include/media/i2c/adv7183.h | 47 +++++ include/media/i2c/adv7343.h | 63 ++++++ include/media/i2c/adv7393.h | 28 +++ include/media/i2c/adv7511.h | 49 +++++ include/media/i2c/adv7604.h | 172 ++++++++++++++++ include/media/i2c/adv7842.h | 242 +++++++++++++++++++++++ include/media/i2c/ak881x.h | 25 +++ include/media/i2c/as3645a.h | 71 +++++++ include/media/i2c/bt819.h | 36 ++++ include/media/i2c/cs5345.h | 39 ++++ include/media/i2c/cs53l32a.h | 34 ++++ include/media/i2c/ir-kbd-i2c.h | 54 +++++ include/media/i2c/lm3560.h | 97 +++++++++ include/media/i2c/lm3646.h | 87 ++++++++ include/media/i2c/m52790.h | 93 +++++++++ include/media/i2c/m5mols.h | 33 ++++ include/media/i2c/mt9m032.h | 36 ++++ include/media/i2c/mt9p031.h | 16 ++ include/media/i2c/mt9t001.h | 9 + include/media/i2c/mt9t112.h | 30 +++ include/media/i2c/mt9v011.h | 17 ++ include/media/i2c/mt9v022.h | 16 ++ include/media/i2c/mt9v032.h | 11 ++ include/media/i2c/noon010pc30.h | 28 +++ include/media/i2c/ov2659.h | 34 ++++ include/media/i2c/ov7670.h | 22 +++ include/media/i2c/ov772x.h | 59 ++++++ include/media/i2c/ov9650.h | 27 +++ include/media/i2c/rj54n1cb0c.h | 19 ++ include/media/i2c/s5c73m3.h | 55 ++++++ include/media/i2c/s5k4ecgx.h | 37 ++++ include/media/i2c/s5k6aa.h | 51 +++++ include/media/i2c/saa6588.h | 42 ++++ include/media/i2c/saa7115.h | 140 +++++++++++++ include/media/i2c/saa7127.h | 40 ++++ include/media/i2c/smiapp.h | 83 ++++++++ include/media/i2c/sr030pc30.h | 21 ++ include/media/i2c/tc358743.h | 131 ++++++++++++ include/media/i2c/ths7303.h | 40 ++++ include/media/i2c/tvaudio.h | 49 +++++ include/media/i2c/tvp514x.h | 111 +++++++++++ include/media/i2c/tvp5150.h | 33 ++++ include/media/i2c/tvp7002.h | 54 +++++ include/media/i2c/tw9910.h | 38 ++++ include/media/i2c/uda1342.h | 29 +++ include/media/i2c/upd64031a.h | 40 ++++ include/media/i2c/upd64083.h | 58 ++++++ include/media/i2c/wm8775.h | 44 +++++ include/media/ir-kbd-i2c.h | 54 ----- include/media/lm3560.h | 97 --------- include/media/lm3646.h | 87 -------- include/media/m52790.h | 93 --------- include/media/m5mols.h | 33 ---- include/media/mt9m032.h | 36 ---- 
include/media/mt9p031.h | 16 -- include/media/mt9t001.h | 9 - include/media/mt9t112.h | 30 --- include/media/mt9v011.h | 17 -- include/media/mt9v022.h | 16 -- include/media/mt9v032.h | 11 -- include/media/noon010pc30.h | 28 --- include/media/ov2659.h | 34 ---- include/media/ov7670.h | 22 --- include/media/ov772x.h | 59 ------ include/media/ov9650.h | 27 --- include/media/rj54n1cb0c.h | 19 -- include/media/s5c73m3.h | 55 ------ include/media/s5k4ecgx.h | 37 ---- include/media/s5k6aa.h | 51 ----- include/media/saa6588.h | 42 ---- include/media/saa7115.h | 141 ------------- include/media/saa7127.h | 41 ---- include/media/smiapp.h | 83 -------- include/media/sr030pc30.h | 21 -- include/media/tc358743.h | 131 ------------ include/media/ths7303.h | 40 ---- include/media/tvaudio.h | 49 ----- include/media/tvp514x.h | 111 ----------- include/media/tvp5150.h | 34 ---- include/media/tvp7002.h | 54 ----- include/media/tw9910.h | 38 ---- include/media/uda1342.h | 29 --- include/media/upd64031a.h | 40 ---- include/media/upd64083.h | 58 ------ include/media/wm8775.h | 44 ----- 205 files changed, 2902 insertions(+), 2905 deletions(-) delete mode 100644 include/media/ad9389b.h delete mode 100644 include/media/adp1653.h delete mode 100644 include/media/adv7183.h delete mode 100644 include/media/adv7343.h delete mode 100644 include/media/adv7393.h delete mode 100644 include/media/adv7511.h delete mode 100644 include/media/adv7604.h delete mode 100644 include/media/adv7842.h delete mode 100644 include/media/ak881x.h delete mode 100644 include/media/as3645a.h delete mode 100644 include/media/bt819.h delete mode 100644 include/media/cs5345.h delete mode 100644 include/media/cs53l32a.h create mode 100644 include/media/i2c/ad9389b.h create mode 100644 include/media/i2c/adp1653.h create mode 100644 include/media/i2c/adv7183.h create mode 100644 include/media/i2c/adv7343.h create mode 100644 include/media/i2c/adv7393.h create mode 100644 include/media/i2c/adv7511.h create mode 100644 include/media/i2c/adv7604.h create mode 100644 include/media/i2c/adv7842.h create mode 100644 include/media/i2c/ak881x.h create mode 100644 include/media/i2c/as3645a.h create mode 100644 include/media/i2c/bt819.h create mode 100644 include/media/i2c/cs5345.h create mode 100644 include/media/i2c/cs53l32a.h create mode 100644 include/media/i2c/ir-kbd-i2c.h create mode 100644 include/media/i2c/lm3560.h create mode 100644 include/media/i2c/lm3646.h create mode 100644 include/media/i2c/m52790.h create mode 100644 include/media/i2c/m5mols.h create mode 100644 include/media/i2c/mt9m032.h create mode 100644 include/media/i2c/mt9p031.h create mode 100644 include/media/i2c/mt9t001.h create mode 100644 include/media/i2c/mt9t112.h create mode 100644 include/media/i2c/mt9v011.h create mode 100644 include/media/i2c/mt9v022.h create mode 100644 include/media/i2c/mt9v032.h create mode 100644 include/media/i2c/noon010pc30.h create mode 100644 include/media/i2c/ov2659.h create mode 100644 include/media/i2c/ov7670.h create mode 100644 include/media/i2c/ov772x.h create mode 100644 include/media/i2c/ov9650.h create mode 100644 include/media/i2c/rj54n1cb0c.h create mode 100644 include/media/i2c/s5c73m3.h create mode 100644 include/media/i2c/s5k4ecgx.h create mode 100644 include/media/i2c/s5k6aa.h create mode 100644 include/media/i2c/saa6588.h create mode 100644 include/media/i2c/saa7115.h create mode 100644 include/media/i2c/saa7127.h create mode 100644 include/media/i2c/smiapp.h create mode 100644 include/media/i2c/sr030pc30.h create mode 100644 
include/media/i2c/tc358743.h create mode 100644 include/media/i2c/ths7303.h create mode 100644 include/media/i2c/tvaudio.h create mode 100644 include/media/i2c/tvp514x.h create mode 100644 include/media/i2c/tvp5150.h create mode 100644 include/media/i2c/tvp7002.h create mode 100644 include/media/i2c/tw9910.h create mode 100644 include/media/i2c/uda1342.h create mode 100644 include/media/i2c/upd64031a.h create mode 100644 include/media/i2c/upd64083.h create mode 100644 include/media/i2c/wm8775.h delete mode 100644 include/media/ir-kbd-i2c.h delete mode 100644 include/media/lm3560.h delete mode 100644 include/media/lm3646.h delete mode 100644 include/media/m52790.h delete mode 100644 include/media/m5mols.h delete mode 100644 include/media/mt9m032.h delete mode 100644 include/media/mt9p031.h delete mode 100644 include/media/mt9t001.h delete mode 100644 include/media/mt9t112.h delete mode 100644 include/media/mt9v011.h delete mode 100644 include/media/mt9v022.h delete mode 100644 include/media/mt9v032.h delete mode 100644 include/media/noon010pc30.h delete mode 100644 include/media/ov2659.h delete mode 100644 include/media/ov7670.h delete mode 100644 include/media/ov772x.h delete mode 100644 include/media/ov9650.h delete mode 100644 include/media/rj54n1cb0c.h delete mode 100644 include/media/s5c73m3.h delete mode 100644 include/media/s5k4ecgx.h delete mode 100644 include/media/s5k6aa.h delete mode 100644 include/media/saa6588.h delete mode 100644 include/media/saa7115.h delete mode 100644 include/media/saa7127.h delete mode 100644 include/media/smiapp.h delete mode 100644 include/media/sr030pc30.h delete mode 100644 include/media/tc358743.h delete mode 100644 include/media/ths7303.h delete mode 100644 include/media/tvaudio.h delete mode 100644 include/media/tvp514x.h delete mode 100644 include/media/tvp5150.h delete mode 100644 include/media/tvp7002.h delete mode 100644 include/media/tw9910.h delete mode 100644 include/media/uda1342.h delete mode 100644 include/media/upd64031a.h delete mode 100644 include/media/upd64083.h delete mode 100644 include/media/wm8775.h (limited to 'include') diff --git a/MAINTAINERS b/MAINTAINERS index e9caa4b28828..a8e3f478d869 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -395,7 +395,7 @@ M: Sakari Ailus L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/adp1653.c -F: include/media/adp1653.h +F: include/media/i2c/adp1653.h ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501) M: Michael Hennerich @@ -1773,7 +1773,7 @@ L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/as3645a.c -F: include/media/as3645a.h +F: include/media/i2c/as3645a.h ASC7621 HARDWARE MONITOR DRIVER M: George Joseph @@ -4596,7 +4596,7 @@ M: Heungjun Kim L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/m5mols/ -F: include/media/m5mols.h +F: include/media/i2c/m5mols.h FUJITSU TABLET EXTRAS M: Robert Gerlach @@ -7169,7 +7169,7 @@ L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/mt9m032.c -F: include/media/mt9m032.h +F: include/media/i2c/mt9m032.h MT9P031 APTINA CAMERA SENSOR M: Laurent Pinchart @@ -7177,7 +7177,7 @@ L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/mt9p031.c -F: include/media/mt9p031.h +F: include/media/i2c/mt9p031.h MT9T001 APTINA CAMERA SENSOR M: Laurent Pinchart @@ -7185,7 +7185,7 @@ L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: 
drivers/media/i2c/mt9t001.c -F: include/media/mt9t001.h +F: include/media/i2c/mt9t001.h MT9V032 APTINA CAMERA SENSOR M: Laurent Pinchart @@ -7194,7 +7194,7 @@ T: git git://linuxtv.org/media_tree.git S: Maintained F: Documentation/devicetree/bindings/media/i2c/mt9v032.txt F: drivers/media/i2c/mt9v032.c -F: include/media/mt9v032.h +F: include/media/i2c/mt9v032.h MULTIFUNCTION DEVICES (MFD) M: Lee Jones @@ -9751,7 +9751,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git S: Maintained F: drivers/media/i2c/ov2659.c -F: include/media/ov2659.h +F: include/media/i2c/ov2659.h SILICON MOTION SM712 FRAME BUFFER DRIVER M: Sudip Mukherjee @@ -9840,7 +9840,7 @@ M: Sakari Ailus L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/smiapp/ -F: include/media/smiapp.h +F: include/media/i2c/smiapp.h F: drivers/media/i2c/smiapp-pll.c F: drivers/media/i2c/smiapp-pll.h F: include/uapi/linux/smiapp.h @@ -10781,7 +10781,7 @@ M: Mats Randgaard L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/tc358743* -F: include/media/tc358743.h +F: include/media/i2c/tc358743.h TMIO MMC DRIVER M: Ian Molton diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 1ed545cc2b83..9cc7b818fbf6 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -49,8 +49,8 @@ #include #include -#include -#include +#include +#include #define DA850_EVM_PHY_ID "davinci_mdio-0:00" #define DA850_LCD_PWR_PIN GPIO_TO_PIN(2, 8) diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index b46b4d25f93e..c71dd9982f03 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index a756003595e9..f073518f621a 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -40,8 +40,8 @@ #include #include -#include -#include +#include +#include #include "davinci.h" diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index bbdd2d614b49..7a20507a3eef 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -26,7 +26,7 @@ #include #include -#include +#include #include #include diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index 846a84ddc28e..ee6ab7e8d3b0 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -25,8 +25,8 @@ #include #include -#include -#include +#include +#include #include #include diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index b71c96f614f9..e3b58cb84c06 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c @@ -27,7 +27,7 @@ #include #include -#include +#include #include #include diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c index 2de71e8c104b..f35525b55819 100644 --- a/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/arch/blackfin/mach-bf561/boards/ezkit.c @@ -443,7 +443,7 @@ static const struct ppi_info ppi_info = { }; #if IS_ENABLED(CONFIG_VIDEO_ADV7183) -#include +#include static struct v4l2_input adv7183_inputs[] = { { .index = 0, diff 
--git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c index 2c61fc0c98f9..c7928d8ebb82 100644 --- a/arch/blackfin/mach-bf609/boards/ezkit.c +++ b/arch/blackfin/mach-bf609/boards/ezkit.c @@ -933,7 +933,7 @@ static struct bfin_capture_config bfin_capture_data = { #endif #if IS_ENABLED(CONFIG_VIDEO_ADV7842) -#include +#include static struct v4l2_input adv7842_inputs[] = { { @@ -1084,7 +1084,7 @@ static const struct ppi_info ppi_info = { }; #if IS_ENABLED(CONFIG_VIDEO_ADV7511) -#include +#include static struct v4l2_output adv7511_outputs[] = { { @@ -1125,7 +1125,7 @@ static struct bfin_display_config bfin_display_data = { #endif #if IS_ENABLED(CONFIG_VIDEO_ADV7343) -#include +#include static struct v4l2_output adv7343_outputs[] = { { diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index cbd2a9f02a91..62b045c6d289 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index d531791f06ff..5fcec7648d52 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -40,8 +40,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -900,7 +900,7 @@ static struct platform_device irda_device = { .resource = irda_resources, }; -#include +#include #include static struct ak881x_pdata ak881x_pdata = { diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index 7d997cec09c5..ec9357333878 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include #include